repository_name
stringclasses
316 values
func_path_in_repository
stringlengths
6
223
func_name
stringlengths
1
134
language
stringclasses
1 value
func_code_string
stringlengths
57
65.5k
func_documentation_string
stringlengths
1
46.3k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
called_functions
listlengths
1
156
enclosing_scope
stringlengths
2
1.48M
saltstack/salt
salt/modules/cp.py
recv
python
def recv(files, dest):
    '''
    Used with salt-cp, pass the files dict, and the destination.

    This function receives small fast copy files from the master via salt-cp.
    It does not work via the CLI.
    '''
    def _resolve_target(src_path):
        # Decide where a given source path should land on the minion.
        # dest names the same file (and is not a dir): write to dest itself.
        if os.path.basename(src_path) == os.path.basename(dest) \
                and not os.path.isdir(dest):
            return dest
        # dest is an existing directory: drop the file inside it.
        if os.path.isdir(dest):
            return os.path.join(dest, os.path.basename(src_path))
        # dest's parent directory exists: create dest as a new file.
        if os.path.isdir(os.path.dirname(dest)):
            return dest
        # No usable location.
        return None

    results = {}
    for src_path, contents in six.iteritems(files):
        target = _resolve_target(src_path)
        if target is None:
            # Abort the whole transfer, matching the original contract of
            # returning a bare message string when dest cannot be used.
            return 'Destination unavailable'
        try:
            with salt.utils.files.fopen(target, 'w+') as fp_:
                fp_.write(contents)
        except IOError:
            results[target] = False
        else:
            results[target] = True

    return results
Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L63-L89
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n" ]
# -*- coding: utf-8 -*- ''' Minion side functions for salt-cp ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import base64 import errno import os import logging import fnmatch # Import salt libs import salt.minion import salt.fileclient import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.path import salt.utils.templates import salt.utils.url import salt.crypt import salt.transport.client from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) __proxyenabled__ = ['*'] def _auth(): ''' Return the auth object ''' if 'auth' not in __context__: __context__['auth'] = salt.crypt.SAuth(__opts__) return __context__['auth'] def _gather_pillar(pillarenv, pillar_override): ''' Whenever a state run starts, gather the pillar data fresh ''' pillar = salt.pillar.get_pillar( __opts__, __grains__, __opts__['id'], __opts__['saltenv'], pillar_override=pillar_override, pillarenv=pillarenv ) ret = pillar.compile_pillar() if pillar_override and isinstance(pillar_override, dict): ret.update(pillar_override) return ret def recv_chunked(dest, chunk, append=False, compressed=True, mode=None): ''' This function receives files copied to the minion using ``salt-cp`` and is not intended to be used directly on the CLI. 
''' if 'retcode' not in __context__: __context__['retcode'] = 0 def _error(msg): __context__['retcode'] = 1 return msg if chunk is None: # dest is an empty dir and needs to be created try: os.makedirs(dest) except OSError as exc: if exc.errno == errno.EEXIST: if os.path.isfile(dest): return 'Path exists and is a file' else: return _error(exc.__str__()) return True chunk = base64.b64decode(chunk) open_mode = 'ab' if append else 'wb' try: fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 except (IOError, OSError) as exc: if exc.errno != errno.ENOENT: # Parent dir does not exist, we need to create it return _error(exc.__str__()) try: os.makedirs(os.path.dirname(dest)) except (IOError, OSError) as makedirs_exc: # Failed to make directory return _error(makedirs_exc.__str__()) fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 try: # Write the chunk to disk fh_.write(salt.utils.gzip_util.uncompress(chunk) if compressed else chunk) except (IOError, OSError) as exc: # Write failed return _error(exc.__str__()) else: # Write successful if not append and mode is not None: # If this is the first chunk we're writing, set the mode #log.debug('Setting mode for %s to %s', dest, oct(mode)) log.debug('Setting mode for %s to %s', dest, mode) try: os.chmod(dest, mode) except OSError: return _error(exc.__str__()) return True finally: try: fh_.close() except AttributeError: pass def _mk_client(): ''' Create a file client and add it to the context. 
Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict ''' if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__: __context__['cp.fileclient_{0}'.format(id(__opts__))] = \ salt.fileclient.get_file_client(__opts__) def _client(): ''' Return a client, hashed by the list of masters ''' _mk_client() return __context__['cp.fileclient_{0}'.format(id(__opts__))] def _render_filenames(path, dest, saltenv, template, **kw): ''' Process markup in the :param:`path` and :param:`dest` variables (NOT the files under the paths they ultimately point to) according to the markup format provided by :param:`template`. ''' if not template: return (path, dest) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ if 'pillarenv' in kw or 'pillar' in kw: pillarenv = kw.get('pillarenv', __opts__.get('pillarenv')) kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar')) else: kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): ''' Render :param:`contents` into a literal pathname by writing it to a temp file, rendering that file, and returning the result. 
''' # write out path to temp file tmp_path_fn = salt.utils.files.mkstemp() with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(contents)) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.files.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: {0}'.format( data['data'] ) ) else: return data['data'] path = _render(path) dest = _render(dest) return (path, dest) def get_file(path, dest, saltenv='base', makedirs=False, template=None, gzip=None, **kwargs): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not hash_file(path, saltenv): return '' else: return _client().get_file( path, dest, makedirs, saltenv, gzip) def envs(): ''' List available environments for fileserver CLI Example .. code-block:: bash salt '*' cp.envs ''' return _client().envs() def get_template(path, dest, template='jinja', saltenv='base', makedirs=False, **kwargs): ''' Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest ''' if 'salt' not in kwargs: kwargs['salt'] = __salt__ if 'pillar' not in kwargs: kwargs['pillar'] = __pillar__ if 'grains' not in kwargs: kwargs['grains'] = __grains__ if 'opts' not in kwargs: kwargs['opts'] = __opts__ return _client().get_template( path, dest, template, makedirs, saltenv, **kwargs) def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): ''' Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) return _client().get_dir(path, dest, saltenv, gzip) def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. 
Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. dest The default behaviour is to write the fetched file to the given destination path. If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error('Unable to fetch file %s from saltenv %s.', salt.utils.url.redact_http_basic_auth(path), saltenv) return result def get_file_str(path, saltenv='base'): ''' Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. code-block:: bash salt '*' cp.get_file_str salt://my/file ''' fn_ = cache_file(path, saltenv) if isinstance(fn_, six.string_types): try: with salt.utils.files.fopen(fn_, 'r') as fp_: return fp_.read() except IOError: return False return fn_ def cache_file(path, saltenv='base', source_hash=None): ''' Used to cache a single file on the Minion Returns the location of the new cached file on the Minion source_hash If ``name`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. 
note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' path = salt.utils.data.decode(path) saltenv = salt.utils.data.decode(saltenv) contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: # Prevent multiple caches in the same salt run. Affects remote URLs # since the master won't know their hash, so the fileclient # wouldn't be able to prevent multiple caches if we try to cache # the remote URL more than once. if os.path.isfile(__context__[contextkey]): return __context__[contextkey] else: # File is in __context__ but no longer exists in the minion # cache, get rid of the context key and re-cache below. # Accounts for corner case where file is removed from minion # cache between cp.cache_file calls in the same salt-run. __context__.pop(contextkey) except AttributeError: pass path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv result = _client().cache_file(path, saltenv, source_hash=source_hash) if not result: log.error( 'Unable to cache file \'%s\' from saltenv \'%s\'.', path, saltenv ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent # multiple caches (see above). __context__[contextkey] = result return result def cache_dest(url, saltenv='base'): ''' .. versionadded:: Neon Returns the expected cache path for the file, if cached using :py:func:`cp.cache_file <salt.modules.cp.cache_file>`. .. note:: This only returns the _expected_ path, it does not tell you if the URL is really cached. To check if the URL is cached, use :py:func:`cp.is_cached <salt.modules.cp.is_cached>` instead. CLI Examples: .. 
code-block:: bash salt '*' cp.cache_dest https://foo.com/bar.rpm salt '*' cp.cache_dest salt://my/file salt '*' cp.cache_dest salt://my/file saltenv=dev ''' return _client().cache_dest(url, saltenv) def cache_files(paths, saltenv='base'): ''' Used to gather many files from the Master, the gathered files will be saved in the minion cachedir reflective to the paths retrieved from the Master CLI Example: .. code-block:: bash salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1 There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the files. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config The querystring method is less useful when all files are being cached from the same environment, but is a good way of caching files from multiple different environments in the same command. For example, the below command will cache the first file from the ``config1`` environment, and the second one from the ``config2`` environment. .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2 .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' return _client().cache_files(paths, saltenv) def cache_dir(path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None): ''' Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. 
versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$' ''' return _client().cache_dir( path, saltenv, include_empty, include_pat, exclude_pat ) def cache_master(saltenv='base'): ''' Retrieve all of the files on the master and cache them locally CLI Example: .. code-block:: bash salt '*' cp.cache_master ''' return _client().cache_master(saltenv) def cache_local_file(path): ''' Cache a local file on the minion in the localfiles cache CLI Example: .. code-block:: bash salt '*' cp.cache_local_file /etc/hosts ''' if not os.path.exists(path): return '' path_cached = is_cached(path) # If the file has already been cached, return the path if path_cached: path_hash = hash_file(path) path_cached_hash = hash_file(path_cached) if path_hash['hsum'] == path_cached_hash['hsum']: return path_cached # The file hasn't been cached or has changed; cache it return _client().cache_local_file(path) def list_states(saltenv='base'): ''' List all of the available state modules in an environment CLI Example: .. code-block:: bash salt '*' cp.list_states ''' return _client().list_states(saltenv) def list_master(saltenv='base', prefix=''): ''' List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ''' return _client().file_list(saltenv, prefix) def list_master_dirs(saltenv='base', prefix=''): ''' List all of the directories stored on the master CLI Example: .. 
code-block:: bash salt '*' cp.list_master_dirs ''' return _client().dir_list(saltenv, prefix) def list_master_symlinks(saltenv='base', prefix=''): ''' List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ''' return _client().symlink_list(saltenv, prefix) def list_minion(saltenv='base'): ''' List all of the files cached on the minion CLI Example: .. code-block:: bash salt '*' cp.list_minion ''' return _client().file_local_list(saltenv) def is_cached(path, saltenv='base'): ''' Return a boolean if the given path on the master has been cached on the minion CLI Example: .. code-block:: bash salt '*' cp.is_cached salt://path/to/file ''' return _client().is_cached(path, saltenv) def hash_file(path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.hash_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv return _client().hash_file(path, saltenv) def stat_file(path, saltenv='base', octal=True): ''' Return the permissions of a file, to get the permissions of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.stat_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv stat = _client().hash_and_stat_file(path, saltenv)[1] if stat is None: return stat return salt.utils.files.st_mode_to_octal(stat[0]) if octal is True else stat[0] def push(path, keep_symlinks=False, upload_path=None, remove_source=False): ''' WARNING Files pushed to the master will have global read permissions.. 
Push a file from the minion up to the master, the file will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``) Since this feature allows a minion to push a file up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. keep_symlinks Keep the path value without resolving its canonical form upload_path Provide a different path inside the master's minion files cachedir remove_source Remove the source file on the minion .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' cp.push /etc/fstab salt '*' cp.push /etc/system-release keep_symlinks=True salt '*' cp.push /etc/fstab upload_path='/new/path/fstab' salt '*' cp.push /tmp/filename remove_source=True ''' log.debug('Trying to copy \'%s\' to master', path) if '../' in path or not os.path.isabs(path): log.debug('Path must be absolute, returning False') return False if not keep_symlinks: path = os.path.realpath(path) if not os.path.isfile(path): log.debug('Path failed os.path.isfile check, returning False') return False auth = _auth() if upload_path: if '../' in upload_path: log.debug('Path must be absolute, returning False') log.debug('Bad path: %s', upload_path) return False load_path = upload_path.lstrip(os.sep) else: load_path = path.lstrip(os.sep) # Normalize the path. 
This does not eliminate # the possibility that relative entries will still be present load_path_normal = os.path.normpath(load_path) # If this is Windows and a drive letter is present, remove it load_path_split_drive = os.path.splitdrive(load_path_normal)[1] # Finally, split the remaining path into a list for delivery to the master load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f] load = {'cmd': '_file_recv', 'id': __opts__['id'], 'path': load_path_list, 'size': os.path.getsize(path), 'tok': auth.gen_token(b'salt')} channel = salt.transport.client.ReqChannel.factory(__opts__) try: with salt.utils.files.fopen(path, 'rb') as fp_: init_send = False while True: load['loc'] = fp_.tell() load['data'] = fp_.read(__opts__['file_buffer_size']) if not load['data'] and init_send: if remove_source: try: salt.utils.files.rm_rf(path) log.debug('Removing source file \'%s\'', path) except IOError: log.error('cp.push failed to remove file \'%s\'', path) return False return True ret = channel.send(load) if not ret: log.error('cp.push Failed transfer failed. Ensure master has ' '\'file_recv\' set to \'True\' and that the file ' 'is not larger than the \'file_recv_size_max\' ' 'setting on the master.') return ret init_send = True finally: channel.close() def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. 
code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in salt.utils.path.os_walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
saltstack/salt
salt/modules/cp.py
recv_chunked
python
def recv_chunked(dest, chunk, append=False, compressed=True, mode=None):
    '''
    This function receives files copied to the minion using ``salt-cp`` and is
    not intended to be used directly on the CLI.

    dest
        Path on the minion to write, or (when ``chunk`` is None) the empty
        directory to create.

    chunk
        Base64-encoded chunk of file data, or None to signal that ``dest`` is
        an empty directory.

    append
        Open ``dest`` in append mode (used for every chunk after the first in
        a multi-chunk transfer).

    compressed
        Whether the decoded chunk is gzip-compressed and must be uncompressed
        before writing.

    mode
        Numeric mode to apply to ``dest`` after the first chunk is written.

    Returns True on success, or an error message string on failure (in which
    case ``__context__['retcode']`` is set to 1).
    '''
    if 'retcode' not in __context__:
        __context__['retcode'] = 0

    def _error(msg):
        # Flag the run as failed and hand the message back to salt-cp.
        __context__['retcode'] = 1
        return msg

    if chunk is None:
        # dest is an empty dir and needs to be created
        try:
            os.makedirs(dest)
        except OSError as exc:
            if exc.errno == errno.EEXIST:
                if os.path.isfile(dest):
                    return 'Path exists and is a file'
            else:
                return _error(exc.__str__())
        return True

    chunk = base64.b64decode(chunk)

    open_mode = 'ab' if append else 'wb'
    try:
        fh_ = salt.utils.files.fopen(dest, open_mode)  # pylint: disable=W8470
    except (IOError, OSError) as exc:
        if exc.errno != errno.ENOENT:
            return _error(exc.__str__())
        # Parent dir does not exist, we need to create it
        try:
            os.makedirs(os.path.dirname(dest))
        except (IOError, OSError) as makedirs_exc:
            # Failed to make directory
            return _error(makedirs_exc.__str__())
        fh_ = salt.utils.files.fopen(dest, open_mode)  # pylint: disable=W8470

    try:
        # Write the chunk to disk
        fh_.write(salt.utils.gzip_util.uncompress(chunk) if compressed
                  else chunk)
    except (IOError, OSError) as exc:
        # Write failed
        return _error(exc.__str__())
    else:
        # Write successful
        if not append and mode is not None:
            # If this is the first chunk we're writing, set the mode
            log.debug('Setting mode for %s to %s', dest, mode)
            try:
                os.chmod(dest, mode)
            except OSError as chmod_exc:
                # BUG FIX: the original returned _error(exc.__str__()) here,
                # but ``exc`` is unbound in this scope — Python 3 deletes the
                # ``except ... as exc`` target when its block exits, and if
                # fopen succeeded on the first try no exception was ever
                # caught — so a chmod failure raised NameError instead of
                # reporting the real error. Bind the chmod exception itself.
                return _error(chmod_exc.__str__())
        return True
    finally:
        try:
            fh_.close()
        except AttributeError:
            # fh_ was never successfully opened
            pass
This function receives files copied to the minion using ``salt-cp`` and is not intended to be used directly on the CLI.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L92-L154
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def uncompress(data):\n buf = BytesIO(data)\n with open_fileobj(buf, 'rb') as igz:\n unc = igz.read()\n return unc\n", "def _error(msg):\n __context__['retcode'] = 1\n return msg\n" ]
# -*- coding: utf-8 -*- ''' Minion side functions for salt-cp ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import base64 import errno import os import logging import fnmatch # Import salt libs import salt.minion import salt.fileclient import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.path import salt.utils.templates import salt.utils.url import salt.crypt import salt.transport.client from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) __proxyenabled__ = ['*'] def _auth(): ''' Return the auth object ''' if 'auth' not in __context__: __context__['auth'] = salt.crypt.SAuth(__opts__) return __context__['auth'] def _gather_pillar(pillarenv, pillar_override): ''' Whenever a state run starts, gather the pillar data fresh ''' pillar = salt.pillar.get_pillar( __opts__, __grains__, __opts__['id'], __opts__['saltenv'], pillar_override=pillar_override, pillarenv=pillarenv ) ret = pillar.compile_pillar() if pillar_override and isinstance(pillar_override, dict): ret.update(pillar_override) return ret def recv(files, dest): ''' Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI. 
''' ret = {} for path, data in six.iteritems(files): if os.path.basename(path) == os.path.basename(dest) \ and not os.path.isdir(dest): final = dest elif os.path.isdir(dest): final = os.path.join(dest, os.path.basename(path)) elif os.path.isdir(os.path.dirname(dest)): final = dest else: return 'Destination unavailable' try: with salt.utils.files.fopen(final, 'w+') as fp_: fp_.write(data) ret[final] = True except IOError: ret[final] = False return ret def _mk_client(): ''' Create a file client and add it to the context. Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict ''' if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__: __context__['cp.fileclient_{0}'.format(id(__opts__))] = \ salt.fileclient.get_file_client(__opts__) def _client(): ''' Return a client, hashed by the list of masters ''' _mk_client() return __context__['cp.fileclient_{0}'.format(id(__opts__))] def _render_filenames(path, dest, saltenv, template, **kw): ''' Process markup in the :param:`path` and :param:`dest` variables (NOT the files under the paths they ultimately point to) according to the markup format provided by :param:`template`. ''' if not template: return (path, dest) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ if 'pillarenv' in kw or 'pillar' in kw: pillarenv = kw.get('pillarenv', __opts__.get('pillarenv')) kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar')) else: kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): ''' Render :param:`contents` into a literal pathname by writing it to a temp file, rendering that file, and returning the result. 
''' # write out path to temp file tmp_path_fn = salt.utils.files.mkstemp() with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(contents)) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.files.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: {0}'.format( data['data'] ) ) else: return data['data'] path = _render(path) dest = _render(dest) return (path, dest) def get_file(path, dest, saltenv='base', makedirs=False, template=None, gzip=None, **kwargs): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not hash_file(path, saltenv): return '' else: return _client().get_file( path, dest, makedirs, saltenv, gzip) def envs(): ''' List available environments for fileserver CLI Example .. code-block:: bash salt '*' cp.envs ''' return _client().envs() def get_template(path, dest, template='jinja', saltenv='base', makedirs=False, **kwargs): ''' Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest ''' if 'salt' not in kwargs: kwargs['salt'] = __salt__ if 'pillar' not in kwargs: kwargs['pillar'] = __pillar__ if 'grains' not in kwargs: kwargs['grains'] = __grains__ if 'opts' not in kwargs: kwargs['opts'] = __opts__ return _client().get_template( path, dest, template, makedirs, saltenv, **kwargs) def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): ''' Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) return _client().get_dir(path, dest, saltenv, gzip) def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. 
Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. dest The default behaviour is to write the fetched file to the given destination path. If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error('Unable to fetch file %s from saltenv %s.', salt.utils.url.redact_http_basic_auth(path), saltenv) return result def get_file_str(path, saltenv='base'): ''' Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. code-block:: bash salt '*' cp.get_file_str salt://my/file ''' fn_ = cache_file(path, saltenv) if isinstance(fn_, six.string_types): try: with salt.utils.files.fopen(fn_, 'r') as fp_: return fp_.read() except IOError: return False return fn_ def cache_file(path, saltenv='base', source_hash=None): ''' Used to cache a single file on the Minion Returns the location of the new cached file on the Minion source_hash If ``name`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. 
note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' path = salt.utils.data.decode(path) saltenv = salt.utils.data.decode(saltenv) contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: # Prevent multiple caches in the same salt run. Affects remote URLs # since the master won't know their hash, so the fileclient # wouldn't be able to prevent multiple caches if we try to cache # the remote URL more than once. if os.path.isfile(__context__[contextkey]): return __context__[contextkey] else: # File is in __context__ but no longer exists in the minion # cache, get rid of the context key and re-cache below. # Accounts for corner case where file is removed from minion # cache between cp.cache_file calls in the same salt-run. __context__.pop(contextkey) except AttributeError: pass path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv result = _client().cache_file(path, saltenv, source_hash=source_hash) if not result: log.error( 'Unable to cache file \'%s\' from saltenv \'%s\'.', path, saltenv ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent # multiple caches (see above). __context__[contextkey] = result return result def cache_dest(url, saltenv='base'): ''' .. versionadded:: Neon Returns the expected cache path for the file, if cached using :py:func:`cp.cache_file <salt.modules.cp.cache_file>`. .. note:: This only returns the _expected_ path, it does not tell you if the URL is really cached. To check if the URL is cached, use :py:func:`cp.is_cached <salt.modules.cp.is_cached>` instead. CLI Examples: .. 
code-block:: bash salt '*' cp.cache_dest https://foo.com/bar.rpm salt '*' cp.cache_dest salt://my/file salt '*' cp.cache_dest salt://my/file saltenv=dev ''' return _client().cache_dest(url, saltenv) def cache_files(paths, saltenv='base'): ''' Used to gather many files from the Master, the gathered files will be saved in the minion cachedir reflective to the paths retrieved from the Master CLI Example: .. code-block:: bash salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1 There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the files. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config The querystring method is less useful when all files are being cached from the same environment, but is a good way of caching files from multiple different environments in the same command. For example, the below command will cache the first file from the ``config1`` environment, and the second one from the ``config2`` environment. .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2 .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' return _client().cache_files(paths, saltenv) def cache_dir(path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None): ''' Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. 
versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$' ''' return _client().cache_dir( path, saltenv, include_empty, include_pat, exclude_pat ) def cache_master(saltenv='base'): ''' Retrieve all of the files on the master and cache them locally CLI Example: .. code-block:: bash salt '*' cp.cache_master ''' return _client().cache_master(saltenv) def cache_local_file(path): ''' Cache a local file on the minion in the localfiles cache CLI Example: .. code-block:: bash salt '*' cp.cache_local_file /etc/hosts ''' if not os.path.exists(path): return '' path_cached = is_cached(path) # If the file has already been cached, return the path if path_cached: path_hash = hash_file(path) path_cached_hash = hash_file(path_cached) if path_hash['hsum'] == path_cached_hash['hsum']: return path_cached # The file hasn't been cached or has changed; cache it return _client().cache_local_file(path) def list_states(saltenv='base'): ''' List all of the available state modules in an environment CLI Example: .. code-block:: bash salt '*' cp.list_states ''' return _client().list_states(saltenv) def list_master(saltenv='base', prefix=''): ''' List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ''' return _client().file_list(saltenv, prefix) def list_master_dirs(saltenv='base', prefix=''): ''' List all of the directories stored on the master CLI Example: .. 
code-block:: bash salt '*' cp.list_master_dirs ''' return _client().dir_list(saltenv, prefix) def list_master_symlinks(saltenv='base', prefix=''): ''' List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ''' return _client().symlink_list(saltenv, prefix) def list_minion(saltenv='base'): ''' List all of the files cached on the minion CLI Example: .. code-block:: bash salt '*' cp.list_minion ''' return _client().file_local_list(saltenv) def is_cached(path, saltenv='base'): ''' Return a boolean if the given path on the master has been cached on the minion CLI Example: .. code-block:: bash salt '*' cp.is_cached salt://path/to/file ''' return _client().is_cached(path, saltenv) def hash_file(path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.hash_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv return _client().hash_file(path, saltenv) def stat_file(path, saltenv='base', octal=True): ''' Return the permissions of a file, to get the permissions of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.stat_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv stat = _client().hash_and_stat_file(path, saltenv)[1] if stat is None: return stat return salt.utils.files.st_mode_to_octal(stat[0]) if octal is True else stat[0] def push(path, keep_symlinks=False, upload_path=None, remove_source=False): ''' WARNING Files pushed to the master will have global read permissions.. 
Push a file from the minion up to the master, the file will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``) Since this feature allows a minion to push a file up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. keep_symlinks Keep the path value without resolving its canonical form upload_path Provide a different path inside the master's minion files cachedir remove_source Remove the source file on the minion .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' cp.push /etc/fstab salt '*' cp.push /etc/system-release keep_symlinks=True salt '*' cp.push /etc/fstab upload_path='/new/path/fstab' salt '*' cp.push /tmp/filename remove_source=True ''' log.debug('Trying to copy \'%s\' to master', path) if '../' in path or not os.path.isabs(path): log.debug('Path must be absolute, returning False') return False if not keep_symlinks: path = os.path.realpath(path) if not os.path.isfile(path): log.debug('Path failed os.path.isfile check, returning False') return False auth = _auth() if upload_path: if '../' in upload_path: log.debug('Path must be absolute, returning False') log.debug('Bad path: %s', upload_path) return False load_path = upload_path.lstrip(os.sep) else: load_path = path.lstrip(os.sep) # Normalize the path. 
This does not eliminate # the possibility that relative entries will still be present load_path_normal = os.path.normpath(load_path) # If this is Windows and a drive letter is present, remove it load_path_split_drive = os.path.splitdrive(load_path_normal)[1] # Finally, split the remaining path into a list for delivery to the master load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f] load = {'cmd': '_file_recv', 'id': __opts__['id'], 'path': load_path_list, 'size': os.path.getsize(path), 'tok': auth.gen_token(b'salt')} channel = salt.transport.client.ReqChannel.factory(__opts__) try: with salt.utils.files.fopen(path, 'rb') as fp_: init_send = False while True: load['loc'] = fp_.tell() load['data'] = fp_.read(__opts__['file_buffer_size']) if not load['data'] and init_send: if remove_source: try: salt.utils.files.rm_rf(path) log.debug('Removing source file \'%s\'', path) except IOError: log.error('cp.push failed to remove file \'%s\'', path) return False return True ret = channel.send(load) if not ret: log.error('cp.push Failed transfer failed. Ensure master has ' '\'file_recv\' set to \'True\' and that the file ' 'is not larger than the \'file_recv_size_max\' ' 'setting on the master.') return ret init_send = True finally: channel.close() def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. 
code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in salt.utils.path.os_walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
saltstack/salt
salt/modules/cp.py
_mk_client
python
def _mk_client(): ''' Create a file client and add it to the context. Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict ''' if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__: __context__['cp.fileclient_{0}'.format(id(__opts__))] = \ salt.fileclient.get_file_client(__opts__)
Create a file client and add it to the context. Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L157-L167
null
# -*- coding: utf-8 -*- ''' Minion side functions for salt-cp ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import base64 import errno import os import logging import fnmatch # Import salt libs import salt.minion import salt.fileclient import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.path import salt.utils.templates import salt.utils.url import salt.crypt import salt.transport.client from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) __proxyenabled__ = ['*'] def _auth(): ''' Return the auth object ''' if 'auth' not in __context__: __context__['auth'] = salt.crypt.SAuth(__opts__) return __context__['auth'] def _gather_pillar(pillarenv, pillar_override): ''' Whenever a state run starts, gather the pillar data fresh ''' pillar = salt.pillar.get_pillar( __opts__, __grains__, __opts__['id'], __opts__['saltenv'], pillar_override=pillar_override, pillarenv=pillarenv ) ret = pillar.compile_pillar() if pillar_override and isinstance(pillar_override, dict): ret.update(pillar_override) return ret def recv(files, dest): ''' Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI. 
''' ret = {} for path, data in six.iteritems(files): if os.path.basename(path) == os.path.basename(dest) \ and not os.path.isdir(dest): final = dest elif os.path.isdir(dest): final = os.path.join(dest, os.path.basename(path)) elif os.path.isdir(os.path.dirname(dest)): final = dest else: return 'Destination unavailable' try: with salt.utils.files.fopen(final, 'w+') as fp_: fp_.write(data) ret[final] = True except IOError: ret[final] = False return ret def recv_chunked(dest, chunk, append=False, compressed=True, mode=None): ''' This function receives files copied to the minion using ``salt-cp`` and is not intended to be used directly on the CLI. ''' if 'retcode' not in __context__: __context__['retcode'] = 0 def _error(msg): __context__['retcode'] = 1 return msg if chunk is None: # dest is an empty dir and needs to be created try: os.makedirs(dest) except OSError as exc: if exc.errno == errno.EEXIST: if os.path.isfile(dest): return 'Path exists and is a file' else: return _error(exc.__str__()) return True chunk = base64.b64decode(chunk) open_mode = 'ab' if append else 'wb' try: fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 except (IOError, OSError) as exc: if exc.errno != errno.ENOENT: # Parent dir does not exist, we need to create it return _error(exc.__str__()) try: os.makedirs(os.path.dirname(dest)) except (IOError, OSError) as makedirs_exc: # Failed to make directory return _error(makedirs_exc.__str__()) fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 try: # Write the chunk to disk fh_.write(salt.utils.gzip_util.uncompress(chunk) if compressed else chunk) except (IOError, OSError) as exc: # Write failed return _error(exc.__str__()) else: # Write successful if not append and mode is not None: # If this is the first chunk we're writing, set the mode #log.debug('Setting mode for %s to %s', dest, oct(mode)) log.debug('Setting mode for %s to %s', dest, mode) try: os.chmod(dest, mode) except OSError: return 
_error(exc.__str__()) return True finally: try: fh_.close() except AttributeError: pass def _client(): ''' Return a client, hashed by the list of masters ''' _mk_client() return __context__['cp.fileclient_{0}'.format(id(__opts__))] def _render_filenames(path, dest, saltenv, template, **kw): ''' Process markup in the :param:`path` and :param:`dest` variables (NOT the files under the paths they ultimately point to) according to the markup format provided by :param:`template`. ''' if not template: return (path, dest) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ if 'pillarenv' in kw or 'pillar' in kw: pillarenv = kw.get('pillarenv', __opts__.get('pillarenv')) kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar')) else: kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): ''' Render :param:`contents` into a literal pathname by writing it to a temp file, rendering that file, and returning the result. ''' # write out path to temp file tmp_path_fn = salt.utils.files.mkstemp() with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(contents)) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.files.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: {0}'.format( data['data'] ) ) else: return data['data'] path = _render(path) dest = _render(dest) return (path, dest) def get_file(path, dest, saltenv='base', makedirs=False, template=None, gzip=None, **kwargs): ''' .. 
versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not hash_file(path, saltenv): return '' else: return _client().get_file( path, dest, makedirs, saltenv, gzip) def envs(): ''' List available environments for fileserver CLI Example .. 
code-block:: bash salt '*' cp.envs ''' return _client().envs() def get_template(path, dest, template='jinja', saltenv='base', makedirs=False, **kwargs): ''' Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest ''' if 'salt' not in kwargs: kwargs['salt'] = __salt__ if 'pillar' not in kwargs: kwargs['pillar'] = __pillar__ if 'grains' not in kwargs: kwargs['grains'] = __grains__ if 'opts' not in kwargs: kwargs['opts'] = __opts__ return _client().get_template( path, dest, template, makedirs, saltenv, **kwargs) def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): ''' Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) return _client().get_dir(path, dest, saltenv, gzip) def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. dest The default behaviour is to write the fetched file to the given destination path. 
If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error('Unable to fetch file %s from saltenv %s.', salt.utils.url.redact_http_basic_auth(path), saltenv) return result def get_file_str(path, saltenv='base'): ''' Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. 
code-block:: bash salt '*' cp.get_file_str salt://my/file ''' fn_ = cache_file(path, saltenv) if isinstance(fn_, six.string_types): try: with salt.utils.files.fopen(fn_, 'r') as fp_: return fp_.read() except IOError: return False return fn_ def cache_file(path, saltenv='base', source_hash=None): ''' Used to cache a single file on the Minion Returns the location of the new cached file on the Minion source_hash If ``name`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' path = salt.utils.data.decode(path) saltenv = salt.utils.data.decode(saltenv) contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: # Prevent multiple caches in the same salt run. Affects remote URLs # since the master won't know their hash, so the fileclient # wouldn't be able to prevent multiple caches if we try to cache # the remote URL more than once. 
if os.path.isfile(__context__[contextkey]): return __context__[contextkey] else: # File is in __context__ but no longer exists in the minion # cache, get rid of the context key and re-cache below. # Accounts for corner case where file is removed from minion # cache between cp.cache_file calls in the same salt-run. __context__.pop(contextkey) except AttributeError: pass path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv result = _client().cache_file(path, saltenv, source_hash=source_hash) if not result: log.error( 'Unable to cache file \'%s\' from saltenv \'%s\'.', path, saltenv ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent # multiple caches (see above). __context__[contextkey] = result return result def cache_dest(url, saltenv='base'): ''' .. versionadded:: Neon Returns the expected cache path for the file, if cached using :py:func:`cp.cache_file <salt.modules.cp.cache_file>`. .. note:: This only returns the _expected_ path, it does not tell you if the URL is really cached. To check if the URL is cached, use :py:func:`cp.is_cached <salt.modules.cp.is_cached>` instead. CLI Examples: .. code-block:: bash salt '*' cp.cache_dest https://foo.com/bar.rpm salt '*' cp.cache_dest salt://my/file salt '*' cp.cache_dest salt://my/file saltenv=dev ''' return _client().cache_dest(url, saltenv) def cache_files(paths, saltenv='base'): ''' Used to gather many files from the Master, the gathered files will be saved in the minion cachedir reflective to the paths retrieved from the Master CLI Example: .. code-block:: bash salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1 There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the files. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config The querystring method is less useful when all files are being cached from the same environment, but is a good way of caching files from multiple different environments in the same command. For example, the below command will cache the first file from the ``config1`` environment, and the second one from the ``config2`` environment. .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2 .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' return _client().cache_files(paths, saltenv) def cache_dir(path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None): ''' Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$' ''' return _client().cache_dir( path, saltenv, include_empty, include_pat, exclude_pat ) def cache_master(saltenv='base'): ''' Retrieve all of the files on the master and cache them locally CLI Example: .. 
code-block:: bash salt '*' cp.cache_master ''' return _client().cache_master(saltenv) def cache_local_file(path): ''' Cache a local file on the minion in the localfiles cache CLI Example: .. code-block:: bash salt '*' cp.cache_local_file /etc/hosts ''' if not os.path.exists(path): return '' path_cached = is_cached(path) # If the file has already been cached, return the path if path_cached: path_hash = hash_file(path) path_cached_hash = hash_file(path_cached) if path_hash['hsum'] == path_cached_hash['hsum']: return path_cached # The file hasn't been cached or has changed; cache it return _client().cache_local_file(path) def list_states(saltenv='base'): ''' List all of the available state modules in an environment CLI Example: .. code-block:: bash salt '*' cp.list_states ''' return _client().list_states(saltenv) def list_master(saltenv='base', prefix=''): ''' List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ''' return _client().file_list(saltenv, prefix) def list_master_dirs(saltenv='base', prefix=''): ''' List all of the directories stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_dirs ''' return _client().dir_list(saltenv, prefix) def list_master_symlinks(saltenv='base', prefix=''): ''' List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ''' return _client().symlink_list(saltenv, prefix) def list_minion(saltenv='base'): ''' List all of the files cached on the minion CLI Example: .. code-block:: bash salt '*' cp.list_minion ''' return _client().file_local_list(saltenv) def is_cached(path, saltenv='base'): ''' Return a boolean if the given path on the master has been cached on the minion CLI Example: .. 
code-block:: bash salt '*' cp.is_cached salt://path/to/file ''' return _client().is_cached(path, saltenv) def hash_file(path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.hash_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv return _client().hash_file(path, saltenv) def stat_file(path, saltenv='base', octal=True): ''' Return the permissions of a file, to get the permissions of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.stat_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv stat = _client().hash_and_stat_file(path, saltenv)[1] if stat is None: return stat return salt.utils.files.st_mode_to_octal(stat[0]) if octal is True else stat[0] def push(path, keep_symlinks=False, upload_path=None, remove_source=False): ''' WARNING Files pushed to the master will have global read permissions.. Push a file from the minion up to the master, the file will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``) Since this feature allows a minion to push a file up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. keep_symlinks Keep the path value without resolving its canonical form upload_path Provide a different path inside the master's minion files cachedir remove_source Remove the source file on the minion .. versionadded:: 2016.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.push /etc/fstab salt '*' cp.push /etc/system-release keep_symlinks=True salt '*' cp.push /etc/fstab upload_path='/new/path/fstab' salt '*' cp.push /tmp/filename remove_source=True ''' log.debug('Trying to copy \'%s\' to master', path) if '../' in path or not os.path.isabs(path): log.debug('Path must be absolute, returning False') return False if not keep_symlinks: path = os.path.realpath(path) if not os.path.isfile(path): log.debug('Path failed os.path.isfile check, returning False') return False auth = _auth() if upload_path: if '../' in upload_path: log.debug('Path must be absolute, returning False') log.debug('Bad path: %s', upload_path) return False load_path = upload_path.lstrip(os.sep) else: load_path = path.lstrip(os.sep) # Normalize the path. This does not eliminate # the possibility that relative entries will still be present load_path_normal = os.path.normpath(load_path) # If this is Windows and a drive letter is present, remove it load_path_split_drive = os.path.splitdrive(load_path_normal)[1] # Finally, split the remaining path into a list for delivery to the master load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f] load = {'cmd': '_file_recv', 'id': __opts__['id'], 'path': load_path_list, 'size': os.path.getsize(path), 'tok': auth.gen_token(b'salt')} channel = salt.transport.client.ReqChannel.factory(__opts__) try: with salt.utils.files.fopen(path, 'rb') as fp_: init_send = False while True: load['loc'] = fp_.tell() load['data'] = fp_.read(__opts__['file_buffer_size']) if not load['data'] and init_send: if remove_source: try: salt.utils.files.rm_rf(path) log.debug('Removing source file \'%s\'', path) except IOError: log.error('cp.push failed to remove file \'%s\'', path) return False return True ret = channel.send(load) if not ret: log.error('cp.push Failed transfer failed. 
Ensure master has ' '\'file_recv\' set to \'True\' and that the file ' 'is not larger than the \'file_recv_size_max\' ' 'setting on the master.') return ret init_send = True finally: channel.close() def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in salt.utils.path.os_walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
saltstack/salt
salt/modules/cp.py
get_file
python
def get_file(path, dest, saltenv='base', makedirs=False, template=None, gzip=None, **kwargs): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not hash_file(path, saltenv): return '' else: return _client().get_file( path, dest, makedirs, saltenv, gzip)
.. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L235-L301
[ "def hash_file(path, saltenv='base'):\n '''\n Return the hash of a file, to get the hash of a file on the\n salt master file server prepend the path with salt://<file on server>\n otherwise, prepend the file with / for a local file.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' cp.hash_file salt://path/to/file\n '''\n path, senv = salt.utils.url.split_env(path)\n if senv:\n saltenv = senv\n\n return _client().hash_file(path, saltenv)\n", "def _client():\n '''\n Return a client, hashed by the list of masters\n '''\n _mk_client()\n return __context__['cp.fileclient_{0}'.format(id(__opts__))]\n", "def _render_filenames(path, dest, saltenv, template, **kw):\n '''\n Process markup in the :param:`path` and :param:`dest` variables (NOT the\n files under the paths they ultimately point to) according to the markup\n format provided by :param:`template`.\n '''\n if not template:\n return (path, dest)\n\n # render the path as a template using path_template_engine as the engine\n if template not in salt.utils.templates.TEMPLATE_REGISTRY:\n raise CommandExecutionError(\n 'Attempted to render file paths with unavailable engine '\n '{0}'.format(template)\n )\n\n kwargs = {}\n kwargs['salt'] = __salt__\n if 'pillarenv' in kw or 'pillar' in kw:\n pillarenv = kw.get('pillarenv', __opts__.get('pillarenv'))\n kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar'))\n else:\n kwargs['pillar'] = __pillar__\n kwargs['grains'] = __grains__\n kwargs['opts'] = __opts__\n kwargs['saltenv'] = saltenv\n\n def _render(contents):\n '''\n Render :param:`contents` into a literal pathname by writing it to a\n temp file, rendering that file, and returning the result.\n '''\n # write out path to temp file\n tmp_path_fn = salt.utils.files.mkstemp()\n with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_:\n fp_.write(salt.utils.stringutils.to_str(contents))\n data = salt.utils.templates.TEMPLATE_REGISTRY[template](\n tmp_path_fn,\n to_str=True,\n **kwargs\n )\n 
salt.utils.files.safe_rm(tmp_path_fn)\n if not data['result']:\n # Failed to render the template\n raise CommandExecutionError(\n 'Failed to render file path with error: {0}'.format(\n data['data']\n )\n )\n else:\n return data['data']\n\n path = _render(path)\n dest = _render(dest)\n return (path, dest)\n", "def split_env(url):\n '''\n remove the saltenv query parameter from a 'salt://' url\n '''\n if not url.startswith('salt://'):\n return url, None\n\n path, senv = parse(url)\n return create(path), senv\n" ]
# -*- coding: utf-8 -*- ''' Minion side functions for salt-cp ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import base64 import errno import os import logging import fnmatch # Import salt libs import salt.minion import salt.fileclient import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.path import salt.utils.templates import salt.utils.url import salt.crypt import salt.transport.client from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) __proxyenabled__ = ['*'] def _auth(): ''' Return the auth object ''' if 'auth' not in __context__: __context__['auth'] = salt.crypt.SAuth(__opts__) return __context__['auth'] def _gather_pillar(pillarenv, pillar_override): ''' Whenever a state run starts, gather the pillar data fresh ''' pillar = salt.pillar.get_pillar( __opts__, __grains__, __opts__['id'], __opts__['saltenv'], pillar_override=pillar_override, pillarenv=pillarenv ) ret = pillar.compile_pillar() if pillar_override and isinstance(pillar_override, dict): ret.update(pillar_override) return ret def recv(files, dest): ''' Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI. 
''' ret = {} for path, data in six.iteritems(files): if os.path.basename(path) == os.path.basename(dest) \ and not os.path.isdir(dest): final = dest elif os.path.isdir(dest): final = os.path.join(dest, os.path.basename(path)) elif os.path.isdir(os.path.dirname(dest)): final = dest else: return 'Destination unavailable' try: with salt.utils.files.fopen(final, 'w+') as fp_: fp_.write(data) ret[final] = True except IOError: ret[final] = False return ret def recv_chunked(dest, chunk, append=False, compressed=True, mode=None): ''' This function receives files copied to the minion using ``salt-cp`` and is not intended to be used directly on the CLI. ''' if 'retcode' not in __context__: __context__['retcode'] = 0 def _error(msg): __context__['retcode'] = 1 return msg if chunk is None: # dest is an empty dir and needs to be created try: os.makedirs(dest) except OSError as exc: if exc.errno == errno.EEXIST: if os.path.isfile(dest): return 'Path exists and is a file' else: return _error(exc.__str__()) return True chunk = base64.b64decode(chunk) open_mode = 'ab' if append else 'wb' try: fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 except (IOError, OSError) as exc: if exc.errno != errno.ENOENT: # Parent dir does not exist, we need to create it return _error(exc.__str__()) try: os.makedirs(os.path.dirname(dest)) except (IOError, OSError) as makedirs_exc: # Failed to make directory return _error(makedirs_exc.__str__()) fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 try: # Write the chunk to disk fh_.write(salt.utils.gzip_util.uncompress(chunk) if compressed else chunk) except (IOError, OSError) as exc: # Write failed return _error(exc.__str__()) else: # Write successful if not append and mode is not None: # If this is the first chunk we're writing, set the mode #log.debug('Setting mode for %s to %s', dest, oct(mode)) log.debug('Setting mode for %s to %s', dest, mode) try: os.chmod(dest, mode) except OSError: return 
_error(exc.__str__()) return True finally: try: fh_.close() except AttributeError: pass def _mk_client(): ''' Create a file client and add it to the context. Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict ''' if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__: __context__['cp.fileclient_{0}'.format(id(__opts__))] = \ salt.fileclient.get_file_client(__opts__) def _client(): ''' Return a client, hashed by the list of masters ''' _mk_client() return __context__['cp.fileclient_{0}'.format(id(__opts__))] def _render_filenames(path, dest, saltenv, template, **kw): ''' Process markup in the :param:`path` and :param:`dest` variables (NOT the files under the paths they ultimately point to) according to the markup format provided by :param:`template`. ''' if not template: return (path, dest) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ if 'pillarenv' in kw or 'pillar' in kw: pillarenv = kw.get('pillarenv', __opts__.get('pillarenv')) kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar')) else: kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): ''' Render :param:`contents` into a literal pathname by writing it to a temp file, rendering that file, and returning the result. 
''' # write out path to temp file tmp_path_fn = salt.utils.files.mkstemp() with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(contents)) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.files.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: {0}'.format( data['data'] ) ) else: return data['data'] path = _render(path) dest = _render(dest) return (path, dest) def envs(): ''' List available environments for fileserver CLI Example .. code-block:: bash salt '*' cp.envs ''' return _client().envs() def get_template(path, dest, template='jinja', saltenv='base', makedirs=False, **kwargs): ''' Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest ''' if 'salt' not in kwargs: kwargs['salt'] = __salt__ if 'pillar' not in kwargs: kwargs['pillar'] = __pillar__ if 'grains' not in kwargs: kwargs['grains'] = __grains__ if 'opts' not in kwargs: kwargs['opts'] = __opts__ return _client().get_template( path, dest, template, makedirs, saltenv, **kwargs) def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): ''' Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) return _client().get_dir(path, dest, saltenv, gzip) def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. 
Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. dest The default behaviour is to write the fetched file to the given destination path. If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error('Unable to fetch file %s from saltenv %s.', salt.utils.url.redact_http_basic_auth(path), saltenv) return result def get_file_str(path, saltenv='base'): ''' Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. code-block:: bash salt '*' cp.get_file_str salt://my/file ''' fn_ = cache_file(path, saltenv) if isinstance(fn_, six.string_types): try: with salt.utils.files.fopen(fn_, 'r') as fp_: return fp_.read() except IOError: return False return fn_ def cache_file(path, saltenv='base', source_hash=None): ''' Used to cache a single file on the Minion Returns the location of the new cached file on the Minion source_hash If ``name`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. 
note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' path = salt.utils.data.decode(path) saltenv = salt.utils.data.decode(saltenv) contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: # Prevent multiple caches in the same salt run. Affects remote URLs # since the master won't know their hash, so the fileclient # wouldn't be able to prevent multiple caches if we try to cache # the remote URL more than once. if os.path.isfile(__context__[contextkey]): return __context__[contextkey] else: # File is in __context__ but no longer exists in the minion # cache, get rid of the context key and re-cache below. # Accounts for corner case where file is removed from minion # cache between cp.cache_file calls in the same salt-run. __context__.pop(contextkey) except AttributeError: pass path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv result = _client().cache_file(path, saltenv, source_hash=source_hash) if not result: log.error( 'Unable to cache file \'%s\' from saltenv \'%s\'.', path, saltenv ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent # multiple caches (see above). __context__[contextkey] = result return result def cache_dest(url, saltenv='base'): ''' .. versionadded:: Neon Returns the expected cache path for the file, if cached using :py:func:`cp.cache_file <salt.modules.cp.cache_file>`. .. note:: This only returns the _expected_ path, it does not tell you if the URL is really cached. To check if the URL is cached, use :py:func:`cp.is_cached <salt.modules.cp.is_cached>` instead. CLI Examples: .. 
code-block:: bash salt '*' cp.cache_dest https://foo.com/bar.rpm salt '*' cp.cache_dest salt://my/file salt '*' cp.cache_dest salt://my/file saltenv=dev ''' return _client().cache_dest(url, saltenv) def cache_files(paths, saltenv='base'): ''' Used to gather many files from the Master, the gathered files will be saved in the minion cachedir reflective to the paths retrieved from the Master CLI Example: .. code-block:: bash salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1 There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the files. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config The querystring method is less useful when all files are being cached from the same environment, but is a good way of caching files from multiple different environments in the same command. For example, the below command will cache the first file from the ``config1`` environment, and the second one from the ``config2`` environment. .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2 .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' return _client().cache_files(paths, saltenv) def cache_dir(path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None): ''' Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. 
versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$' ''' return _client().cache_dir( path, saltenv, include_empty, include_pat, exclude_pat ) def cache_master(saltenv='base'): ''' Retrieve all of the files on the master and cache them locally CLI Example: .. code-block:: bash salt '*' cp.cache_master ''' return _client().cache_master(saltenv) def cache_local_file(path): ''' Cache a local file on the minion in the localfiles cache CLI Example: .. code-block:: bash salt '*' cp.cache_local_file /etc/hosts ''' if not os.path.exists(path): return '' path_cached = is_cached(path) # If the file has already been cached, return the path if path_cached: path_hash = hash_file(path) path_cached_hash = hash_file(path_cached) if path_hash['hsum'] == path_cached_hash['hsum']: return path_cached # The file hasn't been cached or has changed; cache it return _client().cache_local_file(path) def list_states(saltenv='base'): ''' List all of the available state modules in an environment CLI Example: .. code-block:: bash salt '*' cp.list_states ''' return _client().list_states(saltenv) def list_master(saltenv='base', prefix=''): ''' List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ''' return _client().file_list(saltenv, prefix) def list_master_dirs(saltenv='base', prefix=''): ''' List all of the directories stored on the master CLI Example: .. 
code-block:: bash salt '*' cp.list_master_dirs ''' return _client().dir_list(saltenv, prefix) def list_master_symlinks(saltenv='base', prefix=''): ''' List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ''' return _client().symlink_list(saltenv, prefix) def list_minion(saltenv='base'): ''' List all of the files cached on the minion CLI Example: .. code-block:: bash salt '*' cp.list_minion ''' return _client().file_local_list(saltenv) def is_cached(path, saltenv='base'): ''' Return a boolean if the given path on the master has been cached on the minion CLI Example: .. code-block:: bash salt '*' cp.is_cached salt://path/to/file ''' return _client().is_cached(path, saltenv) def hash_file(path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.hash_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv return _client().hash_file(path, saltenv) def stat_file(path, saltenv='base', octal=True): ''' Return the permissions of a file, to get the permissions of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.stat_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv stat = _client().hash_and_stat_file(path, saltenv)[1] if stat is None: return stat return salt.utils.files.st_mode_to_octal(stat[0]) if octal is True else stat[0] def push(path, keep_symlinks=False, upload_path=None, remove_source=False): ''' WARNING Files pushed to the master will have global read permissions.. 
Push a file from the minion up to the master, the file will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``) Since this feature allows a minion to push a file up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. keep_symlinks Keep the path value without resolving its canonical form upload_path Provide a different path inside the master's minion files cachedir remove_source Remove the source file on the minion .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' cp.push /etc/fstab salt '*' cp.push /etc/system-release keep_symlinks=True salt '*' cp.push /etc/fstab upload_path='/new/path/fstab' salt '*' cp.push /tmp/filename remove_source=True ''' log.debug('Trying to copy \'%s\' to master', path) if '../' in path or not os.path.isabs(path): log.debug('Path must be absolute, returning False') return False if not keep_symlinks: path = os.path.realpath(path) if not os.path.isfile(path): log.debug('Path failed os.path.isfile check, returning False') return False auth = _auth() if upload_path: if '../' in upload_path: log.debug('Path must be absolute, returning False') log.debug('Bad path: %s', upload_path) return False load_path = upload_path.lstrip(os.sep) else: load_path = path.lstrip(os.sep) # Normalize the path. 
This does not eliminate # the possibility that relative entries will still be present load_path_normal = os.path.normpath(load_path) # If this is Windows and a drive letter is present, remove it load_path_split_drive = os.path.splitdrive(load_path_normal)[1] # Finally, split the remaining path into a list for delivery to the master load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f] load = {'cmd': '_file_recv', 'id': __opts__['id'], 'path': load_path_list, 'size': os.path.getsize(path), 'tok': auth.gen_token(b'salt')} channel = salt.transport.client.ReqChannel.factory(__opts__) try: with salt.utils.files.fopen(path, 'rb') as fp_: init_send = False while True: load['loc'] = fp_.tell() load['data'] = fp_.read(__opts__['file_buffer_size']) if not load['data'] and init_send: if remove_source: try: salt.utils.files.rm_rf(path) log.debug('Removing source file \'%s\'', path) except IOError: log.error('cp.push failed to remove file \'%s\'', path) return False return True ret = channel.send(load) if not ret: log.error('cp.push Failed transfer failed. Ensure master has ' '\'file_recv\' set to \'True\' and that the file ' 'is not larger than the \'file_recv_size_max\' ' 'setting on the master.') return ret init_send = True finally: channel.close() def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. 
code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in salt.utils.path.os_walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
saltstack/salt
salt/modules/cp.py
get_template
python
def get_template(path, dest, template='jinja', saltenv='base', makedirs=False, **kwargs): ''' Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest ''' if 'salt' not in kwargs: kwargs['salt'] = __salt__ if 'pillar' not in kwargs: kwargs['pillar'] = __pillar__ if 'grains' not in kwargs: kwargs['grains'] = __grains__ if 'opts' not in kwargs: kwargs['opts'] = __opts__ return _client().get_template( path, dest, template, makedirs, saltenv, **kwargs)
Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L317-L348
[ "def _client():\n '''\n Return a client, hashed by the list of masters\n '''\n _mk_client()\n return __context__['cp.fileclient_{0}'.format(id(__opts__))]\n" ]
# -*- coding: utf-8 -*- ''' Minion side functions for salt-cp ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import base64 import errno import os import logging import fnmatch # Import salt libs import salt.minion import salt.fileclient import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.path import salt.utils.templates import salt.utils.url import salt.crypt import salt.transport.client from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) __proxyenabled__ = ['*'] def _auth(): ''' Return the auth object ''' if 'auth' not in __context__: __context__['auth'] = salt.crypt.SAuth(__opts__) return __context__['auth'] def _gather_pillar(pillarenv, pillar_override): ''' Whenever a state run starts, gather the pillar data fresh ''' pillar = salt.pillar.get_pillar( __opts__, __grains__, __opts__['id'], __opts__['saltenv'], pillar_override=pillar_override, pillarenv=pillarenv ) ret = pillar.compile_pillar() if pillar_override and isinstance(pillar_override, dict): ret.update(pillar_override) return ret def recv(files, dest): ''' Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI. 
''' ret = {} for path, data in six.iteritems(files): if os.path.basename(path) == os.path.basename(dest) \ and not os.path.isdir(dest): final = dest elif os.path.isdir(dest): final = os.path.join(dest, os.path.basename(path)) elif os.path.isdir(os.path.dirname(dest)): final = dest else: return 'Destination unavailable' try: with salt.utils.files.fopen(final, 'w+') as fp_: fp_.write(data) ret[final] = True except IOError: ret[final] = False return ret def recv_chunked(dest, chunk, append=False, compressed=True, mode=None): ''' This function receives files copied to the minion using ``salt-cp`` and is not intended to be used directly on the CLI. ''' if 'retcode' not in __context__: __context__['retcode'] = 0 def _error(msg): __context__['retcode'] = 1 return msg if chunk is None: # dest is an empty dir and needs to be created try: os.makedirs(dest) except OSError as exc: if exc.errno == errno.EEXIST: if os.path.isfile(dest): return 'Path exists and is a file' else: return _error(exc.__str__()) return True chunk = base64.b64decode(chunk) open_mode = 'ab' if append else 'wb' try: fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 except (IOError, OSError) as exc: if exc.errno != errno.ENOENT: # Parent dir does not exist, we need to create it return _error(exc.__str__()) try: os.makedirs(os.path.dirname(dest)) except (IOError, OSError) as makedirs_exc: # Failed to make directory return _error(makedirs_exc.__str__()) fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 try: # Write the chunk to disk fh_.write(salt.utils.gzip_util.uncompress(chunk) if compressed else chunk) except (IOError, OSError) as exc: # Write failed return _error(exc.__str__()) else: # Write successful if not append and mode is not None: # If this is the first chunk we're writing, set the mode #log.debug('Setting mode for %s to %s', dest, oct(mode)) log.debug('Setting mode for %s to %s', dest, mode) try: os.chmod(dest, mode) except OSError: return 
_error(exc.__str__()) return True finally: try: fh_.close() except AttributeError: pass def _mk_client(): ''' Create a file client and add it to the context. Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict ''' if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__: __context__['cp.fileclient_{0}'.format(id(__opts__))] = \ salt.fileclient.get_file_client(__opts__) def _client(): ''' Return a client, hashed by the list of masters ''' _mk_client() return __context__['cp.fileclient_{0}'.format(id(__opts__))] def _render_filenames(path, dest, saltenv, template, **kw): ''' Process markup in the :param:`path` and :param:`dest` variables (NOT the files under the paths they ultimately point to) according to the markup format provided by :param:`template`. ''' if not template: return (path, dest) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ if 'pillarenv' in kw or 'pillar' in kw: pillarenv = kw.get('pillarenv', __opts__.get('pillarenv')) kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar')) else: kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): ''' Render :param:`contents` into a literal pathname by writing it to a temp file, rendering that file, and returning the result. 
''' # write out path to temp file tmp_path_fn = salt.utils.files.mkstemp() with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(contents)) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.files.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: {0}'.format( data['data'] ) ) else: return data['data'] path = _render(path) dest = _render(dest) return (path, dest) def get_file(path, dest, saltenv='base', makedirs=False, template=None, gzip=None, **kwargs): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not hash_file(path, saltenv): return '' else: return _client().get_file( path, dest, makedirs, saltenv, gzip) def envs(): ''' List available environments for fileserver CLI Example .. code-block:: bash salt '*' cp.envs ''' return _client().envs() def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): ''' Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) return _client().get_dir(path, dest, saltenv, gzip) def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. dest The default behaviour is to write the fetched file to the given destination path. 
If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error('Unable to fetch file %s from saltenv %s.', salt.utils.url.redact_http_basic_auth(path), saltenv) return result def get_file_str(path, saltenv='base'): ''' Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. 
code-block:: bash salt '*' cp.get_file_str salt://my/file ''' fn_ = cache_file(path, saltenv) if isinstance(fn_, six.string_types): try: with salt.utils.files.fopen(fn_, 'r') as fp_: return fp_.read() except IOError: return False return fn_ def cache_file(path, saltenv='base', source_hash=None): ''' Used to cache a single file on the Minion Returns the location of the new cached file on the Minion source_hash If ``name`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' path = salt.utils.data.decode(path) saltenv = salt.utils.data.decode(saltenv) contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: # Prevent multiple caches in the same salt run. Affects remote URLs # since the master won't know their hash, so the fileclient # wouldn't be able to prevent multiple caches if we try to cache # the remote URL more than once. 
if os.path.isfile(__context__[contextkey]): return __context__[contextkey] else: # File is in __context__ but no longer exists in the minion # cache, get rid of the context key and re-cache below. # Accounts for corner case where file is removed from minion # cache between cp.cache_file calls in the same salt-run. __context__.pop(contextkey) except AttributeError: pass path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv result = _client().cache_file(path, saltenv, source_hash=source_hash) if not result: log.error( 'Unable to cache file \'%s\' from saltenv \'%s\'.', path, saltenv ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent # multiple caches (see above). __context__[contextkey] = result return result def cache_dest(url, saltenv='base'): ''' .. versionadded:: Neon Returns the expected cache path for the file, if cached using :py:func:`cp.cache_file <salt.modules.cp.cache_file>`. .. note:: This only returns the _expected_ path, it does not tell you if the URL is really cached. To check if the URL is cached, use :py:func:`cp.is_cached <salt.modules.cp.is_cached>` instead. CLI Examples: .. code-block:: bash salt '*' cp.cache_dest https://foo.com/bar.rpm salt '*' cp.cache_dest salt://my/file salt '*' cp.cache_dest salt://my/file saltenv=dev ''' return _client().cache_dest(url, saltenv) def cache_files(paths, saltenv='base'): ''' Used to gather many files from the Master, the gathered files will be saved in the minion cachedir reflective to the paths retrieved from the Master CLI Example: .. code-block:: bash salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1 There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the files. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config The querystring method is less useful when all files are being cached from the same environment, but is a good way of caching files from multiple different environments in the same command. For example, the below command will cache the first file from the ``config1`` environment, and the second one from the ``config2`` environment. .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2 .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' return _client().cache_files(paths, saltenv) def cache_dir(path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None): ''' Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$' ''' return _client().cache_dir( path, saltenv, include_empty, include_pat, exclude_pat ) def cache_master(saltenv='base'): ''' Retrieve all of the files on the master and cache them locally CLI Example: .. 
code-block:: bash salt '*' cp.cache_master ''' return _client().cache_master(saltenv) def cache_local_file(path): ''' Cache a local file on the minion in the localfiles cache CLI Example: .. code-block:: bash salt '*' cp.cache_local_file /etc/hosts ''' if not os.path.exists(path): return '' path_cached = is_cached(path) # If the file has already been cached, return the path if path_cached: path_hash = hash_file(path) path_cached_hash = hash_file(path_cached) if path_hash['hsum'] == path_cached_hash['hsum']: return path_cached # The file hasn't been cached or has changed; cache it return _client().cache_local_file(path) def list_states(saltenv='base'): ''' List all of the available state modules in an environment CLI Example: .. code-block:: bash salt '*' cp.list_states ''' return _client().list_states(saltenv) def list_master(saltenv='base', prefix=''): ''' List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ''' return _client().file_list(saltenv, prefix) def list_master_dirs(saltenv='base', prefix=''): ''' List all of the directories stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_dirs ''' return _client().dir_list(saltenv, prefix) def list_master_symlinks(saltenv='base', prefix=''): ''' List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ''' return _client().symlink_list(saltenv, prefix) def list_minion(saltenv='base'): ''' List all of the files cached on the minion CLI Example: .. code-block:: bash salt '*' cp.list_minion ''' return _client().file_local_list(saltenv) def is_cached(path, saltenv='base'): ''' Return a boolean if the given path on the master has been cached on the minion CLI Example: .. 
code-block:: bash salt '*' cp.is_cached salt://path/to/file ''' return _client().is_cached(path, saltenv) def hash_file(path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.hash_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv return _client().hash_file(path, saltenv) def stat_file(path, saltenv='base', octal=True): ''' Return the permissions of a file, to get the permissions of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.stat_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv stat = _client().hash_and_stat_file(path, saltenv)[1] if stat is None: return stat return salt.utils.files.st_mode_to_octal(stat[0]) if octal is True else stat[0] def push(path, keep_symlinks=False, upload_path=None, remove_source=False): ''' WARNING Files pushed to the master will have global read permissions.. Push a file from the minion up to the master, the file will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``) Since this feature allows a minion to push a file up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. keep_symlinks Keep the path value without resolving its canonical form upload_path Provide a different path inside the master's minion files cachedir remove_source Remove the source file on the minion .. versionadded:: 2016.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.push /etc/fstab salt '*' cp.push /etc/system-release keep_symlinks=True salt '*' cp.push /etc/fstab upload_path='/new/path/fstab' salt '*' cp.push /tmp/filename remove_source=True ''' log.debug('Trying to copy \'%s\' to master', path) if '../' in path or not os.path.isabs(path): log.debug('Path must be absolute, returning False') return False if not keep_symlinks: path = os.path.realpath(path) if not os.path.isfile(path): log.debug('Path failed os.path.isfile check, returning False') return False auth = _auth() if upload_path: if '../' in upload_path: log.debug('Path must be absolute, returning False') log.debug('Bad path: %s', upload_path) return False load_path = upload_path.lstrip(os.sep) else: load_path = path.lstrip(os.sep) # Normalize the path. This does not eliminate # the possibility that relative entries will still be present load_path_normal = os.path.normpath(load_path) # If this is Windows and a drive letter is present, remove it load_path_split_drive = os.path.splitdrive(load_path_normal)[1] # Finally, split the remaining path into a list for delivery to the master load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f] load = {'cmd': '_file_recv', 'id': __opts__['id'], 'path': load_path_list, 'size': os.path.getsize(path), 'tok': auth.gen_token(b'salt')} channel = salt.transport.client.ReqChannel.factory(__opts__) try: with salt.utils.files.fopen(path, 'rb') as fp_: init_send = False while True: load['loc'] = fp_.tell() load['data'] = fp_.read(__opts__['file_buffer_size']) if not load['data'] and init_send: if remove_source: try: salt.utils.files.rm_rf(path) log.debug('Removing source file \'%s\'', path) except IOError: log.error('cp.push failed to remove file \'%s\'', path) return False return True ret = channel.send(load) if not ret: log.error('cp.push Failed transfer failed. 
Ensure master has ' '\'file_recv\' set to \'True\' and that the file ' 'is not larger than the \'file_recv_size_max\' ' 'setting on the master.') return ret init_send = True finally: channel.close() def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in salt.utils.path.os_walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
saltstack/salt
salt/modules/cp.py
get_dir
python
def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): ''' Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) return _client().get_dir(path, dest, saltenv, gzip)
Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L351-L365
[ "def _client():\n '''\n Return a client, hashed by the list of masters\n '''\n _mk_client()\n return __context__['cp.fileclient_{0}'.format(id(__opts__))]\n", "def _render_filenames(path, dest, saltenv, template, **kw):\n '''\n Process markup in the :param:`path` and :param:`dest` variables (NOT the\n files under the paths they ultimately point to) according to the markup\n format provided by :param:`template`.\n '''\n if not template:\n return (path, dest)\n\n # render the path as a template using path_template_engine as the engine\n if template not in salt.utils.templates.TEMPLATE_REGISTRY:\n raise CommandExecutionError(\n 'Attempted to render file paths with unavailable engine '\n '{0}'.format(template)\n )\n\n kwargs = {}\n kwargs['salt'] = __salt__\n if 'pillarenv' in kw or 'pillar' in kw:\n pillarenv = kw.get('pillarenv', __opts__.get('pillarenv'))\n kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar'))\n else:\n kwargs['pillar'] = __pillar__\n kwargs['grains'] = __grains__\n kwargs['opts'] = __opts__\n kwargs['saltenv'] = saltenv\n\n def _render(contents):\n '''\n Render :param:`contents` into a literal pathname by writing it to a\n temp file, rendering that file, and returning the result.\n '''\n # write out path to temp file\n tmp_path_fn = salt.utils.files.mkstemp()\n with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_:\n fp_.write(salt.utils.stringutils.to_str(contents))\n data = salt.utils.templates.TEMPLATE_REGISTRY[template](\n tmp_path_fn,\n to_str=True,\n **kwargs\n )\n salt.utils.files.safe_rm(tmp_path_fn)\n if not data['result']:\n # Failed to render the template\n raise CommandExecutionError(\n 'Failed to render file path with error: {0}'.format(\n data['data']\n )\n )\n else:\n return data['data']\n\n path = _render(path)\n dest = _render(dest)\n return (path, dest)\n" ]
# -*- coding: utf-8 -*- ''' Minion side functions for salt-cp ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import base64 import errno import os import logging import fnmatch # Import salt libs import salt.minion import salt.fileclient import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.path import salt.utils.templates import salt.utils.url import salt.crypt import salt.transport.client from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) __proxyenabled__ = ['*'] def _auth(): ''' Return the auth object ''' if 'auth' not in __context__: __context__['auth'] = salt.crypt.SAuth(__opts__) return __context__['auth'] def _gather_pillar(pillarenv, pillar_override): ''' Whenever a state run starts, gather the pillar data fresh ''' pillar = salt.pillar.get_pillar( __opts__, __grains__, __opts__['id'], __opts__['saltenv'], pillar_override=pillar_override, pillarenv=pillarenv ) ret = pillar.compile_pillar() if pillar_override and isinstance(pillar_override, dict): ret.update(pillar_override) return ret def recv(files, dest): ''' Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI. 
''' ret = {} for path, data in six.iteritems(files): if os.path.basename(path) == os.path.basename(dest) \ and not os.path.isdir(dest): final = dest elif os.path.isdir(dest): final = os.path.join(dest, os.path.basename(path)) elif os.path.isdir(os.path.dirname(dest)): final = dest else: return 'Destination unavailable' try: with salt.utils.files.fopen(final, 'w+') as fp_: fp_.write(data) ret[final] = True except IOError: ret[final] = False return ret def recv_chunked(dest, chunk, append=False, compressed=True, mode=None): ''' This function receives files copied to the minion using ``salt-cp`` and is not intended to be used directly on the CLI. ''' if 'retcode' not in __context__: __context__['retcode'] = 0 def _error(msg): __context__['retcode'] = 1 return msg if chunk is None: # dest is an empty dir and needs to be created try: os.makedirs(dest) except OSError as exc: if exc.errno == errno.EEXIST: if os.path.isfile(dest): return 'Path exists and is a file' else: return _error(exc.__str__()) return True chunk = base64.b64decode(chunk) open_mode = 'ab' if append else 'wb' try: fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 except (IOError, OSError) as exc: if exc.errno != errno.ENOENT: # Parent dir does not exist, we need to create it return _error(exc.__str__()) try: os.makedirs(os.path.dirname(dest)) except (IOError, OSError) as makedirs_exc: # Failed to make directory return _error(makedirs_exc.__str__()) fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 try: # Write the chunk to disk fh_.write(salt.utils.gzip_util.uncompress(chunk) if compressed else chunk) except (IOError, OSError) as exc: # Write failed return _error(exc.__str__()) else: # Write successful if not append and mode is not None: # If this is the first chunk we're writing, set the mode #log.debug('Setting mode for %s to %s', dest, oct(mode)) log.debug('Setting mode for %s to %s', dest, mode) try: os.chmod(dest, mode) except OSError: return 
_error(exc.__str__()) return True finally: try: fh_.close() except AttributeError: pass def _mk_client(): ''' Create a file client and add it to the context. Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict ''' if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__: __context__['cp.fileclient_{0}'.format(id(__opts__))] = \ salt.fileclient.get_file_client(__opts__) def _client(): ''' Return a client, hashed by the list of masters ''' _mk_client() return __context__['cp.fileclient_{0}'.format(id(__opts__))] def _render_filenames(path, dest, saltenv, template, **kw): ''' Process markup in the :param:`path` and :param:`dest` variables (NOT the files under the paths they ultimately point to) according to the markup format provided by :param:`template`. ''' if not template: return (path, dest) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ if 'pillarenv' in kw or 'pillar' in kw: pillarenv = kw.get('pillarenv', __opts__.get('pillarenv')) kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar')) else: kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): ''' Render :param:`contents` into a literal pathname by writing it to a temp file, rendering that file, and returning the result. 
''' # write out path to temp file tmp_path_fn = salt.utils.files.mkstemp() with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(contents)) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.files.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: {0}'.format( data['data'] ) ) else: return data['data'] path = _render(path) dest = _render(dest) return (path, dest) def get_file(path, dest, saltenv='base', makedirs=False, template=None, gzip=None, **kwargs): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not hash_file(path, saltenv): return '' else: return _client().get_file( path, dest, makedirs, saltenv, gzip) def envs(): ''' List available environments for fileserver CLI Example .. code-block:: bash salt '*' cp.envs ''' return _client().envs() def get_template(path, dest, template='jinja', saltenv='base', makedirs=False, **kwargs): ''' Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest ''' if 'salt' not in kwargs: kwargs['salt'] = __salt__ if 'pillar' not in kwargs: kwargs['pillar'] = __pillar__ if 'grains' not in kwargs: kwargs['grains'] = __grains__ if 'opts' not in kwargs: kwargs['opts'] = __opts__ return _client().get_template( path, dest, template, makedirs, saltenv, **kwargs) def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. 
dest The default behaviour is to write the fetched file to the given destination path. If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error('Unable to fetch file %s from saltenv %s.', salt.utils.url.redact_http_basic_auth(path), saltenv) return result def get_file_str(path, saltenv='base'): ''' Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. 
code-block:: bash salt '*' cp.get_file_str salt://my/file ''' fn_ = cache_file(path, saltenv) if isinstance(fn_, six.string_types): try: with salt.utils.files.fopen(fn_, 'r') as fp_: return fp_.read() except IOError: return False return fn_ def cache_file(path, saltenv='base', source_hash=None): ''' Used to cache a single file on the Minion Returns the location of the new cached file on the Minion source_hash If ``name`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' path = salt.utils.data.decode(path) saltenv = salt.utils.data.decode(saltenv) contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: # Prevent multiple caches in the same salt run. Affects remote URLs # since the master won't know their hash, so the fileclient # wouldn't be able to prevent multiple caches if we try to cache # the remote URL more than once. 
if os.path.isfile(__context__[contextkey]): return __context__[contextkey] else: # File is in __context__ but no longer exists in the minion # cache, get rid of the context key and re-cache below. # Accounts for corner case where file is removed from minion # cache between cp.cache_file calls in the same salt-run. __context__.pop(contextkey) except AttributeError: pass path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv result = _client().cache_file(path, saltenv, source_hash=source_hash) if not result: log.error( 'Unable to cache file \'%s\' from saltenv \'%s\'.', path, saltenv ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent # multiple caches (see above). __context__[contextkey] = result return result def cache_dest(url, saltenv='base'): ''' .. versionadded:: Neon Returns the expected cache path for the file, if cached using :py:func:`cp.cache_file <salt.modules.cp.cache_file>`. .. note:: This only returns the _expected_ path, it does not tell you if the URL is really cached. To check if the URL is cached, use :py:func:`cp.is_cached <salt.modules.cp.is_cached>` instead. CLI Examples: .. code-block:: bash salt '*' cp.cache_dest https://foo.com/bar.rpm salt '*' cp.cache_dest salt://my/file salt '*' cp.cache_dest salt://my/file saltenv=dev ''' return _client().cache_dest(url, saltenv) def cache_files(paths, saltenv='base'): ''' Used to gather many files from the Master, the gathered files will be saved in the minion cachedir reflective to the paths retrieved from the Master CLI Example: .. code-block:: bash salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1 There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the files. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config The querystring method is less useful when all files are being cached from the same environment, but is a good way of caching files from multiple different environments in the same command. For example, the below command will cache the first file from the ``config1`` environment, and the second one from the ``config2`` environment. .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2 .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' return _client().cache_files(paths, saltenv) def cache_dir(path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None): ''' Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$' ''' return _client().cache_dir( path, saltenv, include_empty, include_pat, exclude_pat ) def cache_master(saltenv='base'): ''' Retrieve all of the files on the master and cache them locally CLI Example: .. 
code-block:: bash salt '*' cp.cache_master ''' return _client().cache_master(saltenv) def cache_local_file(path): ''' Cache a local file on the minion in the localfiles cache CLI Example: .. code-block:: bash salt '*' cp.cache_local_file /etc/hosts ''' if not os.path.exists(path): return '' path_cached = is_cached(path) # If the file has already been cached, return the path if path_cached: path_hash = hash_file(path) path_cached_hash = hash_file(path_cached) if path_hash['hsum'] == path_cached_hash['hsum']: return path_cached # The file hasn't been cached or has changed; cache it return _client().cache_local_file(path) def list_states(saltenv='base'): ''' List all of the available state modules in an environment CLI Example: .. code-block:: bash salt '*' cp.list_states ''' return _client().list_states(saltenv) def list_master(saltenv='base', prefix=''): ''' List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ''' return _client().file_list(saltenv, prefix) def list_master_dirs(saltenv='base', prefix=''): ''' List all of the directories stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_dirs ''' return _client().dir_list(saltenv, prefix) def list_master_symlinks(saltenv='base', prefix=''): ''' List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ''' return _client().symlink_list(saltenv, prefix) def list_minion(saltenv='base'): ''' List all of the files cached on the minion CLI Example: .. code-block:: bash salt '*' cp.list_minion ''' return _client().file_local_list(saltenv) def is_cached(path, saltenv='base'): ''' Return a boolean if the given path on the master has been cached on the minion CLI Example: .. 
code-block:: bash salt '*' cp.is_cached salt://path/to/file ''' return _client().is_cached(path, saltenv) def hash_file(path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.hash_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv return _client().hash_file(path, saltenv) def stat_file(path, saltenv='base', octal=True): ''' Return the permissions of a file, to get the permissions of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.stat_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv stat = _client().hash_and_stat_file(path, saltenv)[1] if stat is None: return stat return salt.utils.files.st_mode_to_octal(stat[0]) if octal is True else stat[0] def push(path, keep_symlinks=False, upload_path=None, remove_source=False): ''' WARNING Files pushed to the master will have global read permissions.. Push a file from the minion up to the master, the file will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``) Since this feature allows a minion to push a file up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. keep_symlinks Keep the path value without resolving its canonical form upload_path Provide a different path inside the master's minion files cachedir remove_source Remove the source file on the minion .. versionadded:: 2016.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.push /etc/fstab salt '*' cp.push /etc/system-release keep_symlinks=True salt '*' cp.push /etc/fstab upload_path='/new/path/fstab' salt '*' cp.push /tmp/filename remove_source=True ''' log.debug('Trying to copy \'%s\' to master', path) if '../' in path or not os.path.isabs(path): log.debug('Path must be absolute, returning False') return False if not keep_symlinks: path = os.path.realpath(path) if not os.path.isfile(path): log.debug('Path failed os.path.isfile check, returning False') return False auth = _auth() if upload_path: if '../' in upload_path: log.debug('Path must be absolute, returning False') log.debug('Bad path: %s', upload_path) return False load_path = upload_path.lstrip(os.sep) else: load_path = path.lstrip(os.sep) # Normalize the path. This does not eliminate # the possibility that relative entries will still be present load_path_normal = os.path.normpath(load_path) # If this is Windows and a drive letter is present, remove it load_path_split_drive = os.path.splitdrive(load_path_normal)[1] # Finally, split the remaining path into a list for delivery to the master load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f] load = {'cmd': '_file_recv', 'id': __opts__['id'], 'path': load_path_list, 'size': os.path.getsize(path), 'tok': auth.gen_token(b'salt')} channel = salt.transport.client.ReqChannel.factory(__opts__) try: with salt.utils.files.fopen(path, 'rb') as fp_: init_send = False while True: load['loc'] = fp_.tell() load['data'] = fp_.read(__opts__['file_buffer_size']) if not load['data'] and init_send: if remove_source: try: salt.utils.files.rm_rf(path) log.debug('Removing source file \'%s\'', path) except IOError: log.error('cp.push failed to remove file \'%s\'', path) return False return True ret = channel.send(load) if not ret: log.error('cp.push Failed transfer failed. 
Ensure master has ' '\'file_recv\' set to \'True\' and that the file ' 'is not larger than the \'file_recv_size_max\' ' 'setting on the master.') return ret init_send = True finally: channel.close() def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in salt.utils.path.os_walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
saltstack/salt
salt/modules/cp.py
get_url
python
def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. dest The default behaviour is to write the fetched file to the given destination path. If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error('Unable to fetch file %s from saltenv %s.', salt.utils.url.redact_http_basic_auth(path), saltenv) return result
.. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. dest The default behaviour is to write the fetched file to the given destination path. If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L368-L426
[ "def _client():\n '''\n Return a client, hashed by the list of masters\n '''\n _mk_client()\n return __context__['cp.fileclient_{0}'.format(id(__opts__))]\n", "def redact_http_basic_auth(output):\n '''\n Remove HTTP user and password\n '''\n # We can't use re.compile because re.compile(someregex).sub() doesn't\n # support flags even in Python 2.7.\n url_re = '(https?)://.*@'\n redacted = r'\\1://<redacted>@'\n if sys.version_info >= (2, 7):\n # re.sub() supports flags as of 2.7, use this to do a case-insensitive\n # match.\n return re.sub(url_re, redacted, output, flags=re.IGNORECASE)\n else:\n # We're on python 2.6, test if a lowercased version of the output\n # string matches the regex...\n if re.search(url_re, output.lower()):\n # ... and if it does, perform the regex substitution.\n return re.sub(url_re, redacted, output.lower())\n # No match, just return the original string\n return output\n" ]
# -*- coding: utf-8 -*- ''' Minion side functions for salt-cp ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import base64 import errno import os import logging import fnmatch # Import salt libs import salt.minion import salt.fileclient import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.path import salt.utils.templates import salt.utils.url import salt.crypt import salt.transport.client from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) __proxyenabled__ = ['*'] def _auth(): ''' Return the auth object ''' if 'auth' not in __context__: __context__['auth'] = salt.crypt.SAuth(__opts__) return __context__['auth'] def _gather_pillar(pillarenv, pillar_override): ''' Whenever a state run starts, gather the pillar data fresh ''' pillar = salt.pillar.get_pillar( __opts__, __grains__, __opts__['id'], __opts__['saltenv'], pillar_override=pillar_override, pillarenv=pillarenv ) ret = pillar.compile_pillar() if pillar_override and isinstance(pillar_override, dict): ret.update(pillar_override) return ret def recv(files, dest): ''' Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI. 
''' ret = {} for path, data in six.iteritems(files): if os.path.basename(path) == os.path.basename(dest) \ and not os.path.isdir(dest): final = dest elif os.path.isdir(dest): final = os.path.join(dest, os.path.basename(path)) elif os.path.isdir(os.path.dirname(dest)): final = dest else: return 'Destination unavailable' try: with salt.utils.files.fopen(final, 'w+') as fp_: fp_.write(data) ret[final] = True except IOError: ret[final] = False return ret def recv_chunked(dest, chunk, append=False, compressed=True, mode=None): ''' This function receives files copied to the minion using ``salt-cp`` and is not intended to be used directly on the CLI. ''' if 'retcode' not in __context__: __context__['retcode'] = 0 def _error(msg): __context__['retcode'] = 1 return msg if chunk is None: # dest is an empty dir and needs to be created try: os.makedirs(dest) except OSError as exc: if exc.errno == errno.EEXIST: if os.path.isfile(dest): return 'Path exists and is a file' else: return _error(exc.__str__()) return True chunk = base64.b64decode(chunk) open_mode = 'ab' if append else 'wb' try: fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 except (IOError, OSError) as exc: if exc.errno != errno.ENOENT: # Parent dir does not exist, we need to create it return _error(exc.__str__()) try: os.makedirs(os.path.dirname(dest)) except (IOError, OSError) as makedirs_exc: # Failed to make directory return _error(makedirs_exc.__str__()) fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 try: # Write the chunk to disk fh_.write(salt.utils.gzip_util.uncompress(chunk) if compressed else chunk) except (IOError, OSError) as exc: # Write failed return _error(exc.__str__()) else: # Write successful if not append and mode is not None: # If this is the first chunk we're writing, set the mode #log.debug('Setting mode for %s to %s', dest, oct(mode)) log.debug('Setting mode for %s to %s', dest, mode) try: os.chmod(dest, mode) except OSError: return 
_error(exc.__str__()) return True finally: try: fh_.close() except AttributeError: pass def _mk_client(): ''' Create a file client and add it to the context. Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict ''' if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__: __context__['cp.fileclient_{0}'.format(id(__opts__))] = \ salt.fileclient.get_file_client(__opts__) def _client(): ''' Return a client, hashed by the list of masters ''' _mk_client() return __context__['cp.fileclient_{0}'.format(id(__opts__))] def _render_filenames(path, dest, saltenv, template, **kw): ''' Process markup in the :param:`path` and :param:`dest` variables (NOT the files under the paths they ultimately point to) according to the markup format provided by :param:`template`. ''' if not template: return (path, dest) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ if 'pillarenv' in kw or 'pillar' in kw: pillarenv = kw.get('pillarenv', __opts__.get('pillarenv')) kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar')) else: kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): ''' Render :param:`contents` into a literal pathname by writing it to a temp file, rendering that file, and returning the result. 
''' # write out path to temp file tmp_path_fn = salt.utils.files.mkstemp() with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(contents)) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.files.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: {0}'.format( data['data'] ) ) else: return data['data'] path = _render(path) dest = _render(dest) return (path, dest) def get_file(path, dest, saltenv='base', makedirs=False, template=None, gzip=None, **kwargs): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not hash_file(path, saltenv): return '' else: return _client().get_file( path, dest, makedirs, saltenv, gzip) def envs(): ''' List available environments for fileserver CLI Example .. code-block:: bash salt '*' cp.envs ''' return _client().envs() def get_template(path, dest, template='jinja', saltenv='base', makedirs=False, **kwargs): ''' Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest ''' if 'salt' not in kwargs: kwargs['salt'] = __salt__ if 'pillar' not in kwargs: kwargs['pillar'] = __pillar__ if 'grains' not in kwargs: kwargs['grains'] = __grains__ if 'opts' not in kwargs: kwargs['opts'] = __opts__ return _client().get_template( path, dest, template, makedirs, saltenv, **kwargs) def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): ''' Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) return _client().get_dir(path, dest, saltenv, gzip) def get_file_str(path, saltenv='base'): ''' Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. 
code-block:: bash salt '*' cp.get_file_str salt://my/file ''' fn_ = cache_file(path, saltenv) if isinstance(fn_, six.string_types): try: with salt.utils.files.fopen(fn_, 'r') as fp_: return fp_.read() except IOError: return False return fn_ def cache_file(path, saltenv='base', source_hash=None): ''' Used to cache a single file on the Minion Returns the location of the new cached file on the Minion source_hash If ``name`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' path = salt.utils.data.decode(path) saltenv = salt.utils.data.decode(saltenv) contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: # Prevent multiple caches in the same salt run. Affects remote URLs # since the master won't know their hash, so the fileclient # wouldn't be able to prevent multiple caches if we try to cache # the remote URL more than once. 
if os.path.isfile(__context__[contextkey]): return __context__[contextkey] else: # File is in __context__ but no longer exists in the minion # cache, get rid of the context key and re-cache below. # Accounts for corner case where file is removed from minion # cache between cp.cache_file calls in the same salt-run. __context__.pop(contextkey) except AttributeError: pass path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv result = _client().cache_file(path, saltenv, source_hash=source_hash) if not result: log.error( 'Unable to cache file \'%s\' from saltenv \'%s\'.', path, saltenv ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent # multiple caches (see above). __context__[contextkey] = result return result def cache_dest(url, saltenv='base'): ''' .. versionadded:: Neon Returns the expected cache path for the file, if cached using :py:func:`cp.cache_file <salt.modules.cp.cache_file>`. .. note:: This only returns the _expected_ path, it does not tell you if the URL is really cached. To check if the URL is cached, use :py:func:`cp.is_cached <salt.modules.cp.is_cached>` instead. CLI Examples: .. code-block:: bash salt '*' cp.cache_dest https://foo.com/bar.rpm salt '*' cp.cache_dest salt://my/file salt '*' cp.cache_dest salt://my/file saltenv=dev ''' return _client().cache_dest(url, saltenv) def cache_files(paths, saltenv='base'): ''' Used to gather many files from the Master, the gathered files will be saved in the minion cachedir reflective to the paths retrieved from the Master CLI Example: .. code-block:: bash salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1 There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the files. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config The querystring method is less useful when all files are being cached from the same environment, but is a good way of caching files from multiple different environments in the same command. For example, the below command will cache the first file from the ``config1`` environment, and the second one from the ``config2`` environment. .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2 .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' return _client().cache_files(paths, saltenv) def cache_dir(path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None): ''' Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$' ''' return _client().cache_dir( path, saltenv, include_empty, include_pat, exclude_pat ) def cache_master(saltenv='base'): ''' Retrieve all of the files on the master and cache them locally CLI Example: .. 
code-block:: bash salt '*' cp.cache_master ''' return _client().cache_master(saltenv) def cache_local_file(path): ''' Cache a local file on the minion in the localfiles cache CLI Example: .. code-block:: bash salt '*' cp.cache_local_file /etc/hosts ''' if not os.path.exists(path): return '' path_cached = is_cached(path) # If the file has already been cached, return the path if path_cached: path_hash = hash_file(path) path_cached_hash = hash_file(path_cached) if path_hash['hsum'] == path_cached_hash['hsum']: return path_cached # The file hasn't been cached or has changed; cache it return _client().cache_local_file(path) def list_states(saltenv='base'): ''' List all of the available state modules in an environment CLI Example: .. code-block:: bash salt '*' cp.list_states ''' return _client().list_states(saltenv) def list_master(saltenv='base', prefix=''): ''' List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ''' return _client().file_list(saltenv, prefix) def list_master_dirs(saltenv='base', prefix=''): ''' List all of the directories stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_dirs ''' return _client().dir_list(saltenv, prefix) def list_master_symlinks(saltenv='base', prefix=''): ''' List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ''' return _client().symlink_list(saltenv, prefix) def list_minion(saltenv='base'): ''' List all of the files cached on the minion CLI Example: .. code-block:: bash salt '*' cp.list_minion ''' return _client().file_local_list(saltenv) def is_cached(path, saltenv='base'): ''' Return a boolean if the given path on the master has been cached on the minion CLI Example: .. 
code-block:: bash salt '*' cp.is_cached salt://path/to/file ''' return _client().is_cached(path, saltenv) def hash_file(path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.hash_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv return _client().hash_file(path, saltenv) def stat_file(path, saltenv='base', octal=True): ''' Return the permissions of a file, to get the permissions of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.stat_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv stat = _client().hash_and_stat_file(path, saltenv)[1] if stat is None: return stat return salt.utils.files.st_mode_to_octal(stat[0]) if octal is True else stat[0] def push(path, keep_symlinks=False, upload_path=None, remove_source=False): ''' WARNING Files pushed to the master will have global read permissions.. Push a file from the minion up to the master, the file will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``) Since this feature allows a minion to push a file up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. keep_symlinks Keep the path value without resolving its canonical form upload_path Provide a different path inside the master's minion files cachedir remove_source Remove the source file on the minion .. versionadded:: 2016.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.push /etc/fstab salt '*' cp.push /etc/system-release keep_symlinks=True salt '*' cp.push /etc/fstab upload_path='/new/path/fstab' salt '*' cp.push /tmp/filename remove_source=True ''' log.debug('Trying to copy \'%s\' to master', path) if '../' in path or not os.path.isabs(path): log.debug('Path must be absolute, returning False') return False if not keep_symlinks: path = os.path.realpath(path) if not os.path.isfile(path): log.debug('Path failed os.path.isfile check, returning False') return False auth = _auth() if upload_path: if '../' in upload_path: log.debug('Path must be absolute, returning False') log.debug('Bad path: %s', upload_path) return False load_path = upload_path.lstrip(os.sep) else: load_path = path.lstrip(os.sep) # Normalize the path. This does not eliminate # the possibility that relative entries will still be present load_path_normal = os.path.normpath(load_path) # If this is Windows and a drive letter is present, remove it load_path_split_drive = os.path.splitdrive(load_path_normal)[1] # Finally, split the remaining path into a list for delivery to the master load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f] load = {'cmd': '_file_recv', 'id': __opts__['id'], 'path': load_path_list, 'size': os.path.getsize(path), 'tok': auth.gen_token(b'salt')} channel = salt.transport.client.ReqChannel.factory(__opts__) try: with salt.utils.files.fopen(path, 'rb') as fp_: init_send = False while True: load['loc'] = fp_.tell() load['data'] = fp_.read(__opts__['file_buffer_size']) if not load['data'] and init_send: if remove_source: try: salt.utils.files.rm_rf(path) log.debug('Removing source file \'%s\'', path) except IOError: log.error('cp.push failed to remove file \'%s\'', path) return False return True ret = channel.send(load) if not ret: log.error('cp.push Failed transfer failed. 
Ensure master has ' '\'file_recv\' set to \'True\' and that the file ' 'is not larger than the \'file_recv_size_max\' ' 'setting on the master.') return ret init_send = True finally: channel.close() def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in salt.utils.path.os_walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
saltstack/salt
salt/modules/cp.py
get_file_str
python
def get_file_str(path, saltenv='base'): ''' Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. code-block:: bash salt '*' cp.get_file_str salt://my/file ''' fn_ = cache_file(path, saltenv) if isinstance(fn_, six.string_types): try: with salt.utils.files.fopen(fn_, 'r') as fp_: return fp_.read() except IOError: return False return fn_
Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. code-block:: bash salt '*' cp.get_file_str salt://my/file
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L429-L449
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def cache_file(path, saltenv='base', source_hash=None):\n '''\n Used to cache a single file on the Minion\n\n Returns the location of the new cached file on the Minion\n\n source_hash\n If ``name`` is an http(s) or ftp URL and the file exists in the\n minion's file cache, this option can be passed to keep the minion from\n re-downloading the file if the cached copy matches the specified hash.\n\n .. versionadded:: 2018.3.0\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' cp.cache_file salt://path/to/file\n\n There are two ways of defining the fileserver environment (a.k.a.\n ``saltenv``) from which to cache the file. One is to use the ``saltenv``\n parameter, and the other is to use a querystring syntax in the ``salt://``\n URL. The below two examples are equivalent:\n\n .. code-block:: bash\n\n salt '*' cp.cache_file salt://foo/bar.conf saltenv=config\n salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config\n\n If the path being cached is a ``salt://`` URI, and the path does not exist,\n then ``False`` will be returned.\n\n .. note::\n It may be necessary to quote the URL when using the querystring method,\n depending on the shell being used to run the command.\n '''\n path = salt.utils.data.decode(path)\n saltenv = salt.utils.data.decode(saltenv)\n\n contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv)\n\n path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp')\n try:\n if path_is_remote and contextkey in __context__:\n # Prevent multiple caches in the same salt run. 
Affects remote URLs\n # since the master won't know their hash, so the fileclient\n # wouldn't be able to prevent multiple caches if we try to cache\n # the remote URL more than once.\n if os.path.isfile(__context__[contextkey]):\n return __context__[contextkey]\n else:\n # File is in __context__ but no longer exists in the minion\n # cache, get rid of the context key and re-cache below.\n # Accounts for corner case where file is removed from minion\n # cache between cp.cache_file calls in the same salt-run.\n __context__.pop(contextkey)\n except AttributeError:\n pass\n\n path, senv = salt.utils.url.split_env(path)\n if senv:\n saltenv = senv\n\n result = _client().cache_file(path, saltenv, source_hash=source_hash)\n if not result:\n log.error(\n 'Unable to cache file \\'%s\\' from saltenv \\'%s\\'.',\n path, saltenv\n )\n if path_is_remote:\n # Cache was successful, store the result in __context__ to prevent\n # multiple caches (see above).\n __context__[contextkey] = result\n return result\n" ]
# -*- coding: utf-8 -*- ''' Minion side functions for salt-cp ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import base64 import errno import os import logging import fnmatch # Import salt libs import salt.minion import salt.fileclient import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.path import salt.utils.templates import salt.utils.url import salt.crypt import salt.transport.client from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) __proxyenabled__ = ['*'] def _auth(): ''' Return the auth object ''' if 'auth' not in __context__: __context__['auth'] = salt.crypt.SAuth(__opts__) return __context__['auth'] def _gather_pillar(pillarenv, pillar_override): ''' Whenever a state run starts, gather the pillar data fresh ''' pillar = salt.pillar.get_pillar( __opts__, __grains__, __opts__['id'], __opts__['saltenv'], pillar_override=pillar_override, pillarenv=pillarenv ) ret = pillar.compile_pillar() if pillar_override and isinstance(pillar_override, dict): ret.update(pillar_override) return ret def recv(files, dest): ''' Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI. 
''' ret = {} for path, data in six.iteritems(files): if os.path.basename(path) == os.path.basename(dest) \ and not os.path.isdir(dest): final = dest elif os.path.isdir(dest): final = os.path.join(dest, os.path.basename(path)) elif os.path.isdir(os.path.dirname(dest)): final = dest else: return 'Destination unavailable' try: with salt.utils.files.fopen(final, 'w+') as fp_: fp_.write(data) ret[final] = True except IOError: ret[final] = False return ret def recv_chunked(dest, chunk, append=False, compressed=True, mode=None): ''' This function receives files copied to the minion using ``salt-cp`` and is not intended to be used directly on the CLI. ''' if 'retcode' not in __context__: __context__['retcode'] = 0 def _error(msg): __context__['retcode'] = 1 return msg if chunk is None: # dest is an empty dir and needs to be created try: os.makedirs(dest) except OSError as exc: if exc.errno == errno.EEXIST: if os.path.isfile(dest): return 'Path exists and is a file' else: return _error(exc.__str__()) return True chunk = base64.b64decode(chunk) open_mode = 'ab' if append else 'wb' try: fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 except (IOError, OSError) as exc: if exc.errno != errno.ENOENT: # Parent dir does not exist, we need to create it return _error(exc.__str__()) try: os.makedirs(os.path.dirname(dest)) except (IOError, OSError) as makedirs_exc: # Failed to make directory return _error(makedirs_exc.__str__()) fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 try: # Write the chunk to disk fh_.write(salt.utils.gzip_util.uncompress(chunk) if compressed else chunk) except (IOError, OSError) as exc: # Write failed return _error(exc.__str__()) else: # Write successful if not append and mode is not None: # If this is the first chunk we're writing, set the mode #log.debug('Setting mode for %s to %s', dest, oct(mode)) log.debug('Setting mode for %s to %s', dest, mode) try: os.chmod(dest, mode) except OSError: return 
_error(exc.__str__()) return True finally: try: fh_.close() except AttributeError: pass def _mk_client(): ''' Create a file client and add it to the context. Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict ''' if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__: __context__['cp.fileclient_{0}'.format(id(__opts__))] = \ salt.fileclient.get_file_client(__opts__) def _client(): ''' Return a client, hashed by the list of masters ''' _mk_client() return __context__['cp.fileclient_{0}'.format(id(__opts__))] def _render_filenames(path, dest, saltenv, template, **kw): ''' Process markup in the :param:`path` and :param:`dest` variables (NOT the files under the paths they ultimately point to) according to the markup format provided by :param:`template`. ''' if not template: return (path, dest) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ if 'pillarenv' in kw or 'pillar' in kw: pillarenv = kw.get('pillarenv', __opts__.get('pillarenv')) kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar')) else: kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): ''' Render :param:`contents` into a literal pathname by writing it to a temp file, rendering that file, and returning the result. 
''' # write out path to temp file tmp_path_fn = salt.utils.files.mkstemp() with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(contents)) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.files.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: {0}'.format( data['data'] ) ) else: return data['data'] path = _render(path) dest = _render(dest) return (path, dest) def get_file(path, dest, saltenv='base', makedirs=False, template=None, gzip=None, **kwargs): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not hash_file(path, saltenv): return '' else: return _client().get_file( path, dest, makedirs, saltenv, gzip) def envs(): ''' List available environments for fileserver CLI Example .. code-block:: bash salt '*' cp.envs ''' return _client().envs() def get_template(path, dest, template='jinja', saltenv='base', makedirs=False, **kwargs): ''' Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest ''' if 'salt' not in kwargs: kwargs['salt'] = __salt__ if 'pillar' not in kwargs: kwargs['pillar'] = __pillar__ if 'grains' not in kwargs: kwargs['grains'] = __grains__ if 'opts' not in kwargs: kwargs['opts'] = __opts__ return _client().get_template( path, dest, template, makedirs, saltenv, **kwargs) def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): ''' Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) return _client().get_dir(path, dest, saltenv, gzip) def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. 
Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. dest The default behaviour is to write the fetched file to the given destination path. If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error('Unable to fetch file %s from saltenv %s.', salt.utils.url.redact_http_basic_auth(path), saltenv) return result def cache_file(path, saltenv='base', source_hash=None): ''' Used to cache a single file on the Minion Returns the location of the new cached file on the Minion source_hash If ``name`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' path = salt.utils.data.decode(path) saltenv = salt.utils.data.decode(saltenv) contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: # Prevent multiple caches in the same salt run. 
Affects remote URLs # since the master won't know their hash, so the fileclient # wouldn't be able to prevent multiple caches if we try to cache # the remote URL more than once. if os.path.isfile(__context__[contextkey]): return __context__[contextkey] else: # File is in __context__ but no longer exists in the minion # cache, get rid of the context key and re-cache below. # Accounts for corner case where file is removed from minion # cache between cp.cache_file calls in the same salt-run. __context__.pop(contextkey) except AttributeError: pass path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv result = _client().cache_file(path, saltenv, source_hash=source_hash) if not result: log.error( 'Unable to cache file \'%s\' from saltenv \'%s\'.', path, saltenv ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent # multiple caches (see above). __context__[contextkey] = result return result def cache_dest(url, saltenv='base'): ''' .. versionadded:: Neon Returns the expected cache path for the file, if cached using :py:func:`cp.cache_file <salt.modules.cp.cache_file>`. .. note:: This only returns the _expected_ path, it does not tell you if the URL is really cached. To check if the URL is cached, use :py:func:`cp.is_cached <salt.modules.cp.is_cached>` instead. CLI Examples: .. code-block:: bash salt '*' cp.cache_dest https://foo.com/bar.rpm salt '*' cp.cache_dest salt://my/file salt '*' cp.cache_dest salt://my/file saltenv=dev ''' return _client().cache_dest(url, saltenv) def cache_files(paths, saltenv='base'): ''' Used to gather many files from the Master, the gathered files will be saved in the minion cachedir reflective to the paths retrieved from the Master CLI Example: .. code-block:: bash salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1 There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the files. 
One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config The querystring method is less useful when all files are being cached from the same environment, but is a good way of caching files from multiple different environments in the same command. For example, the below command will cache the first file from the ``config1`` environment, and the second one from the ``config2`` environment. .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2 .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' return _client().cache_files(paths, saltenv) def cache_dir(path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None): ''' Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. 
code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$' ''' return _client().cache_dir( path, saltenv, include_empty, include_pat, exclude_pat ) def cache_master(saltenv='base'): ''' Retrieve all of the files on the master and cache them locally CLI Example: .. code-block:: bash salt '*' cp.cache_master ''' return _client().cache_master(saltenv) def cache_local_file(path): ''' Cache a local file on the minion in the localfiles cache CLI Example: .. code-block:: bash salt '*' cp.cache_local_file /etc/hosts ''' if not os.path.exists(path): return '' path_cached = is_cached(path) # If the file has already been cached, return the path if path_cached: path_hash = hash_file(path) path_cached_hash = hash_file(path_cached) if path_hash['hsum'] == path_cached_hash['hsum']: return path_cached # The file hasn't been cached or has changed; cache it return _client().cache_local_file(path) def list_states(saltenv='base'): ''' List all of the available state modules in an environment CLI Example: .. code-block:: bash salt '*' cp.list_states ''' return _client().list_states(saltenv) def list_master(saltenv='base', prefix=''): ''' List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ''' return _client().file_list(saltenv, prefix) def list_master_dirs(saltenv='base', prefix=''): ''' List all of the directories stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_dirs ''' return _client().dir_list(saltenv, prefix) def list_master_symlinks(saltenv='base', prefix=''): ''' List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ''' return _client().symlink_list(saltenv, prefix) def list_minion(saltenv='base'): ''' List all of the files cached on the minion CLI Example: .. 
code-block:: bash salt '*' cp.list_minion ''' return _client().file_local_list(saltenv) def is_cached(path, saltenv='base'): ''' Return a boolean if the given path on the master has been cached on the minion CLI Example: .. code-block:: bash salt '*' cp.is_cached salt://path/to/file ''' return _client().is_cached(path, saltenv) def hash_file(path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.hash_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv return _client().hash_file(path, saltenv) def stat_file(path, saltenv='base', octal=True): ''' Return the permissions of a file, to get the permissions of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.stat_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv stat = _client().hash_and_stat_file(path, saltenv)[1] if stat is None: return stat return salt.utils.files.st_mode_to_octal(stat[0]) if octal is True else stat[0] def push(path, keep_symlinks=False, upload_path=None, remove_source=False): ''' WARNING Files pushed to the master will have global read permissions.. Push a file from the minion up to the master, the file will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``) Since this feature allows a minion to push a file up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. 
keep_symlinks Keep the path value without resolving its canonical form upload_path Provide a different path inside the master's minion files cachedir remove_source Remove the source file on the minion .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' cp.push /etc/fstab salt '*' cp.push /etc/system-release keep_symlinks=True salt '*' cp.push /etc/fstab upload_path='/new/path/fstab' salt '*' cp.push /tmp/filename remove_source=True ''' log.debug('Trying to copy \'%s\' to master', path) if '../' in path or not os.path.isabs(path): log.debug('Path must be absolute, returning False') return False if not keep_symlinks: path = os.path.realpath(path) if not os.path.isfile(path): log.debug('Path failed os.path.isfile check, returning False') return False auth = _auth() if upload_path: if '../' in upload_path: log.debug('Path must be absolute, returning False') log.debug('Bad path: %s', upload_path) return False load_path = upload_path.lstrip(os.sep) else: load_path = path.lstrip(os.sep) # Normalize the path. 
This does not eliminate # the possibility that relative entries will still be present load_path_normal = os.path.normpath(load_path) # If this is Windows and a drive letter is present, remove it load_path_split_drive = os.path.splitdrive(load_path_normal)[1] # Finally, split the remaining path into a list for delivery to the master load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f] load = {'cmd': '_file_recv', 'id': __opts__['id'], 'path': load_path_list, 'size': os.path.getsize(path), 'tok': auth.gen_token(b'salt')} channel = salt.transport.client.ReqChannel.factory(__opts__) try: with salt.utils.files.fopen(path, 'rb') as fp_: init_send = False while True: load['loc'] = fp_.tell() load['data'] = fp_.read(__opts__['file_buffer_size']) if not load['data'] and init_send: if remove_source: try: salt.utils.files.rm_rf(path) log.debug('Removing source file \'%s\'', path) except IOError: log.error('cp.push failed to remove file \'%s\'', path) return False return True ret = channel.send(load) if not ret: log.error('cp.push Failed transfer failed. Ensure master has ' '\'file_recv\' set to \'True\' and that the file ' 'is not larger than the \'file_recv_size_max\' ' 'setting on the master.') return ret init_send = True finally: channel.close() def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. 
code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in salt.utils.path.os_walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
saltstack/salt
salt/modules/cp.py
cache_file
python
def cache_file(path, saltenv='base', source_hash=None): ''' Used to cache a single file on the Minion Returns the location of the new cached file on the Minion source_hash If ``name`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' path = salt.utils.data.decode(path) saltenv = salt.utils.data.decode(saltenv) contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: # Prevent multiple caches in the same salt run. Affects remote URLs # since the master won't know their hash, so the fileclient # wouldn't be able to prevent multiple caches if we try to cache # the remote URL more than once. if os.path.isfile(__context__[contextkey]): return __context__[contextkey] else: # File is in __context__ but no longer exists in the minion # cache, get rid of the context key and re-cache below. # Accounts for corner case where file is removed from minion # cache between cp.cache_file calls in the same salt-run. 
__context__.pop(contextkey) except AttributeError: pass path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv result = _client().cache_file(path, saltenv, source_hash=source_hash) if not result: log.error( 'Unable to cache file \'%s\' from saltenv \'%s\'.', path, saltenv ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent # multiple caches (see above). __context__[contextkey] = result return result
Used to cache a single file on the Minion Returns the location of the new cached file on the Minion source_hash If ``name`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L452-L525
[ "def decode(data, encoding=None, errors='strict', keep=False,\n normalize=False, preserve_dict_class=False, preserve_tuples=False,\n to_str=False):\n '''\n Generic function which will decode whichever type is passed, if necessary.\n Optionally use to_str=True to ensure strings are str types and not unicode\n on Python 2.\n\n If `strict` is True, and `keep` is False, and we fail to decode, a\n UnicodeDecodeError will be raised. Passing `keep` as True allows for the\n original value to silently be returned in cases where decoding fails. This\n can be useful for cases where the data passed to this function is likely to\n contain binary blobs, such as in the case of cp.recv.\n\n If `normalize` is True, then unicodedata.normalize() will be used to\n normalize unicode strings down to a single code point per glyph. It is\n recommended not to normalize unless you know what you're doing. For\n instance, if `data` contains a dictionary, it is possible that normalizing\n will lead to data loss because the following two strings will normalize to\n the same value:\n\n - u'\\\\u044f\\\\u0438\\\\u0306\\\\u0446\\\\u0430.txt'\n - u'\\\\u044f\\\\u0439\\\\u0446\\\\u0430.txt'\n\n One good use case for normalization is in the test suite. For example, on\n some platforms such as Mac OS, os.listdir() will produce the first of the\n two strings above, in which \"й\" is represented as two code points (i.e. one\n for the base character, and one for the breve mark). 
Normalizing allows for\n a more reliable test case.\n '''\n _decode_func = salt.utils.stringutils.to_unicode \\\n if not to_str \\\n else salt.utils.stringutils.to_str\n if isinstance(data, Mapping):\n return decode_dict(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, list):\n return decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, tuple):\n return decode_tuple(data, encoding, errors, keep, normalize,\n preserve_dict_class, to_str) \\\n if preserve_tuples \\\n else decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n else:\n try:\n data = _decode_func(data, encoding, errors, normalize)\n except TypeError:\n # to_unicode raises a TypeError when input is not a\n # string/bytestring/bytearray. This is expected and simply means we\n # are going to leave the value as-is.\n pass\n except UnicodeDecodeError:\n if not keep:\n raise\n return data\n", "def _client():\n '''\n Return a client, hashed by the list of masters\n '''\n _mk_client()\n return __context__['cp.fileclient_{0}'.format(id(__opts__))]\n", "def split_env(url):\n '''\n remove the saltenv query parameter from a 'salt://' url\n '''\n if not url.startswith('salt://'):\n return url, None\n\n path, senv = parse(url)\n return create(path), senv\n" ]
# -*- coding: utf-8 -*- ''' Minion side functions for salt-cp ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import base64 import errno import os import logging import fnmatch # Import salt libs import salt.minion import salt.fileclient import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.path import salt.utils.templates import salt.utils.url import salt.crypt import salt.transport.client from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) __proxyenabled__ = ['*'] def _auth(): ''' Return the auth object ''' if 'auth' not in __context__: __context__['auth'] = salt.crypt.SAuth(__opts__) return __context__['auth'] def _gather_pillar(pillarenv, pillar_override): ''' Whenever a state run starts, gather the pillar data fresh ''' pillar = salt.pillar.get_pillar( __opts__, __grains__, __opts__['id'], __opts__['saltenv'], pillar_override=pillar_override, pillarenv=pillarenv ) ret = pillar.compile_pillar() if pillar_override and isinstance(pillar_override, dict): ret.update(pillar_override) return ret def recv(files, dest): ''' Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI. 
''' ret = {} for path, data in six.iteritems(files): if os.path.basename(path) == os.path.basename(dest) \ and not os.path.isdir(dest): final = dest elif os.path.isdir(dest): final = os.path.join(dest, os.path.basename(path)) elif os.path.isdir(os.path.dirname(dest)): final = dest else: return 'Destination unavailable' try: with salt.utils.files.fopen(final, 'w+') as fp_: fp_.write(data) ret[final] = True except IOError: ret[final] = False return ret def recv_chunked(dest, chunk, append=False, compressed=True, mode=None): ''' This function receives files copied to the minion using ``salt-cp`` and is not intended to be used directly on the CLI. ''' if 'retcode' not in __context__: __context__['retcode'] = 0 def _error(msg): __context__['retcode'] = 1 return msg if chunk is None: # dest is an empty dir and needs to be created try: os.makedirs(dest) except OSError as exc: if exc.errno == errno.EEXIST: if os.path.isfile(dest): return 'Path exists and is a file' else: return _error(exc.__str__()) return True chunk = base64.b64decode(chunk) open_mode = 'ab' if append else 'wb' try: fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 except (IOError, OSError) as exc: if exc.errno != errno.ENOENT: # Parent dir does not exist, we need to create it return _error(exc.__str__()) try: os.makedirs(os.path.dirname(dest)) except (IOError, OSError) as makedirs_exc: # Failed to make directory return _error(makedirs_exc.__str__()) fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 try: # Write the chunk to disk fh_.write(salt.utils.gzip_util.uncompress(chunk) if compressed else chunk) except (IOError, OSError) as exc: # Write failed return _error(exc.__str__()) else: # Write successful if not append and mode is not None: # If this is the first chunk we're writing, set the mode #log.debug('Setting mode for %s to %s', dest, oct(mode)) log.debug('Setting mode for %s to %s', dest, mode) try: os.chmod(dest, mode) except OSError: return 
_error(exc.__str__()) return True finally: try: fh_.close() except AttributeError: pass def _mk_client(): ''' Create a file client and add it to the context. Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict ''' if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__: __context__['cp.fileclient_{0}'.format(id(__opts__))] = \ salt.fileclient.get_file_client(__opts__) def _client(): ''' Return a client, hashed by the list of masters ''' _mk_client() return __context__['cp.fileclient_{0}'.format(id(__opts__))] def _render_filenames(path, dest, saltenv, template, **kw): ''' Process markup in the :param:`path` and :param:`dest` variables (NOT the files under the paths they ultimately point to) according to the markup format provided by :param:`template`. ''' if not template: return (path, dest) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ if 'pillarenv' in kw or 'pillar' in kw: pillarenv = kw.get('pillarenv', __opts__.get('pillarenv')) kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar')) else: kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): ''' Render :param:`contents` into a literal pathname by writing it to a temp file, rendering that file, and returning the result. 
''' # write out path to temp file tmp_path_fn = salt.utils.files.mkstemp() with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(contents)) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.files.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: {0}'.format( data['data'] ) ) else: return data['data'] path = _render(path) dest = _render(dest) return (path, dest) def get_file(path, dest, saltenv='base', makedirs=False, template=None, gzip=None, **kwargs): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not hash_file(path, saltenv): return '' else: return _client().get_file( path, dest, makedirs, saltenv, gzip) def envs(): ''' List available environments for fileserver CLI Example .. code-block:: bash salt '*' cp.envs ''' return _client().envs() def get_template(path, dest, template='jinja', saltenv='base', makedirs=False, **kwargs): ''' Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest ''' if 'salt' not in kwargs: kwargs['salt'] = __salt__ if 'pillar' not in kwargs: kwargs['pillar'] = __pillar__ if 'grains' not in kwargs: kwargs['grains'] = __grains__ if 'opts' not in kwargs: kwargs['opts'] = __opts__ return _client().get_template( path, dest, template, makedirs, saltenv, **kwargs) def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): ''' Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) return _client().get_dir(path, dest, saltenv, gzip) def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. 
Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. dest The default behaviour is to write the fetched file to the given destination path. If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error('Unable to fetch file %s from saltenv %s.', salt.utils.url.redact_http_basic_auth(path), saltenv) return result def get_file_str(path, saltenv='base'): ''' Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. code-block:: bash salt '*' cp.get_file_str salt://my/file ''' fn_ = cache_file(path, saltenv) if isinstance(fn_, six.string_types): try: with salt.utils.files.fopen(fn_, 'r') as fp_: return fp_.read() except IOError: return False return fn_ def cache_dest(url, saltenv='base'): ''' .. versionadded:: Neon Returns the expected cache path for the file, if cached using :py:func:`cp.cache_file <salt.modules.cp.cache_file>`. .. note:: This only returns the _expected_ path, it does not tell you if the URL is really cached. To check if the URL is cached, use :py:func:`cp.is_cached <salt.modules.cp.is_cached>` instead. CLI Examples: .. code-block:: bash salt '*' cp.cache_dest https://foo.com/bar.rpm salt '*' cp.cache_dest salt://my/file salt '*' cp.cache_dest salt://my/file saltenv=dev ''' return _client().cache_dest(url, saltenv) def cache_files(paths, saltenv='base'): ''' Used to gather many files from the Master, the gathered files will be saved in the minion cachedir reflective to the paths retrieved from the Master CLI Example: .. code-block:: bash salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1 There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the files. 
One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config The querystring method is less useful when all files are being cached from the same environment, but is a good way of caching files from multiple different environments in the same command. For example, the below command will cache the first file from the ``config1`` environment, and the second one from the ``config2`` environment. .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2 .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' return _client().cache_files(paths, saltenv) def cache_dir(path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None): ''' Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. 
code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$' ''' return _client().cache_dir( path, saltenv, include_empty, include_pat, exclude_pat ) def cache_master(saltenv='base'): ''' Retrieve all of the files on the master and cache them locally CLI Example: .. code-block:: bash salt '*' cp.cache_master ''' return _client().cache_master(saltenv) def cache_local_file(path): ''' Cache a local file on the minion in the localfiles cache CLI Example: .. code-block:: bash salt '*' cp.cache_local_file /etc/hosts ''' if not os.path.exists(path): return '' path_cached = is_cached(path) # If the file has already been cached, return the path if path_cached: path_hash = hash_file(path) path_cached_hash = hash_file(path_cached) if path_hash['hsum'] == path_cached_hash['hsum']: return path_cached # The file hasn't been cached or has changed; cache it return _client().cache_local_file(path) def list_states(saltenv='base'): ''' List all of the available state modules in an environment CLI Example: .. code-block:: bash salt '*' cp.list_states ''' return _client().list_states(saltenv) def list_master(saltenv='base', prefix=''): ''' List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ''' return _client().file_list(saltenv, prefix) def list_master_dirs(saltenv='base', prefix=''): ''' List all of the directories stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_dirs ''' return _client().dir_list(saltenv, prefix) def list_master_symlinks(saltenv='base', prefix=''): ''' List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ''' return _client().symlink_list(saltenv, prefix) def list_minion(saltenv='base'): ''' List all of the files cached on the minion CLI Example: .. 
code-block:: bash salt '*' cp.list_minion ''' return _client().file_local_list(saltenv) def is_cached(path, saltenv='base'): ''' Return a boolean if the given path on the master has been cached on the minion CLI Example: .. code-block:: bash salt '*' cp.is_cached salt://path/to/file ''' return _client().is_cached(path, saltenv) def hash_file(path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.hash_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv return _client().hash_file(path, saltenv) def stat_file(path, saltenv='base', octal=True): ''' Return the permissions of a file, to get the permissions of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.stat_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv stat = _client().hash_and_stat_file(path, saltenv)[1] if stat is None: return stat return salt.utils.files.st_mode_to_octal(stat[0]) if octal is True else stat[0] def push(path, keep_symlinks=False, upload_path=None, remove_source=False): ''' WARNING Files pushed to the master will have global read permissions.. Push a file from the minion up to the master, the file will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``) Since this feature allows a minion to push a file up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. 
keep_symlinks Keep the path value without resolving its canonical form upload_path Provide a different path inside the master's minion files cachedir remove_source Remove the source file on the minion .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' cp.push /etc/fstab salt '*' cp.push /etc/system-release keep_symlinks=True salt '*' cp.push /etc/fstab upload_path='/new/path/fstab' salt '*' cp.push /tmp/filename remove_source=True ''' log.debug('Trying to copy \'%s\' to master', path) if '../' in path or not os.path.isabs(path): log.debug('Path must be absolute, returning False') return False if not keep_symlinks: path = os.path.realpath(path) if not os.path.isfile(path): log.debug('Path failed os.path.isfile check, returning False') return False auth = _auth() if upload_path: if '../' in upload_path: log.debug('Path must be absolute, returning False') log.debug('Bad path: %s', upload_path) return False load_path = upload_path.lstrip(os.sep) else: load_path = path.lstrip(os.sep) # Normalize the path. 
This does not eliminate # the possibility that relative entries will still be present load_path_normal = os.path.normpath(load_path) # If this is Windows and a drive letter is present, remove it load_path_split_drive = os.path.splitdrive(load_path_normal)[1] # Finally, split the remaining path into a list for delivery to the master load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f] load = {'cmd': '_file_recv', 'id': __opts__['id'], 'path': load_path_list, 'size': os.path.getsize(path), 'tok': auth.gen_token(b'salt')} channel = salt.transport.client.ReqChannel.factory(__opts__) try: with salt.utils.files.fopen(path, 'rb') as fp_: init_send = False while True: load['loc'] = fp_.tell() load['data'] = fp_.read(__opts__['file_buffer_size']) if not load['data'] and init_send: if remove_source: try: salt.utils.files.rm_rf(path) log.debug('Removing source file \'%s\'', path) except IOError: log.error('cp.push failed to remove file \'%s\'', path) return False return True ret = channel.send(load) if not ret: log.error('cp.push Failed transfer failed. Ensure master has ' '\'file_recv\' set to \'True\' and that the file ' 'is not larger than the \'file_recv_size_max\' ' 'setting on the master.') return ret init_send = True finally: channel.close() def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. 
code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in salt.utils.path.os_walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
saltstack/salt
salt/modules/cp.py
cache_dir
python
def cache_dir(path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None): ''' Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$' ''' return _client().cache_dir( path, saltenv, include_empty, include_pat, exclude_pat )
Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L590-L625
[ "def _client():\n '''\n Return a client, hashed by the list of masters\n '''\n _mk_client()\n return __context__['cp.fileclient_{0}'.format(id(__opts__))]\n" ]
# -*- coding: utf-8 -*- ''' Minion side functions for salt-cp ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import base64 import errno import os import logging import fnmatch # Import salt libs import salt.minion import salt.fileclient import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.path import salt.utils.templates import salt.utils.url import salt.crypt import salt.transport.client from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) __proxyenabled__ = ['*'] def _auth(): ''' Return the auth object ''' if 'auth' not in __context__: __context__['auth'] = salt.crypt.SAuth(__opts__) return __context__['auth'] def _gather_pillar(pillarenv, pillar_override): ''' Whenever a state run starts, gather the pillar data fresh ''' pillar = salt.pillar.get_pillar( __opts__, __grains__, __opts__['id'], __opts__['saltenv'], pillar_override=pillar_override, pillarenv=pillarenv ) ret = pillar.compile_pillar() if pillar_override and isinstance(pillar_override, dict): ret.update(pillar_override) return ret def recv(files, dest): ''' Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI. 
''' ret = {} for path, data in six.iteritems(files): if os.path.basename(path) == os.path.basename(dest) \ and not os.path.isdir(dest): final = dest elif os.path.isdir(dest): final = os.path.join(dest, os.path.basename(path)) elif os.path.isdir(os.path.dirname(dest)): final = dest else: return 'Destination unavailable' try: with salt.utils.files.fopen(final, 'w+') as fp_: fp_.write(data) ret[final] = True except IOError: ret[final] = False return ret def recv_chunked(dest, chunk, append=False, compressed=True, mode=None): ''' This function receives files copied to the minion using ``salt-cp`` and is not intended to be used directly on the CLI. ''' if 'retcode' not in __context__: __context__['retcode'] = 0 def _error(msg): __context__['retcode'] = 1 return msg if chunk is None: # dest is an empty dir and needs to be created try: os.makedirs(dest) except OSError as exc: if exc.errno == errno.EEXIST: if os.path.isfile(dest): return 'Path exists and is a file' else: return _error(exc.__str__()) return True chunk = base64.b64decode(chunk) open_mode = 'ab' if append else 'wb' try: fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 except (IOError, OSError) as exc: if exc.errno != errno.ENOENT: # Parent dir does not exist, we need to create it return _error(exc.__str__()) try: os.makedirs(os.path.dirname(dest)) except (IOError, OSError) as makedirs_exc: # Failed to make directory return _error(makedirs_exc.__str__()) fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 try: # Write the chunk to disk fh_.write(salt.utils.gzip_util.uncompress(chunk) if compressed else chunk) except (IOError, OSError) as exc: # Write failed return _error(exc.__str__()) else: # Write successful if not append and mode is not None: # If this is the first chunk we're writing, set the mode #log.debug('Setting mode for %s to %s', dest, oct(mode)) log.debug('Setting mode for %s to %s', dest, mode) try: os.chmod(dest, mode) except OSError: return 
_error(exc.__str__()) return True finally: try: fh_.close() except AttributeError: pass def _mk_client(): ''' Create a file client and add it to the context. Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict ''' if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__: __context__['cp.fileclient_{0}'.format(id(__opts__))] = \ salt.fileclient.get_file_client(__opts__) def _client(): ''' Return a client, hashed by the list of masters ''' _mk_client() return __context__['cp.fileclient_{0}'.format(id(__opts__))] def _render_filenames(path, dest, saltenv, template, **kw): ''' Process markup in the :param:`path` and :param:`dest` variables (NOT the files under the paths they ultimately point to) according to the markup format provided by :param:`template`. ''' if not template: return (path, dest) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ if 'pillarenv' in kw or 'pillar' in kw: pillarenv = kw.get('pillarenv', __opts__.get('pillarenv')) kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar')) else: kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): ''' Render :param:`contents` into a literal pathname by writing it to a temp file, rendering that file, and returning the result. 
''' # write out path to temp file tmp_path_fn = salt.utils.files.mkstemp() with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(contents)) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.files.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: {0}'.format( data['data'] ) ) else: return data['data'] path = _render(path) dest = _render(dest) return (path, dest) def get_file(path, dest, saltenv='base', makedirs=False, template=None, gzip=None, **kwargs): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not hash_file(path, saltenv): return '' else: return _client().get_file( path, dest, makedirs, saltenv, gzip) def envs(): ''' List available environments for fileserver CLI Example .. code-block:: bash salt '*' cp.envs ''' return _client().envs() def get_template(path, dest, template='jinja', saltenv='base', makedirs=False, **kwargs): ''' Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest ''' if 'salt' not in kwargs: kwargs['salt'] = __salt__ if 'pillar' not in kwargs: kwargs['pillar'] = __pillar__ if 'grains' not in kwargs: kwargs['grains'] = __grains__ if 'opts' not in kwargs: kwargs['opts'] = __opts__ return _client().get_template( path, dest, template, makedirs, saltenv, **kwargs) def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): ''' Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) return _client().get_dir(path, dest, saltenv, gzip) def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. 
Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. dest The default behaviour is to write the fetched file to the given destination path. If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error('Unable to fetch file %s from saltenv %s.', salt.utils.url.redact_http_basic_auth(path), saltenv) return result def get_file_str(path, saltenv='base'): ''' Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. code-block:: bash salt '*' cp.get_file_str salt://my/file ''' fn_ = cache_file(path, saltenv) if isinstance(fn_, six.string_types): try: with salt.utils.files.fopen(fn_, 'r') as fp_: return fp_.read() except IOError: return False return fn_ def cache_file(path, saltenv='base', source_hash=None): ''' Used to cache a single file on the Minion Returns the location of the new cached file on the Minion source_hash If ``name`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. 
note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' path = salt.utils.data.decode(path) saltenv = salt.utils.data.decode(saltenv) contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: # Prevent multiple caches in the same salt run. Affects remote URLs # since the master won't know their hash, so the fileclient # wouldn't be able to prevent multiple caches if we try to cache # the remote URL more than once. if os.path.isfile(__context__[contextkey]): return __context__[contextkey] else: # File is in __context__ but no longer exists in the minion # cache, get rid of the context key and re-cache below. # Accounts for corner case where file is removed from minion # cache between cp.cache_file calls in the same salt-run. __context__.pop(contextkey) except AttributeError: pass path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv result = _client().cache_file(path, saltenv, source_hash=source_hash) if not result: log.error( 'Unable to cache file \'%s\' from saltenv \'%s\'.', path, saltenv ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent # multiple caches (see above). __context__[contextkey] = result return result def cache_dest(url, saltenv='base'): ''' .. versionadded:: Neon Returns the expected cache path for the file, if cached using :py:func:`cp.cache_file <salt.modules.cp.cache_file>`. .. note:: This only returns the _expected_ path, it does not tell you if the URL is really cached. To check if the URL is cached, use :py:func:`cp.is_cached <salt.modules.cp.is_cached>` instead. CLI Examples: .. 
code-block:: bash salt '*' cp.cache_dest https://foo.com/bar.rpm salt '*' cp.cache_dest salt://my/file salt '*' cp.cache_dest salt://my/file saltenv=dev ''' return _client().cache_dest(url, saltenv) def cache_files(paths, saltenv='base'): ''' Used to gather many files from the Master, the gathered files will be saved in the minion cachedir reflective to the paths retrieved from the Master CLI Example: .. code-block:: bash salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1 There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the files. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config The querystring method is less useful when all files are being cached from the same environment, but is a good way of caching files from multiple different environments in the same command. For example, the below command will cache the first file from the ``config1`` environment, and the second one from the ``config2`` environment. .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2 .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' return _client().cache_files(paths, saltenv) def cache_master(saltenv='base'): ''' Retrieve all of the files on the master and cache them locally CLI Example: .. code-block:: bash salt '*' cp.cache_master ''' return _client().cache_master(saltenv) def cache_local_file(path): ''' Cache a local file on the minion in the localfiles cache CLI Example: .. 
code-block:: bash salt '*' cp.cache_local_file /etc/hosts ''' if not os.path.exists(path): return '' path_cached = is_cached(path) # If the file has already been cached, return the path if path_cached: path_hash = hash_file(path) path_cached_hash = hash_file(path_cached) if path_hash['hsum'] == path_cached_hash['hsum']: return path_cached # The file hasn't been cached or has changed; cache it return _client().cache_local_file(path) def list_states(saltenv='base'): ''' List all of the available state modules in an environment CLI Example: .. code-block:: bash salt '*' cp.list_states ''' return _client().list_states(saltenv) def list_master(saltenv='base', prefix=''): ''' List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ''' return _client().file_list(saltenv, prefix) def list_master_dirs(saltenv='base', prefix=''): ''' List all of the directories stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_dirs ''' return _client().dir_list(saltenv, prefix) def list_master_symlinks(saltenv='base', prefix=''): ''' List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ''' return _client().symlink_list(saltenv, prefix) def list_minion(saltenv='base'): ''' List all of the files cached on the minion CLI Example: .. code-block:: bash salt '*' cp.list_minion ''' return _client().file_local_list(saltenv) def is_cached(path, saltenv='base'): ''' Return a boolean if the given path on the master has been cached on the minion CLI Example: .. code-block:: bash salt '*' cp.is_cached salt://path/to/file ''' return _client().is_cached(path, saltenv) def hash_file(path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. 
code-block:: bash salt '*' cp.hash_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv return _client().hash_file(path, saltenv) def stat_file(path, saltenv='base', octal=True): ''' Return the permissions of a file, to get the permissions of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.stat_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv stat = _client().hash_and_stat_file(path, saltenv)[1] if stat is None: return stat return salt.utils.files.st_mode_to_octal(stat[0]) if octal is True else stat[0] def push(path, keep_symlinks=False, upload_path=None, remove_source=False): ''' WARNING Files pushed to the master will have global read permissions.. Push a file from the minion up to the master, the file will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``) Since this feature allows a minion to push a file up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. keep_symlinks Keep the path value without resolving its canonical form upload_path Provide a different path inside the master's minion files cachedir remove_source Remove the source file on the minion .. versionadded:: 2016.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.push /etc/fstab salt '*' cp.push /etc/system-release keep_symlinks=True salt '*' cp.push /etc/fstab upload_path='/new/path/fstab' salt '*' cp.push /tmp/filename remove_source=True ''' log.debug('Trying to copy \'%s\' to master', path) if '../' in path or not os.path.isabs(path): log.debug('Path must be absolute, returning False') return False if not keep_symlinks: path = os.path.realpath(path) if not os.path.isfile(path): log.debug('Path failed os.path.isfile check, returning False') return False auth = _auth() if upload_path: if '../' in upload_path: log.debug('Path must be absolute, returning False') log.debug('Bad path: %s', upload_path) return False load_path = upload_path.lstrip(os.sep) else: load_path = path.lstrip(os.sep) # Normalize the path. This does not eliminate # the possibility that relative entries will still be present load_path_normal = os.path.normpath(load_path) # If this is Windows and a drive letter is present, remove it load_path_split_drive = os.path.splitdrive(load_path_normal)[1] # Finally, split the remaining path into a list for delivery to the master load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f] load = {'cmd': '_file_recv', 'id': __opts__['id'], 'path': load_path_list, 'size': os.path.getsize(path), 'tok': auth.gen_token(b'salt')} channel = salt.transport.client.ReqChannel.factory(__opts__) try: with salt.utils.files.fopen(path, 'rb') as fp_: init_send = False while True: load['loc'] = fp_.tell() load['data'] = fp_.read(__opts__['file_buffer_size']) if not load['data'] and init_send: if remove_source: try: salt.utils.files.rm_rf(path) log.debug('Removing source file \'%s\'', path) except IOError: log.error('cp.push failed to remove file \'%s\'', path) return False return True ret = channel.send(load) if not ret: log.error('cp.push Failed transfer failed. 
Ensure master has ' '\'file_recv\' set to \'True\' and that the file ' 'is not larger than the \'file_recv_size_max\' ' 'setting on the master.') return ret init_send = True finally: channel.close() def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in salt.utils.path.os_walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
saltstack/salt
salt/modules/cp.py
cache_local_file
python
def cache_local_file(path): ''' Cache a local file on the minion in the localfiles cache CLI Example: .. code-block:: bash salt '*' cp.cache_local_file /etc/hosts ''' if not os.path.exists(path): return '' path_cached = is_cached(path) # If the file has already been cached, return the path if path_cached: path_hash = hash_file(path) path_cached_hash = hash_file(path_cached) if path_hash['hsum'] == path_cached_hash['hsum']: return path_cached # The file hasn't been cached or has changed; cache it return _client().cache_local_file(path)
Cache a local file on the minion in the localfiles cache CLI Example: .. code-block:: bash salt '*' cp.cache_local_file /etc/hosts
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L641-L665
[ "def hash_file(path, saltenv='base'):\n '''\n Return the hash of a file, to get the hash of a file on the\n salt master file server prepend the path with salt://<file on server>\n otherwise, prepend the file with / for a local file.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' cp.hash_file salt://path/to/file\n '''\n path, senv = salt.utils.url.split_env(path)\n if senv:\n saltenv = senv\n\n return _client().hash_file(path, saltenv)\n", "def _client():\n '''\n Return a client, hashed by the list of masters\n '''\n _mk_client()\n return __context__['cp.fileclient_{0}'.format(id(__opts__))]\n", "def is_cached(path, saltenv='base'):\n '''\n Return a boolean if the given path on the master has been cached on the\n minion\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' cp.is_cached salt://path/to/file\n '''\n return _client().is_cached(path, saltenv)\n" ]
# -*- coding: utf-8 -*- ''' Minion side functions for salt-cp ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import base64 import errno import os import logging import fnmatch # Import salt libs import salt.minion import salt.fileclient import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.path import salt.utils.templates import salt.utils.url import salt.crypt import salt.transport.client from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) __proxyenabled__ = ['*'] def _auth(): ''' Return the auth object ''' if 'auth' not in __context__: __context__['auth'] = salt.crypt.SAuth(__opts__) return __context__['auth'] def _gather_pillar(pillarenv, pillar_override): ''' Whenever a state run starts, gather the pillar data fresh ''' pillar = salt.pillar.get_pillar( __opts__, __grains__, __opts__['id'], __opts__['saltenv'], pillar_override=pillar_override, pillarenv=pillarenv ) ret = pillar.compile_pillar() if pillar_override and isinstance(pillar_override, dict): ret.update(pillar_override) return ret def recv(files, dest): ''' Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI. 
''' ret = {} for path, data in six.iteritems(files): if os.path.basename(path) == os.path.basename(dest) \ and not os.path.isdir(dest): final = dest elif os.path.isdir(dest): final = os.path.join(dest, os.path.basename(path)) elif os.path.isdir(os.path.dirname(dest)): final = dest else: return 'Destination unavailable' try: with salt.utils.files.fopen(final, 'w+') as fp_: fp_.write(data) ret[final] = True except IOError: ret[final] = False return ret def recv_chunked(dest, chunk, append=False, compressed=True, mode=None): ''' This function receives files copied to the minion using ``salt-cp`` and is not intended to be used directly on the CLI. ''' if 'retcode' not in __context__: __context__['retcode'] = 0 def _error(msg): __context__['retcode'] = 1 return msg if chunk is None: # dest is an empty dir and needs to be created try: os.makedirs(dest) except OSError as exc: if exc.errno == errno.EEXIST: if os.path.isfile(dest): return 'Path exists and is a file' else: return _error(exc.__str__()) return True chunk = base64.b64decode(chunk) open_mode = 'ab' if append else 'wb' try: fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 except (IOError, OSError) as exc: if exc.errno != errno.ENOENT: # Parent dir does not exist, we need to create it return _error(exc.__str__()) try: os.makedirs(os.path.dirname(dest)) except (IOError, OSError) as makedirs_exc: # Failed to make directory return _error(makedirs_exc.__str__()) fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 try: # Write the chunk to disk fh_.write(salt.utils.gzip_util.uncompress(chunk) if compressed else chunk) except (IOError, OSError) as exc: # Write failed return _error(exc.__str__()) else: # Write successful if not append and mode is not None: # If this is the first chunk we're writing, set the mode #log.debug('Setting mode for %s to %s', dest, oct(mode)) log.debug('Setting mode for %s to %s', dest, mode) try: os.chmod(dest, mode) except OSError: return 
_error(exc.__str__()) return True finally: try: fh_.close() except AttributeError: pass def _mk_client(): ''' Create a file client and add it to the context. Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict ''' if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__: __context__['cp.fileclient_{0}'.format(id(__opts__))] = \ salt.fileclient.get_file_client(__opts__) def _client(): ''' Return a client, hashed by the list of masters ''' _mk_client() return __context__['cp.fileclient_{0}'.format(id(__opts__))] def _render_filenames(path, dest, saltenv, template, **kw): ''' Process markup in the :param:`path` and :param:`dest` variables (NOT the files under the paths they ultimately point to) according to the markup format provided by :param:`template`. ''' if not template: return (path, dest) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ if 'pillarenv' in kw or 'pillar' in kw: pillarenv = kw.get('pillarenv', __opts__.get('pillarenv')) kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar')) else: kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): ''' Render :param:`contents` into a literal pathname by writing it to a temp file, rendering that file, and returning the result. 
''' # write out path to temp file tmp_path_fn = salt.utils.files.mkstemp() with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(contents)) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.files.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: {0}'.format( data['data'] ) ) else: return data['data'] path = _render(path) dest = _render(dest) return (path, dest) def get_file(path, dest, saltenv='base', makedirs=False, template=None, gzip=None, **kwargs): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not hash_file(path, saltenv): return '' else: return _client().get_file( path, dest, makedirs, saltenv, gzip) def envs(): ''' List available environments for fileserver CLI Example .. code-block:: bash salt '*' cp.envs ''' return _client().envs() def get_template(path, dest, template='jinja', saltenv='base', makedirs=False, **kwargs): ''' Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest ''' if 'salt' not in kwargs: kwargs['salt'] = __salt__ if 'pillar' not in kwargs: kwargs['pillar'] = __pillar__ if 'grains' not in kwargs: kwargs['grains'] = __grains__ if 'opts' not in kwargs: kwargs['opts'] = __opts__ return _client().get_template( path, dest, template, makedirs, saltenv, **kwargs) def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): ''' Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) return _client().get_dir(path, dest, saltenv, gzip) def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. 
Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. dest The default behaviour is to write the fetched file to the given destination path. If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error('Unable to fetch file %s from saltenv %s.', salt.utils.url.redact_http_basic_auth(path), saltenv) return result def get_file_str(path, saltenv='base'): ''' Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. code-block:: bash salt '*' cp.get_file_str salt://my/file ''' fn_ = cache_file(path, saltenv) if isinstance(fn_, six.string_types): try: with salt.utils.files.fopen(fn_, 'r') as fp_: return fp_.read() except IOError: return False return fn_ def cache_file(path, saltenv='base', source_hash=None): ''' Used to cache a single file on the Minion Returns the location of the new cached file on the Minion source_hash If ``name`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. 
note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' path = salt.utils.data.decode(path) saltenv = salt.utils.data.decode(saltenv) contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: # Prevent multiple caches in the same salt run. Affects remote URLs # since the master won't know their hash, so the fileclient # wouldn't be able to prevent multiple caches if we try to cache # the remote URL more than once. if os.path.isfile(__context__[contextkey]): return __context__[contextkey] else: # File is in __context__ but no longer exists in the minion # cache, get rid of the context key and re-cache below. # Accounts for corner case where file is removed from minion # cache between cp.cache_file calls in the same salt-run. __context__.pop(contextkey) except AttributeError: pass path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv result = _client().cache_file(path, saltenv, source_hash=source_hash) if not result: log.error( 'Unable to cache file \'%s\' from saltenv \'%s\'.', path, saltenv ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent # multiple caches (see above). __context__[contextkey] = result return result def cache_dest(url, saltenv='base'): ''' .. versionadded:: Neon Returns the expected cache path for the file, if cached using :py:func:`cp.cache_file <salt.modules.cp.cache_file>`. .. note:: This only returns the _expected_ path, it does not tell you if the URL is really cached. To check if the URL is cached, use :py:func:`cp.is_cached <salt.modules.cp.is_cached>` instead. CLI Examples: .. 
code-block:: bash salt '*' cp.cache_dest https://foo.com/bar.rpm salt '*' cp.cache_dest salt://my/file salt '*' cp.cache_dest salt://my/file saltenv=dev ''' return _client().cache_dest(url, saltenv) def cache_files(paths, saltenv='base'): ''' Used to gather many files from the Master, the gathered files will be saved in the minion cachedir reflective to the paths retrieved from the Master CLI Example: .. code-block:: bash salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1 There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the files. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config The querystring method is less useful when all files are being cached from the same environment, but is a good way of caching files from multiple different environments in the same command. For example, the below command will cache the first file from the ``config1`` environment, and the second one from the ``config2`` environment. .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2 .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' return _client().cache_files(paths, saltenv) def cache_dir(path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None): ''' Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. 
versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$' ''' return _client().cache_dir( path, saltenv, include_empty, include_pat, exclude_pat ) def cache_master(saltenv='base'): ''' Retrieve all of the files on the master and cache them locally CLI Example: .. code-block:: bash salt '*' cp.cache_master ''' return _client().cache_master(saltenv) def list_states(saltenv='base'): ''' List all of the available state modules in an environment CLI Example: .. code-block:: bash salt '*' cp.list_states ''' return _client().list_states(saltenv) def list_master(saltenv='base', prefix=''): ''' List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ''' return _client().file_list(saltenv, prefix) def list_master_dirs(saltenv='base', prefix=''): ''' List all of the directories stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_dirs ''' return _client().dir_list(saltenv, prefix) def list_master_symlinks(saltenv='base', prefix=''): ''' List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ''' return _client().symlink_list(saltenv, prefix) def list_minion(saltenv='base'): ''' List all of the files cached on the minion CLI Example: .. code-block:: bash salt '*' cp.list_minion ''' return _client().file_local_list(saltenv) def is_cached(path, saltenv='base'): ''' Return a boolean if the given path on the master has been cached on the minion CLI Example: .. 
code-block:: bash salt '*' cp.is_cached salt://path/to/file ''' return _client().is_cached(path, saltenv) def hash_file(path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.hash_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv return _client().hash_file(path, saltenv) def stat_file(path, saltenv='base', octal=True): ''' Return the permissions of a file, to get the permissions of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.stat_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv stat = _client().hash_and_stat_file(path, saltenv)[1] if stat is None: return stat return salt.utils.files.st_mode_to_octal(stat[0]) if octal is True else stat[0] def push(path, keep_symlinks=False, upload_path=None, remove_source=False): ''' WARNING Files pushed to the master will have global read permissions.. Push a file from the minion up to the master, the file will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``) Since this feature allows a minion to push a file up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. keep_symlinks Keep the path value without resolving its canonical form upload_path Provide a different path inside the master's minion files cachedir remove_source Remove the source file on the minion .. versionadded:: 2016.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.push /etc/fstab salt '*' cp.push /etc/system-release keep_symlinks=True salt '*' cp.push /etc/fstab upload_path='/new/path/fstab' salt '*' cp.push /tmp/filename remove_source=True ''' log.debug('Trying to copy \'%s\' to master', path) if '../' in path or not os.path.isabs(path): log.debug('Path must be absolute, returning False') return False if not keep_symlinks: path = os.path.realpath(path) if not os.path.isfile(path): log.debug('Path failed os.path.isfile check, returning False') return False auth = _auth() if upload_path: if '../' in upload_path: log.debug('Path must be absolute, returning False') log.debug('Bad path: %s', upload_path) return False load_path = upload_path.lstrip(os.sep) else: load_path = path.lstrip(os.sep) # Normalize the path. This does not eliminate # the possibility that relative entries will still be present load_path_normal = os.path.normpath(load_path) # If this is Windows and a drive letter is present, remove it load_path_split_drive = os.path.splitdrive(load_path_normal)[1] # Finally, split the remaining path into a list for delivery to the master load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f] load = {'cmd': '_file_recv', 'id': __opts__['id'], 'path': load_path_list, 'size': os.path.getsize(path), 'tok': auth.gen_token(b'salt')} channel = salt.transport.client.ReqChannel.factory(__opts__) try: with salt.utils.files.fopen(path, 'rb') as fp_: init_send = False while True: load['loc'] = fp_.tell() load['data'] = fp_.read(__opts__['file_buffer_size']) if not load['data'] and init_send: if remove_source: try: salt.utils.files.rm_rf(path) log.debug('Removing source file \'%s\'', path) except IOError: log.error('cp.push failed to remove file \'%s\'', path) return False return True ret = channel.send(load) if not ret: log.error('cp.push Failed transfer failed. 
Ensure master has ' '\'file_recv\' set to \'True\' and that the file ' 'is not larger than the \'file_recv_size_max\' ' 'setting on the master.') return ret init_send = True finally: channel.close() def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in salt.utils.path.os_walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
saltstack/salt
salt/modules/cp.py
hash_file
python
def hash_file(path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.hash_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv return _client().hash_file(path, saltenv)
Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.hash_file salt://path/to/file
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L747-L763
[ "def _client():\n '''\n Return a client, hashed by the list of masters\n '''\n _mk_client()\n return __context__['cp.fileclient_{0}'.format(id(__opts__))]\n", "def split_env(url):\n '''\n remove the saltenv query parameter from a 'salt://' url\n '''\n if not url.startswith('salt://'):\n return url, None\n\n path, senv = parse(url)\n return create(path), senv\n" ]
# -*- coding: utf-8 -*- ''' Minion side functions for salt-cp ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import base64 import errno import os import logging import fnmatch # Import salt libs import salt.minion import salt.fileclient import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.path import salt.utils.templates import salt.utils.url import salt.crypt import salt.transport.client from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) __proxyenabled__ = ['*'] def _auth(): ''' Return the auth object ''' if 'auth' not in __context__: __context__['auth'] = salt.crypt.SAuth(__opts__) return __context__['auth'] def _gather_pillar(pillarenv, pillar_override): ''' Whenever a state run starts, gather the pillar data fresh ''' pillar = salt.pillar.get_pillar( __opts__, __grains__, __opts__['id'], __opts__['saltenv'], pillar_override=pillar_override, pillarenv=pillarenv ) ret = pillar.compile_pillar() if pillar_override and isinstance(pillar_override, dict): ret.update(pillar_override) return ret def recv(files, dest): ''' Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI. 
''' ret = {} for path, data in six.iteritems(files): if os.path.basename(path) == os.path.basename(dest) \ and not os.path.isdir(dest): final = dest elif os.path.isdir(dest): final = os.path.join(dest, os.path.basename(path)) elif os.path.isdir(os.path.dirname(dest)): final = dest else: return 'Destination unavailable' try: with salt.utils.files.fopen(final, 'w+') as fp_: fp_.write(data) ret[final] = True except IOError: ret[final] = False return ret def recv_chunked(dest, chunk, append=False, compressed=True, mode=None): ''' This function receives files copied to the minion using ``salt-cp`` and is not intended to be used directly on the CLI. ''' if 'retcode' not in __context__: __context__['retcode'] = 0 def _error(msg): __context__['retcode'] = 1 return msg if chunk is None: # dest is an empty dir and needs to be created try: os.makedirs(dest) except OSError as exc: if exc.errno == errno.EEXIST: if os.path.isfile(dest): return 'Path exists and is a file' else: return _error(exc.__str__()) return True chunk = base64.b64decode(chunk) open_mode = 'ab' if append else 'wb' try: fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 except (IOError, OSError) as exc: if exc.errno != errno.ENOENT: # Parent dir does not exist, we need to create it return _error(exc.__str__()) try: os.makedirs(os.path.dirname(dest)) except (IOError, OSError) as makedirs_exc: # Failed to make directory return _error(makedirs_exc.__str__()) fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 try: # Write the chunk to disk fh_.write(salt.utils.gzip_util.uncompress(chunk) if compressed else chunk) except (IOError, OSError) as exc: # Write failed return _error(exc.__str__()) else: # Write successful if not append and mode is not None: # If this is the first chunk we're writing, set the mode #log.debug('Setting mode for %s to %s', dest, oct(mode)) log.debug('Setting mode for %s to %s', dest, mode) try: os.chmod(dest, mode) except OSError: return 
_error(exc.__str__()) return True finally: try: fh_.close() except AttributeError: pass def _mk_client(): ''' Create a file client and add it to the context. Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict ''' if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__: __context__['cp.fileclient_{0}'.format(id(__opts__))] = \ salt.fileclient.get_file_client(__opts__) def _client(): ''' Return a client, hashed by the list of masters ''' _mk_client() return __context__['cp.fileclient_{0}'.format(id(__opts__))] def _render_filenames(path, dest, saltenv, template, **kw): ''' Process markup in the :param:`path` and :param:`dest` variables (NOT the files under the paths they ultimately point to) according to the markup format provided by :param:`template`. ''' if not template: return (path, dest) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ if 'pillarenv' in kw or 'pillar' in kw: pillarenv = kw.get('pillarenv', __opts__.get('pillarenv')) kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar')) else: kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): ''' Render :param:`contents` into a literal pathname by writing it to a temp file, rendering that file, and returning the result. 
''' # write out path to temp file tmp_path_fn = salt.utils.files.mkstemp() with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(contents)) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.files.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: {0}'.format( data['data'] ) ) else: return data['data'] path = _render(path) dest = _render(dest) return (path, dest) def get_file(path, dest, saltenv='base', makedirs=False, template=None, gzip=None, **kwargs): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not hash_file(path, saltenv): return '' else: return _client().get_file( path, dest, makedirs, saltenv, gzip) def envs(): ''' List available environments for fileserver CLI Example .. code-block:: bash salt '*' cp.envs ''' return _client().envs() def get_template(path, dest, template='jinja', saltenv='base', makedirs=False, **kwargs): ''' Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest ''' if 'salt' not in kwargs: kwargs['salt'] = __salt__ if 'pillar' not in kwargs: kwargs['pillar'] = __pillar__ if 'grains' not in kwargs: kwargs['grains'] = __grains__ if 'opts' not in kwargs: kwargs['opts'] = __opts__ return _client().get_template( path, dest, template, makedirs, saltenv, **kwargs) def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): ''' Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) return _client().get_dir(path, dest, saltenv, gzip) def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. 
Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. dest The default behaviour is to write the fetched file to the given destination path. If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error('Unable to fetch file %s from saltenv %s.', salt.utils.url.redact_http_basic_auth(path), saltenv) return result def get_file_str(path, saltenv='base'): ''' Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. code-block:: bash salt '*' cp.get_file_str salt://my/file ''' fn_ = cache_file(path, saltenv) if isinstance(fn_, six.string_types): try: with salt.utils.files.fopen(fn_, 'r') as fp_: return fp_.read() except IOError: return False return fn_ def cache_file(path, saltenv='base', source_hash=None): ''' Used to cache a single file on the Minion Returns the location of the new cached file on the Minion source_hash If ``name`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. 
note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' path = salt.utils.data.decode(path) saltenv = salt.utils.data.decode(saltenv) contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: # Prevent multiple caches in the same salt run. Affects remote URLs # since the master won't know their hash, so the fileclient # wouldn't be able to prevent multiple caches if we try to cache # the remote URL more than once. if os.path.isfile(__context__[contextkey]): return __context__[contextkey] else: # File is in __context__ but no longer exists in the minion # cache, get rid of the context key and re-cache below. # Accounts for corner case where file is removed from minion # cache between cp.cache_file calls in the same salt-run. __context__.pop(contextkey) except AttributeError: pass path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv result = _client().cache_file(path, saltenv, source_hash=source_hash) if not result: log.error( 'Unable to cache file \'%s\' from saltenv \'%s\'.', path, saltenv ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent # multiple caches (see above). __context__[contextkey] = result return result def cache_dest(url, saltenv='base'): ''' .. versionadded:: Neon Returns the expected cache path for the file, if cached using :py:func:`cp.cache_file <salt.modules.cp.cache_file>`. .. note:: This only returns the _expected_ path, it does not tell you if the URL is really cached. To check if the URL is cached, use :py:func:`cp.is_cached <salt.modules.cp.is_cached>` instead. CLI Examples: .. 
code-block:: bash salt '*' cp.cache_dest https://foo.com/bar.rpm salt '*' cp.cache_dest salt://my/file salt '*' cp.cache_dest salt://my/file saltenv=dev ''' return _client().cache_dest(url, saltenv) def cache_files(paths, saltenv='base'): ''' Used to gather many files from the Master, the gathered files will be saved in the minion cachedir reflective to the paths retrieved from the Master CLI Example: .. code-block:: bash salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1 There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the files. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config The querystring method is less useful when all files are being cached from the same environment, but is a good way of caching files from multiple different environments in the same command. For example, the below command will cache the first file from the ``config1`` environment, and the second one from the ``config2`` environment. .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2 .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' return _client().cache_files(paths, saltenv) def cache_dir(path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None): ''' Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. 
versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$' ''' return _client().cache_dir( path, saltenv, include_empty, include_pat, exclude_pat ) def cache_master(saltenv='base'): ''' Retrieve all of the files on the master and cache them locally CLI Example: .. code-block:: bash salt '*' cp.cache_master ''' return _client().cache_master(saltenv) def cache_local_file(path): ''' Cache a local file on the minion in the localfiles cache CLI Example: .. code-block:: bash salt '*' cp.cache_local_file /etc/hosts ''' if not os.path.exists(path): return '' path_cached = is_cached(path) # If the file has already been cached, return the path if path_cached: path_hash = hash_file(path) path_cached_hash = hash_file(path_cached) if path_hash['hsum'] == path_cached_hash['hsum']: return path_cached # The file hasn't been cached or has changed; cache it return _client().cache_local_file(path) def list_states(saltenv='base'): ''' List all of the available state modules in an environment CLI Example: .. code-block:: bash salt '*' cp.list_states ''' return _client().list_states(saltenv) def list_master(saltenv='base', prefix=''): ''' List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ''' return _client().file_list(saltenv, prefix) def list_master_dirs(saltenv='base', prefix=''): ''' List all of the directories stored on the master CLI Example: .. 
code-block:: bash salt '*' cp.list_master_dirs ''' return _client().dir_list(saltenv, prefix) def list_master_symlinks(saltenv='base', prefix=''): ''' List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ''' return _client().symlink_list(saltenv, prefix) def list_minion(saltenv='base'): ''' List all of the files cached on the minion CLI Example: .. code-block:: bash salt '*' cp.list_minion ''' return _client().file_local_list(saltenv) def is_cached(path, saltenv='base'): ''' Return a boolean if the given path on the master has been cached on the minion CLI Example: .. code-block:: bash salt '*' cp.is_cached salt://path/to/file ''' return _client().is_cached(path, saltenv) def stat_file(path, saltenv='base', octal=True): ''' Return the permissions of a file, to get the permissions of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.stat_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv stat = _client().hash_and_stat_file(path, saltenv)[1] if stat is None: return stat return salt.utils.files.st_mode_to_octal(stat[0]) if octal is True else stat[0] def push(path, keep_symlinks=False, upload_path=None, remove_source=False): ''' WARNING Files pushed to the master will have global read permissions.. Push a file from the minion up to the master, the file will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``) Since this feature allows a minion to push a file up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. 
keep_symlinks Keep the path value without resolving its canonical form upload_path Provide a different path inside the master's minion files cachedir remove_source Remove the source file on the minion .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' cp.push /etc/fstab salt '*' cp.push /etc/system-release keep_symlinks=True salt '*' cp.push /etc/fstab upload_path='/new/path/fstab' salt '*' cp.push /tmp/filename remove_source=True ''' log.debug('Trying to copy \'%s\' to master', path) if '../' in path or not os.path.isabs(path): log.debug('Path must be absolute, returning False') return False if not keep_symlinks: path = os.path.realpath(path) if not os.path.isfile(path): log.debug('Path failed os.path.isfile check, returning False') return False auth = _auth() if upload_path: if '../' in upload_path: log.debug('Path must be absolute, returning False') log.debug('Bad path: %s', upload_path) return False load_path = upload_path.lstrip(os.sep) else: load_path = path.lstrip(os.sep) # Normalize the path. 
This does not eliminate # the possibility that relative entries will still be present load_path_normal = os.path.normpath(load_path) # If this is Windows and a drive letter is present, remove it load_path_split_drive = os.path.splitdrive(load_path_normal)[1] # Finally, split the remaining path into a list for delivery to the master load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f] load = {'cmd': '_file_recv', 'id': __opts__['id'], 'path': load_path_list, 'size': os.path.getsize(path), 'tok': auth.gen_token(b'salt')} channel = salt.transport.client.ReqChannel.factory(__opts__) try: with salt.utils.files.fopen(path, 'rb') as fp_: init_send = False while True: load['loc'] = fp_.tell() load['data'] = fp_.read(__opts__['file_buffer_size']) if not load['data'] and init_send: if remove_source: try: salt.utils.files.rm_rf(path) log.debug('Removing source file \'%s\'', path) except IOError: log.error('cp.push failed to remove file \'%s\'', path) return False return True ret = channel.send(load) if not ret: log.error('cp.push Failed transfer failed. Ensure master has ' '\'file_recv\' set to \'True\' and that the file ' 'is not larger than the \'file_recv_size_max\' ' 'setting on the master.') return ret init_send = True finally: channel.close() def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. 
code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in salt.utils.path.os_walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
saltstack/salt
salt/modules/cp.py
stat_file
python
def stat_file(path, saltenv='base', octal=True): ''' Return the permissions of a file, to get the permissions of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.stat_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv stat = _client().hash_and_stat_file(path, saltenv)[1] if stat is None: return stat return salt.utils.files.st_mode_to_octal(stat[0]) if octal is True else stat[0]
Return the permissions of a file, to get the permissions of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.stat_file salt://path/to/file
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L766-L785
[ "def _client():\n '''\n Return a client, hashed by the list of masters\n '''\n _mk_client()\n return __context__['cp.fileclient_{0}'.format(id(__opts__))]\n", "def split_env(url):\n '''\n remove the saltenv query parameter from a 'salt://' url\n '''\n if not url.startswith('salt://'):\n return url, None\n\n path, senv = parse(url)\n return create(path), senv\n", "def st_mode_to_octal(mode):\n '''\n Convert the st_mode value from a stat(2) call (as returned from os.stat())\n to an octal mode.\n '''\n try:\n return oct(mode)[-4:]\n except (TypeError, IndexError):\n return ''\n" ]
# -*- coding: utf-8 -*- ''' Minion side functions for salt-cp ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import base64 import errno import os import logging import fnmatch # Import salt libs import salt.minion import salt.fileclient import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.path import salt.utils.templates import salt.utils.url import salt.crypt import salt.transport.client from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) __proxyenabled__ = ['*'] def _auth(): ''' Return the auth object ''' if 'auth' not in __context__: __context__['auth'] = salt.crypt.SAuth(__opts__) return __context__['auth'] def _gather_pillar(pillarenv, pillar_override): ''' Whenever a state run starts, gather the pillar data fresh ''' pillar = salt.pillar.get_pillar( __opts__, __grains__, __opts__['id'], __opts__['saltenv'], pillar_override=pillar_override, pillarenv=pillarenv ) ret = pillar.compile_pillar() if pillar_override and isinstance(pillar_override, dict): ret.update(pillar_override) return ret def recv(files, dest): ''' Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI. 
''' ret = {} for path, data in six.iteritems(files): if os.path.basename(path) == os.path.basename(dest) \ and not os.path.isdir(dest): final = dest elif os.path.isdir(dest): final = os.path.join(dest, os.path.basename(path)) elif os.path.isdir(os.path.dirname(dest)): final = dest else: return 'Destination unavailable' try: with salt.utils.files.fopen(final, 'w+') as fp_: fp_.write(data) ret[final] = True except IOError: ret[final] = False return ret def recv_chunked(dest, chunk, append=False, compressed=True, mode=None): ''' This function receives files copied to the minion using ``salt-cp`` and is not intended to be used directly on the CLI. ''' if 'retcode' not in __context__: __context__['retcode'] = 0 def _error(msg): __context__['retcode'] = 1 return msg if chunk is None: # dest is an empty dir and needs to be created try: os.makedirs(dest) except OSError as exc: if exc.errno == errno.EEXIST: if os.path.isfile(dest): return 'Path exists and is a file' else: return _error(exc.__str__()) return True chunk = base64.b64decode(chunk) open_mode = 'ab' if append else 'wb' try: fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 except (IOError, OSError) as exc: if exc.errno != errno.ENOENT: # Parent dir does not exist, we need to create it return _error(exc.__str__()) try: os.makedirs(os.path.dirname(dest)) except (IOError, OSError) as makedirs_exc: # Failed to make directory return _error(makedirs_exc.__str__()) fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 try: # Write the chunk to disk fh_.write(salt.utils.gzip_util.uncompress(chunk) if compressed else chunk) except (IOError, OSError) as exc: # Write failed return _error(exc.__str__()) else: # Write successful if not append and mode is not None: # If this is the first chunk we're writing, set the mode #log.debug('Setting mode for %s to %s', dest, oct(mode)) log.debug('Setting mode for %s to %s', dest, mode) try: os.chmod(dest, mode) except OSError: return 
_error(exc.__str__()) return True finally: try: fh_.close() except AttributeError: pass def _mk_client(): ''' Create a file client and add it to the context. Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict ''' if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__: __context__['cp.fileclient_{0}'.format(id(__opts__))] = \ salt.fileclient.get_file_client(__opts__) def _client(): ''' Return a client, hashed by the list of masters ''' _mk_client() return __context__['cp.fileclient_{0}'.format(id(__opts__))] def _render_filenames(path, dest, saltenv, template, **kw): ''' Process markup in the :param:`path` and :param:`dest` variables (NOT the files under the paths they ultimately point to) according to the markup format provided by :param:`template`. ''' if not template: return (path, dest) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ if 'pillarenv' in kw or 'pillar' in kw: pillarenv = kw.get('pillarenv', __opts__.get('pillarenv')) kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar')) else: kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): ''' Render :param:`contents` into a literal pathname by writing it to a temp file, rendering that file, and returning the result. 
''' # write out path to temp file tmp_path_fn = salt.utils.files.mkstemp() with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(contents)) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.files.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: {0}'.format( data['data'] ) ) else: return data['data'] path = _render(path) dest = _render(dest) return (path, dest) def get_file(path, dest, saltenv='base', makedirs=False, template=None, gzip=None, **kwargs): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not hash_file(path, saltenv): return '' else: return _client().get_file( path, dest, makedirs, saltenv, gzip) def envs(): ''' List available environments for fileserver CLI Example .. code-block:: bash salt '*' cp.envs ''' return _client().envs() def get_template(path, dest, template='jinja', saltenv='base', makedirs=False, **kwargs): ''' Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest ''' if 'salt' not in kwargs: kwargs['salt'] = __salt__ if 'pillar' not in kwargs: kwargs['pillar'] = __pillar__ if 'grains' not in kwargs: kwargs['grains'] = __grains__ if 'opts' not in kwargs: kwargs['opts'] = __opts__ return _client().get_template( path, dest, template, makedirs, saltenv, **kwargs) def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): ''' Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) return _client().get_dir(path, dest, saltenv, gzip) def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. 
Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. dest The default behaviour is to write the fetched file to the given destination path. If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error('Unable to fetch file %s from saltenv %s.', salt.utils.url.redact_http_basic_auth(path), saltenv) return result def get_file_str(path, saltenv='base'): ''' Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. code-block:: bash salt '*' cp.get_file_str salt://my/file ''' fn_ = cache_file(path, saltenv) if isinstance(fn_, six.string_types): try: with salt.utils.files.fopen(fn_, 'r') as fp_: return fp_.read() except IOError: return False return fn_ def cache_file(path, saltenv='base', source_hash=None): ''' Used to cache a single file on the Minion Returns the location of the new cached file on the Minion source_hash If ``name`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. 
note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' path = salt.utils.data.decode(path) saltenv = salt.utils.data.decode(saltenv) contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: # Prevent multiple caches in the same salt run. Affects remote URLs # since the master won't know their hash, so the fileclient # wouldn't be able to prevent multiple caches if we try to cache # the remote URL more than once. if os.path.isfile(__context__[contextkey]): return __context__[contextkey] else: # File is in __context__ but no longer exists in the minion # cache, get rid of the context key and re-cache below. # Accounts for corner case where file is removed from minion # cache between cp.cache_file calls in the same salt-run. __context__.pop(contextkey) except AttributeError: pass path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv result = _client().cache_file(path, saltenv, source_hash=source_hash) if not result: log.error( 'Unable to cache file \'%s\' from saltenv \'%s\'.', path, saltenv ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent # multiple caches (see above). __context__[contextkey] = result return result def cache_dest(url, saltenv='base'): ''' .. versionadded:: Neon Returns the expected cache path for the file, if cached using :py:func:`cp.cache_file <salt.modules.cp.cache_file>`. .. note:: This only returns the _expected_ path, it does not tell you if the URL is really cached. To check if the URL is cached, use :py:func:`cp.is_cached <salt.modules.cp.is_cached>` instead. CLI Examples: .. 
code-block:: bash salt '*' cp.cache_dest https://foo.com/bar.rpm salt '*' cp.cache_dest salt://my/file salt '*' cp.cache_dest salt://my/file saltenv=dev ''' return _client().cache_dest(url, saltenv) def cache_files(paths, saltenv='base'): ''' Used to gather many files from the Master, the gathered files will be saved in the minion cachedir reflective to the paths retrieved from the Master CLI Example: .. code-block:: bash salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1 There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the files. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config The querystring method is less useful when all files are being cached from the same environment, but is a good way of caching files from multiple different environments in the same command. For example, the below command will cache the first file from the ``config1`` environment, and the second one from the ``config2`` environment. .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2 .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' return _client().cache_files(paths, saltenv) def cache_dir(path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None): ''' Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. 
versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$' ''' return _client().cache_dir( path, saltenv, include_empty, include_pat, exclude_pat ) def cache_master(saltenv='base'): ''' Retrieve all of the files on the master and cache them locally CLI Example: .. code-block:: bash salt '*' cp.cache_master ''' return _client().cache_master(saltenv) def cache_local_file(path): ''' Cache a local file on the minion in the localfiles cache CLI Example: .. code-block:: bash salt '*' cp.cache_local_file /etc/hosts ''' if not os.path.exists(path): return '' path_cached = is_cached(path) # If the file has already been cached, return the path if path_cached: path_hash = hash_file(path) path_cached_hash = hash_file(path_cached) if path_hash['hsum'] == path_cached_hash['hsum']: return path_cached # The file hasn't been cached or has changed; cache it return _client().cache_local_file(path) def list_states(saltenv='base'): ''' List all of the available state modules in an environment CLI Example: .. code-block:: bash salt '*' cp.list_states ''' return _client().list_states(saltenv) def list_master(saltenv='base', prefix=''): ''' List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ''' return _client().file_list(saltenv, prefix) def list_master_dirs(saltenv='base', prefix=''): ''' List all of the directories stored on the master CLI Example: .. 
code-block:: bash salt '*' cp.list_master_dirs ''' return _client().dir_list(saltenv, prefix) def list_master_symlinks(saltenv='base', prefix=''): ''' List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ''' return _client().symlink_list(saltenv, prefix) def list_minion(saltenv='base'): ''' List all of the files cached on the minion CLI Example: .. code-block:: bash salt '*' cp.list_minion ''' return _client().file_local_list(saltenv) def is_cached(path, saltenv='base'): ''' Return a boolean if the given path on the master has been cached on the minion CLI Example: .. code-block:: bash salt '*' cp.is_cached salt://path/to/file ''' return _client().is_cached(path, saltenv) def hash_file(path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.hash_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv return _client().hash_file(path, saltenv) def push(path, keep_symlinks=False, upload_path=None, remove_source=False): ''' WARNING Files pushed to the master will have global read permissions.. Push a file from the minion up to the master, the file will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``) Since this feature allows a minion to push a file up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. keep_symlinks Keep the path value without resolving its canonical form upload_path Provide a different path inside the master's minion files cachedir remove_source Remove the source file on the minion .. versionadded:: 2016.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.push /etc/fstab salt '*' cp.push /etc/system-release keep_symlinks=True salt '*' cp.push /etc/fstab upload_path='/new/path/fstab' salt '*' cp.push /tmp/filename remove_source=True ''' log.debug('Trying to copy \'%s\' to master', path) if '../' in path or not os.path.isabs(path): log.debug('Path must be absolute, returning False') return False if not keep_symlinks: path = os.path.realpath(path) if not os.path.isfile(path): log.debug('Path failed os.path.isfile check, returning False') return False auth = _auth() if upload_path: if '../' in upload_path: log.debug('Path must be absolute, returning False') log.debug('Bad path: %s', upload_path) return False load_path = upload_path.lstrip(os.sep) else: load_path = path.lstrip(os.sep) # Normalize the path. This does not eliminate # the possibility that relative entries will still be present load_path_normal = os.path.normpath(load_path) # If this is Windows and a drive letter is present, remove it load_path_split_drive = os.path.splitdrive(load_path_normal)[1] # Finally, split the remaining path into a list for delivery to the master load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f] load = {'cmd': '_file_recv', 'id': __opts__['id'], 'path': load_path_list, 'size': os.path.getsize(path), 'tok': auth.gen_token(b'salt')} channel = salt.transport.client.ReqChannel.factory(__opts__) try: with salt.utils.files.fopen(path, 'rb') as fp_: init_send = False while True: load['loc'] = fp_.tell() load['data'] = fp_.read(__opts__['file_buffer_size']) if not load['data'] and init_send: if remove_source: try: salt.utils.files.rm_rf(path) log.debug('Removing source file \'%s\'', path) except IOError: log.error('cp.push failed to remove file \'%s\'', path) return False return True ret = channel.send(load) if not ret: log.error('cp.push Failed transfer failed. 
Ensure master has ' '\'file_recv\' set to \'True\' and that the file ' 'is not larger than the \'file_recv_size_max\' ' 'setting on the master.') return ret init_send = True finally: channel.close() def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in salt.utils.path.os_walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
saltstack/salt
salt/modules/cp.py
push
python
def push(path, keep_symlinks=False, upload_path=None, remove_source=False):
    '''
    WARNING Files pushed to the master will have global read permissions.

    Push a file from the minion up to the master, the file will be saved to
    the salt master in the master's minion files cachedir
    (defaults to ``/var/cache/salt/master/minions/minion-id/files``)

    Since this feature allows a minion to push a file up to the master server
    it is disabled by default for security purposes. To enable, set
    ``file_recv`` to ``True`` in the master configuration file, and restart
    the master.

    keep_symlinks
        Keep the path value without resolving its canonical form

    upload_path
        Provide a different path inside the master's minion files cachedir

    remove_source
        Remove the source file on the minion

        .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' cp.push /etc/fstab
        salt '*' cp.push /etc/system-release keep_symlinks=True
        salt '*' cp.push /etc/fstab upload_path='/new/path/fstab'
        salt '*' cp.push /tmp/filename remove_source=True
    '''
    log.debug('Trying to copy \'%s\' to master', path)
    # Reject relative components and non-absolute paths outright so a minion
    # cannot write outside the master's minion files cachedir.
    if '../' in path or not os.path.isabs(path):
        log.debug('Path must be absolute, returning False')
        return False
    if not keep_symlinks:
        path = os.path.realpath(path)
    if not os.path.isfile(path):
        log.debug('Path failed os.path.isfile check, returning False')
        return False
    auth = _auth()

    if upload_path:
        if '../' in upload_path:
            log.debug('Path must be absolute, returning False')
            log.debug('Bad path: %s', upload_path)
            return False
        load_path = upload_path.lstrip(os.sep)
    else:
        load_path = path.lstrip(os.sep)
    # Normalize the path. This does not eliminate
    # the possibility that relative entries will still be present
    load_path_normal = os.path.normpath(load_path)

    # If this is Windows and a drive letter is present, remove it
    load_path_split_drive = os.path.splitdrive(load_path_normal)[1]

    # Finally, split the remaining path into a list for delivery to the master
    load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f]

    load = {'cmd': '_file_recv',
            'id': __opts__['id'],
            'path': load_path_list,
            'size': os.path.getsize(path),
            'tok': auth.gen_token(b'salt')}
    channel = salt.transport.client.ReqChannel.factory(__opts__)
    try:
        with salt.utils.files.fopen(path, 'rb') as fp_:
            init_send = False
            while True:
                load['loc'] = fp_.tell()
                load['data'] = fp_.read(__opts__['file_buffer_size'])
                if not load['data'] and init_send:
                    # EOF: the zero-length payload just sent signals the
                    # master that the transfer is complete.
                    if remove_source:
                        try:
                            salt.utils.files.rm_rf(path)
                            log.debug('Removing source file \'%s\'', path)
                        except IOError:
                            log.error('cp.push failed to remove file '
                                      '\'%s\'', path)
                            return False
                    return True
                ret = channel.send(load)
                if not ret:
                    # Message fixed: was the garbled
                    # "cp.push Failed transfer failed. ..."
                    log.error('cp.push: file transfer failed. Ensure master '
                              'has \'file_recv\' set to \'True\' and that '
                              'the file is not larger than the '
                              '\'file_recv_size_max\' setting on the '
                              'master.')
                    return ret
                init_send = True
    finally:
        channel.close()
WARNING Files pushed to the master will have global read permissions. Push a file from the minion up to the master, the file will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). Since this feature allows a minion to push a file up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. keep_symlinks Keep the path value without resolving its canonical form upload_path Provide a different path inside the master's minion files cachedir remove_source Remove the source file on the minion .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' cp.push /etc/fstab salt '*' cp.push /etc/system-release keep_symlinks=True salt '*' cp.push /etc/fstab upload_path='/new/path/fstab' salt '*' cp.push /tmp/filename remove_source=True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L788-L880
null
# -*- coding: utf-8 -*- ''' Minion side functions for salt-cp ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import base64 import errno import os import logging import fnmatch # Import salt libs import salt.minion import salt.fileclient import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.path import salt.utils.templates import salt.utils.url import salt.crypt import salt.transport.client from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) __proxyenabled__ = ['*'] def _auth(): ''' Return the auth object ''' if 'auth' not in __context__: __context__['auth'] = salt.crypt.SAuth(__opts__) return __context__['auth'] def _gather_pillar(pillarenv, pillar_override): ''' Whenever a state run starts, gather the pillar data fresh ''' pillar = salt.pillar.get_pillar( __opts__, __grains__, __opts__['id'], __opts__['saltenv'], pillar_override=pillar_override, pillarenv=pillarenv ) ret = pillar.compile_pillar() if pillar_override and isinstance(pillar_override, dict): ret.update(pillar_override) return ret def recv(files, dest): ''' Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI. 
''' ret = {} for path, data in six.iteritems(files): if os.path.basename(path) == os.path.basename(dest) \ and not os.path.isdir(dest): final = dest elif os.path.isdir(dest): final = os.path.join(dest, os.path.basename(path)) elif os.path.isdir(os.path.dirname(dest)): final = dest else: return 'Destination unavailable' try: with salt.utils.files.fopen(final, 'w+') as fp_: fp_.write(data) ret[final] = True except IOError: ret[final] = False return ret def recv_chunked(dest, chunk, append=False, compressed=True, mode=None): ''' This function receives files copied to the minion using ``salt-cp`` and is not intended to be used directly on the CLI. ''' if 'retcode' not in __context__: __context__['retcode'] = 0 def _error(msg): __context__['retcode'] = 1 return msg if chunk is None: # dest is an empty dir and needs to be created try: os.makedirs(dest) except OSError as exc: if exc.errno == errno.EEXIST: if os.path.isfile(dest): return 'Path exists and is a file' else: return _error(exc.__str__()) return True chunk = base64.b64decode(chunk) open_mode = 'ab' if append else 'wb' try: fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 except (IOError, OSError) as exc: if exc.errno != errno.ENOENT: # Parent dir does not exist, we need to create it return _error(exc.__str__()) try: os.makedirs(os.path.dirname(dest)) except (IOError, OSError) as makedirs_exc: # Failed to make directory return _error(makedirs_exc.__str__()) fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 try: # Write the chunk to disk fh_.write(salt.utils.gzip_util.uncompress(chunk) if compressed else chunk) except (IOError, OSError) as exc: # Write failed return _error(exc.__str__()) else: # Write successful if not append and mode is not None: # If this is the first chunk we're writing, set the mode #log.debug('Setting mode for %s to %s', dest, oct(mode)) log.debug('Setting mode for %s to %s', dest, mode) try: os.chmod(dest, mode) except OSError: return 
_error(exc.__str__()) return True finally: try: fh_.close() except AttributeError: pass def _mk_client(): ''' Create a file client and add it to the context. Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict ''' if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__: __context__['cp.fileclient_{0}'.format(id(__opts__))] = \ salt.fileclient.get_file_client(__opts__) def _client(): ''' Return a client, hashed by the list of masters ''' _mk_client() return __context__['cp.fileclient_{0}'.format(id(__opts__))] def _render_filenames(path, dest, saltenv, template, **kw): ''' Process markup in the :param:`path` and :param:`dest` variables (NOT the files under the paths they ultimately point to) according to the markup format provided by :param:`template`. ''' if not template: return (path, dest) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ if 'pillarenv' in kw or 'pillar' in kw: pillarenv = kw.get('pillarenv', __opts__.get('pillarenv')) kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar')) else: kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): ''' Render :param:`contents` into a literal pathname by writing it to a temp file, rendering that file, and returning the result. 
''' # write out path to temp file tmp_path_fn = salt.utils.files.mkstemp() with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(contents)) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.files.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: {0}'.format( data['data'] ) ) else: return data['data'] path = _render(path) dest = _render(dest) return (path, dest) def get_file(path, dest, saltenv='base', makedirs=False, template=None, gzip=None, **kwargs): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not hash_file(path, saltenv): return '' else: return _client().get_file( path, dest, makedirs, saltenv, gzip) def envs(): ''' List available environments for fileserver CLI Example .. code-block:: bash salt '*' cp.envs ''' return _client().envs() def get_template(path, dest, template='jinja', saltenv='base', makedirs=False, **kwargs): ''' Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest ''' if 'salt' not in kwargs: kwargs['salt'] = __salt__ if 'pillar' not in kwargs: kwargs['pillar'] = __pillar__ if 'grains' not in kwargs: kwargs['grains'] = __grains__ if 'opts' not in kwargs: kwargs['opts'] = __opts__ return _client().get_template( path, dest, template, makedirs, saltenv, **kwargs) def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): ''' Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) return _client().get_dir(path, dest, saltenv, gzip) def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. 
Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. dest The default behaviour is to write the fetched file to the given destination path. If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error('Unable to fetch file %s from saltenv %s.', salt.utils.url.redact_http_basic_auth(path), saltenv) return result def get_file_str(path, saltenv='base'): ''' Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. code-block:: bash salt '*' cp.get_file_str salt://my/file ''' fn_ = cache_file(path, saltenv) if isinstance(fn_, six.string_types): try: with salt.utils.files.fopen(fn_, 'r') as fp_: return fp_.read() except IOError: return False return fn_ def cache_file(path, saltenv='base', source_hash=None): ''' Used to cache a single file on the Minion Returns the location of the new cached file on the Minion source_hash If ``name`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. 
note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' path = salt.utils.data.decode(path) saltenv = salt.utils.data.decode(saltenv) contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: # Prevent multiple caches in the same salt run. Affects remote URLs # since the master won't know their hash, so the fileclient # wouldn't be able to prevent multiple caches if we try to cache # the remote URL more than once. if os.path.isfile(__context__[contextkey]): return __context__[contextkey] else: # File is in __context__ but no longer exists in the minion # cache, get rid of the context key and re-cache below. # Accounts for corner case where file is removed from minion # cache between cp.cache_file calls in the same salt-run. __context__.pop(contextkey) except AttributeError: pass path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv result = _client().cache_file(path, saltenv, source_hash=source_hash) if not result: log.error( 'Unable to cache file \'%s\' from saltenv \'%s\'.', path, saltenv ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent # multiple caches (see above). __context__[contextkey] = result return result def cache_dest(url, saltenv='base'): ''' .. versionadded:: Neon Returns the expected cache path for the file, if cached using :py:func:`cp.cache_file <salt.modules.cp.cache_file>`. .. note:: This only returns the _expected_ path, it does not tell you if the URL is really cached. To check if the URL is cached, use :py:func:`cp.is_cached <salt.modules.cp.is_cached>` instead. CLI Examples: .. 
code-block:: bash salt '*' cp.cache_dest https://foo.com/bar.rpm salt '*' cp.cache_dest salt://my/file salt '*' cp.cache_dest salt://my/file saltenv=dev ''' return _client().cache_dest(url, saltenv) def cache_files(paths, saltenv='base'): ''' Used to gather many files from the Master, the gathered files will be saved in the minion cachedir reflective to the paths retrieved from the Master CLI Example: .. code-block:: bash salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1 There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the files. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config The querystring method is less useful when all files are being cached from the same environment, but is a good way of caching files from multiple different environments in the same command. For example, the below command will cache the first file from the ``config1`` environment, and the second one from the ``config2`` environment. .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2 .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' return _client().cache_files(paths, saltenv) def cache_dir(path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None): ''' Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. 
versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$' ''' return _client().cache_dir( path, saltenv, include_empty, include_pat, exclude_pat ) def cache_master(saltenv='base'): ''' Retrieve all of the files on the master and cache them locally CLI Example: .. code-block:: bash salt '*' cp.cache_master ''' return _client().cache_master(saltenv) def cache_local_file(path): ''' Cache a local file on the minion in the localfiles cache CLI Example: .. code-block:: bash salt '*' cp.cache_local_file /etc/hosts ''' if not os.path.exists(path): return '' path_cached = is_cached(path) # If the file has already been cached, return the path if path_cached: path_hash = hash_file(path) path_cached_hash = hash_file(path_cached) if path_hash['hsum'] == path_cached_hash['hsum']: return path_cached # The file hasn't been cached or has changed; cache it return _client().cache_local_file(path) def list_states(saltenv='base'): ''' List all of the available state modules in an environment CLI Example: .. code-block:: bash salt '*' cp.list_states ''' return _client().list_states(saltenv) def list_master(saltenv='base', prefix=''): ''' List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ''' return _client().file_list(saltenv, prefix) def list_master_dirs(saltenv='base', prefix=''): ''' List all of the directories stored on the master CLI Example: .. 
code-block:: bash salt '*' cp.list_master_dirs ''' return _client().dir_list(saltenv, prefix) def list_master_symlinks(saltenv='base', prefix=''): ''' List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ''' return _client().symlink_list(saltenv, prefix) def list_minion(saltenv='base'): ''' List all of the files cached on the minion CLI Example: .. code-block:: bash salt '*' cp.list_minion ''' return _client().file_local_list(saltenv) def is_cached(path, saltenv='base'): ''' Return a boolean if the given path on the master has been cached on the minion CLI Example: .. code-block:: bash salt '*' cp.is_cached salt://path/to/file ''' return _client().is_cached(path, saltenv) def hash_file(path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.hash_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv return _client().hash_file(path, saltenv) def stat_file(path, saltenv='base', octal=True): ''' Return the permissions of a file, to get the permissions of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.stat_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv stat = _client().hash_and_stat_file(path, saltenv)[1] if stat is None: return stat return salt.utils.files.st_mode_to_octal(stat[0]) if octal is True else stat[0] def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). 
It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in salt.utils.path.os_walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
saltstack/salt
salt/modules/cp.py
push_dir
python
def push_dir(path, glob=None, upload_path=None):
    '''
    Push a directory from the minion up to the master, the files will be
    saved to the salt master in the master's minion files cachedir (defaults
    to ``/var/cache/salt/master/minions/minion-id/files``).  It also has a
    glob for matching specific files using globbing.

    .. versionadded:: 2014.7.0

    Since this feature allows a minion to push files up to the master server
    it is disabled by default for security purposes. To enable, set
    ``file_recv`` to ``True`` in the master configuration file, and restart
    the master.

    upload_path
        Provide a different path and directory name inside the master's minion
        files cachedir

    CLI Example:

    .. code-block:: bash

        salt '*' cp.push /usr/lib/mysql
        salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path'
        salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf'
    '''
    # Reject relative paths and anything containing a parent-directory
    # reference, for the same security reasons as cp.push.
    if '../' in path or not os.path.isabs(path):
        return False
    path = os.path.realpath(path)
    if os.path.isfile(path):
        # A single file was passed in; just delegate to cp.push.
        return push(path, upload_path=upload_path)
    else:
        filelist = []
        for root, _, files in salt.utils.path.os_walk(path):
            filelist += [os.path.join(root, tmpfile) for tmpfile in files]
        if glob is not None:
            # Only the basename is matched against the glob, consistent
            # with shell-style globbing inside a directory.
            filelist = [fi for fi in filelist
                        if fnmatch.fnmatch(os.path.basename(fi), glob)]
        if not filelist:
            return False
        for tmpfile in filelist:
            if upload_path and tmpfile.startswith(path):
                # Strip only the leading ``path`` prefix via slicing.
                # str.replace() would remove *every* occurrence of the
                # substring, corrupting paths such as
                # ``/data/x/data/x/file``.
                tmpupload_path = os.path.join(os.path.sep,
                                              upload_path.strip(os.path.sep),
                                              tmpfile[len(path):]
                                              .strip(os.path.sep))
            else:
                # Reset per iteration so a value computed for one file can
                # never leak into the next one.
                tmpupload_path = upload_path
            ret = push(tmpfile, upload_path=tmpupload_path)
            if not ret:
                return ret
    return True
Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L883-L931
null
# -*- coding: utf-8 -*- ''' Minion side functions for salt-cp ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import base64 import errno import os import logging import fnmatch # Import salt libs import salt.minion import salt.fileclient import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.path import salt.utils.templates import salt.utils.url import salt.crypt import salt.transport.client from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) __proxyenabled__ = ['*'] def _auth(): ''' Return the auth object ''' if 'auth' not in __context__: __context__['auth'] = salt.crypt.SAuth(__opts__) return __context__['auth'] def _gather_pillar(pillarenv, pillar_override): ''' Whenever a state run starts, gather the pillar data fresh ''' pillar = salt.pillar.get_pillar( __opts__, __grains__, __opts__['id'], __opts__['saltenv'], pillar_override=pillar_override, pillarenv=pillarenv ) ret = pillar.compile_pillar() if pillar_override and isinstance(pillar_override, dict): ret.update(pillar_override) return ret def recv(files, dest): ''' Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI. 
''' ret = {} for path, data in six.iteritems(files): if os.path.basename(path) == os.path.basename(dest) \ and not os.path.isdir(dest): final = dest elif os.path.isdir(dest): final = os.path.join(dest, os.path.basename(path)) elif os.path.isdir(os.path.dirname(dest)): final = dest else: return 'Destination unavailable' try: with salt.utils.files.fopen(final, 'w+') as fp_: fp_.write(data) ret[final] = True except IOError: ret[final] = False return ret def recv_chunked(dest, chunk, append=False, compressed=True, mode=None): ''' This function receives files copied to the minion using ``salt-cp`` and is not intended to be used directly on the CLI. ''' if 'retcode' not in __context__: __context__['retcode'] = 0 def _error(msg): __context__['retcode'] = 1 return msg if chunk is None: # dest is an empty dir and needs to be created try: os.makedirs(dest) except OSError as exc: if exc.errno == errno.EEXIST: if os.path.isfile(dest): return 'Path exists and is a file' else: return _error(exc.__str__()) return True chunk = base64.b64decode(chunk) open_mode = 'ab' if append else 'wb' try: fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 except (IOError, OSError) as exc: if exc.errno != errno.ENOENT: # Parent dir does not exist, we need to create it return _error(exc.__str__()) try: os.makedirs(os.path.dirname(dest)) except (IOError, OSError) as makedirs_exc: # Failed to make directory return _error(makedirs_exc.__str__()) fh_ = salt.utils.files.fopen(dest, open_mode) # pylint: disable=W8470 try: # Write the chunk to disk fh_.write(salt.utils.gzip_util.uncompress(chunk) if compressed else chunk) except (IOError, OSError) as exc: # Write failed return _error(exc.__str__()) else: # Write successful if not append and mode is not None: # If this is the first chunk we're writing, set the mode #log.debug('Setting mode for %s to %s', dest, oct(mode)) log.debug('Setting mode for %s to %s', dest, mode) try: os.chmod(dest, mode) except OSError: return 
_error(exc.__str__()) return True finally: try: fh_.close() except AttributeError: pass def _mk_client(): ''' Create a file client and add it to the context. Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict ''' if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__: __context__['cp.fileclient_{0}'.format(id(__opts__))] = \ salt.fileclient.get_file_client(__opts__) def _client(): ''' Return a client, hashed by the list of masters ''' _mk_client() return __context__['cp.fileclient_{0}'.format(id(__opts__))] def _render_filenames(path, dest, saltenv, template, **kw): ''' Process markup in the :param:`path` and :param:`dest` variables (NOT the files under the paths they ultimately point to) according to the markup format provided by :param:`template`. ''' if not template: return (path, dest) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ if 'pillarenv' in kw or 'pillar' in kw: pillarenv = kw.get('pillarenv', __opts__.get('pillarenv')) kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar')) else: kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): ''' Render :param:`contents` into a literal pathname by writing it to a temp file, rendering that file, and returning the result. 
''' # write out path to temp file tmp_path_fn = salt.utils.files.mkstemp() with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(contents)) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.files.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: {0}'.format( data['data'] ) ) else: return data['data'] path = _render(path) dest = _render(dest) return (path, dest) def get_file(path, dest, saltenv='base', makedirs=False, template=None, gzip=None, **kwargs): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not hash_file(path, saltenv): return '' else: return _client().get_file( path, dest, makedirs, saltenv, gzip) def envs(): ''' List available environments for fileserver CLI Example .. code-block:: bash salt '*' cp.envs ''' return _client().envs() def get_template(path, dest, template='jinja', saltenv='base', makedirs=False, **kwargs): ''' Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest ''' if 'salt' not in kwargs: kwargs['salt'] = __salt__ if 'pillar' not in kwargs: kwargs['pillar'] = __pillar__ if 'grains' not in kwargs: kwargs['grains'] = __grains__ if 'opts' not in kwargs: kwargs['opts'] = __opts__ return _client().get_template( path, dest, template, makedirs, saltenv, **kwargs) def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): ''' Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) return _client().get_dir(path, dest, saltenv, gzip) def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. 
Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. dest The default behaviour is to write the fetched file to the given destination path. If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error('Unable to fetch file %s from saltenv %s.', salt.utils.url.redact_http_basic_auth(path), saltenv) return result def get_file_str(path, saltenv='base'): ''' Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. code-block:: bash salt '*' cp.get_file_str salt://my/file ''' fn_ = cache_file(path, saltenv) if isinstance(fn_, six.string_types): try: with salt.utils.files.fopen(fn_, 'r') as fp_: return fp_.read() except IOError: return False return fn_ def cache_file(path, saltenv='base', source_hash=None): ''' Used to cache a single file on the Minion Returns the location of the new cached file on the Minion source_hash If ``name`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. 
note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' path = salt.utils.data.decode(path) saltenv = salt.utils.data.decode(saltenv) contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: # Prevent multiple caches in the same salt run. Affects remote URLs # since the master won't know their hash, so the fileclient # wouldn't be able to prevent multiple caches if we try to cache # the remote URL more than once. if os.path.isfile(__context__[contextkey]): return __context__[contextkey] else: # File is in __context__ but no longer exists in the minion # cache, get rid of the context key and re-cache below. # Accounts for corner case where file is removed from minion # cache between cp.cache_file calls in the same salt-run. __context__.pop(contextkey) except AttributeError: pass path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv result = _client().cache_file(path, saltenv, source_hash=source_hash) if not result: log.error( 'Unable to cache file \'%s\' from saltenv \'%s\'.', path, saltenv ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent # multiple caches (see above). __context__[contextkey] = result return result def cache_dest(url, saltenv='base'): ''' .. versionadded:: Neon Returns the expected cache path for the file, if cached using :py:func:`cp.cache_file <salt.modules.cp.cache_file>`. .. note:: This only returns the _expected_ path, it does not tell you if the URL is really cached. To check if the URL is cached, use :py:func:`cp.is_cached <salt.modules.cp.is_cached>` instead. CLI Examples: .. 
code-block:: bash salt '*' cp.cache_dest https://foo.com/bar.rpm salt '*' cp.cache_dest salt://my/file salt '*' cp.cache_dest salt://my/file saltenv=dev ''' return _client().cache_dest(url, saltenv) def cache_files(paths, saltenv='base'): ''' Used to gather many files from the Master, the gathered files will be saved in the minion cachedir reflective to the paths retrieved from the Master CLI Example: .. code-block:: bash salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1 There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the files. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config The querystring method is less useful when all files are being cached from the same environment, but is a good way of caching files from multiple different environments in the same command. For example, the below command will cache the first file from the ``config1`` environment, and the second one from the ``config2`` environment. .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2 .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' return _client().cache_files(paths, saltenv) def cache_dir(path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None): ''' Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. 
versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$' ''' return _client().cache_dir( path, saltenv, include_empty, include_pat, exclude_pat ) def cache_master(saltenv='base'): ''' Retrieve all of the files on the master and cache them locally CLI Example: .. code-block:: bash salt '*' cp.cache_master ''' return _client().cache_master(saltenv) def cache_local_file(path): ''' Cache a local file on the minion in the localfiles cache CLI Example: .. code-block:: bash salt '*' cp.cache_local_file /etc/hosts ''' if not os.path.exists(path): return '' path_cached = is_cached(path) # If the file has already been cached, return the path if path_cached: path_hash = hash_file(path) path_cached_hash = hash_file(path_cached) if path_hash['hsum'] == path_cached_hash['hsum']: return path_cached # The file hasn't been cached or has changed; cache it return _client().cache_local_file(path) def list_states(saltenv='base'): ''' List all of the available state modules in an environment CLI Example: .. code-block:: bash salt '*' cp.list_states ''' return _client().list_states(saltenv) def list_master(saltenv='base', prefix=''): ''' List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ''' return _client().file_list(saltenv, prefix) def list_master_dirs(saltenv='base', prefix=''): ''' List all of the directories stored on the master CLI Example: .. 
code-block:: bash salt '*' cp.list_master_dirs ''' return _client().dir_list(saltenv, prefix) def list_master_symlinks(saltenv='base', prefix=''): ''' List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ''' return _client().symlink_list(saltenv, prefix) def list_minion(saltenv='base'): ''' List all of the files cached on the minion CLI Example: .. code-block:: bash salt '*' cp.list_minion ''' return _client().file_local_list(saltenv) def is_cached(path, saltenv='base'): ''' Return a boolean if the given path on the master has been cached on the minion CLI Example: .. code-block:: bash salt '*' cp.is_cached salt://path/to/file ''' return _client().is_cached(path, saltenv) def hash_file(path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.hash_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv return _client().hash_file(path, saltenv) def stat_file(path, saltenv='base', octal=True): ''' Return the permissions of a file, to get the permissions of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.stat_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv stat = _client().hash_and_stat_file(path, saltenv)[1] if stat is None: return stat return salt.utils.files.st_mode_to_octal(stat[0]) if octal is True else stat[0] def push(path, keep_symlinks=False, upload_path=None, remove_source=False): ''' WARNING Files pushed to the master will have global read permissions.. 
Push a file from the minion up to the master, the file will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``) Since this feature allows a minion to push a file up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. keep_symlinks Keep the path value without resolving its canonical form upload_path Provide a different path inside the master's minion files cachedir remove_source Remove the source file on the minion .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' cp.push /etc/fstab salt '*' cp.push /etc/system-release keep_symlinks=True salt '*' cp.push /etc/fstab upload_path='/new/path/fstab' salt '*' cp.push /tmp/filename remove_source=True ''' log.debug('Trying to copy \'%s\' to master', path) if '../' in path or not os.path.isabs(path): log.debug('Path must be absolute, returning False') return False if not keep_symlinks: path = os.path.realpath(path) if not os.path.isfile(path): log.debug('Path failed os.path.isfile check, returning False') return False auth = _auth() if upload_path: if '../' in upload_path: log.debug('Path must be absolute, returning False') log.debug('Bad path: %s', upload_path) return False load_path = upload_path.lstrip(os.sep) else: load_path = path.lstrip(os.sep) # Normalize the path. 
This does not eliminate # the possibility that relative entries will still be present load_path_normal = os.path.normpath(load_path) # If this is Windows and a drive letter is present, remove it load_path_split_drive = os.path.splitdrive(load_path_normal)[1] # Finally, split the remaining path into a list for delivery to the master load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f] load = {'cmd': '_file_recv', 'id': __opts__['id'], 'path': load_path_list, 'size': os.path.getsize(path), 'tok': auth.gen_token(b'salt')} channel = salt.transport.client.ReqChannel.factory(__opts__) try: with salt.utils.files.fopen(path, 'rb') as fp_: init_send = False while True: load['loc'] = fp_.tell() load['data'] = fp_.read(__opts__['file_buffer_size']) if not load['data'] and init_send: if remove_source: try: salt.utils.files.rm_rf(path) log.debug('Removing source file \'%s\'', path) except IOError: log.error('cp.push failed to remove file \'%s\'', path) return False return True ret = channel.send(load) if not ret: log.error('cp.push Failed transfer failed. Ensure master has ' '\'file_recv\' set to \'True\' and that the file ' 'is not larger than the \'file_recv_size_max\' ' 'setting on the master.') return ret init_send = True finally: channel.close()
saltstack/salt
salt/states/glance_image.py
present
python
def present(name, auth=None, **kwargs):
    '''
    Ensure image exists and is up-to-date

    name
        Name of the image

    enabled
        Boolean to control if image is enabled

    description
        An arbitrary description of the image
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['glanceng.setup_clouds'](auth)

    existing = __salt__['glanceng.image_get'](name=name)

    if existing:
        # Image already present; property comparison is not implemented.
        # TODO(SamYaple): Compare and update image properties here
        return ret

    if __opts__['test']:
        # Dry run: report what would be created without touching Glance.
        ret['result'] = None
        ret['changes'] = kwargs
        ret['comment'] = 'Image {} will be created.'.format(name)
        return ret

    kwargs['name'] = name
    created = __salt__['glanceng.image_create'](**kwargs)
    ret['changes'] = created
    ret['comment'] = 'Created image'
    return ret
Ensure image exists and is up-to-date name Name of the image enabled Boolean to control if image is enabled description An arbitrary description of the image
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/glance_image.py#L37-L75
null
# -*- coding: utf-8 -*- ''' Management of OpenStack Glance Images ======================================== .. versionadded:: 2018.3.0 :depends: shade :configuration: see :py:mod:`salt.modules.glanceng` for setup instructions Example States .. code-block:: yaml create image: glance_image.present: - name: cirros - filename: cirros.raw - image_format: raw delete image: glance_image.absent: - name: cirros ''' from __future__ import absolute_import, unicode_literals, print_function __virtualname__ = 'glance_image' def __virtual__(): if 'glanceng.image_get' in __salt__: return __virtualname__ return (False, 'The glanceng execution module failed to load: shade python module is not available') def absent(name, auth=None): ''' Ensure image does not exist name Name of the image ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} __salt__['glanceng.setup_clouds'](auth) image = __salt__['glanceng.image_get'](name=name) if image: if __opts__['test']: ret['result'] = None ret['changes'] = {'name': name} ret['comment'] = 'Image {} will be deleted.'.format(name) return ret __salt__['glanceng.image_delete'](name=image) ret['changes']['id'] = image.id ret['comment'] = 'Deleted image' return ret
saltstack/salt
salt/utils/docker/translate/container.py
_merge_keys
python
def _merge_keys(kwargs): ''' The log_config is a mixture of the CLI options --log-driver and --log-opt (which we support in Salt as log_driver and log_opt, respectively), but it must be submitted to the host config in the format {'Type': log_driver, 'Config': log_opt}. So, we need to construct this argument to be passed to the API from those two arguments. ''' log_driver = kwargs.pop('log_driver', helpers.NOTSET) log_opt = kwargs.pop('log_opt', helpers.NOTSET) if 'log_config' not in kwargs: if log_driver is not helpers.NOTSET \ or log_opt is not helpers.NOTSET: kwargs['log_config'] = { 'Type': log_driver if log_driver is not helpers.NOTSET else 'none', 'Config': log_opt if log_opt is not helpers.NOTSET else {} }
The log_config is a mixture of the CLI options --log-driver and --log-opt (which we support in Salt as log_driver and log_opt, respectively), but it must be submitted to the host config in the format {'Type': log_driver, 'Config': log_opt}. So, we need to construct this argument to be passed to the API from those two arguments.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/docker/translate/container.py#L44-L64
null
# -*- coding: utf-8 -*- ''' Functions to translate input for container creation ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import os # Import Salt libs from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Import helpers from . import helpers ALIASES = { 'cmd': 'command', 'cpuset': 'cpuset_cpus', 'dns_option': 'dns_opt', 'env': 'environment', 'expose': 'ports', 'interactive': 'stdin_open', 'ipc': 'ipc_mode', 'label': 'labels', 'memory': 'mem_limit', 'memory_swap': 'memswap_limit', 'publish': 'port_bindings', 'publish_all': 'publish_all_ports', 'restart': 'restart_policy', 'rm': 'auto_remove', 'sysctl': 'sysctls', 'security_opts': 'security_opt', 'ulimit': 'ulimits', 'user_ns_mode': 'userns_mode', 'volume': 'volumes', 'workdir': 'working_dir', } ALIASES_REVMAP = dict([(y, x) for x, y in six.iteritems(ALIASES)]) def _post_processing(kwargs, skip_translate, invalid): ''' Additional container-specific post-translation processing ''' # Don't allow conflicting options to be set if kwargs.get('port_bindings') is not None \ and kwargs.get('publish_all_ports'): kwargs.pop('port_bindings') invalid['port_bindings'] = 'Cannot be used when publish_all_ports=True' if kwargs.get('hostname') is not None \ and kwargs.get('network_mode') == 'host': kwargs.pop('hostname') invalid['hostname'] = 'Cannot be used when network_mode=True' # Make sure volumes and ports are defined to match the binds and port_bindings if kwargs.get('binds') is not None \ and (skip_translate is True or all(x not in skip_translate for x in ('binds', 'volume', 'volumes'))): # Make sure that all volumes defined in "binds" are included in the # "volumes" param. 
auto_volumes = [] if isinstance(kwargs['binds'], dict): for val in six.itervalues(kwargs['binds']): try: if 'bind' in val: auto_volumes.append(val['bind']) except TypeError: continue else: if isinstance(kwargs['binds'], list): auto_volume_defs = kwargs['binds'] else: try: auto_volume_defs = helpers.split(kwargs['binds']) except AttributeError: auto_volume_defs = [] for val in auto_volume_defs: try: auto_volumes.append(helpers.split(val, ':')[1]) except IndexError: continue if auto_volumes: actual_volumes = kwargs.setdefault('volumes', []) actual_volumes.extend([x for x in auto_volumes if x not in actual_volumes]) # Sort list to make unit tests more reliable actual_volumes.sort() if kwargs.get('port_bindings') is not None \ and all(x not in skip_translate for x in ('port_bindings', 'expose', 'ports')): # Make sure that all ports defined in "port_bindings" are included in # the "ports" param. ports_to_bind = list(kwargs['port_bindings']) if ports_to_bind: ports_to_open = set(kwargs.get('ports', [])) ports_to_open.update([helpers.get_port_def(x) for x in ports_to_bind]) kwargs['ports'] = list(ports_to_open) if 'ports' in kwargs \ and all(x not in skip_translate for x in ('expose', 'ports')): # TCP ports should only be passed as the port number. Normalize the # input so a port definition of 80/tcp becomes just 80 instead of # (80, 'tcp'). for index, _ in enumerate(kwargs['ports']): try: if kwargs['ports'][index][1] == 'tcp': kwargs['ports'][index] = ports_to_open[index][0] except TypeError: continue # Functions below must match names of docker-py arguments def auto_remove(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def binds(val, **kwargs): # pylint: disable=unused-argument ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python list. 
''' if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: raise SaltInvocationError( '\'{0}\' is not a dictionary or list of bind ' 'definitions'.format(val) ) return val def blkio_weight(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def blkio_weight_device(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is a list of PATH:WEIGHT pairs, but the API expects a list of dictionaries in the format [{'Path': path, 'Weight': weight}] ''' val = helpers.map_vals(val, 'Path', 'Weight') for idx in range(len(val)): try: val[idx]['Weight'] = int(val[idx]['Weight']) except (TypeError, ValueError): raise SaltInvocationError( 'Weight \'{Weight}\' for path \'{Path}\' is not an ' 'integer'.format(**val[idx]) ) return val def cap_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def cap_drop(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def command(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def cpuset_cpus(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpuset_mems(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpu_group(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_period(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_shares(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def detach(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def device_read_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_read_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def 
device_write_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_write_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def devices(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_search(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns(val, **kwargs): val = helpers.translate_stringlist(val) if kwargs.get('validate_ip_addrs', True): for item in val: helpers.validate_ip(item) return val def domainname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def entrypoint(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def environment(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def extra_hosts(val, **kwargs): val = helpers.translate_key_val(val, delimiter=':') if kwargs.get('validate_ip_addrs', True): for key in val: helpers.validate_ip(val[key]) return val def group_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def host_config(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def hostname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def ipc_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def isolation(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def labels(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_labels(val) def links(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter=':') def log_driver(val, **kwargs): # pylint: 
disable=unused-argument return helpers.translate_str(val) def log_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def lxc_conf(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def mac_address(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def mem_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def mem_swappiness(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def memswap_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def name(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def network_disabled(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def network_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def oom_kill_disable(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def oom_score_adj(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def pid_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def pids_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def port_bindings(val, **kwargs): ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python dictionary mapping ports to their bindings. The format the API expects is complicated depending on whether or not the external port maps to a different internal port, or if the port binding is for UDP instead of TCP (the default). 
For reference, see the "Port bindings" section in the docker-py documentation at the following URL: http://docker-py.readthedocs.io/en/stable/api.html ''' validate_ip_addrs = kwargs.get('validate_ip_addrs', True) if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: val = helpers.split(six.text_type(val)) for idx in range(len(val)): if not isinstance(val[idx], six.string_types): val[idx] = six.text_type(val[idx]) def _format_port(port_num, proto): return six.text_type(port_num) + '/udp' if proto.lower() == 'udp' else port_num bindings = {} for binding in val: bind_parts = helpers.split(binding, ':') num_bind_parts = len(bind_parts) if num_bind_parts == 1: # Single port or port range being passed through (no # special mapping) container_port = six.text_type(bind_parts[0]) if container_port == '': raise SaltInvocationError( 'Empty port binding definition found' ) container_port, _, proto = container_port.partition('/') try: start, end = helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) bind_vals = [ (_format_port(port_num, proto), None) for port_num in range(start, end + 1) ] elif num_bind_parts == 2: if bind_parts[0] == '': raise SaltInvocationError( 'Empty host port in port binding definition ' '\'{0}\''.format(binding) ) if bind_parts[1] == '': raise SaltInvocationError( 'Empty container port in port binding definition ' '\'{0}\''.format(binding) ) container_port, _, proto = bind_parts[1].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) hport_start, hport_end = \ helpers.get_port_range(bind_parts[0]) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(bind_parts[0], container_port) ) cport_list = list(range(cport_start, cport_end + 1)) hport_list = list(range(hport_start, hport_end + 1)) bind_vals = [ (_format_port(cport_list[x], proto), hport_list[x]) for x in range(len(cport_list)) ] elif num_bind_parts == 3: host_ip, host_port = bind_parts[0:2] if validate_ip_addrs: helpers.validate_ip(host_ip) container_port, _, proto = bind_parts[2].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) cport_list = list(range(cport_start, cport_end + 1)) if host_port == '': hport_list = [None] * len(cport_list) else: try: hport_start, hport_end = \ helpers.get_port_range(host_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) hport_list = list(range(hport_start, hport_end + 1)) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(host_port, container_port) ) bind_vals = [( _format_port(val, proto), (host_ip,) if hport_list[idx] is None else (host_ip, hport_list[idx]) ) for idx, val in enumerate(cport_list)] else: raise SaltInvocationError( '\'{0}\' is an invalid port binding definition (at most ' '3 components are allowed, found {1})'.format( binding, num_bind_parts ) ) for cport, bind_def in bind_vals: if cport not in bindings: bindings[cport] = bind_def else: if isinstance(bindings[cport], list): # Append to existing list of bindings for this # container port. bindings[cport].append(bind_def) else: bindings[cport] = [bindings[cport], bind_def] for idx in range(len(bindings[cport])): if bindings[cport][idx] is None: # Now that we are adding multiple # bindings try: # Convert 1234/udp to 1234 bindings[cport][idx] = int(cport.split('/')[0]) except AttributeError: # Port was tcp, the AttributeError # signifies that the split failed # because the port number was # already defined as an integer. # Just use the cport. bindings[cport][idx] = cport val = bindings return val def ports(val, **kwargs): # pylint: disable=unused-argument ''' Like cap_add, cap_drop, etc., this option can be specified multiple times, and each time can be a port number or port range. Ultimately, the API expects a list, but elements in the list are ints when the port is TCP, and a tuple (port_num, 'udp') when the port is UDP. 
''' if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: if isinstance(val, six.integer_types): val = [val] else: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(val) ) new_ports = set() for item in val: if isinstance(item, six.integer_types): new_ports.add(item) continue try: item, _, proto = item.partition('/') except AttributeError: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(item) ) try: range_start, range_end = \ helpers.get_port_range(item) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the "message" attribute of the ValueError. raise SaltInvocationError(exc.__str__()) new_ports.update([helpers.get_port_def(x, proto) for x in range(range_start, range_end + 1)]) return list(new_ports) def privileged(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def publish_all_ports(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def read_only(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def restart_policy(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is in the format NAME[:RETRY_COUNT] but the API expects {'Name': name, 'MaximumRetryCount': retry_count}. We will use the 'fill' kwarg here to make sure the mapped result uses '0' for the count if this optional value was omitted. ''' val = helpers.map_vals( val, 'Name', 'MaximumRetryCount', fill='0') # map_vals() converts the input into a list of dicts, but the API # wants just a dict, so extract the value from the single-element # list. If there was more than one element in the list, then # invalid input was passed (i.e. a comma-separated list, when what # we wanted was a single value). 
if len(val) != 1: raise SaltInvocationError('Only one policy is permitted') val = val[0] try: # The count needs to be an integer val['MaximumRetryCount'] = int(val['MaximumRetryCount']) except (TypeError, ValueError): # Non-numeric retry count passed raise SaltInvocationError( 'Retry count \'{0}\' is non-numeric'.format(val['MaximumRetryCount']) ) return val def security_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def shm_size(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def stdin_open(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def stop_signal(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def stop_timeout(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def storage_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def sysctls(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def tmpfs(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def tty(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def ulimits(val, **kwargs): # pylint: disable=unused-argument val = helpers.translate_stringlist(val) for idx in range(len(val)): if not isinstance(val[idx], dict): try: ulimit_name, limits = \ helpers.split(val[idx], '=', 1) comps = helpers.split(limits, ':', 1) except (AttributeError, ValueError): raise SaltInvocationError( 'Ulimit definition \'{0}\' is not in the format ' 'type=soft_limit[:hard_limit]'.format(val[idx]) ) if len(comps) == 1: comps *= 2 soft_limit, hard_limit = comps try: val[idx] = {'Name': ulimit_name, 'Soft': int(soft_limit), 'Hard': int(hard_limit)} except (TypeError, ValueError): raise SaltInvocationError( 'Limit \'{0}\' contains non-numeric value(s)'.format( val[idx] ) ) 
return val def user(val, **kwargs): # pylint: disable=unused-argument ''' This can be either a string or a numeric uid ''' if not isinstance(val, six.integer_types): # Try to convert to integer. This will fail if the value is a # username. This is OK, as we check below to make sure that the # value is either a string or integer. Trying to convert to an # integer first though will allow us to catch the edge case in # which a quoted uid is passed (e.g. '1000'). try: val = int(val) except (TypeError, ValueError): pass if not isinstance(val, (six.integer_types, six.string_types)): raise SaltInvocationError('Value must be a username or uid') elif isinstance(val, six.integer_types) and val < 0: raise SaltInvocationError('\'{0}\' is an invalid uid'.format(val)) return val def userns_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volume_driver(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volumes(val, **kwargs): # pylint: disable=unused-argument ''' Should be a list of absolute paths ''' val = helpers.translate_stringlist(val) for item in val: if not os.path.isabs(item): raise SaltInvocationError( '\'{0}\' is not an absolute path'.format(item) ) return val def volumes_from(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def working_dir(val, **kwargs): # pylint: disable=unused-argument ''' Must be an absolute path ''' try: is_abs = os.path.isabs(val) except AttributeError: is_abs = False if not is_abs: raise SaltInvocationError('\'{0}\' is not an absolute path'.format(val)) return val
saltstack/salt
salt/utils/docker/translate/container.py
_post_processing
python
def _post_processing(kwargs, skip_translate, invalid): ''' Additional container-specific post-translation processing ''' # Don't allow conflicting options to be set if kwargs.get('port_bindings') is not None \ and kwargs.get('publish_all_ports'): kwargs.pop('port_bindings') invalid['port_bindings'] = 'Cannot be used when publish_all_ports=True' if kwargs.get('hostname') is not None \ and kwargs.get('network_mode') == 'host': kwargs.pop('hostname') invalid['hostname'] = 'Cannot be used when network_mode=True' # Make sure volumes and ports are defined to match the binds and port_bindings if kwargs.get('binds') is not None \ and (skip_translate is True or all(x not in skip_translate for x in ('binds', 'volume', 'volumes'))): # Make sure that all volumes defined in "binds" are included in the # "volumes" param. auto_volumes = [] if isinstance(kwargs['binds'], dict): for val in six.itervalues(kwargs['binds']): try: if 'bind' in val: auto_volumes.append(val['bind']) except TypeError: continue else: if isinstance(kwargs['binds'], list): auto_volume_defs = kwargs['binds'] else: try: auto_volume_defs = helpers.split(kwargs['binds']) except AttributeError: auto_volume_defs = [] for val in auto_volume_defs: try: auto_volumes.append(helpers.split(val, ':')[1]) except IndexError: continue if auto_volumes: actual_volumes = kwargs.setdefault('volumes', []) actual_volumes.extend([x for x in auto_volumes if x not in actual_volumes]) # Sort list to make unit tests more reliable actual_volumes.sort() if kwargs.get('port_bindings') is not None \ and all(x not in skip_translate for x in ('port_bindings', 'expose', 'ports')): # Make sure that all ports defined in "port_bindings" are included in # the "ports" param. 
ports_to_bind = list(kwargs['port_bindings']) if ports_to_bind: ports_to_open = set(kwargs.get('ports', [])) ports_to_open.update([helpers.get_port_def(x) for x in ports_to_bind]) kwargs['ports'] = list(ports_to_open) if 'ports' in kwargs \ and all(x not in skip_translate for x in ('expose', 'ports')): # TCP ports should only be passed as the port number. Normalize the # input so a port definition of 80/tcp becomes just 80 instead of # (80, 'tcp'). for index, _ in enumerate(kwargs['ports']): try: if kwargs['ports'][index][1] == 'tcp': kwargs['ports'][index] = ports_to_open[index][0] except TypeError: continue
Additional container-specific post-translation processing
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/docker/translate/container.py#L67-L137
null
# -*- coding: utf-8 -*- ''' Functions to translate input for container creation ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import os # Import Salt libs from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Import helpers from . import helpers ALIASES = { 'cmd': 'command', 'cpuset': 'cpuset_cpus', 'dns_option': 'dns_opt', 'env': 'environment', 'expose': 'ports', 'interactive': 'stdin_open', 'ipc': 'ipc_mode', 'label': 'labels', 'memory': 'mem_limit', 'memory_swap': 'memswap_limit', 'publish': 'port_bindings', 'publish_all': 'publish_all_ports', 'restart': 'restart_policy', 'rm': 'auto_remove', 'sysctl': 'sysctls', 'security_opts': 'security_opt', 'ulimit': 'ulimits', 'user_ns_mode': 'userns_mode', 'volume': 'volumes', 'workdir': 'working_dir', } ALIASES_REVMAP = dict([(y, x) for x, y in six.iteritems(ALIASES)]) def _merge_keys(kwargs): ''' The log_config is a mixture of the CLI options --log-driver and --log-opt (which we support in Salt as log_driver and log_opt, respectively), but it must be submitted to the host config in the format {'Type': log_driver, 'Config': log_opt}. So, we need to construct this argument to be passed to the API from those two arguments. 
''' log_driver = kwargs.pop('log_driver', helpers.NOTSET) log_opt = kwargs.pop('log_opt', helpers.NOTSET) if 'log_config' not in kwargs: if log_driver is not helpers.NOTSET \ or log_opt is not helpers.NOTSET: kwargs['log_config'] = { 'Type': log_driver if log_driver is not helpers.NOTSET else 'none', 'Config': log_opt if log_opt is not helpers.NOTSET else {} } # Functions below must match names of docker-py arguments def auto_remove(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def binds(val, **kwargs): # pylint: disable=unused-argument ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python list. ''' if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: raise SaltInvocationError( '\'{0}\' is not a dictionary or list of bind ' 'definitions'.format(val) ) return val def blkio_weight(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def blkio_weight_device(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is a list of PATH:WEIGHT pairs, but the API expects a list of dictionaries in the format [{'Path': path, 'Weight': weight}] ''' val = helpers.map_vals(val, 'Path', 'Weight') for idx in range(len(val)): try: val[idx]['Weight'] = int(val[idx]['Weight']) except (TypeError, ValueError): raise SaltInvocationError( 'Weight \'{Weight}\' for path \'{Path}\' is not an ' 'integer'.format(**val[idx]) ) return val def cap_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def cap_drop(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def command(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def cpuset_cpus(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpuset_mems(val, 
**kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpu_group(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_period(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_shares(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def detach(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def device_read_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_read_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def device_write_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_write_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def devices(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_search(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns(val, **kwargs): val = helpers.translate_stringlist(val) if kwargs.get('validate_ip_addrs', True): for item in val: helpers.validate_ip(item) return val def domainname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def entrypoint(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def environment(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def extra_hosts(val, **kwargs): val = helpers.translate_key_val(val, delimiter=':') if kwargs.get('validate_ip_addrs', True): for key in val: helpers.validate_ip(val[key]) return val def group_add(val, 
**kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def host_config(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def hostname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def ipc_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def isolation(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def labels(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_labels(val) def links(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter=':') def log_driver(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def log_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def lxc_conf(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def mac_address(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def mem_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def mem_swappiness(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def memswap_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def name(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def network_disabled(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def network_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def oom_kill_disable(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def oom_score_adj(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def pid_mode(val, **kwargs): # pylint: disable=unused-argument 
return helpers.translate_str(val) def pids_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def port_bindings(val, **kwargs): ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python dictionary mapping ports to their bindings. The format the API expects is complicated depending on whether or not the external port maps to a different internal port, or if the port binding is for UDP instead of TCP (the default). For reference, see the "Port bindings" section in the docker-py documentation at the following URL: http://docker-py.readthedocs.io/en/stable/api.html ''' validate_ip_addrs = kwargs.get('validate_ip_addrs', True) if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: val = helpers.split(six.text_type(val)) for idx in range(len(val)): if not isinstance(val[idx], six.string_types): val[idx] = six.text_type(val[idx]) def _format_port(port_num, proto): return six.text_type(port_num) + '/udp' if proto.lower() == 'udp' else port_num bindings = {} for binding in val: bind_parts = helpers.split(binding, ':') num_bind_parts = len(bind_parts) if num_bind_parts == 1: # Single port or port range being passed through (no # special mapping) container_port = six.text_type(bind_parts[0]) if container_port == '': raise SaltInvocationError( 'Empty port binding definition found' ) container_port, _, proto = container_port.partition('/') try: start, end = helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) bind_vals = [ (_format_port(port_num, proto), None) for port_num in range(start, end + 1) ] elif num_bind_parts == 2: if bind_parts[0] == '': raise SaltInvocationError( 'Empty host port in port binding definition ' '\'{0}\''.format(binding) ) if bind_parts[1] == '': raise SaltInvocationError( 'Empty container port in port binding definition ' '\'{0}\''.format(binding) ) container_port, _, proto = bind_parts[1].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) hport_start, hport_end = \ helpers.get_port_range(bind_parts[0]) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(bind_parts[0], container_port) ) cport_list = list(range(cport_start, cport_end + 1)) hport_list = list(range(hport_start, hport_end + 1)) bind_vals = [ (_format_port(cport_list[x], proto), hport_list[x]) for x in range(len(cport_list)) ] elif num_bind_parts == 3: host_ip, host_port = bind_parts[0:2] if validate_ip_addrs: helpers.validate_ip(host_ip) container_port, _, proto = bind_parts[2].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) cport_list = list(range(cport_start, cport_end + 1)) if host_port == '': hport_list = [None] * len(cport_list) else: try: hport_start, hport_end = \ helpers.get_port_range(host_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) hport_list = list(range(hport_start, hport_end + 1)) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(host_port, container_port) ) bind_vals = [( _format_port(val, proto), (host_ip,) if hport_list[idx] is None else (host_ip, hport_list[idx]) ) for idx, val in enumerate(cport_list)] else: raise SaltInvocationError( '\'{0}\' is an invalid port binding definition (at most ' '3 components are allowed, found {1})'.format( binding, num_bind_parts ) ) for cport, bind_def in bind_vals: if cport not in bindings: bindings[cport] = bind_def else: if isinstance(bindings[cport], list): # Append to existing list of bindings for this # container port. bindings[cport].append(bind_def) else: bindings[cport] = [bindings[cport], bind_def] for idx in range(len(bindings[cport])): if bindings[cport][idx] is None: # Now that we are adding multiple # bindings try: # Convert 1234/udp to 1234 bindings[cport][idx] = int(cport.split('/')[0]) except AttributeError: # Port was tcp, the AttributeError # signifies that the split failed # because the port number was # already defined as an integer. # Just use the cport. bindings[cport][idx] = cport val = bindings return val def ports(val, **kwargs): # pylint: disable=unused-argument ''' Like cap_add, cap_drop, etc., this option can be specified multiple times, and each time can be a port number or port range. Ultimately, the API expects a list, but elements in the list are ints when the port is TCP, and a tuple (port_num, 'udp') when the port is UDP. 
''' if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: if isinstance(val, six.integer_types): val = [val] else: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(val) ) new_ports = set() for item in val: if isinstance(item, six.integer_types): new_ports.add(item) continue try: item, _, proto = item.partition('/') except AttributeError: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(item) ) try: range_start, range_end = \ helpers.get_port_range(item) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the "message" attribute of the ValueError. raise SaltInvocationError(exc.__str__()) new_ports.update([helpers.get_port_def(x, proto) for x in range(range_start, range_end + 1)]) return list(new_ports) def privileged(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def publish_all_ports(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def read_only(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def restart_policy(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is in the format NAME[:RETRY_COUNT] but the API expects {'Name': name, 'MaximumRetryCount': retry_count}. We will use the 'fill' kwarg here to make sure the mapped result uses '0' for the count if this optional value was omitted. ''' val = helpers.map_vals( val, 'Name', 'MaximumRetryCount', fill='0') # map_vals() converts the input into a list of dicts, but the API # wants just a dict, so extract the value from the single-element # list. If there was more than one element in the list, then # invalid input was passed (i.e. a comma-separated list, when what # we wanted was a single value). 
if len(val) != 1: raise SaltInvocationError('Only one policy is permitted') val = val[0] try: # The count needs to be an integer val['MaximumRetryCount'] = int(val['MaximumRetryCount']) except (TypeError, ValueError): # Non-numeric retry count passed raise SaltInvocationError( 'Retry count \'{0}\' is non-numeric'.format(val['MaximumRetryCount']) ) return val def security_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def shm_size(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def stdin_open(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def stop_signal(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def stop_timeout(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def storage_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def sysctls(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def tmpfs(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def tty(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def ulimits(val, **kwargs): # pylint: disable=unused-argument val = helpers.translate_stringlist(val) for idx in range(len(val)): if not isinstance(val[idx], dict): try: ulimit_name, limits = \ helpers.split(val[idx], '=', 1) comps = helpers.split(limits, ':', 1) except (AttributeError, ValueError): raise SaltInvocationError( 'Ulimit definition \'{0}\' is not in the format ' 'type=soft_limit[:hard_limit]'.format(val[idx]) ) if len(comps) == 1: comps *= 2 soft_limit, hard_limit = comps try: val[idx] = {'Name': ulimit_name, 'Soft': int(soft_limit), 'Hard': int(hard_limit)} except (TypeError, ValueError): raise SaltInvocationError( 'Limit \'{0}\' contains non-numeric value(s)'.format( val[idx] ) ) 
return val def user(val, **kwargs): # pylint: disable=unused-argument ''' This can be either a string or a numeric uid ''' if not isinstance(val, six.integer_types): # Try to convert to integer. This will fail if the value is a # username. This is OK, as we check below to make sure that the # value is either a string or integer. Trying to convert to an # integer first though will allow us to catch the edge case in # which a quoted uid is passed (e.g. '1000'). try: val = int(val) except (TypeError, ValueError): pass if not isinstance(val, (six.integer_types, six.string_types)): raise SaltInvocationError('Value must be a username or uid') elif isinstance(val, six.integer_types) and val < 0: raise SaltInvocationError('\'{0}\' is an invalid uid'.format(val)) return val def userns_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volume_driver(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volumes(val, **kwargs): # pylint: disable=unused-argument ''' Should be a list of absolute paths ''' val = helpers.translate_stringlist(val) for item in val: if not os.path.isabs(item): raise SaltInvocationError( '\'{0}\' is not an absolute path'.format(item) ) return val def volumes_from(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def working_dir(val, **kwargs): # pylint: disable=unused-argument ''' Must be an absolute path ''' try: is_abs = os.path.isabs(val) except AttributeError: is_abs = False if not is_abs: raise SaltInvocationError('\'{0}\' is not an absolute path'.format(val)) return val
saltstack/salt
salt/utils/docker/translate/container.py
binds
python
def binds(val, **kwargs): # pylint: disable=unused-argument ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python list. ''' if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: raise SaltInvocationError( '\'{0}\' is not a dictionary or list of bind ' 'definitions'.format(val) ) return val
On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python list.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/docker/translate/container.py#L145-L160
[ "def split(item, sep=',', maxsplit=-1):\n return [x.strip() for x in item.split(sep, maxsplit)]\n" ]
# -*- coding: utf-8 -*- ''' Functions to translate input for container creation ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import os # Import Salt libs from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Import helpers from . import helpers ALIASES = { 'cmd': 'command', 'cpuset': 'cpuset_cpus', 'dns_option': 'dns_opt', 'env': 'environment', 'expose': 'ports', 'interactive': 'stdin_open', 'ipc': 'ipc_mode', 'label': 'labels', 'memory': 'mem_limit', 'memory_swap': 'memswap_limit', 'publish': 'port_bindings', 'publish_all': 'publish_all_ports', 'restart': 'restart_policy', 'rm': 'auto_remove', 'sysctl': 'sysctls', 'security_opts': 'security_opt', 'ulimit': 'ulimits', 'user_ns_mode': 'userns_mode', 'volume': 'volumes', 'workdir': 'working_dir', } ALIASES_REVMAP = dict([(y, x) for x, y in six.iteritems(ALIASES)]) def _merge_keys(kwargs): ''' The log_config is a mixture of the CLI options --log-driver and --log-opt (which we support in Salt as log_driver and log_opt, respectively), but it must be submitted to the host config in the format {'Type': log_driver, 'Config': log_opt}. So, we need to construct this argument to be passed to the API from those two arguments. 
''' log_driver = kwargs.pop('log_driver', helpers.NOTSET) log_opt = kwargs.pop('log_opt', helpers.NOTSET) if 'log_config' not in kwargs: if log_driver is not helpers.NOTSET \ or log_opt is not helpers.NOTSET: kwargs['log_config'] = { 'Type': log_driver if log_driver is not helpers.NOTSET else 'none', 'Config': log_opt if log_opt is not helpers.NOTSET else {} } def _post_processing(kwargs, skip_translate, invalid): ''' Additional container-specific post-translation processing ''' # Don't allow conflicting options to be set if kwargs.get('port_bindings') is not None \ and kwargs.get('publish_all_ports'): kwargs.pop('port_bindings') invalid['port_bindings'] = 'Cannot be used when publish_all_ports=True' if kwargs.get('hostname') is not None \ and kwargs.get('network_mode') == 'host': kwargs.pop('hostname') invalid['hostname'] = 'Cannot be used when network_mode=True' # Make sure volumes and ports are defined to match the binds and port_bindings if kwargs.get('binds') is not None \ and (skip_translate is True or all(x not in skip_translate for x in ('binds', 'volume', 'volumes'))): # Make sure that all volumes defined in "binds" are included in the # "volumes" param. 
auto_volumes = [] if isinstance(kwargs['binds'], dict): for val in six.itervalues(kwargs['binds']): try: if 'bind' in val: auto_volumes.append(val['bind']) except TypeError: continue else: if isinstance(kwargs['binds'], list): auto_volume_defs = kwargs['binds'] else: try: auto_volume_defs = helpers.split(kwargs['binds']) except AttributeError: auto_volume_defs = [] for val in auto_volume_defs: try: auto_volumes.append(helpers.split(val, ':')[1]) except IndexError: continue if auto_volumes: actual_volumes = kwargs.setdefault('volumes', []) actual_volumes.extend([x for x in auto_volumes if x not in actual_volumes]) # Sort list to make unit tests more reliable actual_volumes.sort() if kwargs.get('port_bindings') is not None \ and all(x not in skip_translate for x in ('port_bindings', 'expose', 'ports')): # Make sure that all ports defined in "port_bindings" are included in # the "ports" param. ports_to_bind = list(kwargs['port_bindings']) if ports_to_bind: ports_to_open = set(kwargs.get('ports', [])) ports_to_open.update([helpers.get_port_def(x) for x in ports_to_bind]) kwargs['ports'] = list(ports_to_open) if 'ports' in kwargs \ and all(x not in skip_translate for x in ('expose', 'ports')): # TCP ports should only be passed as the port number. Normalize the # input so a port definition of 80/tcp becomes just 80 instead of # (80, 'tcp'). 
for index, _ in enumerate(kwargs['ports']): try: if kwargs['ports'][index][1] == 'tcp': kwargs['ports'][index] = ports_to_open[index][0] except TypeError: continue # Functions below must match names of docker-py arguments def auto_remove(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def blkio_weight(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def blkio_weight_device(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is a list of PATH:WEIGHT pairs, but the API expects a list of dictionaries in the format [{'Path': path, 'Weight': weight}] ''' val = helpers.map_vals(val, 'Path', 'Weight') for idx in range(len(val)): try: val[idx]['Weight'] = int(val[idx]['Weight']) except (TypeError, ValueError): raise SaltInvocationError( 'Weight \'{Weight}\' for path \'{Path}\' is not an ' 'integer'.format(**val[idx]) ) return val def cap_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def cap_drop(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def command(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def cpuset_cpus(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpuset_mems(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpu_group(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_period(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_shares(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def detach(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def device_read_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_read_iops(val, **kwargs): # pylint: 
disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def device_write_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_write_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def devices(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_search(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns(val, **kwargs): val = helpers.translate_stringlist(val) if kwargs.get('validate_ip_addrs', True): for item in val: helpers.validate_ip(item) return val def domainname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def entrypoint(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def environment(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def extra_hosts(val, **kwargs): val = helpers.translate_key_val(val, delimiter=':') if kwargs.get('validate_ip_addrs', True): for key in val: helpers.validate_ip(val[key]) return val def group_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def host_config(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def hostname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def ipc_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def isolation(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def labels(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_labels(val) def links(val, **kwargs): # pylint: disable=unused-argument return 
helpers.translate_key_val(val, delimiter=':') def log_driver(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def log_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def lxc_conf(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def mac_address(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def mem_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def mem_swappiness(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def memswap_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def name(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def network_disabled(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def network_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def oom_kill_disable(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def oom_score_adj(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def pid_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def pids_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def port_bindings(val, **kwargs): ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python dictionary mapping ports to their bindings. The format the API expects is complicated depending on whether or not the external port maps to a different internal port, or if the port binding is for UDP instead of TCP (the default). 
For reference, see the "Port bindings" section in the docker-py documentation at the following URL: http://docker-py.readthedocs.io/en/stable/api.html ''' validate_ip_addrs = kwargs.get('validate_ip_addrs', True) if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: val = helpers.split(six.text_type(val)) for idx in range(len(val)): if not isinstance(val[idx], six.string_types): val[idx] = six.text_type(val[idx]) def _format_port(port_num, proto): return six.text_type(port_num) + '/udp' if proto.lower() == 'udp' else port_num bindings = {} for binding in val: bind_parts = helpers.split(binding, ':') num_bind_parts = len(bind_parts) if num_bind_parts == 1: # Single port or port range being passed through (no # special mapping) container_port = six.text_type(bind_parts[0]) if container_port == '': raise SaltInvocationError( 'Empty port binding definition found' ) container_port, _, proto = container_port.partition('/') try: start, end = helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) bind_vals = [ (_format_port(port_num, proto), None) for port_num in range(start, end + 1) ] elif num_bind_parts == 2: if bind_parts[0] == '': raise SaltInvocationError( 'Empty host port in port binding definition ' '\'{0}\''.format(binding) ) if bind_parts[1] == '': raise SaltInvocationError( 'Empty container port in port binding definition ' '\'{0}\''.format(binding) ) container_port, _, proto = bind_parts[1].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) hport_start, hport_end = \ helpers.get_port_range(bind_parts[0]) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(bind_parts[0], container_port) ) cport_list = list(range(cport_start, cport_end + 1)) hport_list = list(range(hport_start, hport_end + 1)) bind_vals = [ (_format_port(cport_list[x], proto), hport_list[x]) for x in range(len(cport_list)) ] elif num_bind_parts == 3: host_ip, host_port = bind_parts[0:2] if validate_ip_addrs: helpers.validate_ip(host_ip) container_port, _, proto = bind_parts[2].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) cport_list = list(range(cport_start, cport_end + 1)) if host_port == '': hport_list = [None] * len(cport_list) else: try: hport_start, hport_end = \ helpers.get_port_range(host_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) hport_list = list(range(hport_start, hport_end + 1)) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(host_port, container_port) ) bind_vals = [( _format_port(val, proto), (host_ip,) if hport_list[idx] is None else (host_ip, hport_list[idx]) ) for idx, val in enumerate(cport_list)] else: raise SaltInvocationError( '\'{0}\' is an invalid port binding definition (at most ' '3 components are allowed, found {1})'.format( binding, num_bind_parts ) ) for cport, bind_def in bind_vals: if cport not in bindings: bindings[cport] = bind_def else: if isinstance(bindings[cport], list): # Append to existing list of bindings for this # container port. bindings[cport].append(bind_def) else: bindings[cport] = [bindings[cport], bind_def] for idx in range(len(bindings[cport])): if bindings[cport][idx] is None: # Now that we are adding multiple # bindings try: # Convert 1234/udp to 1234 bindings[cport][idx] = int(cport.split('/')[0]) except AttributeError: # Port was tcp, the AttributeError # signifies that the split failed # because the port number was # already defined as an integer. # Just use the cport. bindings[cport][idx] = cport val = bindings return val def ports(val, **kwargs): # pylint: disable=unused-argument ''' Like cap_add, cap_drop, etc., this option can be specified multiple times, and each time can be a port number or port range. Ultimately, the API expects a list, but elements in the list are ints when the port is TCP, and a tuple (port_num, 'udp') when the port is UDP. 
''' if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: if isinstance(val, six.integer_types): val = [val] else: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(val) ) new_ports = set() for item in val: if isinstance(item, six.integer_types): new_ports.add(item) continue try: item, _, proto = item.partition('/') except AttributeError: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(item) ) try: range_start, range_end = \ helpers.get_port_range(item) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the "message" attribute of the ValueError. raise SaltInvocationError(exc.__str__()) new_ports.update([helpers.get_port_def(x, proto) for x in range(range_start, range_end + 1)]) return list(new_ports) def privileged(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def publish_all_ports(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def read_only(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def restart_policy(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is in the format NAME[:RETRY_COUNT] but the API expects {'Name': name, 'MaximumRetryCount': retry_count}. We will use the 'fill' kwarg here to make sure the mapped result uses '0' for the count if this optional value was omitted. ''' val = helpers.map_vals( val, 'Name', 'MaximumRetryCount', fill='0') # map_vals() converts the input into a list of dicts, but the API # wants just a dict, so extract the value from the single-element # list. If there was more than one element in the list, then # invalid input was passed (i.e. a comma-separated list, when what # we wanted was a single value). 
if len(val) != 1: raise SaltInvocationError('Only one policy is permitted') val = val[0] try: # The count needs to be an integer val['MaximumRetryCount'] = int(val['MaximumRetryCount']) except (TypeError, ValueError): # Non-numeric retry count passed raise SaltInvocationError( 'Retry count \'{0}\' is non-numeric'.format(val['MaximumRetryCount']) ) return val def security_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def shm_size(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def stdin_open(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def stop_signal(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def stop_timeout(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def storage_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def sysctls(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def tmpfs(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def tty(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def ulimits(val, **kwargs): # pylint: disable=unused-argument val = helpers.translate_stringlist(val) for idx in range(len(val)): if not isinstance(val[idx], dict): try: ulimit_name, limits = \ helpers.split(val[idx], '=', 1) comps = helpers.split(limits, ':', 1) except (AttributeError, ValueError): raise SaltInvocationError( 'Ulimit definition \'{0}\' is not in the format ' 'type=soft_limit[:hard_limit]'.format(val[idx]) ) if len(comps) == 1: comps *= 2 soft_limit, hard_limit = comps try: val[idx] = {'Name': ulimit_name, 'Soft': int(soft_limit), 'Hard': int(hard_limit)} except (TypeError, ValueError): raise SaltInvocationError( 'Limit \'{0}\' contains non-numeric value(s)'.format( val[idx] ) ) 
return val def user(val, **kwargs): # pylint: disable=unused-argument ''' This can be either a string or a numeric uid ''' if not isinstance(val, six.integer_types): # Try to convert to integer. This will fail if the value is a # username. This is OK, as we check below to make sure that the # value is either a string or integer. Trying to convert to an # integer first though will allow us to catch the edge case in # which a quoted uid is passed (e.g. '1000'). try: val = int(val) except (TypeError, ValueError): pass if not isinstance(val, (six.integer_types, six.string_types)): raise SaltInvocationError('Value must be a username or uid') elif isinstance(val, six.integer_types) and val < 0: raise SaltInvocationError('\'{0}\' is an invalid uid'.format(val)) return val def userns_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volume_driver(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volumes(val, **kwargs): # pylint: disable=unused-argument ''' Should be a list of absolute paths ''' val = helpers.translate_stringlist(val) for item in val: if not os.path.isabs(item): raise SaltInvocationError( '\'{0}\' is not an absolute path'.format(item) ) return val def volumes_from(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def working_dir(val, **kwargs): # pylint: disable=unused-argument ''' Must be an absolute path ''' try: is_abs = os.path.isabs(val) except AttributeError: is_abs = False if not is_abs: raise SaltInvocationError('\'{0}\' is not an absolute path'.format(val)) return val
saltstack/salt
salt/utils/docker/translate/container.py
blkio_weight_device
python
def blkio_weight_device(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is a list of PATH:WEIGHT pairs, but the API expects a list of dictionaries in the format [{'Path': path, 'Weight': weight}] ''' val = helpers.map_vals(val, 'Path', 'Weight') for idx in range(len(val)): try: val[idx]['Weight'] = int(val[idx]['Weight']) except (TypeError, ValueError): raise SaltInvocationError( 'Weight \'{Weight}\' for path \'{Path}\' is not an ' 'integer'.format(**val[idx]) ) return val
CLI input is a list of PATH:WEIGHT pairs, but the API expects a list of dictionaries in the format [{'Path': path, 'Weight': weight}]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/docker/translate/container.py#L167-L181
[ "def map_vals(val, *names, **extra_opts):\n '''\n Many arguments come in as a list of VAL1:VAL2 pairs, but map to a list\n of dicts in the format {NAME1: VAL1, NAME2: VAL2}. This function\n provides common code to handle these instances.\n '''\n fill = extra_opts.pop('fill', NOTSET)\n expected_num_elements = len(names)\n val = translate_stringlist(val)\n for idx, item in enumerate(val):\n if not isinstance(item, dict):\n elements = [x.strip() for x in item.split(':')]\n num_elements = len(elements)\n if num_elements < expected_num_elements:\n if fill is NOTSET:\n raise SaltInvocationError(\n '\\'{0}\\' contains {1} value(s) (expected {2})'.format(\n item, num_elements, expected_num_elements\n )\n )\n elements.extend([fill] * (expected_num_elements - num_elements))\n elif num_elements > expected_num_elements:\n raise SaltInvocationError(\n '\\'{0}\\' contains {1} value(s) (expected {2})'.format(\n item,\n num_elements,\n expected_num_elements if fill is NOTSET\n else 'up to {0}'.format(expected_num_elements)\n )\n )\n val[idx] = dict(zip(names, elements))\n return val\n" ]
# -*- coding: utf-8 -*- ''' Functions to translate input for container creation ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import os # Import Salt libs from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Import helpers from . import helpers ALIASES = { 'cmd': 'command', 'cpuset': 'cpuset_cpus', 'dns_option': 'dns_opt', 'env': 'environment', 'expose': 'ports', 'interactive': 'stdin_open', 'ipc': 'ipc_mode', 'label': 'labels', 'memory': 'mem_limit', 'memory_swap': 'memswap_limit', 'publish': 'port_bindings', 'publish_all': 'publish_all_ports', 'restart': 'restart_policy', 'rm': 'auto_remove', 'sysctl': 'sysctls', 'security_opts': 'security_opt', 'ulimit': 'ulimits', 'user_ns_mode': 'userns_mode', 'volume': 'volumes', 'workdir': 'working_dir', } ALIASES_REVMAP = dict([(y, x) for x, y in six.iteritems(ALIASES)]) def _merge_keys(kwargs): ''' The log_config is a mixture of the CLI options --log-driver and --log-opt (which we support in Salt as log_driver and log_opt, respectively), but it must be submitted to the host config in the format {'Type': log_driver, 'Config': log_opt}. So, we need to construct this argument to be passed to the API from those two arguments. 
''' log_driver = kwargs.pop('log_driver', helpers.NOTSET) log_opt = kwargs.pop('log_opt', helpers.NOTSET) if 'log_config' not in kwargs: if log_driver is not helpers.NOTSET \ or log_opt is not helpers.NOTSET: kwargs['log_config'] = { 'Type': log_driver if log_driver is not helpers.NOTSET else 'none', 'Config': log_opt if log_opt is not helpers.NOTSET else {} } def _post_processing(kwargs, skip_translate, invalid): ''' Additional container-specific post-translation processing ''' # Don't allow conflicting options to be set if kwargs.get('port_bindings') is not None \ and kwargs.get('publish_all_ports'): kwargs.pop('port_bindings') invalid['port_bindings'] = 'Cannot be used when publish_all_ports=True' if kwargs.get('hostname') is not None \ and kwargs.get('network_mode') == 'host': kwargs.pop('hostname') invalid['hostname'] = 'Cannot be used when network_mode=True' # Make sure volumes and ports are defined to match the binds and port_bindings if kwargs.get('binds') is not None \ and (skip_translate is True or all(x not in skip_translate for x in ('binds', 'volume', 'volumes'))): # Make sure that all volumes defined in "binds" are included in the # "volumes" param. 
auto_volumes = [] if isinstance(kwargs['binds'], dict): for val in six.itervalues(kwargs['binds']): try: if 'bind' in val: auto_volumes.append(val['bind']) except TypeError: continue else: if isinstance(kwargs['binds'], list): auto_volume_defs = kwargs['binds'] else: try: auto_volume_defs = helpers.split(kwargs['binds']) except AttributeError: auto_volume_defs = [] for val in auto_volume_defs: try: auto_volumes.append(helpers.split(val, ':')[1]) except IndexError: continue if auto_volumes: actual_volumes = kwargs.setdefault('volumes', []) actual_volumes.extend([x for x in auto_volumes if x not in actual_volumes]) # Sort list to make unit tests more reliable actual_volumes.sort() if kwargs.get('port_bindings') is not None \ and all(x not in skip_translate for x in ('port_bindings', 'expose', 'ports')): # Make sure that all ports defined in "port_bindings" are included in # the "ports" param. ports_to_bind = list(kwargs['port_bindings']) if ports_to_bind: ports_to_open = set(kwargs.get('ports', [])) ports_to_open.update([helpers.get_port_def(x) for x in ports_to_bind]) kwargs['ports'] = list(ports_to_open) if 'ports' in kwargs \ and all(x not in skip_translate for x in ('expose', 'ports')): # TCP ports should only be passed as the port number. Normalize the # input so a port definition of 80/tcp becomes just 80 instead of # (80, 'tcp'). for index, _ in enumerate(kwargs['ports']): try: if kwargs['ports'][index][1] == 'tcp': kwargs['ports'][index] = ports_to_open[index][0] except TypeError: continue # Functions below must match names of docker-py arguments def auto_remove(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def binds(val, **kwargs): # pylint: disable=unused-argument ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python list. 
''' if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: raise SaltInvocationError( '\'{0}\' is not a dictionary or list of bind ' 'definitions'.format(val) ) return val def blkio_weight(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cap_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def cap_drop(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def command(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def cpuset_cpus(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpuset_mems(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpu_group(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_period(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_shares(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def detach(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def device_read_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_read_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def device_write_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_write_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def devices(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_search(val, 
**kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns(val, **kwargs): val = helpers.translate_stringlist(val) if kwargs.get('validate_ip_addrs', True): for item in val: helpers.validate_ip(item) return val def domainname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def entrypoint(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def environment(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def extra_hosts(val, **kwargs): val = helpers.translate_key_val(val, delimiter=':') if kwargs.get('validate_ip_addrs', True): for key in val: helpers.validate_ip(val[key]) return val def group_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def host_config(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def hostname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def ipc_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def isolation(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def labels(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_labels(val) def links(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter=':') def log_driver(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def log_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def lxc_conf(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def mac_address(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def mem_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def 
mem_swappiness(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def memswap_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def name(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def network_disabled(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def network_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def oom_kill_disable(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def oom_score_adj(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def pid_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def pids_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def port_bindings(val, **kwargs): ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python dictionary mapping ports to their bindings. The format the API expects is complicated depending on whether or not the external port maps to a different internal port, or if the port binding is for UDP instead of TCP (the default). 
For reference, see the "Port bindings" section in the docker-py documentation at the following URL: http://docker-py.readthedocs.io/en/stable/api.html ''' validate_ip_addrs = kwargs.get('validate_ip_addrs', True) if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: val = helpers.split(six.text_type(val)) for idx in range(len(val)): if not isinstance(val[idx], six.string_types): val[idx] = six.text_type(val[idx]) def _format_port(port_num, proto): return six.text_type(port_num) + '/udp' if proto.lower() == 'udp' else port_num bindings = {} for binding in val: bind_parts = helpers.split(binding, ':') num_bind_parts = len(bind_parts) if num_bind_parts == 1: # Single port or port range being passed through (no # special mapping) container_port = six.text_type(bind_parts[0]) if container_port == '': raise SaltInvocationError( 'Empty port binding definition found' ) container_port, _, proto = container_port.partition('/') try: start, end = helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) bind_vals = [ (_format_port(port_num, proto), None) for port_num in range(start, end + 1) ] elif num_bind_parts == 2: if bind_parts[0] == '': raise SaltInvocationError( 'Empty host port in port binding definition ' '\'{0}\''.format(binding) ) if bind_parts[1] == '': raise SaltInvocationError( 'Empty container port in port binding definition ' '\'{0}\''.format(binding) ) container_port, _, proto = bind_parts[1].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) hport_start, hport_end = \ helpers.get_port_range(bind_parts[0]) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(bind_parts[0], container_port) ) cport_list = list(range(cport_start, cport_end + 1)) hport_list = list(range(hport_start, hport_end + 1)) bind_vals = [ (_format_port(cport_list[x], proto), hport_list[x]) for x in range(len(cport_list)) ] elif num_bind_parts == 3: host_ip, host_port = bind_parts[0:2] if validate_ip_addrs: helpers.validate_ip(host_ip) container_port, _, proto = bind_parts[2].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) cport_list = list(range(cport_start, cport_end + 1)) if host_port == '': hport_list = [None] * len(cport_list) else: try: hport_start, hport_end = \ helpers.get_port_range(host_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) hport_list = list(range(hport_start, hport_end + 1)) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(host_port, container_port) ) bind_vals = [( _format_port(val, proto), (host_ip,) if hport_list[idx] is None else (host_ip, hport_list[idx]) ) for idx, val in enumerate(cport_list)] else: raise SaltInvocationError( '\'{0}\' is an invalid port binding definition (at most ' '3 components are allowed, found {1})'.format( binding, num_bind_parts ) ) for cport, bind_def in bind_vals: if cport not in bindings: bindings[cport] = bind_def else: if isinstance(bindings[cport], list): # Append to existing list of bindings for this # container port. bindings[cport].append(bind_def) else: bindings[cport] = [bindings[cport], bind_def] for idx in range(len(bindings[cport])): if bindings[cport][idx] is None: # Now that we are adding multiple # bindings try: # Convert 1234/udp to 1234 bindings[cport][idx] = int(cport.split('/')[0]) except AttributeError: # Port was tcp, the AttributeError # signifies that the split failed # because the port number was # already defined as an integer. # Just use the cport. bindings[cport][idx] = cport val = bindings return val def ports(val, **kwargs): # pylint: disable=unused-argument ''' Like cap_add, cap_drop, etc., this option can be specified multiple times, and each time can be a port number or port range. Ultimately, the API expects a list, but elements in the list are ints when the port is TCP, and a tuple (port_num, 'udp') when the port is UDP. 
''' if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: if isinstance(val, six.integer_types): val = [val] else: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(val) ) new_ports = set() for item in val: if isinstance(item, six.integer_types): new_ports.add(item) continue try: item, _, proto = item.partition('/') except AttributeError: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(item) ) try: range_start, range_end = \ helpers.get_port_range(item) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the "message" attribute of the ValueError. raise SaltInvocationError(exc.__str__()) new_ports.update([helpers.get_port_def(x, proto) for x in range(range_start, range_end + 1)]) return list(new_ports) def privileged(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def publish_all_ports(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def read_only(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def restart_policy(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is in the format NAME[:RETRY_COUNT] but the API expects {'Name': name, 'MaximumRetryCount': retry_count}. We will use the 'fill' kwarg here to make sure the mapped result uses '0' for the count if this optional value was omitted. ''' val = helpers.map_vals( val, 'Name', 'MaximumRetryCount', fill='0') # map_vals() converts the input into a list of dicts, but the API # wants just a dict, so extract the value from the single-element # list. If there was more than one element in the list, then # invalid input was passed (i.e. a comma-separated list, when what # we wanted was a single value). 
if len(val) != 1: raise SaltInvocationError('Only one policy is permitted') val = val[0] try: # The count needs to be an integer val['MaximumRetryCount'] = int(val['MaximumRetryCount']) except (TypeError, ValueError): # Non-numeric retry count passed raise SaltInvocationError( 'Retry count \'{0}\' is non-numeric'.format(val['MaximumRetryCount']) ) return val def security_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def shm_size(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def stdin_open(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def stop_signal(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def stop_timeout(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def storage_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def sysctls(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def tmpfs(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def tty(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def ulimits(val, **kwargs): # pylint: disable=unused-argument val = helpers.translate_stringlist(val) for idx in range(len(val)): if not isinstance(val[idx], dict): try: ulimit_name, limits = \ helpers.split(val[idx], '=', 1) comps = helpers.split(limits, ':', 1) except (AttributeError, ValueError): raise SaltInvocationError( 'Ulimit definition \'{0}\' is not in the format ' 'type=soft_limit[:hard_limit]'.format(val[idx]) ) if len(comps) == 1: comps *= 2 soft_limit, hard_limit = comps try: val[idx] = {'Name': ulimit_name, 'Soft': int(soft_limit), 'Hard': int(hard_limit)} except (TypeError, ValueError): raise SaltInvocationError( 'Limit \'{0}\' contains non-numeric value(s)'.format( val[idx] ) ) 
return val def user(val, **kwargs): # pylint: disable=unused-argument ''' This can be either a string or a numeric uid ''' if not isinstance(val, six.integer_types): # Try to convert to integer. This will fail if the value is a # username. This is OK, as we check below to make sure that the # value is either a string or integer. Trying to convert to an # integer first though will allow us to catch the edge case in # which a quoted uid is passed (e.g. '1000'). try: val = int(val) except (TypeError, ValueError): pass if not isinstance(val, (six.integer_types, six.string_types)): raise SaltInvocationError('Value must be a username or uid') elif isinstance(val, six.integer_types) and val < 0: raise SaltInvocationError('\'{0}\' is an invalid uid'.format(val)) return val def userns_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volume_driver(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volumes(val, **kwargs): # pylint: disable=unused-argument ''' Should be a list of absolute paths ''' val = helpers.translate_stringlist(val) for item in val: if not os.path.isabs(item): raise SaltInvocationError( '\'{0}\' is not an absolute path'.format(item) ) return val def volumes_from(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def working_dir(val, **kwargs): # pylint: disable=unused-argument ''' Must be an absolute path ''' try: is_abs = os.path.isabs(val) except AttributeError: is_abs = False if not is_abs: raise SaltInvocationError('\'{0}\' is not an absolute path'.format(val)) return val
saltstack/salt
salt/utils/docker/translate/container.py
port_bindings
python
def port_bindings(val, **kwargs): ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python dictionary mapping ports to their bindings. The format the API expects is complicated depending on whether or not the external port maps to a different internal port, or if the port binding is for UDP instead of TCP (the default). For reference, see the "Port bindings" section in the docker-py documentation at the following URL: http://docker-py.readthedocs.io/en/stable/api.html ''' validate_ip_addrs = kwargs.get('validate_ip_addrs', True) if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: val = helpers.split(six.text_type(val)) for idx in range(len(val)): if not isinstance(val[idx], six.string_types): val[idx] = six.text_type(val[idx]) def _format_port(port_num, proto): return six.text_type(port_num) + '/udp' if proto.lower() == 'udp' else port_num bindings = {} for binding in val: bind_parts = helpers.split(binding, ':') num_bind_parts = len(bind_parts) if num_bind_parts == 1: # Single port or port range being passed through (no # special mapping) container_port = six.text_type(bind_parts[0]) if container_port == '': raise SaltInvocationError( 'Empty port binding definition found' ) container_port, _, proto = container_port.partition('/') try: start, end = helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) bind_vals = [ (_format_port(port_num, proto), None) for port_num in range(start, end + 1) ] elif num_bind_parts == 2: if bind_parts[0] == '': raise SaltInvocationError( 'Empty host port in port binding definition ' '\'{0}\''.format(binding) ) if bind_parts[1] == '': raise SaltInvocationError( 'Empty container port in port binding definition ' '\'{0}\''.format(binding) ) container_port, _, proto = bind_parts[1].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) hport_start, hport_end = \ helpers.get_port_range(bind_parts[0]) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(bind_parts[0], container_port) ) cport_list = list(range(cport_start, cport_end + 1)) hport_list = list(range(hport_start, hport_end + 1)) bind_vals = [ (_format_port(cport_list[x], proto), hport_list[x]) for x in range(len(cport_list)) ] elif num_bind_parts == 3: host_ip, host_port = bind_parts[0:2] if validate_ip_addrs: helpers.validate_ip(host_ip) container_port, _, proto = bind_parts[2].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) cport_list = list(range(cport_start, cport_end + 1)) if host_port == '': hport_list = [None] * len(cport_list) else: try: hport_start, hport_end = \ helpers.get_port_range(host_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) hport_list = list(range(hport_start, hport_end + 1)) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(host_port, container_port) ) bind_vals = [( _format_port(val, proto), (host_ip,) if hport_list[idx] is None else (host_ip, hport_list[idx]) ) for idx, val in enumerate(cport_list)] else: raise SaltInvocationError( '\'{0}\' is an invalid port binding definition (at most ' '3 components are allowed, found {1})'.format( binding, num_bind_parts ) ) for cport, bind_def in bind_vals: if cport not in bindings: bindings[cport] = bind_def else: if isinstance(bindings[cport], list): # Append to existing list of bindings for this # container port. bindings[cport].append(bind_def) else: bindings[cport] = [bindings[cport], bind_def] for idx in range(len(bindings[cport])): if bindings[cport][idx] is None: # Now that we are adding multiple # bindings try: # Convert 1234/udp to 1234 bindings[cport][idx] = int(cport.split('/')[0]) except AttributeError: # Port was tcp, the AttributeError # signifies that the split failed # because the port number was # already defined as an integer. # Just use the cport. bindings[cport][idx] = cport val = bindings return val
On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python dictionary mapping ports to their bindings. The format the API expects is complicated depending on whether or not the external port maps to a different internal port, or if the port binding is for UDP instead of TCP (the default). For reference, see the "Port bindings" section in the docker-py documentation at the following URL: http://docker-py.readthedocs.io/en/stable/api.html
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/docker/translate/container.py#L360-L514
[ "def split(item, sep=',', maxsplit=-1):\n return [x.strip() for x in item.split(sep, maxsplit)]\n", "def validate_ip(val):\n try:\n if not salt.utils.network.is_ip(val):\n raise SaltInvocationError(\n '\\'{0}\\' is not a valid IP address'.format(val)\n )\n except RuntimeError:\n pass\n", "def get_port_range(port_def):\n '''\n Given a port number or range, return a start and end to that range. Port\n ranges are defined as a string containing two numbers separated by a dash\n (e.g. '4505-4506').\n\n A ValueError will be raised if bad input is provided.\n '''\n if isinstance(port_def, six.integer_types):\n # Single integer, start/end of range is the same\n return port_def, port_def\n try:\n comps = [int(x) for x in split(port_def, '-')]\n if len(comps) == 1:\n range_start = range_end = comps[0]\n else:\n range_start, range_end = comps\n if range_start > range_end:\n raise ValueError('start > end')\n except (TypeError, ValueError) as exc:\n if exc.__str__() == 'start > end':\n msg = (\n 'Start of port range ({0}) cannot be greater than end of '\n 'port range ({1})'.format(range_start, range_end)\n )\n else:\n msg = '\\'{0}\\' is non-numeric or an invalid port range'.format(\n port_def\n )\n raise ValueError(msg)\n else:\n return range_start, range_end\n" ]
# -*- coding: utf-8 -*- ''' Functions to translate input for container creation ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import os # Import Salt libs from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Import helpers from . import helpers ALIASES = { 'cmd': 'command', 'cpuset': 'cpuset_cpus', 'dns_option': 'dns_opt', 'env': 'environment', 'expose': 'ports', 'interactive': 'stdin_open', 'ipc': 'ipc_mode', 'label': 'labels', 'memory': 'mem_limit', 'memory_swap': 'memswap_limit', 'publish': 'port_bindings', 'publish_all': 'publish_all_ports', 'restart': 'restart_policy', 'rm': 'auto_remove', 'sysctl': 'sysctls', 'security_opts': 'security_opt', 'ulimit': 'ulimits', 'user_ns_mode': 'userns_mode', 'volume': 'volumes', 'workdir': 'working_dir', } ALIASES_REVMAP = dict([(y, x) for x, y in six.iteritems(ALIASES)]) def _merge_keys(kwargs): ''' The log_config is a mixture of the CLI options --log-driver and --log-opt (which we support in Salt as log_driver and log_opt, respectively), but it must be submitted to the host config in the format {'Type': log_driver, 'Config': log_opt}. So, we need to construct this argument to be passed to the API from those two arguments. 
''' log_driver = kwargs.pop('log_driver', helpers.NOTSET) log_opt = kwargs.pop('log_opt', helpers.NOTSET) if 'log_config' not in kwargs: if log_driver is not helpers.NOTSET \ or log_opt is not helpers.NOTSET: kwargs['log_config'] = { 'Type': log_driver if log_driver is not helpers.NOTSET else 'none', 'Config': log_opt if log_opt is not helpers.NOTSET else {} } def _post_processing(kwargs, skip_translate, invalid): ''' Additional container-specific post-translation processing ''' # Don't allow conflicting options to be set if kwargs.get('port_bindings') is not None \ and kwargs.get('publish_all_ports'): kwargs.pop('port_bindings') invalid['port_bindings'] = 'Cannot be used when publish_all_ports=True' if kwargs.get('hostname') is not None \ and kwargs.get('network_mode') == 'host': kwargs.pop('hostname') invalid['hostname'] = 'Cannot be used when network_mode=True' # Make sure volumes and ports are defined to match the binds and port_bindings if kwargs.get('binds') is not None \ and (skip_translate is True or all(x not in skip_translate for x in ('binds', 'volume', 'volumes'))): # Make sure that all volumes defined in "binds" are included in the # "volumes" param. 
auto_volumes = [] if isinstance(kwargs['binds'], dict): for val in six.itervalues(kwargs['binds']): try: if 'bind' in val: auto_volumes.append(val['bind']) except TypeError: continue else: if isinstance(kwargs['binds'], list): auto_volume_defs = kwargs['binds'] else: try: auto_volume_defs = helpers.split(kwargs['binds']) except AttributeError: auto_volume_defs = [] for val in auto_volume_defs: try: auto_volumes.append(helpers.split(val, ':')[1]) except IndexError: continue if auto_volumes: actual_volumes = kwargs.setdefault('volumes', []) actual_volumes.extend([x for x in auto_volumes if x not in actual_volumes]) # Sort list to make unit tests more reliable actual_volumes.sort() if kwargs.get('port_bindings') is not None \ and all(x not in skip_translate for x in ('port_bindings', 'expose', 'ports')): # Make sure that all ports defined in "port_bindings" are included in # the "ports" param. ports_to_bind = list(kwargs['port_bindings']) if ports_to_bind: ports_to_open = set(kwargs.get('ports', [])) ports_to_open.update([helpers.get_port_def(x) for x in ports_to_bind]) kwargs['ports'] = list(ports_to_open) if 'ports' in kwargs \ and all(x not in skip_translate for x in ('expose', 'ports')): # TCP ports should only be passed as the port number. Normalize the # input so a port definition of 80/tcp becomes just 80 instead of # (80, 'tcp'). for index, _ in enumerate(kwargs['ports']): try: if kwargs['ports'][index][1] == 'tcp': kwargs['ports'][index] = ports_to_open[index][0] except TypeError: continue # Functions below must match names of docker-py arguments def auto_remove(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def binds(val, **kwargs): # pylint: disable=unused-argument ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python list. 
''' if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: raise SaltInvocationError( '\'{0}\' is not a dictionary or list of bind ' 'definitions'.format(val) ) return val def blkio_weight(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def blkio_weight_device(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is a list of PATH:WEIGHT pairs, but the API expects a list of dictionaries in the format [{'Path': path, 'Weight': weight}] ''' val = helpers.map_vals(val, 'Path', 'Weight') for idx in range(len(val)): try: val[idx]['Weight'] = int(val[idx]['Weight']) except (TypeError, ValueError): raise SaltInvocationError( 'Weight \'{Weight}\' for path \'{Path}\' is not an ' 'integer'.format(**val[idx]) ) return val def cap_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def cap_drop(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def command(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def cpuset_cpus(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpuset_mems(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpu_group(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_period(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_shares(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def detach(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def device_read_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_read_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def 
device_write_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_write_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def devices(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_search(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns(val, **kwargs): val = helpers.translate_stringlist(val) if kwargs.get('validate_ip_addrs', True): for item in val: helpers.validate_ip(item) return val def domainname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def entrypoint(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def environment(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def extra_hosts(val, **kwargs): val = helpers.translate_key_val(val, delimiter=':') if kwargs.get('validate_ip_addrs', True): for key in val: helpers.validate_ip(val[key]) return val def group_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def host_config(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def hostname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def ipc_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def isolation(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def labels(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_labels(val) def links(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter=':') def log_driver(val, **kwargs): # pylint: 
disable=unused-argument return helpers.translate_str(val) def log_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def lxc_conf(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def mac_address(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def mem_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def mem_swappiness(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def memswap_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def name(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def network_disabled(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def network_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def oom_kill_disable(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def oom_score_adj(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def pid_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def pids_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def ports(val, **kwargs): # pylint: disable=unused-argument ''' Like cap_add, cap_drop, etc., this option can be specified multiple times, and each time can be a port number or port range. Ultimately, the API expects a list, but elements in the list are ints when the port is TCP, and a tuple (port_num, 'udp') when the port is UDP. 
''' if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: if isinstance(val, six.integer_types): val = [val] else: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(val) ) new_ports = set() for item in val: if isinstance(item, six.integer_types): new_ports.add(item) continue try: item, _, proto = item.partition('/') except AttributeError: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(item) ) try: range_start, range_end = \ helpers.get_port_range(item) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the "message" attribute of the ValueError. raise SaltInvocationError(exc.__str__()) new_ports.update([helpers.get_port_def(x, proto) for x in range(range_start, range_end + 1)]) return list(new_ports) def privileged(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def publish_all_ports(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def read_only(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def restart_policy(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is in the format NAME[:RETRY_COUNT] but the API expects {'Name': name, 'MaximumRetryCount': retry_count}. We will use the 'fill' kwarg here to make sure the mapped result uses '0' for the count if this optional value was omitted. ''' val = helpers.map_vals( val, 'Name', 'MaximumRetryCount', fill='0') # map_vals() converts the input into a list of dicts, but the API # wants just a dict, so extract the value from the single-element # list. If there was more than one element in the list, then # invalid input was passed (i.e. a comma-separated list, when what # we wanted was a single value). 
if len(val) != 1: raise SaltInvocationError('Only one policy is permitted') val = val[0] try: # The count needs to be an integer val['MaximumRetryCount'] = int(val['MaximumRetryCount']) except (TypeError, ValueError): # Non-numeric retry count passed raise SaltInvocationError( 'Retry count \'{0}\' is non-numeric'.format(val['MaximumRetryCount']) ) return val def security_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def shm_size(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def stdin_open(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def stop_signal(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def stop_timeout(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def storage_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def sysctls(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def tmpfs(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def tty(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def ulimits(val, **kwargs): # pylint: disable=unused-argument val = helpers.translate_stringlist(val) for idx in range(len(val)): if not isinstance(val[idx], dict): try: ulimit_name, limits = \ helpers.split(val[idx], '=', 1) comps = helpers.split(limits, ':', 1) except (AttributeError, ValueError): raise SaltInvocationError( 'Ulimit definition \'{0}\' is not in the format ' 'type=soft_limit[:hard_limit]'.format(val[idx]) ) if len(comps) == 1: comps *= 2 soft_limit, hard_limit = comps try: val[idx] = {'Name': ulimit_name, 'Soft': int(soft_limit), 'Hard': int(hard_limit)} except (TypeError, ValueError): raise SaltInvocationError( 'Limit \'{0}\' contains non-numeric value(s)'.format( val[idx] ) ) 
return val def user(val, **kwargs): # pylint: disable=unused-argument ''' This can be either a string or a numeric uid ''' if not isinstance(val, six.integer_types): # Try to convert to integer. This will fail if the value is a # username. This is OK, as we check below to make sure that the # value is either a string or integer. Trying to convert to an # integer first though will allow us to catch the edge case in # which a quoted uid is passed (e.g. '1000'). try: val = int(val) except (TypeError, ValueError): pass if not isinstance(val, (six.integer_types, six.string_types)): raise SaltInvocationError('Value must be a username or uid') elif isinstance(val, six.integer_types) and val < 0: raise SaltInvocationError('\'{0}\' is an invalid uid'.format(val)) return val def userns_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volume_driver(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volumes(val, **kwargs): # pylint: disable=unused-argument ''' Should be a list of absolute paths ''' val = helpers.translate_stringlist(val) for item in val: if not os.path.isabs(item): raise SaltInvocationError( '\'{0}\' is not an absolute path'.format(item) ) return val def volumes_from(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def working_dir(val, **kwargs): # pylint: disable=unused-argument ''' Must be an absolute path ''' try: is_abs = os.path.isabs(val) except AttributeError: is_abs = False if not is_abs: raise SaltInvocationError('\'{0}\' is not an absolute path'.format(val)) return val
saltstack/salt
salt/utils/docker/translate/container.py
ports
python
def ports(val, **kwargs): # pylint: disable=unused-argument ''' Like cap_add, cap_drop, etc., this option can be specified multiple times, and each time can be a port number or port range. Ultimately, the API expects a list, but elements in the list are ints when the port is TCP, and a tuple (port_num, 'udp') when the port is UDP. ''' if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: if isinstance(val, six.integer_types): val = [val] else: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(val) ) new_ports = set() for item in val: if isinstance(item, six.integer_types): new_ports.add(item) continue try: item, _, proto = item.partition('/') except AttributeError: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(item) ) try: range_start, range_end = \ helpers.get_port_range(item) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the "message" attribute of the ValueError. raise SaltInvocationError(exc.__str__()) new_ports.update([helpers.get_port_def(x, proto) for x in range(range_start, range_end + 1)]) return list(new_ports)
Like cap_add, cap_drop, etc., this option can be specified multiple times, and each time can be a port number or port range. Ultimately, the API expects a list, but elements in the list are ints when the port is TCP, and a tuple (port_num, 'udp') when the port is UDP.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/docker/translate/container.py#L517-L554
[ "def split(item, sep=',', maxsplit=-1):\n return [x.strip() for x in item.split(sep, maxsplit)]\n", "def get_port_range(port_def):\n '''\n Given a port number or range, return a start and end to that range. Port\n ranges are defined as a string containing two numbers separated by a dash\n (e.g. '4505-4506').\n\n A ValueError will be raised if bad input is provided.\n '''\n if isinstance(port_def, six.integer_types):\n # Single integer, start/end of range is the same\n return port_def, port_def\n try:\n comps = [int(x) for x in split(port_def, '-')]\n if len(comps) == 1:\n range_start = range_end = comps[0]\n else:\n range_start, range_end = comps\n if range_start > range_end:\n raise ValueError('start > end')\n except (TypeError, ValueError) as exc:\n if exc.__str__() == 'start > end':\n msg = (\n 'Start of port range ({0}) cannot be greater than end of '\n 'port range ({1})'.format(range_start, range_end)\n )\n else:\n msg = '\\'{0}\\' is non-numeric or an invalid port range'.format(\n port_def\n )\n raise ValueError(msg)\n else:\n return range_start, range_end\n" ]
# -*- coding: utf-8 -*- ''' Functions to translate input for container creation ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import os # Import Salt libs from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Import helpers from . import helpers ALIASES = { 'cmd': 'command', 'cpuset': 'cpuset_cpus', 'dns_option': 'dns_opt', 'env': 'environment', 'expose': 'ports', 'interactive': 'stdin_open', 'ipc': 'ipc_mode', 'label': 'labels', 'memory': 'mem_limit', 'memory_swap': 'memswap_limit', 'publish': 'port_bindings', 'publish_all': 'publish_all_ports', 'restart': 'restart_policy', 'rm': 'auto_remove', 'sysctl': 'sysctls', 'security_opts': 'security_opt', 'ulimit': 'ulimits', 'user_ns_mode': 'userns_mode', 'volume': 'volumes', 'workdir': 'working_dir', } ALIASES_REVMAP = dict([(y, x) for x, y in six.iteritems(ALIASES)]) def _merge_keys(kwargs): ''' The log_config is a mixture of the CLI options --log-driver and --log-opt (which we support in Salt as log_driver and log_opt, respectively), but it must be submitted to the host config in the format {'Type': log_driver, 'Config': log_opt}. So, we need to construct this argument to be passed to the API from those two arguments. 
''' log_driver = kwargs.pop('log_driver', helpers.NOTSET) log_opt = kwargs.pop('log_opt', helpers.NOTSET) if 'log_config' not in kwargs: if log_driver is not helpers.NOTSET \ or log_opt is not helpers.NOTSET: kwargs['log_config'] = { 'Type': log_driver if log_driver is not helpers.NOTSET else 'none', 'Config': log_opt if log_opt is not helpers.NOTSET else {} } def _post_processing(kwargs, skip_translate, invalid): ''' Additional container-specific post-translation processing ''' # Don't allow conflicting options to be set if kwargs.get('port_bindings') is not None \ and kwargs.get('publish_all_ports'): kwargs.pop('port_bindings') invalid['port_bindings'] = 'Cannot be used when publish_all_ports=True' if kwargs.get('hostname') is not None \ and kwargs.get('network_mode') == 'host': kwargs.pop('hostname') invalid['hostname'] = 'Cannot be used when network_mode=True' # Make sure volumes and ports are defined to match the binds and port_bindings if kwargs.get('binds') is not None \ and (skip_translate is True or all(x not in skip_translate for x in ('binds', 'volume', 'volumes'))): # Make sure that all volumes defined in "binds" are included in the # "volumes" param. 
auto_volumes = [] if isinstance(kwargs['binds'], dict): for val in six.itervalues(kwargs['binds']): try: if 'bind' in val: auto_volumes.append(val['bind']) except TypeError: continue else: if isinstance(kwargs['binds'], list): auto_volume_defs = kwargs['binds'] else: try: auto_volume_defs = helpers.split(kwargs['binds']) except AttributeError: auto_volume_defs = [] for val in auto_volume_defs: try: auto_volumes.append(helpers.split(val, ':')[1]) except IndexError: continue if auto_volumes: actual_volumes = kwargs.setdefault('volumes', []) actual_volumes.extend([x for x in auto_volumes if x not in actual_volumes]) # Sort list to make unit tests more reliable actual_volumes.sort() if kwargs.get('port_bindings') is not None \ and all(x not in skip_translate for x in ('port_bindings', 'expose', 'ports')): # Make sure that all ports defined in "port_bindings" are included in # the "ports" param. ports_to_bind = list(kwargs['port_bindings']) if ports_to_bind: ports_to_open = set(kwargs.get('ports', [])) ports_to_open.update([helpers.get_port_def(x) for x in ports_to_bind]) kwargs['ports'] = list(ports_to_open) if 'ports' in kwargs \ and all(x not in skip_translate for x in ('expose', 'ports')): # TCP ports should only be passed as the port number. Normalize the # input so a port definition of 80/tcp becomes just 80 instead of # (80, 'tcp'). for index, _ in enumerate(kwargs['ports']): try: if kwargs['ports'][index][1] == 'tcp': kwargs['ports'][index] = ports_to_open[index][0] except TypeError: continue # Functions below must match names of docker-py arguments def auto_remove(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def binds(val, **kwargs): # pylint: disable=unused-argument ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python list. 
''' if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: raise SaltInvocationError( '\'{0}\' is not a dictionary or list of bind ' 'definitions'.format(val) ) return val def blkio_weight(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def blkio_weight_device(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is a list of PATH:WEIGHT pairs, but the API expects a list of dictionaries in the format [{'Path': path, 'Weight': weight}] ''' val = helpers.map_vals(val, 'Path', 'Weight') for idx in range(len(val)): try: val[idx]['Weight'] = int(val[idx]['Weight']) except (TypeError, ValueError): raise SaltInvocationError( 'Weight \'{Weight}\' for path \'{Path}\' is not an ' 'integer'.format(**val[idx]) ) return val def cap_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def cap_drop(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def command(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def cpuset_cpus(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpuset_mems(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpu_group(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_period(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_shares(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def detach(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def device_read_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_read_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def 
device_write_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_write_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def devices(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_search(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns(val, **kwargs): val = helpers.translate_stringlist(val) if kwargs.get('validate_ip_addrs', True): for item in val: helpers.validate_ip(item) return val def domainname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def entrypoint(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def environment(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def extra_hosts(val, **kwargs): val = helpers.translate_key_val(val, delimiter=':') if kwargs.get('validate_ip_addrs', True): for key in val: helpers.validate_ip(val[key]) return val def group_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def host_config(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def hostname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def ipc_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def isolation(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def labels(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_labels(val) def links(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter=':') def log_driver(val, **kwargs): # pylint: 
disable=unused-argument return helpers.translate_str(val) def log_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def lxc_conf(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def mac_address(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def mem_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def mem_swappiness(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def memswap_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def name(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def network_disabled(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def network_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def oom_kill_disable(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def oom_score_adj(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def pid_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def pids_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def port_bindings(val, **kwargs): ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python dictionary mapping ports to their bindings. The format the API expects is complicated depending on whether or not the external port maps to a different internal port, or if the port binding is for UDP instead of TCP (the default). 
For reference, see the "Port bindings" section in the docker-py documentation at the following URL: http://docker-py.readthedocs.io/en/stable/api.html ''' validate_ip_addrs = kwargs.get('validate_ip_addrs', True) if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: val = helpers.split(six.text_type(val)) for idx in range(len(val)): if not isinstance(val[idx], six.string_types): val[idx] = six.text_type(val[idx]) def _format_port(port_num, proto): return six.text_type(port_num) + '/udp' if proto.lower() == 'udp' else port_num bindings = {} for binding in val: bind_parts = helpers.split(binding, ':') num_bind_parts = len(bind_parts) if num_bind_parts == 1: # Single port or port range being passed through (no # special mapping) container_port = six.text_type(bind_parts[0]) if container_port == '': raise SaltInvocationError( 'Empty port binding definition found' ) container_port, _, proto = container_port.partition('/') try: start, end = helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) bind_vals = [ (_format_port(port_num, proto), None) for port_num in range(start, end + 1) ] elif num_bind_parts == 2: if bind_parts[0] == '': raise SaltInvocationError( 'Empty host port in port binding definition ' '\'{0}\''.format(binding) ) if bind_parts[1] == '': raise SaltInvocationError( 'Empty container port in port binding definition ' '\'{0}\''.format(binding) ) container_port, _, proto = bind_parts[1].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) hport_start, hport_end = \ helpers.get_port_range(bind_parts[0]) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(bind_parts[0], container_port) ) cport_list = list(range(cport_start, cport_end + 1)) hport_list = list(range(hport_start, hport_end + 1)) bind_vals = [ (_format_port(cport_list[x], proto), hport_list[x]) for x in range(len(cport_list)) ] elif num_bind_parts == 3: host_ip, host_port = bind_parts[0:2] if validate_ip_addrs: helpers.validate_ip(host_ip) container_port, _, proto = bind_parts[2].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) cport_list = list(range(cport_start, cport_end + 1)) if host_port == '': hport_list = [None] * len(cport_list) else: try: hport_start, hport_end = \ helpers.get_port_range(host_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) hport_list = list(range(hport_start, hport_end + 1)) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(host_port, container_port) ) bind_vals = [( _format_port(val, proto), (host_ip,) if hport_list[idx] is None else (host_ip, hport_list[idx]) ) for idx, val in enumerate(cport_list)] else: raise SaltInvocationError( '\'{0}\' is an invalid port binding definition (at most ' '3 components are allowed, found {1})'.format( binding, num_bind_parts ) ) for cport, bind_def in bind_vals: if cport not in bindings: bindings[cport] = bind_def else: if isinstance(bindings[cport], list): # Append to existing list of bindings for this # container port. bindings[cport].append(bind_def) else: bindings[cport] = [bindings[cport], bind_def] for idx in range(len(bindings[cport])): if bindings[cport][idx] is None: # Now that we are adding multiple # bindings try: # Convert 1234/udp to 1234 bindings[cport][idx] = int(cport.split('/')[0]) except AttributeError: # Port was tcp, the AttributeError # signifies that the split failed # because the port number was # already defined as an integer. # Just use the cport. bindings[cport][idx] = cport val = bindings return val def privileged(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def publish_all_ports(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def read_only(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def restart_policy(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is in the format NAME[:RETRY_COUNT] but the API expects {'Name': name, 'MaximumRetryCount': retry_count}. We will use the 'fill' kwarg here to make sure the mapped result uses '0' for the count if this optional value was omitted. 
''' val = helpers.map_vals( val, 'Name', 'MaximumRetryCount', fill='0') # map_vals() converts the input into a list of dicts, but the API # wants just a dict, so extract the value from the single-element # list. If there was more than one element in the list, then # invalid input was passed (i.e. a comma-separated list, when what # we wanted was a single value). if len(val) != 1: raise SaltInvocationError('Only one policy is permitted') val = val[0] try: # The count needs to be an integer val['MaximumRetryCount'] = int(val['MaximumRetryCount']) except (TypeError, ValueError): # Non-numeric retry count passed raise SaltInvocationError( 'Retry count \'{0}\' is non-numeric'.format(val['MaximumRetryCount']) ) return val def security_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def shm_size(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def stdin_open(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def stop_signal(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def stop_timeout(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def storage_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def sysctls(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def tmpfs(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def tty(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def ulimits(val, **kwargs): # pylint: disable=unused-argument val = helpers.translate_stringlist(val) for idx in range(len(val)): if not isinstance(val[idx], dict): try: ulimit_name, limits = \ helpers.split(val[idx], '=', 1) comps = helpers.split(limits, ':', 1) except (AttributeError, ValueError): raise SaltInvocationError( 'Ulimit 
definition \'{0}\' is not in the format ' 'type=soft_limit[:hard_limit]'.format(val[idx]) ) if len(comps) == 1: comps *= 2 soft_limit, hard_limit = comps try: val[idx] = {'Name': ulimit_name, 'Soft': int(soft_limit), 'Hard': int(hard_limit)} except (TypeError, ValueError): raise SaltInvocationError( 'Limit \'{0}\' contains non-numeric value(s)'.format( val[idx] ) ) return val def user(val, **kwargs): # pylint: disable=unused-argument ''' This can be either a string or a numeric uid ''' if not isinstance(val, six.integer_types): # Try to convert to integer. This will fail if the value is a # username. This is OK, as we check below to make sure that the # value is either a string or integer. Trying to convert to an # integer first though will allow us to catch the edge case in # which a quoted uid is passed (e.g. '1000'). try: val = int(val) except (TypeError, ValueError): pass if not isinstance(val, (six.integer_types, six.string_types)): raise SaltInvocationError('Value must be a username or uid') elif isinstance(val, six.integer_types) and val < 0: raise SaltInvocationError('\'{0}\' is an invalid uid'.format(val)) return val def userns_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volume_driver(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volumes(val, **kwargs): # pylint: disable=unused-argument ''' Should be a list of absolute paths ''' val = helpers.translate_stringlist(val) for item in val: if not os.path.isabs(item): raise SaltInvocationError( '\'{0}\' is not an absolute path'.format(item) ) return val def volumes_from(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def working_dir(val, **kwargs): # pylint: disable=unused-argument ''' Must be an absolute path ''' try: is_abs = os.path.isabs(val) except AttributeError: is_abs = False if not is_abs: raise SaltInvocationError('\'{0}\' is not an absolute path'.format(val)) return val
saltstack/salt
salt/utils/docker/translate/container.py
restart_policy
python
def restart_policy(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is in the format NAME[:RETRY_COUNT] but the API expects {'Name': name, 'MaximumRetryCount': retry_count}. We will use the 'fill' kwarg here to make sure the mapped result uses '0' for the count if this optional value was omitted. ''' val = helpers.map_vals( val, 'Name', 'MaximumRetryCount', fill='0') # map_vals() converts the input into a list of dicts, but the API # wants just a dict, so extract the value from the single-element # list. If there was more than one element in the list, then # invalid input was passed (i.e. a comma-separated list, when what # we wanted was a single value). if len(val) != 1: raise SaltInvocationError('Only one policy is permitted') val = val[0] try: # The count needs to be an integer val['MaximumRetryCount'] = int(val['MaximumRetryCount']) except (TypeError, ValueError): # Non-numeric retry count passed raise SaltInvocationError( 'Retry count \'{0}\' is non-numeric'.format(val['MaximumRetryCount']) ) return val
CLI input is in the format NAME[:RETRY_COUNT] but the API expects {'Name': name, 'MaximumRetryCount': retry_count}. We will use the 'fill' kwarg here to make sure the mapped result uses '0' for the count if this optional value was omitted.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/docker/translate/container.py#L569-L597
[ "def map_vals(val, *names, **extra_opts):\n '''\n Many arguments come in as a list of VAL1:VAL2 pairs, but map to a list\n of dicts in the format {NAME1: VAL1, NAME2: VAL2}. This function\n provides common code to handle these instances.\n '''\n fill = extra_opts.pop('fill', NOTSET)\n expected_num_elements = len(names)\n val = translate_stringlist(val)\n for idx, item in enumerate(val):\n if not isinstance(item, dict):\n elements = [x.strip() for x in item.split(':')]\n num_elements = len(elements)\n if num_elements < expected_num_elements:\n if fill is NOTSET:\n raise SaltInvocationError(\n '\\'{0}\\' contains {1} value(s) (expected {2})'.format(\n item, num_elements, expected_num_elements\n )\n )\n elements.extend([fill] * (expected_num_elements - num_elements))\n elif num_elements > expected_num_elements:\n raise SaltInvocationError(\n '\\'{0}\\' contains {1} value(s) (expected {2})'.format(\n item,\n num_elements,\n expected_num_elements if fill is NOTSET\n else 'up to {0}'.format(expected_num_elements)\n )\n )\n val[idx] = dict(zip(names, elements))\n return val\n" ]
# -*- coding: utf-8 -*- ''' Functions to translate input for container creation ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import os # Import Salt libs from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Import helpers from . import helpers ALIASES = { 'cmd': 'command', 'cpuset': 'cpuset_cpus', 'dns_option': 'dns_opt', 'env': 'environment', 'expose': 'ports', 'interactive': 'stdin_open', 'ipc': 'ipc_mode', 'label': 'labels', 'memory': 'mem_limit', 'memory_swap': 'memswap_limit', 'publish': 'port_bindings', 'publish_all': 'publish_all_ports', 'restart': 'restart_policy', 'rm': 'auto_remove', 'sysctl': 'sysctls', 'security_opts': 'security_opt', 'ulimit': 'ulimits', 'user_ns_mode': 'userns_mode', 'volume': 'volumes', 'workdir': 'working_dir', } ALIASES_REVMAP = dict([(y, x) for x, y in six.iteritems(ALIASES)]) def _merge_keys(kwargs): ''' The log_config is a mixture of the CLI options --log-driver and --log-opt (which we support in Salt as log_driver and log_opt, respectively), but it must be submitted to the host config in the format {'Type': log_driver, 'Config': log_opt}. So, we need to construct this argument to be passed to the API from those two arguments. 
''' log_driver = kwargs.pop('log_driver', helpers.NOTSET) log_opt = kwargs.pop('log_opt', helpers.NOTSET) if 'log_config' not in kwargs: if log_driver is not helpers.NOTSET \ or log_opt is not helpers.NOTSET: kwargs['log_config'] = { 'Type': log_driver if log_driver is not helpers.NOTSET else 'none', 'Config': log_opt if log_opt is not helpers.NOTSET else {} } def _post_processing(kwargs, skip_translate, invalid): ''' Additional container-specific post-translation processing ''' # Don't allow conflicting options to be set if kwargs.get('port_bindings') is not None \ and kwargs.get('publish_all_ports'): kwargs.pop('port_bindings') invalid['port_bindings'] = 'Cannot be used when publish_all_ports=True' if kwargs.get('hostname') is not None \ and kwargs.get('network_mode') == 'host': kwargs.pop('hostname') invalid['hostname'] = 'Cannot be used when network_mode=True' # Make sure volumes and ports are defined to match the binds and port_bindings if kwargs.get('binds') is not None \ and (skip_translate is True or all(x not in skip_translate for x in ('binds', 'volume', 'volumes'))): # Make sure that all volumes defined in "binds" are included in the # "volumes" param. 
auto_volumes = [] if isinstance(kwargs['binds'], dict): for val in six.itervalues(kwargs['binds']): try: if 'bind' in val: auto_volumes.append(val['bind']) except TypeError: continue else: if isinstance(kwargs['binds'], list): auto_volume_defs = kwargs['binds'] else: try: auto_volume_defs = helpers.split(kwargs['binds']) except AttributeError: auto_volume_defs = [] for val in auto_volume_defs: try: auto_volumes.append(helpers.split(val, ':')[1]) except IndexError: continue if auto_volumes: actual_volumes = kwargs.setdefault('volumes', []) actual_volumes.extend([x for x in auto_volumes if x not in actual_volumes]) # Sort list to make unit tests more reliable actual_volumes.sort() if kwargs.get('port_bindings') is not None \ and all(x not in skip_translate for x in ('port_bindings', 'expose', 'ports')): # Make sure that all ports defined in "port_bindings" are included in # the "ports" param. ports_to_bind = list(kwargs['port_bindings']) if ports_to_bind: ports_to_open = set(kwargs.get('ports', [])) ports_to_open.update([helpers.get_port_def(x) for x in ports_to_bind]) kwargs['ports'] = list(ports_to_open) if 'ports' in kwargs \ and all(x not in skip_translate for x in ('expose', 'ports')): # TCP ports should only be passed as the port number. Normalize the # input so a port definition of 80/tcp becomes just 80 instead of # (80, 'tcp'). for index, _ in enumerate(kwargs['ports']): try: if kwargs['ports'][index][1] == 'tcp': kwargs['ports'][index] = ports_to_open[index][0] except TypeError: continue # Functions below must match names of docker-py arguments def auto_remove(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def binds(val, **kwargs): # pylint: disable=unused-argument ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python list. 
''' if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: raise SaltInvocationError( '\'{0}\' is not a dictionary or list of bind ' 'definitions'.format(val) ) return val def blkio_weight(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def blkio_weight_device(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is a list of PATH:WEIGHT pairs, but the API expects a list of dictionaries in the format [{'Path': path, 'Weight': weight}] ''' val = helpers.map_vals(val, 'Path', 'Weight') for idx in range(len(val)): try: val[idx]['Weight'] = int(val[idx]['Weight']) except (TypeError, ValueError): raise SaltInvocationError( 'Weight \'{Weight}\' for path \'{Path}\' is not an ' 'integer'.format(**val[idx]) ) return val def cap_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def cap_drop(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def command(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def cpuset_cpus(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpuset_mems(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpu_group(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_period(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_shares(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def detach(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def device_read_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_read_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def 
device_write_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_write_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def devices(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_search(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns(val, **kwargs): val = helpers.translate_stringlist(val) if kwargs.get('validate_ip_addrs', True): for item in val: helpers.validate_ip(item) return val def domainname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def entrypoint(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def environment(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def extra_hosts(val, **kwargs): val = helpers.translate_key_val(val, delimiter=':') if kwargs.get('validate_ip_addrs', True): for key in val: helpers.validate_ip(val[key]) return val def group_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def host_config(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def hostname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def ipc_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def isolation(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def labels(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_labels(val) def links(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter=':') def log_driver(val, **kwargs): # pylint: 
disable=unused-argument return helpers.translate_str(val) def log_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def lxc_conf(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def mac_address(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def mem_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def mem_swappiness(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def memswap_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def name(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def network_disabled(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def network_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def oom_kill_disable(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def oom_score_adj(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def pid_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def pids_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def port_bindings(val, **kwargs): ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python dictionary mapping ports to their bindings. The format the API expects is complicated depending on whether or not the external port maps to a different internal port, or if the port binding is for UDP instead of TCP (the default). 
For reference, see the "Port bindings" section in the docker-py documentation at the following URL: http://docker-py.readthedocs.io/en/stable/api.html ''' validate_ip_addrs = kwargs.get('validate_ip_addrs', True) if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: val = helpers.split(six.text_type(val)) for idx in range(len(val)): if not isinstance(val[idx], six.string_types): val[idx] = six.text_type(val[idx]) def _format_port(port_num, proto): return six.text_type(port_num) + '/udp' if proto.lower() == 'udp' else port_num bindings = {} for binding in val: bind_parts = helpers.split(binding, ':') num_bind_parts = len(bind_parts) if num_bind_parts == 1: # Single port or port range being passed through (no # special mapping) container_port = six.text_type(bind_parts[0]) if container_port == '': raise SaltInvocationError( 'Empty port binding definition found' ) container_port, _, proto = container_port.partition('/') try: start, end = helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) bind_vals = [ (_format_port(port_num, proto), None) for port_num in range(start, end + 1) ] elif num_bind_parts == 2: if bind_parts[0] == '': raise SaltInvocationError( 'Empty host port in port binding definition ' '\'{0}\''.format(binding) ) if bind_parts[1] == '': raise SaltInvocationError( 'Empty container port in port binding definition ' '\'{0}\''.format(binding) ) container_port, _, proto = bind_parts[1].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) hport_start, hport_end = \ helpers.get_port_range(bind_parts[0]) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(bind_parts[0], container_port) ) cport_list = list(range(cport_start, cport_end + 1)) hport_list = list(range(hport_start, hport_end + 1)) bind_vals = [ (_format_port(cport_list[x], proto), hport_list[x]) for x in range(len(cport_list)) ] elif num_bind_parts == 3: host_ip, host_port = bind_parts[0:2] if validate_ip_addrs: helpers.validate_ip(host_ip) container_port, _, proto = bind_parts[2].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) cport_list = list(range(cport_start, cport_end + 1)) if host_port == '': hport_list = [None] * len(cport_list) else: try: hport_start, hport_end = \ helpers.get_port_range(host_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) hport_list = list(range(hport_start, hport_end + 1)) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(host_port, container_port) ) bind_vals = [( _format_port(val, proto), (host_ip,) if hport_list[idx] is None else (host_ip, hport_list[idx]) ) for idx, val in enumerate(cport_list)] else: raise SaltInvocationError( '\'{0}\' is an invalid port binding definition (at most ' '3 components are allowed, found {1})'.format( binding, num_bind_parts ) ) for cport, bind_def in bind_vals: if cport not in bindings: bindings[cport] = bind_def else: if isinstance(bindings[cport], list): # Append to existing list of bindings for this # container port. bindings[cport].append(bind_def) else: bindings[cport] = [bindings[cport], bind_def] for idx in range(len(bindings[cport])): if bindings[cport][idx] is None: # Now that we are adding multiple # bindings try: # Convert 1234/udp to 1234 bindings[cport][idx] = int(cport.split('/')[0]) except AttributeError: # Port was tcp, the AttributeError # signifies that the split failed # because the port number was # already defined as an integer. # Just use the cport. bindings[cport][idx] = cport val = bindings return val def ports(val, **kwargs): # pylint: disable=unused-argument ''' Like cap_add, cap_drop, etc., this option can be specified multiple times, and each time can be a port number or port range. Ultimately, the API expects a list, but elements in the list are ints when the port is TCP, and a tuple (port_num, 'udp') when the port is UDP. 
''' if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: if isinstance(val, six.integer_types): val = [val] else: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(val) ) new_ports = set() for item in val: if isinstance(item, six.integer_types): new_ports.add(item) continue try: item, _, proto = item.partition('/') except AttributeError: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(item) ) try: range_start, range_end = \ helpers.get_port_range(item) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the "message" attribute of the ValueError. raise SaltInvocationError(exc.__str__()) new_ports.update([helpers.get_port_def(x, proto) for x in range(range_start, range_end + 1)]) return list(new_ports) def privileged(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def publish_all_ports(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def read_only(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def security_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def shm_size(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def stdin_open(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def stop_signal(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def stop_timeout(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def storage_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def sysctls(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def tmpfs(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def tty(val, **kwargs): # 
pylint: disable=unused-argument return helpers.translate_bool(val) def ulimits(val, **kwargs): # pylint: disable=unused-argument val = helpers.translate_stringlist(val) for idx in range(len(val)): if not isinstance(val[idx], dict): try: ulimit_name, limits = \ helpers.split(val[idx], '=', 1) comps = helpers.split(limits, ':', 1) except (AttributeError, ValueError): raise SaltInvocationError( 'Ulimit definition \'{0}\' is not in the format ' 'type=soft_limit[:hard_limit]'.format(val[idx]) ) if len(comps) == 1: comps *= 2 soft_limit, hard_limit = comps try: val[idx] = {'Name': ulimit_name, 'Soft': int(soft_limit), 'Hard': int(hard_limit)} except (TypeError, ValueError): raise SaltInvocationError( 'Limit \'{0}\' contains non-numeric value(s)'.format( val[idx] ) ) return val def user(val, **kwargs): # pylint: disable=unused-argument ''' This can be either a string or a numeric uid ''' if not isinstance(val, six.integer_types): # Try to convert to integer. This will fail if the value is a # username. This is OK, as we check below to make sure that the # value is either a string or integer. Trying to convert to an # integer first though will allow us to catch the edge case in # which a quoted uid is passed (e.g. '1000'). 
try: val = int(val) except (TypeError, ValueError): pass if not isinstance(val, (six.integer_types, six.string_types)): raise SaltInvocationError('Value must be a username or uid') elif isinstance(val, six.integer_types) and val < 0: raise SaltInvocationError('\'{0}\' is an invalid uid'.format(val)) return val def userns_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volume_driver(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volumes(val, **kwargs): # pylint: disable=unused-argument ''' Should be a list of absolute paths ''' val = helpers.translate_stringlist(val) for item in val: if not os.path.isabs(item): raise SaltInvocationError( '\'{0}\' is not an absolute path'.format(item) ) return val def volumes_from(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def working_dir(val, **kwargs): # pylint: disable=unused-argument ''' Must be an absolute path ''' try: is_abs = os.path.isabs(val) except AttributeError: is_abs = False if not is_abs: raise SaltInvocationError('\'{0}\' is not an absolute path'.format(val)) return val
saltstack/salt
salt/utils/docker/translate/container.py
user
python
def user(val, **kwargs): # pylint: disable=unused-argument ''' This can be either a string or a numeric uid ''' if not isinstance(val, six.integer_types): # Try to convert to integer. This will fail if the value is a # username. This is OK, as we check below to make sure that the # value is either a string or integer. Trying to convert to an # integer first though will allow us to catch the edge case in # which a quoted uid is passed (e.g. '1000'). try: val = int(val) except (TypeError, ValueError): pass if not isinstance(val, (six.integer_types, six.string_types)): raise SaltInvocationError('Value must be a username or uid') elif isinstance(val, six.integer_types) and val < 0: raise SaltInvocationError('\'{0}\' is an invalid uid'.format(val)) return val
This can be either a string or a numeric uid
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/docker/translate/container.py#L665-L683
null
# -*- coding: utf-8 -*- ''' Functions to translate input for container creation ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import os # Import Salt libs from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Import helpers from . import helpers ALIASES = { 'cmd': 'command', 'cpuset': 'cpuset_cpus', 'dns_option': 'dns_opt', 'env': 'environment', 'expose': 'ports', 'interactive': 'stdin_open', 'ipc': 'ipc_mode', 'label': 'labels', 'memory': 'mem_limit', 'memory_swap': 'memswap_limit', 'publish': 'port_bindings', 'publish_all': 'publish_all_ports', 'restart': 'restart_policy', 'rm': 'auto_remove', 'sysctl': 'sysctls', 'security_opts': 'security_opt', 'ulimit': 'ulimits', 'user_ns_mode': 'userns_mode', 'volume': 'volumes', 'workdir': 'working_dir', } ALIASES_REVMAP = dict([(y, x) for x, y in six.iteritems(ALIASES)]) def _merge_keys(kwargs): ''' The log_config is a mixture of the CLI options --log-driver and --log-opt (which we support in Salt as log_driver and log_opt, respectively), but it must be submitted to the host config in the format {'Type': log_driver, 'Config': log_opt}. So, we need to construct this argument to be passed to the API from those two arguments. 
''' log_driver = kwargs.pop('log_driver', helpers.NOTSET) log_opt = kwargs.pop('log_opt', helpers.NOTSET) if 'log_config' not in kwargs: if log_driver is not helpers.NOTSET \ or log_opt is not helpers.NOTSET: kwargs['log_config'] = { 'Type': log_driver if log_driver is not helpers.NOTSET else 'none', 'Config': log_opt if log_opt is not helpers.NOTSET else {} } def _post_processing(kwargs, skip_translate, invalid): ''' Additional container-specific post-translation processing ''' # Don't allow conflicting options to be set if kwargs.get('port_bindings') is not None \ and kwargs.get('publish_all_ports'): kwargs.pop('port_bindings') invalid['port_bindings'] = 'Cannot be used when publish_all_ports=True' if kwargs.get('hostname') is not None \ and kwargs.get('network_mode') == 'host': kwargs.pop('hostname') invalid['hostname'] = 'Cannot be used when network_mode=True' # Make sure volumes and ports are defined to match the binds and port_bindings if kwargs.get('binds') is not None \ and (skip_translate is True or all(x not in skip_translate for x in ('binds', 'volume', 'volumes'))): # Make sure that all volumes defined in "binds" are included in the # "volumes" param. 
auto_volumes = [] if isinstance(kwargs['binds'], dict): for val in six.itervalues(kwargs['binds']): try: if 'bind' in val: auto_volumes.append(val['bind']) except TypeError: continue else: if isinstance(kwargs['binds'], list): auto_volume_defs = kwargs['binds'] else: try: auto_volume_defs = helpers.split(kwargs['binds']) except AttributeError: auto_volume_defs = [] for val in auto_volume_defs: try: auto_volumes.append(helpers.split(val, ':')[1]) except IndexError: continue if auto_volumes: actual_volumes = kwargs.setdefault('volumes', []) actual_volumes.extend([x for x in auto_volumes if x not in actual_volumes]) # Sort list to make unit tests more reliable actual_volumes.sort() if kwargs.get('port_bindings') is not None \ and all(x not in skip_translate for x in ('port_bindings', 'expose', 'ports')): # Make sure that all ports defined in "port_bindings" are included in # the "ports" param. ports_to_bind = list(kwargs['port_bindings']) if ports_to_bind: ports_to_open = set(kwargs.get('ports', [])) ports_to_open.update([helpers.get_port_def(x) for x in ports_to_bind]) kwargs['ports'] = list(ports_to_open) if 'ports' in kwargs \ and all(x not in skip_translate for x in ('expose', 'ports')): # TCP ports should only be passed as the port number. Normalize the # input so a port definition of 80/tcp becomes just 80 instead of # (80, 'tcp'). for index, _ in enumerate(kwargs['ports']): try: if kwargs['ports'][index][1] == 'tcp': kwargs['ports'][index] = ports_to_open[index][0] except TypeError: continue # Functions below must match names of docker-py arguments def auto_remove(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def binds(val, **kwargs): # pylint: disable=unused-argument ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python list. 
''' if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: raise SaltInvocationError( '\'{0}\' is not a dictionary or list of bind ' 'definitions'.format(val) ) return val def blkio_weight(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def blkio_weight_device(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is a list of PATH:WEIGHT pairs, but the API expects a list of dictionaries in the format [{'Path': path, 'Weight': weight}] ''' val = helpers.map_vals(val, 'Path', 'Weight') for idx in range(len(val)): try: val[idx]['Weight'] = int(val[idx]['Weight']) except (TypeError, ValueError): raise SaltInvocationError( 'Weight \'{Weight}\' for path \'{Path}\' is not an ' 'integer'.format(**val[idx]) ) return val def cap_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def cap_drop(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def command(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def cpuset_cpus(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpuset_mems(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpu_group(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_period(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_shares(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def detach(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def device_read_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_read_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def 
device_write_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_write_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def devices(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_search(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns(val, **kwargs): val = helpers.translate_stringlist(val) if kwargs.get('validate_ip_addrs', True): for item in val: helpers.validate_ip(item) return val def domainname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def entrypoint(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def environment(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def extra_hosts(val, **kwargs): val = helpers.translate_key_val(val, delimiter=':') if kwargs.get('validate_ip_addrs', True): for key in val: helpers.validate_ip(val[key]) return val def group_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def host_config(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def hostname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def ipc_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def isolation(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def labels(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_labels(val) def links(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter=':') def log_driver(val, **kwargs): # pylint: 
disable=unused-argument return helpers.translate_str(val) def log_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def lxc_conf(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def mac_address(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def mem_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def mem_swappiness(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def memswap_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def name(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def network_disabled(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def network_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def oom_kill_disable(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def oom_score_adj(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def pid_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def pids_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def port_bindings(val, **kwargs): ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python dictionary mapping ports to their bindings. The format the API expects is complicated depending on whether or not the external port maps to a different internal port, or if the port binding is for UDP instead of TCP (the default). 
For reference, see the "Port bindings" section in the docker-py documentation at the following URL: http://docker-py.readthedocs.io/en/stable/api.html ''' validate_ip_addrs = kwargs.get('validate_ip_addrs', True) if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: val = helpers.split(six.text_type(val)) for idx in range(len(val)): if not isinstance(val[idx], six.string_types): val[idx] = six.text_type(val[idx]) def _format_port(port_num, proto): return six.text_type(port_num) + '/udp' if proto.lower() == 'udp' else port_num bindings = {} for binding in val: bind_parts = helpers.split(binding, ':') num_bind_parts = len(bind_parts) if num_bind_parts == 1: # Single port or port range being passed through (no # special mapping) container_port = six.text_type(bind_parts[0]) if container_port == '': raise SaltInvocationError( 'Empty port binding definition found' ) container_port, _, proto = container_port.partition('/') try: start, end = helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) bind_vals = [ (_format_port(port_num, proto), None) for port_num in range(start, end + 1) ] elif num_bind_parts == 2: if bind_parts[0] == '': raise SaltInvocationError( 'Empty host port in port binding definition ' '\'{0}\''.format(binding) ) if bind_parts[1] == '': raise SaltInvocationError( 'Empty container port in port binding definition ' '\'{0}\''.format(binding) ) container_port, _, proto = bind_parts[1].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) hport_start, hport_end = \ helpers.get_port_range(bind_parts[0]) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(bind_parts[0], container_port) ) cport_list = list(range(cport_start, cport_end + 1)) hport_list = list(range(hport_start, hport_end + 1)) bind_vals = [ (_format_port(cport_list[x], proto), hport_list[x]) for x in range(len(cport_list)) ] elif num_bind_parts == 3: host_ip, host_port = bind_parts[0:2] if validate_ip_addrs: helpers.validate_ip(host_ip) container_port, _, proto = bind_parts[2].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) cport_list = list(range(cport_start, cport_end + 1)) if host_port == '': hport_list = [None] * len(cport_list) else: try: hport_start, hport_end = \ helpers.get_port_range(host_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) hport_list = list(range(hport_start, hport_end + 1)) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(host_port, container_port) ) bind_vals = [( _format_port(val, proto), (host_ip,) if hport_list[idx] is None else (host_ip, hport_list[idx]) ) for idx, val in enumerate(cport_list)] else: raise SaltInvocationError( '\'{0}\' is an invalid port binding definition (at most ' '3 components are allowed, found {1})'.format( binding, num_bind_parts ) ) for cport, bind_def in bind_vals: if cport not in bindings: bindings[cport] = bind_def else: if isinstance(bindings[cport], list): # Append to existing list of bindings for this # container port. bindings[cport].append(bind_def) else: bindings[cport] = [bindings[cport], bind_def] for idx in range(len(bindings[cport])): if bindings[cport][idx] is None: # Now that we are adding multiple # bindings try: # Convert 1234/udp to 1234 bindings[cport][idx] = int(cport.split('/')[0]) except AttributeError: # Port was tcp, the AttributeError # signifies that the split failed # because the port number was # already defined as an integer. # Just use the cport. bindings[cport][idx] = cport val = bindings return val def ports(val, **kwargs): # pylint: disable=unused-argument ''' Like cap_add, cap_drop, etc., this option can be specified multiple times, and each time can be a port number or port range. Ultimately, the API expects a list, but elements in the list are ints when the port is TCP, and a tuple (port_num, 'udp') when the port is UDP. 
''' if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: if isinstance(val, six.integer_types): val = [val] else: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(val) ) new_ports = set() for item in val: if isinstance(item, six.integer_types): new_ports.add(item) continue try: item, _, proto = item.partition('/') except AttributeError: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(item) ) try: range_start, range_end = \ helpers.get_port_range(item) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the "message" attribute of the ValueError. raise SaltInvocationError(exc.__str__()) new_ports.update([helpers.get_port_def(x, proto) for x in range(range_start, range_end + 1)]) return list(new_ports) def privileged(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def publish_all_ports(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def read_only(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def restart_policy(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is in the format NAME[:RETRY_COUNT] but the API expects {'Name': name, 'MaximumRetryCount': retry_count}. We will use the 'fill' kwarg here to make sure the mapped result uses '0' for the count if this optional value was omitted. ''' val = helpers.map_vals( val, 'Name', 'MaximumRetryCount', fill='0') # map_vals() converts the input into a list of dicts, but the API # wants just a dict, so extract the value from the single-element # list. If there was more than one element in the list, then # invalid input was passed (i.e. a comma-separated list, when what # we wanted was a single value). 
if len(val) != 1: raise SaltInvocationError('Only one policy is permitted') val = val[0] try: # The count needs to be an integer val['MaximumRetryCount'] = int(val['MaximumRetryCount']) except (TypeError, ValueError): # Non-numeric retry count passed raise SaltInvocationError( 'Retry count \'{0}\' is non-numeric'.format(val['MaximumRetryCount']) ) return val def security_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def shm_size(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def stdin_open(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def stop_signal(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def stop_timeout(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def storage_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def sysctls(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def tmpfs(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def tty(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def ulimits(val, **kwargs): # pylint: disable=unused-argument val = helpers.translate_stringlist(val) for idx in range(len(val)): if not isinstance(val[idx], dict): try: ulimit_name, limits = \ helpers.split(val[idx], '=', 1) comps = helpers.split(limits, ':', 1) except (AttributeError, ValueError): raise SaltInvocationError( 'Ulimit definition \'{0}\' is not in the format ' 'type=soft_limit[:hard_limit]'.format(val[idx]) ) if len(comps) == 1: comps *= 2 soft_limit, hard_limit = comps try: val[idx] = {'Name': ulimit_name, 'Soft': int(soft_limit), 'Hard': int(hard_limit)} except (TypeError, ValueError): raise SaltInvocationError( 'Limit \'{0}\' contains non-numeric value(s)'.format( val[idx] ) ) 
return val def userns_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volume_driver(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volumes(val, **kwargs): # pylint: disable=unused-argument ''' Should be a list of absolute paths ''' val = helpers.translate_stringlist(val) for item in val: if not os.path.isabs(item): raise SaltInvocationError( '\'{0}\' is not an absolute path'.format(item) ) return val def volumes_from(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def working_dir(val, **kwargs): # pylint: disable=unused-argument ''' Must be an absolute path ''' try: is_abs = os.path.isabs(val) except AttributeError: is_abs = False if not is_abs: raise SaltInvocationError('\'{0}\' is not an absolute path'.format(val)) return val
saltstack/salt
salt/utils/docker/translate/container.py
volumes
python
def volumes(val, **kwargs): # pylint: disable=unused-argument ''' Should be a list of absolute paths ''' val = helpers.translate_stringlist(val) for item in val: if not os.path.isabs(item): raise SaltInvocationError( '\'{0}\' is not an absolute path'.format(item) ) return val
Should be a list of absolute paths
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/docker/translate/container.py#L694-L704
[ "def translate_stringlist(val):\n '''\n On the CLI, these are passed as multiple instances of a given CLI option.\n In Salt, we accept these as a comma-delimited list but the API expects a\n Python list. This function accepts input and returns it back as a Python\n list of strings. If the input is a string which is a comma-separated list\n of items, split that string and return it.\n '''\n if not isinstance(val, list):\n try:\n val = split(val)\n except AttributeError:\n val = split(six.text_type(val))\n for idx in range(len(val)):\n if not isinstance(val[idx], six.string_types):\n val[idx] = six.text_type(val[idx])\n return val\n" ]
# -*- coding: utf-8 -*- ''' Functions to translate input for container creation ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import os # Import Salt libs from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Import helpers from . import helpers ALIASES = { 'cmd': 'command', 'cpuset': 'cpuset_cpus', 'dns_option': 'dns_opt', 'env': 'environment', 'expose': 'ports', 'interactive': 'stdin_open', 'ipc': 'ipc_mode', 'label': 'labels', 'memory': 'mem_limit', 'memory_swap': 'memswap_limit', 'publish': 'port_bindings', 'publish_all': 'publish_all_ports', 'restart': 'restart_policy', 'rm': 'auto_remove', 'sysctl': 'sysctls', 'security_opts': 'security_opt', 'ulimit': 'ulimits', 'user_ns_mode': 'userns_mode', 'volume': 'volumes', 'workdir': 'working_dir', } ALIASES_REVMAP = dict([(y, x) for x, y in six.iteritems(ALIASES)]) def _merge_keys(kwargs): ''' The log_config is a mixture of the CLI options --log-driver and --log-opt (which we support in Salt as log_driver and log_opt, respectively), but it must be submitted to the host config in the format {'Type': log_driver, 'Config': log_opt}. So, we need to construct this argument to be passed to the API from those two arguments. 
''' log_driver = kwargs.pop('log_driver', helpers.NOTSET) log_opt = kwargs.pop('log_opt', helpers.NOTSET) if 'log_config' not in kwargs: if log_driver is not helpers.NOTSET \ or log_opt is not helpers.NOTSET: kwargs['log_config'] = { 'Type': log_driver if log_driver is not helpers.NOTSET else 'none', 'Config': log_opt if log_opt is not helpers.NOTSET else {} } def _post_processing(kwargs, skip_translate, invalid): ''' Additional container-specific post-translation processing ''' # Don't allow conflicting options to be set if kwargs.get('port_bindings') is not None \ and kwargs.get('publish_all_ports'): kwargs.pop('port_bindings') invalid['port_bindings'] = 'Cannot be used when publish_all_ports=True' if kwargs.get('hostname') is not None \ and kwargs.get('network_mode') == 'host': kwargs.pop('hostname') invalid['hostname'] = 'Cannot be used when network_mode=True' # Make sure volumes and ports are defined to match the binds and port_bindings if kwargs.get('binds') is not None \ and (skip_translate is True or all(x not in skip_translate for x in ('binds', 'volume', 'volumes'))): # Make sure that all volumes defined in "binds" are included in the # "volumes" param. 
auto_volumes = [] if isinstance(kwargs['binds'], dict): for val in six.itervalues(kwargs['binds']): try: if 'bind' in val: auto_volumes.append(val['bind']) except TypeError: continue else: if isinstance(kwargs['binds'], list): auto_volume_defs = kwargs['binds'] else: try: auto_volume_defs = helpers.split(kwargs['binds']) except AttributeError: auto_volume_defs = [] for val in auto_volume_defs: try: auto_volumes.append(helpers.split(val, ':')[1]) except IndexError: continue if auto_volumes: actual_volumes = kwargs.setdefault('volumes', []) actual_volumes.extend([x for x in auto_volumes if x not in actual_volumes]) # Sort list to make unit tests more reliable actual_volumes.sort() if kwargs.get('port_bindings') is not None \ and all(x not in skip_translate for x in ('port_bindings', 'expose', 'ports')): # Make sure that all ports defined in "port_bindings" are included in # the "ports" param. ports_to_bind = list(kwargs['port_bindings']) if ports_to_bind: ports_to_open = set(kwargs.get('ports', [])) ports_to_open.update([helpers.get_port_def(x) for x in ports_to_bind]) kwargs['ports'] = list(ports_to_open) if 'ports' in kwargs \ and all(x not in skip_translate for x in ('expose', 'ports')): # TCP ports should only be passed as the port number. Normalize the # input so a port definition of 80/tcp becomes just 80 instead of # (80, 'tcp'). for index, _ in enumerate(kwargs['ports']): try: if kwargs['ports'][index][1] == 'tcp': kwargs['ports'][index] = ports_to_open[index][0] except TypeError: continue # Functions below must match names of docker-py arguments def auto_remove(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def binds(val, **kwargs): # pylint: disable=unused-argument ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python list. 
''' if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: raise SaltInvocationError( '\'{0}\' is not a dictionary or list of bind ' 'definitions'.format(val) ) return val def blkio_weight(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def blkio_weight_device(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is a list of PATH:WEIGHT pairs, but the API expects a list of dictionaries in the format [{'Path': path, 'Weight': weight}] ''' val = helpers.map_vals(val, 'Path', 'Weight') for idx in range(len(val)): try: val[idx]['Weight'] = int(val[idx]['Weight']) except (TypeError, ValueError): raise SaltInvocationError( 'Weight \'{Weight}\' for path \'{Path}\' is not an ' 'integer'.format(**val[idx]) ) return val def cap_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def cap_drop(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def command(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def cpuset_cpus(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpuset_mems(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpu_group(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_period(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_shares(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def detach(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def device_read_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_read_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def 
device_write_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_write_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def devices(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_search(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns(val, **kwargs): val = helpers.translate_stringlist(val) if kwargs.get('validate_ip_addrs', True): for item in val: helpers.validate_ip(item) return val def domainname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def entrypoint(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def environment(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def extra_hosts(val, **kwargs): val = helpers.translate_key_val(val, delimiter=':') if kwargs.get('validate_ip_addrs', True): for key in val: helpers.validate_ip(val[key]) return val def group_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def host_config(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def hostname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def ipc_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def isolation(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def labels(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_labels(val) def links(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter=':') def log_driver(val, **kwargs): # pylint: 
disable=unused-argument return helpers.translate_str(val) def log_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def lxc_conf(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def mac_address(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def mem_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def mem_swappiness(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def memswap_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def name(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def network_disabled(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def network_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def oom_kill_disable(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def oom_score_adj(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def pid_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def pids_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def port_bindings(val, **kwargs): ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python dictionary mapping ports to their bindings. The format the API expects is complicated depending on whether or not the external port maps to a different internal port, or if the port binding is for UDP instead of TCP (the default). 
For reference, see the "Port bindings" section in the docker-py documentation at the following URL: http://docker-py.readthedocs.io/en/stable/api.html ''' validate_ip_addrs = kwargs.get('validate_ip_addrs', True) if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: val = helpers.split(six.text_type(val)) for idx in range(len(val)): if not isinstance(val[idx], six.string_types): val[idx] = six.text_type(val[idx]) def _format_port(port_num, proto): return six.text_type(port_num) + '/udp' if proto.lower() == 'udp' else port_num bindings = {} for binding in val: bind_parts = helpers.split(binding, ':') num_bind_parts = len(bind_parts) if num_bind_parts == 1: # Single port or port range being passed through (no # special mapping) container_port = six.text_type(bind_parts[0]) if container_port == '': raise SaltInvocationError( 'Empty port binding definition found' ) container_port, _, proto = container_port.partition('/') try: start, end = helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) bind_vals = [ (_format_port(port_num, proto), None) for port_num in range(start, end + 1) ] elif num_bind_parts == 2: if bind_parts[0] == '': raise SaltInvocationError( 'Empty host port in port binding definition ' '\'{0}\''.format(binding) ) if bind_parts[1] == '': raise SaltInvocationError( 'Empty container port in port binding definition ' '\'{0}\''.format(binding) ) container_port, _, proto = bind_parts[1].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) hport_start, hport_end = \ helpers.get_port_range(bind_parts[0]) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(bind_parts[0], container_port) ) cport_list = list(range(cport_start, cport_end + 1)) hport_list = list(range(hport_start, hport_end + 1)) bind_vals = [ (_format_port(cport_list[x], proto), hport_list[x]) for x in range(len(cport_list)) ] elif num_bind_parts == 3: host_ip, host_port = bind_parts[0:2] if validate_ip_addrs: helpers.validate_ip(host_ip) container_port, _, proto = bind_parts[2].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) cport_list = list(range(cport_start, cport_end + 1)) if host_port == '': hport_list = [None] * len(cport_list) else: try: hport_start, hport_end = \ helpers.get_port_range(host_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) hport_list = list(range(hport_start, hport_end + 1)) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(host_port, container_port) ) bind_vals = [( _format_port(val, proto), (host_ip,) if hport_list[idx] is None else (host_ip, hport_list[idx]) ) for idx, val in enumerate(cport_list)] else: raise SaltInvocationError( '\'{0}\' is an invalid port binding definition (at most ' '3 components are allowed, found {1})'.format( binding, num_bind_parts ) ) for cport, bind_def in bind_vals: if cport not in bindings: bindings[cport] = bind_def else: if isinstance(bindings[cport], list): # Append to existing list of bindings for this # container port. bindings[cport].append(bind_def) else: bindings[cport] = [bindings[cport], bind_def] for idx in range(len(bindings[cport])): if bindings[cport][idx] is None: # Now that we are adding multiple # bindings try: # Convert 1234/udp to 1234 bindings[cport][idx] = int(cport.split('/')[0]) except AttributeError: # Port was tcp, the AttributeError # signifies that the split failed # because the port number was # already defined as an integer. # Just use the cport. bindings[cport][idx] = cport val = bindings return val def ports(val, **kwargs): # pylint: disable=unused-argument ''' Like cap_add, cap_drop, etc., this option can be specified multiple times, and each time can be a port number or port range. Ultimately, the API expects a list, but elements in the list are ints when the port is TCP, and a tuple (port_num, 'udp') when the port is UDP. 
''' if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: if isinstance(val, six.integer_types): val = [val] else: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(val) ) new_ports = set() for item in val: if isinstance(item, six.integer_types): new_ports.add(item) continue try: item, _, proto = item.partition('/') except AttributeError: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(item) ) try: range_start, range_end = \ helpers.get_port_range(item) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the "message" attribute of the ValueError. raise SaltInvocationError(exc.__str__()) new_ports.update([helpers.get_port_def(x, proto) for x in range(range_start, range_end + 1)]) return list(new_ports) def privileged(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def publish_all_ports(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def read_only(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def restart_policy(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is in the format NAME[:RETRY_COUNT] but the API expects {'Name': name, 'MaximumRetryCount': retry_count}. We will use the 'fill' kwarg here to make sure the mapped result uses '0' for the count if this optional value was omitted. ''' val = helpers.map_vals( val, 'Name', 'MaximumRetryCount', fill='0') # map_vals() converts the input into a list of dicts, but the API # wants just a dict, so extract the value from the single-element # list. If there was more than one element in the list, then # invalid input was passed (i.e. a comma-separated list, when what # we wanted was a single value). 
if len(val) != 1: raise SaltInvocationError('Only one policy is permitted') val = val[0] try: # The count needs to be an integer val['MaximumRetryCount'] = int(val['MaximumRetryCount']) except (TypeError, ValueError): # Non-numeric retry count passed raise SaltInvocationError( 'Retry count \'{0}\' is non-numeric'.format(val['MaximumRetryCount']) ) return val def security_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def shm_size(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def stdin_open(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def stop_signal(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def stop_timeout(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def storage_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def sysctls(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def tmpfs(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def tty(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def ulimits(val, **kwargs): # pylint: disable=unused-argument val = helpers.translate_stringlist(val) for idx in range(len(val)): if not isinstance(val[idx], dict): try: ulimit_name, limits = \ helpers.split(val[idx], '=', 1) comps = helpers.split(limits, ':', 1) except (AttributeError, ValueError): raise SaltInvocationError( 'Ulimit definition \'{0}\' is not in the format ' 'type=soft_limit[:hard_limit]'.format(val[idx]) ) if len(comps) == 1: comps *= 2 soft_limit, hard_limit = comps try: val[idx] = {'Name': ulimit_name, 'Soft': int(soft_limit), 'Hard': int(hard_limit)} except (TypeError, ValueError): raise SaltInvocationError( 'Limit \'{0}\' contains non-numeric value(s)'.format( val[idx] ) ) 
return val def user(val, **kwargs): # pylint: disable=unused-argument ''' This can be either a string or a numeric uid ''' if not isinstance(val, six.integer_types): # Try to convert to integer. This will fail if the value is a # username. This is OK, as we check below to make sure that the # value is either a string or integer. Trying to convert to an # integer first though will allow us to catch the edge case in # which a quoted uid is passed (e.g. '1000'). try: val = int(val) except (TypeError, ValueError): pass if not isinstance(val, (six.integer_types, six.string_types)): raise SaltInvocationError('Value must be a username or uid') elif isinstance(val, six.integer_types) and val < 0: raise SaltInvocationError('\'{0}\' is an invalid uid'.format(val)) return val def userns_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volume_driver(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volumes_from(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def working_dir(val, **kwargs): # pylint: disable=unused-argument ''' Must be an absolute path ''' try: is_abs = os.path.isabs(val) except AttributeError: is_abs = False if not is_abs: raise SaltInvocationError('\'{0}\' is not an absolute path'.format(val)) return val
saltstack/salt
salt/utils/docker/translate/container.py
working_dir
python
def working_dir(val, **kwargs):  # pylint: disable=unused-argument
    '''
    Validate a container working directory.

    The value must be an absolute path; it is returned unchanged when
    valid, otherwise a SaltInvocationError is raised. Extra keyword
    arguments are accepted (and ignored) for translator-API uniformity.
    '''
    try:
        # Non-path-like values raise AttributeError here, which we treat
        # the same as a relative path: invalid input.
        if os.path.isabs(val):
            return val
    except AttributeError:
        pass
    raise SaltInvocationError('\'{0}\' is not an absolute path'.format(val))
Must be an absolute path
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/docker/translate/container.py#L711-L721
null
# -*- coding: utf-8 -*- ''' Functions to translate input for container creation ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import os # Import Salt libs from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Import helpers from . import helpers ALIASES = { 'cmd': 'command', 'cpuset': 'cpuset_cpus', 'dns_option': 'dns_opt', 'env': 'environment', 'expose': 'ports', 'interactive': 'stdin_open', 'ipc': 'ipc_mode', 'label': 'labels', 'memory': 'mem_limit', 'memory_swap': 'memswap_limit', 'publish': 'port_bindings', 'publish_all': 'publish_all_ports', 'restart': 'restart_policy', 'rm': 'auto_remove', 'sysctl': 'sysctls', 'security_opts': 'security_opt', 'ulimit': 'ulimits', 'user_ns_mode': 'userns_mode', 'volume': 'volumes', 'workdir': 'working_dir', } ALIASES_REVMAP = dict([(y, x) for x, y in six.iteritems(ALIASES)]) def _merge_keys(kwargs): ''' The log_config is a mixture of the CLI options --log-driver and --log-opt (which we support in Salt as log_driver and log_opt, respectively), but it must be submitted to the host config in the format {'Type': log_driver, 'Config': log_opt}. So, we need to construct this argument to be passed to the API from those two arguments. 
''' log_driver = kwargs.pop('log_driver', helpers.NOTSET) log_opt = kwargs.pop('log_opt', helpers.NOTSET) if 'log_config' not in kwargs: if log_driver is not helpers.NOTSET \ or log_opt is not helpers.NOTSET: kwargs['log_config'] = { 'Type': log_driver if log_driver is not helpers.NOTSET else 'none', 'Config': log_opt if log_opt is not helpers.NOTSET else {} } def _post_processing(kwargs, skip_translate, invalid): ''' Additional container-specific post-translation processing ''' # Don't allow conflicting options to be set if kwargs.get('port_bindings') is not None \ and kwargs.get('publish_all_ports'): kwargs.pop('port_bindings') invalid['port_bindings'] = 'Cannot be used when publish_all_ports=True' if kwargs.get('hostname') is not None \ and kwargs.get('network_mode') == 'host': kwargs.pop('hostname') invalid['hostname'] = 'Cannot be used when network_mode=True' # Make sure volumes and ports are defined to match the binds and port_bindings if kwargs.get('binds') is not None \ and (skip_translate is True or all(x not in skip_translate for x in ('binds', 'volume', 'volumes'))): # Make sure that all volumes defined in "binds" are included in the # "volumes" param. 
auto_volumes = [] if isinstance(kwargs['binds'], dict): for val in six.itervalues(kwargs['binds']): try: if 'bind' in val: auto_volumes.append(val['bind']) except TypeError: continue else: if isinstance(kwargs['binds'], list): auto_volume_defs = kwargs['binds'] else: try: auto_volume_defs = helpers.split(kwargs['binds']) except AttributeError: auto_volume_defs = [] for val in auto_volume_defs: try: auto_volumes.append(helpers.split(val, ':')[1]) except IndexError: continue if auto_volumes: actual_volumes = kwargs.setdefault('volumes', []) actual_volumes.extend([x for x in auto_volumes if x not in actual_volumes]) # Sort list to make unit tests more reliable actual_volumes.sort() if kwargs.get('port_bindings') is not None \ and all(x not in skip_translate for x in ('port_bindings', 'expose', 'ports')): # Make sure that all ports defined in "port_bindings" are included in # the "ports" param. ports_to_bind = list(kwargs['port_bindings']) if ports_to_bind: ports_to_open = set(kwargs.get('ports', [])) ports_to_open.update([helpers.get_port_def(x) for x in ports_to_bind]) kwargs['ports'] = list(ports_to_open) if 'ports' in kwargs \ and all(x not in skip_translate for x in ('expose', 'ports')): # TCP ports should only be passed as the port number. Normalize the # input so a port definition of 80/tcp becomes just 80 instead of # (80, 'tcp'). for index, _ in enumerate(kwargs['ports']): try: if kwargs['ports'][index][1] == 'tcp': kwargs['ports'][index] = ports_to_open[index][0] except TypeError: continue # Functions below must match names of docker-py arguments def auto_remove(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def binds(val, **kwargs): # pylint: disable=unused-argument ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python list. 
''' if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: raise SaltInvocationError( '\'{0}\' is not a dictionary or list of bind ' 'definitions'.format(val) ) return val def blkio_weight(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def blkio_weight_device(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is a list of PATH:WEIGHT pairs, but the API expects a list of dictionaries in the format [{'Path': path, 'Weight': weight}] ''' val = helpers.map_vals(val, 'Path', 'Weight') for idx in range(len(val)): try: val[idx]['Weight'] = int(val[idx]['Weight']) except (TypeError, ValueError): raise SaltInvocationError( 'Weight \'{Weight}\' for path \'{Path}\' is not an ' 'integer'.format(**val[idx]) ) return val def cap_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def cap_drop(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def command(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def cpuset_cpus(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpuset_mems(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def cpu_group(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_period(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def cpu_shares(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def detach(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def device_read_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_read_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def 
device_write_bps(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=False) def device_write_iops(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_device_rates(val, numeric_rate=True) def devices(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns_search(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def dns(val, **kwargs): val = helpers.translate_stringlist(val) if kwargs.get('validate_ip_addrs', True): for item in val: helpers.validate_ip(item) return val def domainname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def entrypoint(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_command(val) def environment(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def extra_hosts(val, **kwargs): val = helpers.translate_key_val(val, delimiter=':') if kwargs.get('validate_ip_addrs', True): for key in val: helpers.validate_ip(val[key]) return val def group_add(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def host_config(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def hostname(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def ipc_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def isolation(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def labels(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_labels(val) def links(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter=':') def log_driver(val, **kwargs): # pylint: 
disable=unused-argument return helpers.translate_str(val) def log_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def lxc_conf(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def mac_address(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def mem_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def mem_swappiness(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def memswap_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def name(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def network_disabled(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def network_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def oom_kill_disable(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def oom_score_adj(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def pid_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def pids_limit(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def port_bindings(val, **kwargs): ''' On the CLI, these are passed as multiple instances of a given CLI option. In Salt, we accept these as a comma-delimited list but the API expects a Python dictionary mapping ports to their bindings. The format the API expects is complicated depending on whether or not the external port maps to a different internal port, or if the port binding is for UDP instead of TCP (the default). 
For reference, see the "Port bindings" section in the docker-py documentation at the following URL: http://docker-py.readthedocs.io/en/stable/api.html ''' validate_ip_addrs = kwargs.get('validate_ip_addrs', True) if not isinstance(val, dict): if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: val = helpers.split(six.text_type(val)) for idx in range(len(val)): if not isinstance(val[idx], six.string_types): val[idx] = six.text_type(val[idx]) def _format_port(port_num, proto): return six.text_type(port_num) + '/udp' if proto.lower() == 'udp' else port_num bindings = {} for binding in val: bind_parts = helpers.split(binding, ':') num_bind_parts = len(bind_parts) if num_bind_parts == 1: # Single port or port range being passed through (no # special mapping) container_port = six.text_type(bind_parts[0]) if container_port == '': raise SaltInvocationError( 'Empty port binding definition found' ) container_port, _, proto = container_port.partition('/') try: start, end = helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) bind_vals = [ (_format_port(port_num, proto), None) for port_num in range(start, end + 1) ] elif num_bind_parts == 2: if bind_parts[0] == '': raise SaltInvocationError( 'Empty host port in port binding definition ' '\'{0}\''.format(binding) ) if bind_parts[1] == '': raise SaltInvocationError( 'Empty container port in port binding definition ' '\'{0}\''.format(binding) ) container_port, _, proto = bind_parts[1].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) hport_start, hport_end = \ helpers.get_port_range(bind_parts[0]) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(bind_parts[0], container_port) ) cport_list = list(range(cport_start, cport_end + 1)) hport_list = list(range(hport_start, hport_end + 1)) bind_vals = [ (_format_port(cport_list[x], proto), hport_list[x]) for x in range(len(cport_list)) ] elif num_bind_parts == 3: host_ip, host_port = bind_parts[0:2] if validate_ip_addrs: helpers.validate_ip(host_ip) container_port, _, proto = bind_parts[2].partition('/') try: cport_start, cport_end = \ helpers.get_port_range(container_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. raise SaltInvocationError(exc.__str__()) cport_list = list(range(cport_start, cport_end + 1)) if host_port == '': hport_list = [None] * len(cport_list) else: try: hport_start, hport_end = \ helpers.get_port_range(host_port) except ValueError as exc: # Using __str__() to avoid deprecation warning for # using the message attribute of the ValueError. 
raise SaltInvocationError(exc.__str__()) hport_list = list(range(hport_start, hport_end + 1)) if (hport_end - hport_start) != (cport_end - cport_start): # Port range is mismatched raise SaltInvocationError( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})'.format(host_port, container_port) ) bind_vals = [( _format_port(val, proto), (host_ip,) if hport_list[idx] is None else (host_ip, hport_list[idx]) ) for idx, val in enumerate(cport_list)] else: raise SaltInvocationError( '\'{0}\' is an invalid port binding definition (at most ' '3 components are allowed, found {1})'.format( binding, num_bind_parts ) ) for cport, bind_def in bind_vals: if cport not in bindings: bindings[cport] = bind_def else: if isinstance(bindings[cport], list): # Append to existing list of bindings for this # container port. bindings[cport].append(bind_def) else: bindings[cport] = [bindings[cport], bind_def] for idx in range(len(bindings[cport])): if bindings[cport][idx] is None: # Now that we are adding multiple # bindings try: # Convert 1234/udp to 1234 bindings[cport][idx] = int(cport.split('/')[0]) except AttributeError: # Port was tcp, the AttributeError # signifies that the split failed # because the port number was # already defined as an integer. # Just use the cport. bindings[cport][idx] = cport val = bindings return val def ports(val, **kwargs): # pylint: disable=unused-argument ''' Like cap_add, cap_drop, etc., this option can be specified multiple times, and each time can be a port number or port range. Ultimately, the API expects a list, but elements in the list are ints when the port is TCP, and a tuple (port_num, 'udp') when the port is UDP. 
''' if not isinstance(val, list): try: val = helpers.split(val) except AttributeError: if isinstance(val, six.integer_types): val = [val] else: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(val) ) new_ports = set() for item in val: if isinstance(item, six.integer_types): new_ports.add(item) continue try: item, _, proto = item.partition('/') except AttributeError: raise SaltInvocationError( '\'{0}\' is not a valid port definition'.format(item) ) try: range_start, range_end = \ helpers.get_port_range(item) except ValueError as exc: # Using __str__() to avoid deprecation warning for using # the "message" attribute of the ValueError. raise SaltInvocationError(exc.__str__()) new_ports.update([helpers.get_port_def(x, proto) for x in range(range_start, range_end + 1)]) return list(new_ports) def privileged(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def publish_all_ports(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def read_only(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def restart_policy(val, **kwargs): # pylint: disable=unused-argument ''' CLI input is in the format NAME[:RETRY_COUNT] but the API expects {'Name': name, 'MaximumRetryCount': retry_count}. We will use the 'fill' kwarg here to make sure the mapped result uses '0' for the count if this optional value was omitted. ''' val = helpers.map_vals( val, 'Name', 'MaximumRetryCount', fill='0') # map_vals() converts the input into a list of dicts, but the API # wants just a dict, so extract the value from the single-element # list. If there was more than one element in the list, then # invalid input was passed (i.e. a comma-separated list, when what # we wanted was a single value). 
if len(val) != 1: raise SaltInvocationError('Only one policy is permitted') val = val[0] try: # The count needs to be an integer val['MaximumRetryCount'] = int(val['MaximumRetryCount']) except (TypeError, ValueError): # Non-numeric retry count passed raise SaltInvocationError( 'Retry count \'{0}\' is non-numeric'.format(val['MaximumRetryCount']) ) return val def security_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val) def shm_size(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bytes(val) def stdin_open(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def stop_signal(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def stop_timeout(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_int(val) def storage_opt(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def sysctls(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_key_val(val, delimiter='=') def tmpfs(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_dict(val) def tty(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_bool(val) def ulimits(val, **kwargs): # pylint: disable=unused-argument val = helpers.translate_stringlist(val) for idx in range(len(val)): if not isinstance(val[idx], dict): try: ulimit_name, limits = \ helpers.split(val[idx], '=', 1) comps = helpers.split(limits, ':', 1) except (AttributeError, ValueError): raise SaltInvocationError( 'Ulimit definition \'{0}\' is not in the format ' 'type=soft_limit[:hard_limit]'.format(val[idx]) ) if len(comps) == 1: comps *= 2 soft_limit, hard_limit = comps try: val[idx] = {'Name': ulimit_name, 'Soft': int(soft_limit), 'Hard': int(hard_limit)} except (TypeError, ValueError): raise SaltInvocationError( 'Limit \'{0}\' contains non-numeric value(s)'.format( val[idx] ) ) 
return val def user(val, **kwargs): # pylint: disable=unused-argument ''' This can be either a string or a numeric uid ''' if not isinstance(val, six.integer_types): # Try to convert to integer. This will fail if the value is a # username. This is OK, as we check below to make sure that the # value is either a string or integer. Trying to convert to an # integer first though will allow us to catch the edge case in # which a quoted uid is passed (e.g. '1000'). try: val = int(val) except (TypeError, ValueError): pass if not isinstance(val, (six.integer_types, six.string_types)): raise SaltInvocationError('Value must be a username or uid') elif isinstance(val, six.integer_types) and val < 0: raise SaltInvocationError('\'{0}\' is an invalid uid'.format(val)) return val def userns_mode(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volume_driver(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_str(val) def volumes(val, **kwargs): # pylint: disable=unused-argument ''' Should be a list of absolute paths ''' val = helpers.translate_stringlist(val) for item in val: if not os.path.isabs(item): raise SaltInvocationError( '\'{0}\' is not an absolute path'.format(item) ) return val def volumes_from(val, **kwargs): # pylint: disable=unused-argument return helpers.translate_stringlist(val)
saltstack/salt
salt/modules/k8s.py
_guess_apiserver
python
def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url
Try to guess the kubemaster URL from the environment, then from the `/etc/kubernetes/config` file
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L43-L67
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n" ]
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return 
salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return 
"Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif 
isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
_kpost
python
def _kpost(url, data):
    '''
    POST an arbitrary object to the Kubernetes API at *url*.

    *data* is JSON-encoded before sending.  On transport/HTTP failure the
    raw result dict (carrying an ``error`` key) is returned unchanged; on
    success the decoded JSON response body is returned.
    '''
    # We send a JSON payload, so advertise the matching content type.
    request_headers = {"Content-Type": "application/json"}
    log.trace("url is: %s, data is: %s", url, data)
    payload = salt.utils.json.dumps(data)
    ret = http.query(url,
                     method='POST',
                     header_dict=request_headers,
                     data=payload)
    # A truthy 'error' key means the request failed; hand it back verbatim
    # so callers can inspect status/error details.
    if ret.get('error'):
        return ret
    return salt.utils.json.loads(ret.get('body'))
create any object in kubernetes based on URL
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L70-L85
null
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret 
= http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} 
updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif 
isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
_kput
python
def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body'))
put any object in kubernetes based on URL
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L88-L102
null
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} 
updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif 
isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
_kpatch
python
def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body'))
patch any object in kubernetes based on URL
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L105-L118
null
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} 
updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif 
isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
_kname
python
def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type"
Get name or names out of json result from API server
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L121-L131
null
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} 
updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif 
isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
_is_dns_subdomain
python
def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False
Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' with a maximum length of 253 characters
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L134-L144
null
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain 
name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif 
isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
_is_port_name
python
def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False
Check that name is an IANA service name: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character; it must contain at least one (a-z) character
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L147-L157
null
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. 
code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, 
node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif 
isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
_is_dns_label
python
def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False
Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L160-L170
null
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. 
code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, 
node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif 
isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
_get_labels
python
def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {})
Get all labels from a kube node.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L180-L194
[ "def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n" ]
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. 
code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, 
node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif 
isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
_set_labels
python
def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(node) return ret
Replace labels dict by a new one
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L197-L207
[ "def _kpatch(url, data):\n ''' patch any object in kubernetes based on URL '''\n\n # Prepare headers\n headers = {\"Content-Type\": \"application/json-patch+json\"}\n # Make request\n ret = http.query(url, method='PATCH', header_dict=headers,\n data=salt.utils.json.dumps(data))\n # Check requests status\n if ret.get('error'):\n log.error(\"Got an error: %s\", ret.get(\"error\"))\n return ret\n else:\n return salt.utils.json.loads(ret.get('body'))\n" ]
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. 
code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, 
node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif 
isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
get_labels
python
def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret}
.. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L210-L234
[ "def _guess_apiserver(apiserver_url=None):\n '''Try to guees the kubemaster url from environ,\n then from `/etc/kubernetes/config` file\n '''\n default_config = \"/etc/kubernetes/config\"\n if apiserver_url is not None:\n return apiserver_url\n if \"KUBERNETES_MASTER\" in os.environ:\n apiserver_url = os.environ.get(\"KUBERNETES_MASTER\")\n elif __salt__['config.get']('k8s:master'):\n apiserver_url = __salt__['config.get']('k8s:master')\n elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', \"\"):\n config = __salt__['config.get']('k8s:config', default_config)\n kubeapi_regex = re.compile(\"\"\"KUBE_MASTER=['\"]--master=(.*)['\"]\"\"\",\n re.MULTILINE)\n with salt.utils.files.fopen(config) as fh_k8s:\n for line in fh_k8s.readlines():\n match_line = kubeapi_regex.match(line)\n if match_line:\n apiserver_url = match_line.group(1)\n else:\n # we failed to discover, lets use k8s default address\n apiserver_url = \"http://127.0.0.1:8080\"\n log.debug(\"Discoverd k8s API server address: %s\", apiserver_url)\n return apiserver_url\n", "def _guess_node_id(node):\n '''Try to guess kube node ID using salt minion ID'''\n if node is None:\n return __salt__['grains.get']('id')\n return node\n", "def _get_labels(node, apiserver_url):\n '''Get all labels from a kube node.'''\n # Prepare URL\n url = \"{0}/api/v1/nodes/{1}\".format(apiserver_url, node)\n # Make request\n ret = http.query(url)\n # Check requests status\n if 'body' in ret:\n ret = salt.utils.json.loads(ret.get('body'))\n elif ret.get('status', 0) == 404:\n return \"Node {0} doesn't exist\".format(node)\n else:\n return ret\n # Get and return labels\n return ret.get('metadata', {}).get('labels', {})\n" ]
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif 
isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
label_present
python
def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret
.. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L237-L292
[ "def _guess_apiserver(apiserver_url=None):\n '''Try to guees the kubemaster url from environ,\n then from `/etc/kubernetes/config` file\n '''\n default_config = \"/etc/kubernetes/config\"\n if apiserver_url is not None:\n return apiserver_url\n if \"KUBERNETES_MASTER\" in os.environ:\n apiserver_url = os.environ.get(\"KUBERNETES_MASTER\")\n elif __salt__['config.get']('k8s:master'):\n apiserver_url = __salt__['config.get']('k8s:master')\n elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', \"\"):\n config = __salt__['config.get']('k8s:config', default_config)\n kubeapi_regex = re.compile(\"\"\"KUBE_MASTER=['\"]--master=(.*)['\"]\"\"\",\n re.MULTILINE)\n with salt.utils.files.fopen(config) as fh_k8s:\n for line in fh_k8s.readlines():\n match_line = kubeapi_regex.match(line)\n if match_line:\n apiserver_url = match_line.group(1)\n else:\n # we failed to discover, lets use k8s default address\n apiserver_url = \"http://127.0.0.1:8080\"\n log.debug(\"Discoverd k8s API server address: %s\", apiserver_url)\n return apiserver_url\n", "def _guess_node_id(node):\n '''Try to guess kube node ID using salt minion ID'''\n if node is None:\n return __salt__['grains.get']('id')\n return node\n", "def _get_labels(node, apiserver_url):\n '''Get all labels from a kube node.'''\n # Prepare URL\n url = \"{0}/api/v1/nodes/{1}\".format(apiserver_url, node)\n # Make request\n ret = http.query(url)\n # Check requests status\n if 'body' in ret:\n ret = salt.utils.json.loads(ret.get('body'))\n elif ret.get('status', 0) == 404:\n return \"Node {0} doesn't exist\".format(node)\n else:\n return ret\n # Get and return labels\n return ret.get('metadata', {}).get('labels', {})\n", "def _set_labels(node, apiserver_url, labels):\n '''Replace labels dict by a new one'''\n # Prepare URL\n url = \"{0}/api/v1/nodes/{1}\".format(apiserver_url, node)\n # Prepare data\n data = [{\"op\": \"replace\", \"path\": \"/metadata/labels\", \"value\": labels}]\n # Make request\n ret = 
_kpatch(url, data)\n if ret.get(\"status\") == 404:\n return \"Node {0} doesn't exist\".format(node)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif 
isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
label_absent
python
def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret
.. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L295-L339
[ "def _guess_apiserver(apiserver_url=None):\n '''Try to guees the kubemaster url from environ,\n then from `/etc/kubernetes/config` file\n '''\n default_config = \"/etc/kubernetes/config\"\n if apiserver_url is not None:\n return apiserver_url\n if \"KUBERNETES_MASTER\" in os.environ:\n apiserver_url = os.environ.get(\"KUBERNETES_MASTER\")\n elif __salt__['config.get']('k8s:master'):\n apiserver_url = __salt__['config.get']('k8s:master')\n elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', \"\"):\n config = __salt__['config.get']('k8s:config', default_config)\n kubeapi_regex = re.compile(\"\"\"KUBE_MASTER=['\"]--master=(.*)['\"]\"\"\",\n re.MULTILINE)\n with salt.utils.files.fopen(config) as fh_k8s:\n for line in fh_k8s.readlines():\n match_line = kubeapi_regex.match(line)\n if match_line:\n apiserver_url = match_line.group(1)\n else:\n # we failed to discover, lets use k8s default address\n apiserver_url = \"http://127.0.0.1:8080\"\n log.debug(\"Discoverd k8s API server address: %s\", apiserver_url)\n return apiserver_url\n", "def _guess_node_id(node):\n '''Try to guess kube node ID using salt minion ID'''\n if node is None:\n return __salt__['grains.get']('id')\n return node\n", "def _get_labels(node, apiserver_url):\n '''Get all labels from a kube node.'''\n # Prepare URL\n url = \"{0}/api/v1/nodes/{1}\".format(apiserver_url, node)\n # Make request\n ret = http.query(url)\n # Check requests status\n if 'body' in ret:\n ret = salt.utils.json.loads(ret.get('body'))\n elif ret.get('status', 0) == 404:\n return \"Node {0} doesn't exist\".format(node)\n else:\n return ret\n # Get and return labels\n return ret.get('metadata', {}).get('labels', {})\n", "def _set_labels(node, apiserver_url, labels):\n '''Replace labels dict by a new one'''\n # Prepare URL\n url = \"{0}/api/v1/nodes/{1}\".format(apiserver_url, node)\n # Prepare data\n data = [{\"op\": \"replace\", \"path\": \"/metadata/labels\", \"value\": labels}]\n # Make request\n ret = 
_kpatch(url, data)\n if ret.get(\"status\") == 404:\n return \"Node {0} doesn't exist\".format(node)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} 
updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": 
namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif 
isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
_get_namespaces
python
def _get_namespaces(apiserver_url, name=""):
    '''Fetch the namespace called *name*, or every namespace when *name* is empty.'''
    # Build the core/v1 namespaces endpoint; an empty name lists all namespaces.
    endpoint = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name)
    response = http.query(endpoint)
    body = response.get("body")
    # An empty/missing body means the API server returned nothing usable.
    if not body:
        return None
    return salt.utils.json.loads(body)
Get namespace if namespace is defined otherwise return all namespaces
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L390-L399
[ "def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n" ]
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} 
updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. 
code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif 
isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
_create_namespace
python
def _create_namespace(namespace, apiserver_url):
    '''
    Create a new namespace on the targeted k8s cluster.

    :param namespace: name of the namespace to create
    :param apiserver_url: base URL of the kubernetes API server
    :return: the API server's response (parsed JSON on success,
             the raw error structure otherwise)
    '''
    # Collection endpoint for Namespace objects on the API server
    endpoint = "{0}/api/v1/namespaces".format(apiserver_url)
    # Minimal Namespace manifest to POST
    payload = {
        "kind": "Namespace",
        "apiVersion": "v1",
        "metadata": {
            "name": namespace,
        }
    }
    log.trace("namespace creation requests: %s", payload)
    # POST the manifest and hand the server's answer straight back
    response = _kpost(endpoint, payload)
    log.trace("result is: %s", response)
    return response
create namespace on the defined k8s cluster
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L402-L419
[ "def _kpost(url, data):\n ''' create any object in kubernetes based on URL '''\n\n # Prepare headers\n headers = {\"Content-Type\": \"application/json\"}\n # Make request\n log.trace(\"url is: %s, data is: %s\", url, data)\n ret = http.query(url,\n method='POST',\n header_dict=headers,\n data=salt.utils.json.dumps(data))\n # Check requests status\n if ret.get('error'):\n return ret\n else:\n return salt.utils.json.loads(ret.get('body'))\n" ]
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} 
updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def create_namespace(name, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. 
code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif 
isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
create_namespace
python
def create_namespace(name, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret
.. versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L422-L454
[ "def _guess_apiserver(apiserver_url=None):\n '''Try to guees the kubemaster url from environ,\n then from `/etc/kubernetes/config` file\n '''\n default_config = \"/etc/kubernetes/config\"\n if apiserver_url is not None:\n return apiserver_url\n if \"KUBERNETES_MASTER\" in os.environ:\n apiserver_url = os.environ.get(\"KUBERNETES_MASTER\")\n elif __salt__['config.get']('k8s:master'):\n apiserver_url = __salt__['config.get']('k8s:master')\n elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', \"\"):\n config = __salt__['config.get']('k8s:config', default_config)\n kubeapi_regex = re.compile(\"\"\"KUBE_MASTER=['\"]--master=(.*)['\"]\"\"\",\n re.MULTILINE)\n with salt.utils.files.fopen(config) as fh_k8s:\n for line in fh_k8s.readlines():\n match_line = kubeapi_regex.match(line)\n if match_line:\n apiserver_url = match_line.group(1)\n else:\n # we failed to discover, lets use k8s default address\n apiserver_url = \"http://127.0.0.1:8080\"\n log.debug(\"Discoverd k8s API server address: %s\", apiserver_url)\n return apiserver_url\n", "def _get_namespaces(apiserver_url, name=\"\"):\n '''Get namespace is namespace is defined otherwise return all namespaces'''\n # Prepare URL\n url = \"{0}/api/v1/namespaces/{1}\".format(apiserver_url, name)\n # Make request\n ret = http.query(url)\n if ret.get(\"body\"):\n return salt.utils.json.loads(ret.get(\"body\"))\n else:\n return None\n", "def _create_namespace(namespace, apiserver_url):\n ''' create namespace on the defined k8s cluster '''\n # Prepare URL\n url = \"{0}/api/v1/namespaces\".format(apiserver_url)\n # Prepare data\n data = {\n \"kind\": \"Namespace\",\n \"apiVersion\": \"v1\",\n \"metadata\": {\n \"name\": namespace,\n }\n }\n log.trace(\"namespace creation requests: %s\", data)\n # Make request\n ret = _kpost(url, data)\n log.trace(\"result is: %s\", ret)\n # Check requests status\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} 
updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. 
If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def 
_file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def 
update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. 
code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, 
saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. 
code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
get_namespaces
python
def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret
.. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L457-L491
[ "def _guess_apiserver(apiserver_url=None):\n '''Try to guees the kubemaster url from environ,\n then from `/etc/kubernetes/config` file\n '''\n default_config = \"/etc/kubernetes/config\"\n if apiserver_url is not None:\n return apiserver_url\n if \"KUBERNETES_MASTER\" in os.environ:\n apiserver_url = os.environ.get(\"KUBERNETES_MASTER\")\n elif __salt__['config.get']('k8s:master'):\n apiserver_url = __salt__['config.get']('k8s:master')\n elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', \"\"):\n config = __salt__['config.get']('k8s:config', default_config)\n kubeapi_regex = re.compile(\"\"\"KUBE_MASTER=['\"]--master=(.*)['\"]\"\"\",\n re.MULTILINE)\n with salt.utils.files.fopen(config) as fh_k8s:\n for line in fh_k8s.readlines():\n match_line = kubeapi_regex.match(line)\n if match_line:\n apiserver_url = match_line.group(1)\n else:\n # we failed to discover, lets use k8s default address\n apiserver_url = \"http://127.0.0.1:8080\"\n log.debug(\"Discoverd k8s API server address: %s\", apiserver_url)\n return apiserver_url\n", "def _get_namespaces(apiserver_url, name=\"\"):\n '''Get namespace is namespace is defined otherwise return all namespaces'''\n # Prepare URL\n url = \"{0}/api/v1/namespaces/{1}\".format(apiserver_url, name)\n # Make request\n ret = http.query(url)\n if ret.get(\"body\"):\n return salt.utils.json.loads(ret.get(\"body\"))\n else:\n return None\n" ]
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} 
updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): 
log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: 
%s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. 
code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, 
saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. 
code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
_update_secret
python
def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret
Replace secrets data by a new one
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L508-L519
null
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} 
updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. 
code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. 
capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = 
_update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
_create_secret
python
def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret
create namespace on the defined k8s cluster
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L522-L538
[ "def _kpost(url, data):\n ''' create any object in kubernetes based on URL '''\n\n # Prepare headers\n headers = {\"Content-Type\": \"application/json\"}\n # Make request\n log.trace(\"url is: %s, data is: %s\", url, data)\n ret = http.query(url,\n method='POST',\n header_dict=headers,\n data=salt.utils.json.dumps(data))\n # Check requests status\n if ret.get('error'):\n return ret\n else:\n return salt.utils.json.loads(ret.get('body'))\n" ]
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} 
updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. 
code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. 
capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = 
_update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
get_secrets
python
def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret
Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L571-L593
[ "def _guess_apiserver(apiserver_url=None):\n '''Try to guees the kubemaster url from environ,\n then from `/etc/kubernetes/config` file\n '''\n default_config = \"/etc/kubernetes/config\"\n if apiserver_url is not None:\n return apiserver_url\n if \"KUBERNETES_MASTER\" in os.environ:\n apiserver_url = os.environ.get(\"KUBERNETES_MASTER\")\n elif __salt__['config.get']('k8s:master'):\n apiserver_url = __salt__['config.get']('k8s:master')\n elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', \"\"):\n config = __salt__['config.get']('k8s:config', default_config)\n kubeapi_regex = re.compile(\"\"\"KUBE_MASTER=['\"]--master=(.*)['\"]\"\"\",\n re.MULTILINE)\n with salt.utils.files.fopen(config) as fh_k8s:\n for line in fh_k8s.readlines():\n match_line = kubeapi_regex.match(line)\n if match_line:\n apiserver_url = match_line.group(1)\n else:\n # we failed to discover, lets use k8s default address\n apiserver_url = \"http://127.0.0.1:8080\"\n log.debug(\"Discoverd k8s API server address: %s\", apiserver_url)\n return apiserver_url\n", "def _get_secrets(namespace, name, apiserver_url):\n '''Get secrets of the namespace.'''\n # Prepare URL\n url = \"{0}/api/v1/namespaces/{1}/secrets/{2}\".format(apiserver_url,\n namespace, name)\n # Make request\n ret = http.query(url)\n if ret.get(\"body\"):\n return salt.utils.json.loads(ret.get(\"body\"))\n else:\n return None\n", "def _decode_secrets(secrets):\n items = secrets.get(\"items\", [])\n if items:\n for i, secret in enumerate(items):\n log.trace(i, secret)\n for k, v in six.iteritems(secret.get(\"data\", {})):\n items[i]['data'][k] = base64.b64decode(v)\n secrets[\"items\"] = items\n return secrets\n else:\n for k, v in six.iteritems(secrets.get(\"data\", {})):\n secrets['data'][k] = base64.b64decode(v)\n return secrets\n" ]
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} 
updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. 
capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. 
saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret 
has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
update_secret
python
def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret
.. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L623-L664
[ "def _guess_apiserver(apiserver_url=None):\n '''Try to guees the kubemaster url from environ,\n then from `/etc/kubernetes/config` file\n '''\n default_config = \"/etc/kubernetes/config\"\n if apiserver_url is not None:\n return apiserver_url\n if \"KUBERNETES_MASTER\" in os.environ:\n apiserver_url = os.environ.get(\"KUBERNETES_MASTER\")\n elif __salt__['config.get']('k8s:master'):\n apiserver_url = __salt__['config.get']('k8s:master')\n elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', \"\"):\n config = __salt__['config.get']('k8s:config', default_config)\n kubeapi_regex = re.compile(\"\"\"KUBE_MASTER=['\"]--master=(.*)['\"]\"\"\",\n re.MULTILINE)\n with salt.utils.files.fopen(config) as fh_k8s:\n for line in fh_k8s.readlines():\n match_line = kubeapi_regex.match(line)\n if match_line:\n apiserver_url = match_line.group(1)\n else:\n # we failed to discover, lets use k8s default address\n apiserver_url = \"http://127.0.0.1:8080\"\n log.debug(\"Discoverd k8s API server address: %s\", apiserver_url)\n return apiserver_url\n", "def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'):\n '''\n .. versionadded:: 2016.3.0\n\n Create k8s secrets in the defined namespace from the list of files\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' k8s.create_secret namespace_name secret_name sources\n\n salt '*' k8s.create_secret namespace_name secret_name sources\n http://kube-master.cluster.local\n\n sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths.\n\n Example of paths array:\n\n .. code-block:: bash\n\n ['/full/path/filename', \"file:///full/path/filename\", \"salt://secret/storage/file.txt\", \"http://user:password@securesite.com/secret-file.json\"]\n\n Example of dictionaries:\n\n .. 
code-block:: bash\n\n {\"nameit\": '/full/path/fiename', name2: \"salt://secret/storage/file.txt\"}\n\n optional parameters accepted:\n\n update=[false] default value is false\n if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done.\n In case it is set to \"true\" and secret is present but data is differ - secret will be updated.\n\n force=[true] default value is true\n if the to False, secret will not be created in case one of the files is not\n valid kubernetes secret. e.g. capital letters in secret name or _\n in case force is set to True, wrong files will be skipped but secret will be created any way.\n\n saltenv=['base'] default value is base\n in case 'salt://' path is used, this parameter can change the visibility of files\n\n '''\n ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}\n\n if not sources:\n return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}}\n\n apiserver_url = _guess_apiserver(apiserver_url)\n # we need namespace to create secret in it\n if not _get_namespaces(apiserver_url, namespace):\n if force:\n _create_namespace(namespace, apiserver_url)\n else:\n return {'name': name, 'result': False, 'comment': \"Namespace doesn't exists\", 'changes': {}}\n\n secret = _get_secrets(namespace, name, apiserver_url)\n if secret and not update:\n log.info(\"Secret %s is already present on %s\", name, namespace)\n return {'name': name, 'result': False,\n 'comment': 'Secret {0} is already present'.format(name),\n 'changes': {}}\n\n data = {}\n\n for source in sources:\n log.debug(\"source is: %s\", source)\n if isinstance(source, dict):\n # format is array of dictionaries:\n # [{public_auth: salt://public_key}, {test: \"/tmp/test\"}]\n log.trace(\"source is dictionary: %s\", source)\n for k, v in six.iteritems(source):\n sname, encoded = _source_encode(v, saltenv)\n if sname == encoded == \"\":\n ret['comment'] += \"Source file {0} 
is missing or name is incorrect\\n\".format(v)\n if force:\n continue\n else:\n return ret\n data[k] = encoded\n elif isinstance(source, six.string_types):\n # expected format is array of filenames\n sname, encoded = _source_encode(source, saltenv)\n if sname == encoded == \"\":\n if force:\n ret['comment'] += \"Source file {0} is missing or name is incorrect\\n\".format(source)\n continue\n else:\n return ret\n data[sname] = encoded\n\n log.trace('secret data is: %s', data)\n\n if secret and update:\n if not data:\n ret[\"comment\"] += \"Could not find source files or your sources are empty\"\n ret[\"result\"] = False\n elif secret.get(\"data\") and data != secret.get(\"data\"):\n res = _update_secret(namespace, name, data, apiserver_url)\n ret['comment'] = 'Updated secret'\n ret['changes'] = 'Updated secret'\n else:\n log.debug(\"Secret has not been changed on cluster, skipping it\")\n ret['comment'] = 'Has not been changed on cluster, skipping it'\n else:\n res = _create_secret(namespace, name, data, apiserver_url)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} 
updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. 
code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. 
saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret 
has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
create_secret
python
def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. 
saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret 
has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret
.. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L667-L774
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def _guess_apiserver(apiserver_url=None):\n '''Try to guees the kubemaster url from environ,\n then from `/etc/kubernetes/config` file\n '''\n default_config = \"/etc/kubernetes/config\"\n if apiserver_url is not None:\n return apiserver_url\n if \"KUBERNETES_MASTER\" in os.environ:\n apiserver_url = os.environ.get(\"KUBERNETES_MASTER\")\n elif __salt__['config.get']('k8s:master'):\n apiserver_url = __salt__['config.get']('k8s:master')\n elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', \"\"):\n config = __salt__['config.get']('k8s:config', default_config)\n kubeapi_regex = re.compile(\"\"\"KUBE_MASTER=['\"]--master=(.*)['\"]\"\"\",\n re.MULTILINE)\n with salt.utils.files.fopen(config) as fh_k8s:\n for line in fh_k8s.readlines():\n match_line = kubeapi_regex.match(line)\n if match_line:\n apiserver_url = match_line.group(1)\n else:\n # we failed to discover, lets use k8s default address\n apiserver_url = \"http://127.0.0.1:8080\"\n log.debug(\"Discoverd k8s API server address: %s\", apiserver_url)\n return apiserver_url\n", "def _get_namespaces(apiserver_url, name=\"\"):\n '''Get namespace is namespace is defined otherwise return all namespaces'''\n # Prepare URL\n url = \"{0}/api/v1/namespaces/{1}\".format(apiserver_url, name)\n # Make request\n ret = http.query(url)\n if ret.get(\"body\"):\n return salt.utils.json.loads(ret.get(\"body\"))\n else:\n return None\n", "def _create_namespace(namespace, apiserver_url):\n ''' create namespace on the defined k8s cluster '''\n # Prepare URL\n url = \"{0}/api/v1/namespaces\".format(apiserver_url)\n # Prepare data\n data = {\n \"kind\": \"Namespace\",\n \"apiVersion\": \"v1\",\n \"metadata\": {\n \"name\": namespace,\n }\n }\n log.trace(\"namespace creation requests: %s\", data)\n # Make request\n ret = _kpost(url, data)\n log.trace(\"result is: %s\", ret)\n # Check requests status\n return ret\n", "def _get_secrets(namespace, name, 
apiserver_url):\n '''Get secrets of the namespace.'''\n # Prepare URL\n url = \"{0}/api/v1/namespaces/{1}/secrets/{2}\".format(apiserver_url,\n namespace, name)\n # Make request\n ret = http.query(url)\n if ret.get(\"body\"):\n return salt.utils.json.loads(ret.get(\"body\"))\n else:\n return None\n", "def _create_secret(namespace, name, data, apiserver_url):\n ''' create namespace on the defined k8s cluster '''\n # Prepare URL\n url = \"{0}/api/v1/namespaces/{1}/secrets\".format(apiserver_url, namespace)\n # Prepare data\n request = {\n \"apiVersion\": \"v1\",\n \"kind\": \"Secret\",\n \"metadata\": {\n \"name\": name,\n \"namespace\": namespace,\n },\n \"data\": data\n }\n # Make request\n ret = _kpost(url, request)\n return ret\n", "def _source_encode(source, saltenv):\n try:\n source_url = _urlparse(source)\n except TypeError:\n return '', {}, ('Invalid format for source parameter')\n\n protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file')\n\n log.trace(\"parsed source looks like: %s\", source_url)\n if not source_url.scheme or source_url.scheme == 'file':\n # just a regular file\n filename = os.path.abspath(source_url.path)\n sname = os.path.basename(filename)\n log.debug(\"Source is a regular local file: %s\", source_url.path)\n if _is_dns_subdomain(sname) and _is_valid_secret_file(filename):\n return sname, _file_encode(filename)\n else:\n if source_url.scheme in protos:\n # The source is a file on a server\n filename = __salt__['cp.cache_file'](source, saltenv)\n if not filename:\n log.warning(\"Source file: %s can not be retrieved\", source)\n return \"\", \"\"\n return os.path.basename(filename), _file_encode(filename)\n return \"\", \"\"\n" ]
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} 
updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. 
code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
saltstack/salt
salt/modules/k8s.py
delete_secret
python
def delete_secret(namespace, name, apiserver_url=None, force=True): ''' .. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # we need namespace to delete secret in it if not _get_namespaces(apiserver_url, namespace): return {'name': name, 'result': False, 'comment': "Namespace doesn't exists, can't delete anything there", 'changes': {}} url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) res = http.query(url, method='DELETE') if res.get('body'): ret['comment'] = "Removed secret {0} in {1} namespace".format(name, namespace) return ret
.. versionadded:: 2016.3.0 Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name. CLI Example: .. code-block:: bash salt '*' k8s.delete_secret namespace_name secret_name salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L777-L811
[ "def _guess_apiserver(apiserver_url=None):\n '''Try to guees the kubemaster url from environ,\n then from `/etc/kubernetes/config` file\n '''\n default_config = \"/etc/kubernetes/config\"\n if apiserver_url is not None:\n return apiserver_url\n if \"KUBERNETES_MASTER\" in os.environ:\n apiserver_url = os.environ.get(\"KUBERNETES_MASTER\")\n elif __salt__['config.get']('k8s:master'):\n apiserver_url = __salt__['config.get']('k8s:master')\n elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', \"\"):\n config = __salt__['config.get']('k8s:config', default_config)\n kubeapi_regex = re.compile(\"\"\"KUBE_MASTER=['\"]--master=(.*)['\"]\"\"\",\n re.MULTILINE)\n with salt.utils.files.fopen(config) as fh_k8s:\n for line in fh_k8s.readlines():\n match_line = kubeapi_regex.match(line)\n if match_line:\n apiserver_url = match_line.group(1)\n else:\n # we failed to discover, lets use k8s default address\n apiserver_url = \"http://127.0.0.1:8080\"\n log.debug(\"Discoverd k8s API server address: %s\", apiserver_url)\n return apiserver_url\n", "def _get_namespaces(apiserver_url, name=\"\"):\n '''Get namespace is namespace is defined otherwise return all namespaces'''\n # Prepare URL\n url = \"{0}/api/v1/namespaces/{1}\".format(apiserver_url, name)\n # Make request\n ret = http.query(url)\n if ret.get(\"body\"):\n return salt.utils.json.loads(ret.get(\"body\"))\n else:\n return None\n" ]
# -*- coding: utf-8 -*- ''' Salt module to manage Kubernetes cluster .. versionadded:: 2016.3.0 Roadmap: * Add creation of K8S objects (pod, rc, service, ...) * Add replace of K8S objects (pod, rc, service, ...) * Add deletion of K8S objects (pod, rc, service, ...) * Add rolling update * Add (auto)scalling ''' from __future__ import absolute_import, unicode_literals, print_function import os import re import logging as logger import base64 from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency import salt.utils.files import salt.utils.http as http import salt.utils.json __virtualname__ = 'k8s' # Setup the logger log = logger.getLogger(__name__) def __virtual__(): '''Load load if python-requests is installed.''' return __virtualname__ def _guess_apiserver(apiserver_url=None): '''Try to guees the kubemaster url from environ, then from `/etc/kubernetes/config` file ''' default_config = "/etc/kubernetes/config" if apiserver_url is not None: return apiserver_url if "KUBERNETES_MASTER" in os.environ: apiserver_url = os.environ.get("KUBERNETES_MASTER") elif __salt__['config.get']('k8s:master'): apiserver_url = __salt__['config.get']('k8s:master') elif os.path.exists(default_config) or __salt__['config.get']('k8s:config', ""): config = __salt__['config.get']('k8s:config', default_config) kubeapi_regex = re.compile("""KUBE_MASTER=['"]--master=(.*)['"]""", re.MULTILINE) with salt.utils.files.fopen(config) as fh_k8s: for line in fh_k8s.readlines(): match_line = kubeapi_regex.match(line) if match_line: apiserver_url = match_line.group(1) else: # we failed to discover, lets use k8s default address apiserver_url = "http://127.0.0.1:8080" log.debug("Discoverd k8s API server address: %s", apiserver_url) return apiserver_url def _kpost(url, data): ''' create any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request 
log.trace("url is: %s, data is: %s", url, data) ret = http.query(url, method='POST', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kput(url, data): ''' put any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json"} # Make request ret = http.query(url, method='PUT', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): return ret else: return salt.utils.json.loads(ret.get('body')) def _kpatch(url, data): ''' patch any object in kubernetes based on URL ''' # Prepare headers headers = {"Content-Type": "application/json-patch+json"} # Make request ret = http.query(url, method='PATCH', header_dict=headers, data=salt.utils.json.dumps(data)) # Check requests status if ret.get('error'): log.error("Got an error: %s", ret.get("error")) return ret else: return salt.utils.json.loads(ret.get('body')) def _kname(obj): '''Get name or names out of json result from API server''' if isinstance(obj, dict): return [obj.get("metadata", {}).get("name", "")] elif isinstance(obj, (list, tuple)): names = [] for i in obj: names.append(i.get("metadata", {}).get("name", "")) return names else: return "Unknown type" def _is_dns_subdomain(name): ''' Check that name is DNS subdomain: One or more lowercase rfc1035/rfc1123 labels separated by '.' 
with a maximum length of 253 characters ''' dns_subdomain = re.compile(r"""^[a-z0-9\.-]{1,253}$""") if dns_subdomain.match(name): log.debug("Name: %s is valid DNS subdomain", name) return True else: log.debug("Name: %s is not valid DNS subdomain", name) return False def _is_port_name(name): ''' Check that name is IANA service: An alphanumeric (a-z, and 0-9) string, with a maximum length of 15 characters, with the '-' character allowed anywhere except the first or the last character or adjacent to another '-' character, it must contain at least a (a-z) character ''' port_name = re.compile("""^[a-z0-9]{1,15}$""") if port_name.match(name): return True else: return False def _is_dns_label(name): ''' Check that name is DNS label: An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters, with the '-' character allowed anywhere except the first or last character, suitable for use as a hostname or segment in a domain name ''' dns_label = re.compile(r"""^[a-z0-9][a-z0-9\.-]{1,62}$""") if dns_label.match(name): return True else: return False def _guess_node_id(node): '''Try to guess kube node ID using salt minion ID''' if node is None: return __salt__['grains.get']('id') return node def _get_labels(node, apiserver_url): '''Get all labels from a kube node.''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Make request ret = http.query(url) # Check requests status if 'body' in ret: ret = salt.utils.json.loads(ret.get('body')) elif ret.get('status', 0) == 404: return "Node {0} doesn't exist".format(node) else: return ret # Get and return labels return ret.get('metadata', {}).get('labels', {}) def _set_labels(node, apiserver_url, labels): '''Replace labels dict by a new one''' # Prepare URL url = "{0}/api/v1/nodes/{1}".format(apiserver_url, node) # Prepare data data = [{"op": "replace", "path": "/metadata/labels", "value": labels}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't 
exist".format(node) return ret def get_labels(node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get labels from the current node CLI Example: .. code-block:: bash salt '*' k8s.get_labels salt '*' k8s.get_labels kube-node.cluster.local http://kube-master.cluster.local ''' # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_labels(node, apiserver_url) return {"labels": ret} def label_present(name, value, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Set label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_present hw/disktype ssd salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels labels = _get_labels(node, apiserver_url) if name not in labels: # This is a new label ret['changes'] = {name: value} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not create label {0}, please retry".format(name) else: ret['comment'] = "Label {0} created".format(name) elif labels.get(name) != str(value): # This is a old label and we are going to edit it ret['changes'] = {name: str(value)} labels[name] = str(value) res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not update label {0}, please retry".format(name) else: ret['comment'] = "Label {0} 
updated".format(name) else: # This is a old label and it has already the wanted value ret['comment'] = "Label {0} already set".format(name) return ret def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret def label_folder_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label folder to the current node CLI Example: .. 
code-block:: bash salt '*' k8s.label_folder_absent hw salt '*' k8s.label_folder_absent hw/ kube-node.cluster.local http://kube-master.cluster.local ''' folder = name.strip("/") + "/" ret = {'name': folder, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if not key.startswith(folder)]) # Prepare a temp labels dict if labels == old_labels: # Label already absent ret['comment'] = "Label folder {0} already absent".format(folder) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label folder {0}, please retry".format(folder) else: ret['changes'] = {"deleted": folder} ret['comment'] = "Label folder {0} absent".format(folder) return ret # Namespaces def _get_namespaces(apiserver_url, name=""): '''Get namespace is namespace is defined otherwise return all namespaces''' # Prepare URL url = "{0}/api/v1/namespaces/{1}".format(apiserver_url, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _create_namespace(namespace, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces".format(apiserver_url) # Prepare data data = { "kind": "Namespace", "apiVersion": "v1", "metadata": { "name": namespace, } } log.trace("namespace creation requests: %s", data) # Make request ret = _kpost(url, data) log.trace("result is: %s", ret) # Check requests status return ret def create_namespace(name, apiserver_url=None): ''' .. 
versionadded:: 2016.3.0 Create kubernetes namespace from the name, similar to the functionality added to kubectl since v.1.2.0: .. code-block:: bash kubectl create namespaces namespace-name CLI Example: .. code-block:: bash salt '*' k8s.create_namespace namespace_name salt '*' k8s.create_namespace namespace_name http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False if not _get_namespaces(apiserver_url, name): # This is a new namespace _create_namespace(name, apiserver_url) ret['changes'] = name ret['comment'] = "Namespace {0} created".format(name) else: ret['comment'] = "Namespace {0} already present".format(name) return ret def get_namespaces(namespace="", apiserver_url=None): ''' .. versionadded:: 2016.3.0 Get one or all kubernetes namespaces. If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example: .. code-block:: bash kubectl get namespaces -o json In case namespace is set by user, the output will be similar to the one from kubectl: .. code-block:: bash kubectl get namespaces namespace_name -o json CLI Example: .. 
code-block:: bash salt '*' k8s.get_namespaces salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data ret = _get_namespaces(apiserver_url, namespace) return ret # Secrets def _get_secrets(namespace, name, apiserver_url): '''Get secrets of the namespace.''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Make request ret = http.query(url) if ret.get("body"): return salt.utils.json.loads(ret.get("body")) else: return None def _update_secret(namespace, name, data, apiserver_url): '''Replace secrets data by a new one''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url, namespace, name) # Prepare data data = [{"op": "replace", "path": "/data", "value": data}] # Make request ret = _kpatch(url, data) if ret.get("status") == 404: return "Node {0} doesn't exist".format(url) return ret def _create_secret(namespace, name, data, apiserver_url): ''' create namespace on the defined k8s cluster ''' # Prepare URL url = "{0}/api/v1/namespaces/{1}/secrets".format(apiserver_url, namespace) # Prepare data request = { "apiVersion": "v1", "kind": "Secret", "metadata": { "name": name, "namespace": namespace, }, "data": data } # Make request ret = _kpost(url, request) return ret def _is_valid_secret_file(filename): if os.path.exists(filename) and os.path.isfile(filename): log.debug("File: %s is valid secret file", filename) return True log.warning("File: %s does not exists or not file", filename) return False def _file_encode(filename): log.trace("Encoding secret file: %s", filename) with salt.utils.files.fopen(filename, "rb") as f: data = f.read() return base64.b64encode(data) def _decode_secrets(secrets): items = secrets.get("items", []) if items: for i, secret in enumerate(items): log.trace(i, secret) for k, v in six.iteritems(secret.get("data", {})): 
items[i]['data'][k] = base64.b64decode(v) secrets["items"] = items return secrets else: for k, v in six.iteritems(secrets.get("data", {})): secrets['data'][k] = base64.b64decode(v) return secrets def get_secrets(namespace, name="", apiserver_url=None, decode=False, brief=False): ''' Get k8s namespaces CLI Example: .. code-block:: bash salt '*' k8s.get_secrets namespace_name salt '*' k8s.get_secrets namespace_name secret_name http://kube-master.cluster.local ''' # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get data if not decode: ret = _get_secrets(namespace, name, apiserver_url) else: ret = _decode_secrets(_get_secrets(namespace, name, apiserver_url)) return ret def _source_encode(source, saltenv): try: source_url = _urlparse(source) except TypeError: return '', {}, ('Invalid format for source parameter') protos = ('salt', 'http', 'https', 'ftp', 'swift', 's3', 'file') log.trace("parsed source looks like: %s", source_url) if not source_url.scheme or source_url.scheme == 'file': # just a regular file filename = os.path.abspath(source_url.path) sname = os.path.basename(filename) log.debug("Source is a regular local file: %s", source_url.path) if _is_dns_subdomain(sname) and _is_valid_secret_file(filename): return sname, _file_encode(filename) else: if source_url.scheme in protos: # The source is a file on a server filename = __salt__['cp.cache_file'](source, saltenv) if not filename: log.warning("Source file: %s can not be retrieved", source) return "", "" return os.path.basename(filename), _file_encode(filename) return "", "" def update_secret(namespace, name, sources, apiserver_url=None, force=True, saltenv='base'): ''' .. versionadded:: 2016.3.0 alias to k8s.create_secret with update=true CLI Example: .. 
code-block:: bash salt '*' k8s.update_secret namespace_name secret_name sources [apiserver_url] [force=true] [update=false] [saltenv='base'] sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' apiserver_url = _guess_apiserver(apiserver_url) ret = create_secret(namespace, name, sources, apiserver_url=apiserver_url, force=force, update=True, saltenv=saltenv) return ret def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'): ''' .. versionadded:: 2016.3.0 Create k8s secrets in the defined namespace from the list of files CLI Example: .. code-block:: bash salt '*' k8s.create_secret namespace_name secret_name sources salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local sources are either dictionary of {name: path, name1: path} pairs or array of strings defining paths. Example of paths array: .. code-block:: bash ['/full/path/filename', "file:///full/path/filename", "salt://secret/storage/file.txt", "http://user:password@securesite.com/secret-file.json"] Example of dictionaries: .. 
code-block:: bash {"nameit": '/full/path/fiename', name2: "salt://secret/storage/file.txt"} optional parameters accepted: update=[false] default value is false if set to false, and secret is already present on the cluster - warning will be returned and no changes to the secret will be done. In case it is set to "true" and secret is present but data is differ - secret will be updated. force=[true] default value is true if the to False, secret will not be created in case one of the files is not valid kubernetes secret. e.g. capital letters in secret name or _ in case force is set to True, wrong files will be skipped but secret will be created any way. saltenv=['base'] default value is base in case 'salt://' path is used, this parameter can change the visibility of files ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not sources: return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}} apiserver_url = _guess_apiserver(apiserver_url) # we need namespace to create secret in it if not _get_namespaces(apiserver_url, namespace): if force: _create_namespace(namespace, apiserver_url) else: return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}} secret = _get_secrets(namespace, name, apiserver_url) if secret and not update: log.info("Secret %s is already present on %s", name, namespace) return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}} data = {} for source in sources: log.debug("source is: %s", source) if isinstance(source, dict): # format is array of dictionaries: # [{public_auth: salt://public_key}, {test: "/tmp/test"}] log.trace("source is dictionary: %s", source) for k, v in six.iteritems(source): sname, encoded = _source_encode(v, saltenv) if sname == encoded == "": ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v) if force: continue else: return ret data[k] = encoded elif 
isinstance(source, six.string_types): # expected format is array of filenames sname, encoded = _source_encode(source, saltenv) if sname == encoded == "": if force: ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source) continue else: return ret data[sname] = encoded log.trace('secret data is: %s', data) if secret and update: if not data: ret["comment"] += "Could not find source files or your sources are empty" ret["result"] = False elif secret.get("data") and data != secret.get("data"): res = _update_secret(namespace, name, data, apiserver_url) ret['comment'] = 'Updated secret' ret['changes'] = 'Updated secret' else: log.debug("Secret has not been changed on cluster, skipping it") ret['comment'] = 'Has not been changed on cluster, skipping it' else: res = _create_secret(namespace, name, data, apiserver_url) return ret
saltstack/salt
salt/pillar/__init__.py
get_pillar
python
def get_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None, pillar_override=None, pillarenv=None, extra_minion_data=None): ''' Return the correct pillar driver based on the file_client option ''' file_client = opts['file_client'] if opts.get('master_type') == 'disable' and file_client == 'remote': file_client = 'local' ptype = { 'remote': RemotePillar, 'local': Pillar }.get(file_client, Pillar) # If local pillar and we're caching, run through the cache system first log.debug('Determining pillar cache') if opts['pillar_cache']: log.info('Compiling pillar from cache') log.debug('get_pillar using pillar cache with ext: %s', ext) return PillarCache(opts, grains, minion_id, saltenv, ext=ext, functions=funcs, pillar_override=pillar_override, pillarenv=pillarenv) return ptype(opts, grains, minion_id, saltenv, ext, functions=funcs, pillar_override=pillar_override, pillarenv=pillarenv, extra_minion_data=extra_minion_data)
Return the correct pillar driver based on the file_client option
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L45-L66
null
# -*- coding: utf-8 -*- ''' Render the pillar data ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import fnmatch import os import collections import logging import tornado.gen import sys import traceback import inspect # Import salt libs import salt.loader import salt.fileclient import salt.minion import salt.crypt import salt.transport.client import salt.utils.args import salt.utils.cache import salt.utils.crypt import salt.utils.data import salt.utils.dictupdate import salt.utils.url from salt.exceptions import SaltClientError from salt.template import compile_template from salt.utils.odict import OrderedDict from salt.version import __version__ # Even though dictupdate is imported, invoking salt.utils.dictupdate.merge here # causes an UnboundLocalError. This should be investigated and fixed, but until # then, leave the import directly below this comment intact. from salt.utils.dictupdate import merge # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) # TODO: migrate everyone to this one! def get_async_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None, pillar_override=None, pillarenv=None, extra_minion_data=None): ''' Return the correct pillar driver based on the file_client option ''' file_client = opts['file_client'] if opts.get('master_type') == 'disable' and file_client == 'remote': file_client = 'local' ptype = { 'remote': AsyncRemotePillar, 'local': AsyncPillar, }.get(file_client, AsyncPillar) return ptype(opts, grains, minion_id, saltenv, ext, functions=funcs, pillar_override=pillar_override, pillarenv=pillarenv, extra_minion_data=extra_minion_data) class RemotePillarMixin(object): ''' Common remote pillar functionality ''' def get_ext_pillar_extra_minion_data(self, opts): ''' Returns the extra data from the minion's opts dict (the config file). This data will be passed to external pillar functions. 
''' def get_subconfig(opts_key): ''' Returns a dict containing the opts key subtree, while maintaining the opts structure ''' ret_dict = aux_dict = {} config_val = opts subkeys = opts_key.split(':') # Build an empty dict with the opts path for subkey in subkeys[:-1]: aux_dict[subkey] = {} aux_dict = aux_dict[subkey] if not config_val.get(subkey): # The subkey is not in the config return {} config_val = config_val[subkey] if subkeys[-1] not in config_val: return {} aux_dict[subkeys[-1]] = config_val[subkeys[-1]] return ret_dict extra_data = {} if 'pass_to_ext_pillars' in opts: if not isinstance(opts['pass_to_ext_pillars'], list): log.exception('\'pass_to_ext_pillars\' config is malformed.') raise SaltClientError('\'pass_to_ext_pillars\' config is ' 'malformed.') for key in opts['pass_to_ext_pillars']: salt.utils.dictupdate.update(extra_data, get_subconfig(key), recursive_update=True, merge_lists=True) log.trace('ext_pillar_extra_data = %s', extra_data) return extra_data class AsyncRemotePillar(RemotePillarMixin): ''' Get the pillar from the master ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.opts = opts self.opts['saltenv'] = saltenv self.ext = ext self.grains = grains self.minion_id = minion_id self.channel = salt.transport.client.AsyncReqChannel.factory(opts) if pillarenv is not None: self.opts['pillarenv'] = pillarenv self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') salt.utils.dictupdate.update(self.extra_minion_data, self.get_ext_pillar_extra_minion_data(opts), recursive_update=True, merge_lists=True) self._closing = False @tornado.gen.coroutine def compile_pillar(self): 
''' Return a future which will contain the pillar data from the master ''' load = {'id': self.minion_id, 'grains': self.grains, 'saltenv': self.opts['saltenv'], 'pillarenv': self.opts['pillarenv'], 'pillar_override': self.pillar_override, 'extra_minion_data': self.extra_minion_data, 'ver': '2', 'cmd': '_pillar'} if self.ext: load['ext'] = self.ext try: ret_pillar = yield self.channel.crypted_transfer_decode_dictentry( load, dictkey='pillar', ) except Exception: log.exception('Exception getting pillar:') raise SaltClientError('Exception getting pillar.') if not isinstance(ret_pillar, dict): msg = ('Got a bad pillar from master, type {0}, expecting dict: ' '{1}').format(type(ret_pillar).__name__, ret_pillar) log.error(msg) # raise an exception! Pillar isn't empty, we can't sync it! raise SaltClientError(msg) raise tornado.gen.Return(ret_pillar) def destroy(self): if self._closing: return self._closing = True self.channel.close() def __del__(self): self.destroy() class RemotePillar(RemotePillarMixin): ''' Get the pillar from the master ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.opts = opts self.opts['saltenv'] = saltenv self.ext = ext self.grains = grains self.minion_id = minion_id self.channel = salt.transport.client.ReqChannel.factory(opts) if pillarenv is not None: self.opts['pillarenv'] = pillarenv self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') salt.utils.dictupdate.update(self.extra_minion_data, self.get_ext_pillar_extra_minion_data(opts), recursive_update=True, merge_lists=True) self._closing = False def compile_pillar(self): ''' Return the pillar data from the 
master ''' load = {'id': self.minion_id, 'grains': self.grains, 'saltenv': self.opts['saltenv'], 'pillarenv': self.opts['pillarenv'], 'pillar_override': self.pillar_override, 'extra_minion_data': self.extra_minion_data, 'ver': '2', 'cmd': '_pillar'} if self.ext: load['ext'] = self.ext ret_pillar = self.channel.crypted_transfer_decode_dictentry(load, dictkey='pillar', ) if not isinstance(ret_pillar, dict): log.error( 'Got a bad pillar from master, type %s, expecting dict: %s', type(ret_pillar).__name__, ret_pillar ) return {} return ret_pillar def destroy(self): if hasattr(self, '_closing') and self._closing: return self._closing = True self.channel.close() def __del__(self): self.destroy() class PillarCache(object): ''' Return a cached pillar if it exists, otherwise cache it. Pillar caches are structed in two diminensions: minion_id with a dict of saltenvs. Each saltenv contains a pillar dict Example data structure: ``` {'minion_1': {'base': {'pilar_key_1' 'pillar_val_1'} } ''' # TODO ABC? def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): # Yes, we need all of these because we need to route to the Pillar object # if we have no cache. This is another refactor target. # Go ahead and assign these because they may be needed later self.opts = opts self.grains = grains self.minion_id = minion_id self.ext = ext self.functions = functions self.pillar_override = pillar_override self.pillarenv = pillarenv if saltenv is None: self.saltenv = 'base' else: self.saltenv = saltenv # Determine caching backend self.cache = salt.utils.cache.CacheFactory.factory( self.opts['pillar_cache_backend'], self.opts['pillar_cache_ttl'], minion_cache_path=self._minion_cache_path(minion_id)) def _minion_cache_path(self, minion_id): ''' Return the path to the cache file for the minion. 
Used only for disk-based backends ''' return os.path.join(self.opts['cachedir'], 'pillar_cache', minion_id) def fetch_pillar(self): ''' In the event of a cache miss, we need to incur the overhead of caching a new pillar. ''' log.debug('Pillar cache getting external pillar with ext: %s', self.ext) fresh_pillar = Pillar(self.opts, self.grains, self.minion_id, self.saltenv, ext=self.ext, functions=self.functions, pillarenv=self.pillarenv) return fresh_pillar.compile_pillar() def compile_pillar(self, *args, **kwargs): # Will likely just be pillar_dirs ''' Compile pillar and set it to the cache, if not found. :param args: :param kwargs: :return: ''' log.debug('Scanning pillar cache for information about minion %s and pillarenv %s', self.minion_id, self.pillarenv) log.debug('Scanning cache for minion %s: %s', self.minion_id, self.cache[self.minion_id] or '*empty*') # Check the cache! if self.minion_id in self.cache: # Keyed by minion_id # TODO Compare grains, etc? if self.pillarenv in self.cache[self.minion_id]: # We have a cache hit! Send it back. log.debug('Pillar cache hit for minion %s and pillarenv %s', self.minion_id, self.pillarenv) pillar_data = self.cache[self.minion_id][self.pillarenv] else: # We found the minion but not the env. Store it. pillar_data = self.fetch_pillar() self.cache[self.minion_id][self.pillarenv] = pillar_data self.cache.store() log.debug('Pillar cache miss for pillarenv %s for minion %s', self.pillarenv, self.minion_id) else: # We haven't seen this minion yet in the cache. Store it. 
pillar_data = self.fetch_pillar() self.cache[self.minion_id] = {self.pillarenv: pillar_data} log.debug('Pillar cache has been added for minion %s', self.minion_id) log.debug('Current pillar cache: %s', self.cache[self.minion_id]) # we dont want the pillar_override baked into the cached fetch_pillar from above if self.pillar_override: pillar_data = merge( pillar_data, self.pillar_override, self.opts.get('pillar_source_merging_strategy', 'smart'), self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) pillar_data.update(self.pillar_override) return pillar_data class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = 
salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __valid_on_demand_ext_pillar(self, opts): ''' Check to see if the on demand external pillar is allowed ''' if not isinstance(self.ext, dict): log.error( 'On-demand pillar %s is not formatted as a dictionary', self.ext ) return False on_demand = opts.get('on_demand_ext_pillar', []) try: invalid_on_demand = set([x for x in self.ext if x not in on_demand]) except TypeError: # Prevent traceback when on_demand_ext_pillar option is malformed log.error( 'The \'on_demand_ext_pillar\' configuration option is ' 'malformed, it should be a list of ext_pillar module names' ) return False if invalid_on_demand: log.error( 'The following ext_pillar modules are not allowed for ' 'on-demand pillar data: %s. Valid on-demand ext_pillar ' 'modules are: %s. 
The valid modules can be adjusted by ' 'setting the \'on_demand_ext_pillar\' config option.', ', '.join(sorted(invalid_on_demand)), ', '.join(on_demand), ) return False return True def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs def get_tops(self): ''' Gather the top files ''' tops = 
collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in done[saltenv]: continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, render error' ':\n{1}').format(sls, exc)) 
done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders) def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: orders[saltenv][target]) for target in sorted_targets: sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return 
merged_tops, errors def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. '.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. Each git_pillar ' 'branch/tag must have its own top file.' ) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. 
It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: nstate, mods, err = self.render_pstate( sub_sls, saltenv, mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if 
not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. 
Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. 
if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = 
salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True 
except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy() # TODO: actually migrate from Pillar to AsyncPillar to allow for futures in # ext_pillar etc. class AsyncPillar(Pillar): @tornado.gen.coroutine def compile_pillar(self, ext=True): ret = super(AsyncPillar, self).compile_pillar(ext=ext) raise tornado.gen.Return(ret)
saltstack/salt
salt/pillar/__init__.py
get_async_pillar
python
def get_async_pillar(opts, grains, minion_id, saltenv=None, ext=None,
                     funcs=None, pillar_override=None, pillarenv=None,
                     extra_minion_data=None):
    '''
    Return the correct pillar driver based on the file_client option
    '''
    client = opts['file_client']
    # A disabled master cannot serve remote pillar data, so force the
    # local driver in that case.
    if client == 'remote' and opts.get('master_type') == 'disable':
        client = 'local'
    # Anything other than an explicit 'remote' client falls back to the
    # local (masterless) async pillar implementation.
    if client == 'remote':
        driver = AsyncRemotePillar
    else:
        driver = AsyncPillar
    return driver(opts, grains, minion_id, saltenv, ext,
                  functions=funcs,
                  pillar_override=pillar_override,
                  pillarenv=pillarenv,
                  extra_minion_data=extra_minion_data)
Return the correct pillar driver based on the file_client option
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L70-L85
null
# -*- coding: utf-8 -*- ''' Render the pillar data ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import fnmatch import os import collections import logging import tornado.gen import sys import traceback import inspect # Import salt libs import salt.loader import salt.fileclient import salt.minion import salt.crypt import salt.transport.client import salt.utils.args import salt.utils.cache import salt.utils.crypt import salt.utils.data import salt.utils.dictupdate import salt.utils.url from salt.exceptions import SaltClientError from salt.template import compile_template from salt.utils.odict import OrderedDict from salt.version import __version__ # Even though dictupdate is imported, invoking salt.utils.dictupdate.merge here # causes an UnboundLocalError. This should be investigated and fixed, but until # then, leave the import directly below this comment intact. from salt.utils.dictupdate import merge # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def get_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None, pillar_override=None, pillarenv=None, extra_minion_data=None): ''' Return the correct pillar driver based on the file_client option ''' file_client = opts['file_client'] if opts.get('master_type') == 'disable' and file_client == 'remote': file_client = 'local' ptype = { 'remote': RemotePillar, 'local': Pillar }.get(file_client, Pillar) # If local pillar and we're caching, run through the cache system first log.debug('Determining pillar cache') if opts['pillar_cache']: log.info('Compiling pillar from cache') log.debug('get_pillar using pillar cache with ext: %s', ext) return PillarCache(opts, grains, minion_id, saltenv, ext=ext, functions=funcs, pillar_override=pillar_override, pillarenv=pillarenv) return ptype(opts, grains, minion_id, saltenv, ext, functions=funcs, pillar_override=pillar_override, pillarenv=pillarenv, 
extra_minion_data=extra_minion_data) # TODO: migrate everyone to this one! class RemotePillarMixin(object): ''' Common remote pillar functionality ''' def get_ext_pillar_extra_minion_data(self, opts): ''' Returns the extra data from the minion's opts dict (the config file). This data will be passed to external pillar functions. ''' def get_subconfig(opts_key): ''' Returns a dict containing the opts key subtree, while maintaining the opts structure ''' ret_dict = aux_dict = {} config_val = opts subkeys = opts_key.split(':') # Build an empty dict with the opts path for subkey in subkeys[:-1]: aux_dict[subkey] = {} aux_dict = aux_dict[subkey] if not config_val.get(subkey): # The subkey is not in the config return {} config_val = config_val[subkey] if subkeys[-1] not in config_val: return {} aux_dict[subkeys[-1]] = config_val[subkeys[-1]] return ret_dict extra_data = {} if 'pass_to_ext_pillars' in opts: if not isinstance(opts['pass_to_ext_pillars'], list): log.exception('\'pass_to_ext_pillars\' config is malformed.') raise SaltClientError('\'pass_to_ext_pillars\' config is ' 'malformed.') for key in opts['pass_to_ext_pillars']: salt.utils.dictupdate.update(extra_data, get_subconfig(key), recursive_update=True, merge_lists=True) log.trace('ext_pillar_extra_data = %s', extra_data) return extra_data class AsyncRemotePillar(RemotePillarMixin): ''' Get the pillar from the master ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.opts = opts self.opts['saltenv'] = saltenv self.ext = ext self.grains = grains self.minion_id = minion_id self.channel = salt.transport.client.AsyncReqChannel.factory(opts) if pillarenv is not None: self.opts['pillarenv'] = pillarenv self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not 
isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') salt.utils.dictupdate.update(self.extra_minion_data, self.get_ext_pillar_extra_minion_data(opts), recursive_update=True, merge_lists=True) self._closing = False @tornado.gen.coroutine def compile_pillar(self): ''' Return a future which will contain the pillar data from the master ''' load = {'id': self.minion_id, 'grains': self.grains, 'saltenv': self.opts['saltenv'], 'pillarenv': self.opts['pillarenv'], 'pillar_override': self.pillar_override, 'extra_minion_data': self.extra_minion_data, 'ver': '2', 'cmd': '_pillar'} if self.ext: load['ext'] = self.ext try: ret_pillar = yield self.channel.crypted_transfer_decode_dictentry( load, dictkey='pillar', ) except Exception: log.exception('Exception getting pillar:') raise SaltClientError('Exception getting pillar.') if not isinstance(ret_pillar, dict): msg = ('Got a bad pillar from master, type {0}, expecting dict: ' '{1}').format(type(ret_pillar).__name__, ret_pillar) log.error(msg) # raise an exception! Pillar isn't empty, we can't sync it! 
raise SaltClientError(msg) raise tornado.gen.Return(ret_pillar) def destroy(self): if self._closing: return self._closing = True self.channel.close() def __del__(self): self.destroy() class RemotePillar(RemotePillarMixin): ''' Get the pillar from the master ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.opts = opts self.opts['saltenv'] = saltenv self.ext = ext self.grains = grains self.minion_id = minion_id self.channel = salt.transport.client.ReqChannel.factory(opts) if pillarenv is not None: self.opts['pillarenv'] = pillarenv self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') salt.utils.dictupdate.update(self.extra_minion_data, self.get_ext_pillar_extra_minion_data(opts), recursive_update=True, merge_lists=True) self._closing = False def compile_pillar(self): ''' Return the pillar data from the master ''' load = {'id': self.minion_id, 'grains': self.grains, 'saltenv': self.opts['saltenv'], 'pillarenv': self.opts['pillarenv'], 'pillar_override': self.pillar_override, 'extra_minion_data': self.extra_minion_data, 'ver': '2', 'cmd': '_pillar'} if self.ext: load['ext'] = self.ext ret_pillar = self.channel.crypted_transfer_decode_dictentry(load, dictkey='pillar', ) if not isinstance(ret_pillar, dict): log.error( 'Got a bad pillar from master, type %s, expecting dict: %s', type(ret_pillar).__name__, ret_pillar ) return {} return ret_pillar def destroy(self): if hasattr(self, '_closing') and self._closing: return self._closing = True self.channel.close() def __del__(self): self.destroy() class PillarCache(object): ''' Return a cached pillar if it exists, otherwise cache it. 
Pillar caches are structed in two diminensions: minion_id with a dict of saltenvs. Each saltenv contains a pillar dict Example data structure: ``` {'minion_1': {'base': {'pilar_key_1' 'pillar_val_1'} } ''' # TODO ABC? def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): # Yes, we need all of these because we need to route to the Pillar object # if we have no cache. This is another refactor target. # Go ahead and assign these because they may be needed later self.opts = opts self.grains = grains self.minion_id = minion_id self.ext = ext self.functions = functions self.pillar_override = pillar_override self.pillarenv = pillarenv if saltenv is None: self.saltenv = 'base' else: self.saltenv = saltenv # Determine caching backend self.cache = salt.utils.cache.CacheFactory.factory( self.opts['pillar_cache_backend'], self.opts['pillar_cache_ttl'], minion_cache_path=self._minion_cache_path(minion_id)) def _minion_cache_path(self, minion_id): ''' Return the path to the cache file for the minion. Used only for disk-based backends ''' return os.path.join(self.opts['cachedir'], 'pillar_cache', minion_id) def fetch_pillar(self): ''' In the event of a cache miss, we need to incur the overhead of caching a new pillar. ''' log.debug('Pillar cache getting external pillar with ext: %s', self.ext) fresh_pillar = Pillar(self.opts, self.grains, self.minion_id, self.saltenv, ext=self.ext, functions=self.functions, pillarenv=self.pillarenv) return fresh_pillar.compile_pillar() def compile_pillar(self, *args, **kwargs): # Will likely just be pillar_dirs ''' Compile pillar and set it to the cache, if not found. :param args: :param kwargs: :return: ''' log.debug('Scanning pillar cache for information about minion %s and pillarenv %s', self.minion_id, self.pillarenv) log.debug('Scanning cache for minion %s: %s', self.minion_id, self.cache[self.minion_id] or '*empty*') # Check the cache! 
if self.minion_id in self.cache: # Keyed by minion_id # TODO Compare grains, etc? if self.pillarenv in self.cache[self.minion_id]: # We have a cache hit! Send it back. log.debug('Pillar cache hit for minion %s and pillarenv %s', self.minion_id, self.pillarenv) pillar_data = self.cache[self.minion_id][self.pillarenv] else: # We found the minion but not the env. Store it. pillar_data = self.fetch_pillar() self.cache[self.minion_id][self.pillarenv] = pillar_data self.cache.store() log.debug('Pillar cache miss for pillarenv %s for minion %s', self.pillarenv, self.minion_id) else: # We haven't seen this minion yet in the cache. Store it. pillar_data = self.fetch_pillar() self.cache[self.minion_id] = {self.pillarenv: pillar_data} log.debug('Pillar cache has been added for minion %s', self.minion_id) log.debug('Current pillar cache: %s', self.cache[self.minion_id]) # we dont want the pillar_override baked into the cached fetch_pillar from above if self.pillar_override: pillar_data = merge( pillar_data, self.pillar_override, self.opts.get('pillar_source_merging_strategy', 'smart'), self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) pillar_data.update(self.pillar_override) return pillar_data class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if 
opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __valid_on_demand_ext_pillar(self, opts): ''' Check to see if the on demand external pillar is allowed ''' if not isinstance(self.ext, dict): log.error( 'On-demand pillar %s is not formatted as a dictionary', self.ext ) return False on_demand = opts.get('on_demand_ext_pillar', []) try: invalid_on_demand = set([x for x in self.ext if x not in on_demand]) except TypeError: # Prevent traceback when on_demand_ext_pillar option is malformed log.error( 'The \'on_demand_ext_pillar\' configuration option is ' 'malformed, it should be a list of ext_pillar module names' ) return False if invalid_on_demand: log.error( 'The following ext_pillar modules are not allowed for ' 'on-demand pillar data: %s. Valid on-demand ext_pillar ' 'modules are: %s. 
The valid modules can be adjusted by ' 'setting the \'on_demand_ext_pillar\' config option.', ', '.join(sorted(invalid_on_demand)), ', '.join(on_demand), ) return False return True def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs def get_tops(self): ''' Gather the top files ''' tops = 
collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in done[saltenv]: continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, render error' ':\n{1}').format(sls, exc)) 
done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders) def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: orders[saltenv][target]) for target in sorted_targets: sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return 
merged_tops, errors def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. '.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. Each git_pillar ' 'branch/tag must have its own top file.' ) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. 
It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: nstate, mods, err = self.render_pstate( sub_sls, saltenv, mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if 
not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. 
Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. 
if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = 
salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True 
except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy() # TODO: actually migrate from Pillar to AsyncPillar to allow for futures in # ext_pillar etc. class AsyncPillar(Pillar): @tornado.gen.coroutine def compile_pillar(self, ext=True): ret = super(AsyncPillar, self).compile_pillar(ext=ext) raise tornado.gen.Return(ret)
saltstack/salt
salt/pillar/__init__.py
RemotePillarMixin.get_ext_pillar_extra_minion_data
python
def get_ext_pillar_extra_minion_data(self, opts):
    '''
    Returns the extra data from the minion's opts dict (the config file).

    This data will be passed to external pillar functions.

    :param opts: The minion's opts dict (the parsed config file).
    :return: A dict mirroring the structure of ``opts``, restricted to the
        key paths listed in the ``pass_to_ext_pillars`` config option.
        Empty if the option is absent.
    :raises SaltClientError: If ``pass_to_ext_pillars`` is present but is
        not a list.
    '''
    def get_subconfig(opts_key):
        '''
        Returns a dict containing the opts key subtree, while maintaining
        the opts structure
        '''
        ret_dict = aux_dict = {}
        config_val = opts
        subkeys = opts_key.split(':')
        # Build an empty dict with the opts path
        for subkey in subkeys[:-1]:
            aux_dict[subkey] = {}
            aux_dict = aux_dict[subkey]
            # NOTE(review): truthiness test — a falsy intermediate value
            # (empty dict, 0, False) is treated the same as a missing key;
            # confirm this is intended before tightening to a `in` check.
            if not config_val.get(subkey):
                # The subkey is not in the config
                return {}
            config_val = config_val[subkey]
        if subkeys[-1] not in config_val:
            return {}
        aux_dict[subkeys[-1]] = config_val[subkeys[-1]]
        return ret_dict

    extra_data = {}
    if 'pass_to_ext_pillars' in opts:
        if not isinstance(opts['pass_to_ext_pillars'], list):
            # Fix: use log.error, not log.exception — there is no active
            # exception here, so log.exception would attach a spurious
            # 'NoneType: None' traceback to the log record.
            log.error('\'pass_to_ext_pillars\' config is malformed.')
            raise SaltClientError('\'pass_to_ext_pillars\' config is '
                                  'malformed.')
        # Merge each requested subtree into the result, preserving nesting
        # and aggregating lists rather than replacing them.
        for key in opts['pass_to_ext_pillars']:
            salt.utils.dictupdate.update(extra_data,
                                         get_subconfig(key),
                                         recursive_update=True,
                                         merge_lists=True)
    log.trace('ext_pillar_extra_data = %s', extra_data)
    return extra_data
Returns the extra data from the minion's opts dict (the config file). This data will be passed to external pillar functions.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L92-L131
[ "def update(dest, upd, recursive_update=True, merge_lists=False):\n '''\n Recursive version of the default dict.update\n\n Merges upd recursively into dest\n\n If recursive_update=False, will use the classic dict.update, or fall back\n on a manual merge (helpful for non-dict types like FunctionWrapper)\n\n If merge_lists=True, will aggregate list object types instead of replace.\n The list in ``upd`` is added to the list in ``dest``, so the resulting list\n is ``dest[key] + upd[key]``. This behavior is only activated when\n recursive_update=True. By default merge_lists=False.\n\n .. versionchanged: 2016.11.6\n When merging lists, duplicate values are removed. Values already\n present in the ``dest`` list are not added from the ``upd`` list.\n '''\n if (not isinstance(dest, Mapping)) \\\n or (not isinstance(upd, Mapping)):\n raise TypeError('Cannot update using non-dict types in dictupdate.update()')\n updkeys = list(upd.keys())\n if not set(list(dest.keys())) & set(updkeys):\n recursive_update = False\n if recursive_update:\n for key in updkeys:\n val = upd[key]\n try:\n dest_subkey = dest.get(key, None)\n except AttributeError:\n dest_subkey = None\n if isinstance(dest_subkey, Mapping) \\\n and isinstance(val, Mapping):\n ret = update(dest_subkey, val, merge_lists=merge_lists)\n dest[key] = ret\n elif isinstance(dest_subkey, list) and isinstance(val, list):\n if merge_lists:\n merged = copy.deepcopy(dest_subkey)\n merged.extend([x for x in val if x not in merged])\n dest[key] = merged\n else:\n dest[key] = upd[key]\n else:\n dest[key] = upd[key]\n return dest\n try:\n for k in upd:\n dest[k] = upd[k]\n except AttributeError:\n # this mapping is not a dict\n for k in upd:\n dest[k] = upd[k]\n return dest\n", "def get_subconfig(opts_key):\n '''\n Returns a dict containing the opts key subtree, while maintaining\n the opts structure\n '''\n ret_dict = aux_dict = {}\n config_val = opts\n subkeys = opts_key.split(':')\n # Build an empty dict with the opts path\n for 
subkey in subkeys[:-1]:\n aux_dict[subkey] = {}\n aux_dict = aux_dict[subkey]\n if not config_val.get(subkey):\n # The subkey is not in the config\n return {}\n config_val = config_val[subkey]\n if subkeys[-1] not in config_val:\n return {}\n aux_dict[subkeys[-1]] = config_val[subkeys[-1]]\n return ret_dict\n" ]
class RemotePillarMixin(object): ''' Common remote pillar functionality '''
saltstack/salt
salt/pillar/__init__.py
AsyncRemotePillar.compile_pillar
python
def compile_pillar(self):
    '''
    Return a future which will contain the pillar data from the master
    '''
    # Build the payload for the master's _pillar command.
    payload = {
        'id': self.minion_id,
        'grains': self.grains,
        'saltenv': self.opts['saltenv'],
        'pillarenv': self.opts['pillarenv'],
        'pillar_override': self.pillar_override,
        'extra_minion_data': self.extra_minion_data,
        'ver': '2',
        'cmd': '_pillar',
    }
    if self.ext:
        payload['ext'] = self.ext
    try:
        ret_pillar = yield self.channel.crypted_transfer_decode_dictentry(
            payload,
            dictkey='pillar',
        )
    except Exception:
        log.exception('Exception getting pillar:')
        raise SaltClientError('Exception getting pillar.')
    if not isinstance(ret_pillar, dict):
        msg = ('Got a bad pillar from master, type {0}, expecting dict: '
               '{1}').format(type(ret_pillar).__name__, ret_pillar)
        log.error(msg)
        # raise an exception! Pillar isn't empty, we can't sync it!
        raise SaltClientError(msg)
    raise tornado.gen.Return(ret_pillar)
Return a future which will contain the pillar data from the master
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L163-L192
null
class AsyncRemotePillar(RemotePillarMixin): ''' Get the pillar from the master ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.opts = opts self.opts['saltenv'] = saltenv self.ext = ext self.grains = grains self.minion_id = minion_id self.channel = salt.transport.client.AsyncReqChannel.factory(opts) if pillarenv is not None: self.opts['pillarenv'] = pillarenv self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') salt.utils.dictupdate.update(self.extra_minion_data, self.get_ext_pillar_extra_minion_data(opts), recursive_update=True, merge_lists=True) self._closing = False @tornado.gen.coroutine def destroy(self): if self._closing: return self._closing = True self.channel.close() def __del__(self): self.destroy()
saltstack/salt
salt/pillar/__init__.py
RemotePillar.compile_pillar
python
def compile_pillar(self):
    '''
    Return the pillar data from the master
    '''
    # Build the payload for the master's _pillar command.
    payload = {
        'id': self.minion_id,
        'grains': self.grains,
        'saltenv': self.opts['saltenv'],
        'pillarenv': self.opts['pillarenv'],
        'pillar_override': self.pillar_override,
        'extra_minion_data': self.extra_minion_data,
        'ver': '2',
        'cmd': '_pillar',
    }
    if self.ext:
        payload['ext'] = self.ext
    ret_pillar = self.channel.crypted_transfer_decode_dictentry(
        payload,
        dictkey='pillar',
    )
    # A well-formed reply is a dict; anything else is logged and
    # replaced with an empty pillar.
    if isinstance(ret_pillar, dict):
        return ret_pillar
    log.error(
        'Got a bad pillar from master, type %s, expecting dict: %s',
        type(ret_pillar).__name__, ret_pillar
    )
    return {}
Return the pillar data from the master
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L233-L257
null
class RemotePillar(RemotePillarMixin): ''' Get the pillar from the master ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.opts = opts self.opts['saltenv'] = saltenv self.ext = ext self.grains = grains self.minion_id = minion_id self.channel = salt.transport.client.ReqChannel.factory(opts) if pillarenv is not None: self.opts['pillarenv'] = pillarenv self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') salt.utils.dictupdate.update(self.extra_minion_data, self.get_ext_pillar_extra_minion_data(opts), recursive_update=True, merge_lists=True) self._closing = False def destroy(self): if hasattr(self, '_closing') and self._closing: return self._closing = True self.channel.close() def __del__(self): self.destroy()
saltstack/salt
salt/pillar/__init__.py
PillarCache.fetch_pillar
python
def fetch_pillar(self):
    '''
    In the event of a cache miss, we need to incur the overhead of caching
    a new pillar.
    '''
    log.debug('Pillar cache getting external pillar with ext: %s', self.ext)
    # Build a fresh local Pillar object and compile it from scratch.
    pillar_obj = Pillar(
        self.opts,
        self.grains,
        self.minion_id,
        self.saltenv,
        ext=self.ext,
        functions=self.functions,
        pillarenv=self.pillarenv,
    )
    return pillar_obj.compile_pillar()
In the event of a cache miss, we need to incur the overhead of caching a new pillar.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L318-L331
[ "def compile_pillar(self, ext=True):\n '''\n Render the pillar data and return\n '''\n top, top_errors = self.get_top()\n if ext:\n if self.opts.get('ext_pillar_first', False):\n self.opts['pillar'], errors = self.ext_pillar(self.pillar_override)\n self.rend = salt.loader.render(self.opts, self.functions)\n matches = self.top_matches(top)\n pillar, errors = self.render_pillar(matches, errors=errors)\n pillar = merge(\n self.opts['pillar'],\n pillar,\n self.merge_strategy,\n self.opts.get('renderer', 'yaml'),\n self.opts.get('pillar_merge_lists', False))\n else:\n matches = self.top_matches(top)\n pillar, errors = self.render_pillar(matches)\n pillar, errors = self.ext_pillar(pillar, errors=errors)\n else:\n matches = self.top_matches(top)\n pillar, errors = self.render_pillar(matches)\n errors.extend(top_errors)\n if self.opts.get('pillar_opts', False):\n mopts = dict(self.opts)\n if 'grains' in mopts:\n mopts.pop('grains')\n mopts['saltversion'] = __version__\n pillar['master'] = mopts\n if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False):\n pillar = merge(\n self.opts['pillar'],\n pillar,\n self.merge_strategy,\n self.opts.get('renderer', 'yaml'),\n self.opts.get('pillar_merge_lists', False))\n if errors:\n for error in errors:\n log.critical('Pillar render error: %s', error)\n pillar['_errors'] = errors\n\n if self.pillar_override:\n pillar = merge(\n pillar,\n self.pillar_override,\n self.merge_strategy,\n self.opts.get('renderer', 'yaml'),\n self.opts.get('pillar_merge_lists', False))\n\n decrypt_errors = self.decrypt_pillar(pillar)\n if decrypt_errors:\n pillar.setdefault('_errors', []).extend(decrypt_errors)\n\n return pillar\n" ]
class PillarCache(object): ''' Return a cached pillar if it exists, otherwise cache it. Pillar caches are structed in two diminensions: minion_id with a dict of saltenvs. Each saltenv contains a pillar dict Example data structure: ``` {'minion_1': {'base': {'pilar_key_1' 'pillar_val_1'} } ''' # TODO ABC? def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): # Yes, we need all of these because we need to route to the Pillar object # if we have no cache. This is another refactor target. # Go ahead and assign these because they may be needed later self.opts = opts self.grains = grains self.minion_id = minion_id self.ext = ext self.functions = functions self.pillar_override = pillar_override self.pillarenv = pillarenv if saltenv is None: self.saltenv = 'base' else: self.saltenv = saltenv # Determine caching backend self.cache = salt.utils.cache.CacheFactory.factory( self.opts['pillar_cache_backend'], self.opts['pillar_cache_ttl'], minion_cache_path=self._minion_cache_path(minion_id)) def _minion_cache_path(self, minion_id): ''' Return the path to the cache file for the minion. Used only for disk-based backends ''' return os.path.join(self.opts['cachedir'], 'pillar_cache', minion_id) def compile_pillar(self, *args, **kwargs): # Will likely just be pillar_dirs ''' Compile pillar and set it to the cache, if not found. :param args: :param kwargs: :return: ''' log.debug('Scanning pillar cache for information about minion %s and pillarenv %s', self.minion_id, self.pillarenv) log.debug('Scanning cache for minion %s: %s', self.minion_id, self.cache[self.minion_id] or '*empty*') # Check the cache! if self.minion_id in self.cache: # Keyed by minion_id # TODO Compare grains, etc? if self.pillarenv in self.cache[self.minion_id]: # We have a cache hit! Send it back. 
log.debug('Pillar cache hit for minion %s and pillarenv %s', self.minion_id, self.pillarenv) pillar_data = self.cache[self.minion_id][self.pillarenv] else: # We found the minion but not the env. Store it. pillar_data = self.fetch_pillar() self.cache[self.minion_id][self.pillarenv] = pillar_data self.cache.store() log.debug('Pillar cache miss for pillarenv %s for minion %s', self.pillarenv, self.minion_id) else: # We haven't seen this minion yet in the cache. Store it. pillar_data = self.fetch_pillar() self.cache[self.minion_id] = {self.pillarenv: pillar_data} log.debug('Pillar cache has been added for minion %s', self.minion_id) log.debug('Current pillar cache: %s', self.cache[self.minion_id]) # we dont want the pillar_override baked into the cached fetch_pillar from above if self.pillar_override: pillar_data = merge( pillar_data, self.pillar_override, self.opts.get('pillar_source_merging_strategy', 'smart'), self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) pillar_data.update(self.pillar_override) return pillar_data
saltstack/salt
salt/pillar/__init__.py
PillarCache.compile_pillar
python
def compile_pillar(self, *args, **kwargs):  # Will likely just be pillar_dirs
    '''
    Compile pillar and set it to the cache, if not found.

    :param args:
    :param kwargs:
    :return:
    '''
    log.debug('Scanning pillar cache for information about minion %s and pillarenv %s',
              self.minion_id, self.pillarenv)
    log.debug('Scanning cache for minion %s: %s',
              self.minion_id, self.cache[self.minion_id] or '*empty*')
    # Check the cache!  The cache is keyed first by minion_id, then by
    # pillarenv.
    if self.minion_id not in self.cache:
        # We haven't seen this minion yet in the cache. Store it.
        pillar_data = self.fetch_pillar()
        self.cache[self.minion_id] = {self.pillarenv: pillar_data}
        log.debug('Pillar cache has been added for minion %s', self.minion_id)
        log.debug('Current pillar cache: %s', self.cache[self.minion_id])
    elif self.pillarenv not in self.cache[self.minion_id]:
        # We found the minion but not the env. Store it.
        pillar_data = self.fetch_pillar()
        self.cache[self.minion_id][self.pillarenv] = pillar_data
        self.cache.store()
        log.debug('Pillar cache miss for pillarenv %s for minion %s',
                  self.pillarenv, self.minion_id)
    else:
        # We have a cache hit! Send it back.
        log.debug('Pillar cache hit for minion %s and pillarenv %s',
                  self.minion_id, self.pillarenv)
        pillar_data = self.cache[self.minion_id][self.pillarenv]
    # we dont want the pillar_override baked into the cached fetch_pillar from above
    if self.pillar_override:
        pillar_data = merge(
            pillar_data,
            self.pillar_override,
            self.opts.get('pillar_source_merging_strategy', 'smart'),
            self.opts.get('renderer', 'yaml'),
            self.opts.get('pillar_merge_lists', False))
        pillar_data.update(self.pillar_override)
    return pillar_data
Compile pillar and set it to the cache, if not found. :param args: :param kwargs: :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L333-L374
[ "def merge(obj_a, obj_b, strategy='smart', renderer='yaml', merge_lists=False):\n if strategy == 'smart':\n if renderer.split('|')[-1] == 'yamlex' or renderer.startswith('yamlex_'):\n strategy = 'aggregate'\n else:\n strategy = 'recurse'\n\n if strategy == 'list':\n merged = merge_list(obj_a, obj_b)\n elif strategy == 'recurse':\n merged = merge_recurse(obj_a, obj_b, merge_lists)\n elif strategy == 'aggregate':\n #: level = 1 merge at least root data\n merged = merge_aggregate(obj_a, obj_b)\n elif strategy == 'overwrite':\n merged = merge_overwrite(obj_a, obj_b, merge_lists)\n elif strategy == 'none':\n # If we do not want to merge, there is only one pillar passed, so we can safely use the default recurse,\n # we just do not want to log an error\n merged = merge_recurse(obj_a, obj_b)\n else:\n log.warning(\n 'Unknown merging strategy \\'%s\\', fallback to recurse',\n strategy\n )\n merged = merge_recurse(obj_a, obj_b)\n\n return merged\n", "def fetch_pillar(self):\n '''\n In the event of a cache miss, we need to incur the overhead of caching\n a new pillar.\n '''\n log.debug('Pillar cache getting external pillar with ext: %s', self.ext)\n fresh_pillar = Pillar(self.opts,\n self.grains,\n self.minion_id,\n self.saltenv,\n ext=self.ext,\n functions=self.functions,\n pillarenv=self.pillarenv)\n return fresh_pillar.compile_pillar()\n" ]
class PillarCache(object): ''' Return a cached pillar if it exists, otherwise cache it. Pillar caches are structed in two diminensions: minion_id with a dict of saltenvs. Each saltenv contains a pillar dict Example data structure: ``` {'minion_1': {'base': {'pilar_key_1' 'pillar_val_1'} } ''' # TODO ABC? def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): # Yes, we need all of these because we need to route to the Pillar object # if we have no cache. This is another refactor target. # Go ahead and assign these because they may be needed later self.opts = opts self.grains = grains self.minion_id = minion_id self.ext = ext self.functions = functions self.pillar_override = pillar_override self.pillarenv = pillarenv if saltenv is None: self.saltenv = 'base' else: self.saltenv = saltenv # Determine caching backend self.cache = salt.utils.cache.CacheFactory.factory( self.opts['pillar_cache_backend'], self.opts['pillar_cache_ttl'], minion_cache_path=self._minion_cache_path(minion_id)) def _minion_cache_path(self, minion_id): ''' Return the path to the cache file for the minion. Used only for disk-based backends ''' return os.path.join(self.opts['cachedir'], 'pillar_cache', minion_id) def fetch_pillar(self): ''' In the event of a cache miss, we need to incur the overhead of caching a new pillar. ''' log.debug('Pillar cache getting external pillar with ext: %s', self.ext) fresh_pillar = Pillar(self.opts, self.grains, self.minion_id, self.saltenv, ext=self.ext, functions=self.functions, pillarenv=self.pillarenv) return fresh_pillar.compile_pillar()
saltstack/salt
salt/pillar/__init__.py
Pillar.__valid_on_demand_ext_pillar
python
def __valid_on_demand_ext_pillar(self, opts):
    '''
    Check to see if the on demand external pillar is allowed
    '''
    # The on-demand ext_pillar spec must be a dict mapping module name
    # to its configuration.
    if not isinstance(self.ext, dict):
        log.error(
            'On-demand pillar %s is not formatted as a dictionary',
            self.ext
        )
        return False

    on_demand = opts.get('on_demand_ext_pillar', [])
    try:
        not_allowed = {x for x in self.ext if x not in on_demand}
    except TypeError:
        # Prevent traceback when on_demand_ext_pillar option is malformed
        log.error(
            'The \'on_demand_ext_pillar\' configuration option is '
            'malformed, it should be a list of ext_pillar module names'
        )
        return False

    if not not_allowed:
        return True
    log.error(
        'The following ext_pillar modules are not allowed for '
        'on-demand pillar data: %s. Valid on-demand ext_pillar '
        'modules are: %s. The valid modules can be adjusted by '
        'setting the \'on_demand_ext_pillar\' config option.',
        ', '.join(sorted(not_allowed)),
        ', '.join(on_demand),
    )
    return False
Check to see if the on demand external pillar is allowed
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L429-L461
null
class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = 
self.client.list_states(saltenv) return avail def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs def get_tops(self): ''' Gather the top files ''' tops = collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. 
if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in done[saltenv]: continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, render error' ':\n{1}').format(sls, exc)) done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, 
targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders) def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: orders[saltenv][target]) for target in sorted_targets: sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return merged_tops, errors def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. 
Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. '.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. Each git_pillar ' 'branch/tag must have its own top file.' ) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. 
It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: nstate, mods, err = self.render_pstate( sub_sls, saltenv, mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if 
not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. 
Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. 
if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = 
salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True 
except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy()
saltstack/salt
salt/pillar/__init__.py
Pillar.__gather_avail
python
def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail
Gather the lists of available sls data from the master
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L463-L470
[ "def _get_envs(self):\n '''\n Pull the file server environments out of the master options\n '''\n envs = set(['base'])\n if 'pillar_roots' in self.opts:\n envs.update(list(self.opts['pillar_roots']))\n return envs\n" ]
class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __valid_on_demand_ext_pillar(self, opts): ''' Check to see if the on demand external pillar is allowed ''' if not isinstance(self.ext, dict): log.error( 'On-demand 
pillar %s is not formatted as a dictionary', self.ext ) return False on_demand = opts.get('on_demand_ext_pillar', []) try: invalid_on_demand = set([x for x in self.ext if x not in on_demand]) except TypeError: # Prevent traceback when on_demand_ext_pillar option is malformed log.error( 'The \'on_demand_ext_pillar\' configuration option is ' 'malformed, it should be a list of ext_pillar module names' ) return False if invalid_on_demand: log.error( 'The following ext_pillar modules are not allowed for ' 'on-demand pillar data: %s. Valid on-demand ext_pillar ' 'modules are: %s. The valid modules can be adjusted by ' 'setting the \'on_demand_ext_pillar\' config option.', ', '.join(sorted(invalid_on_demand)), ', '.join(on_demand), ) return False return True def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: 
log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs def get_tops(self): ''' Gather the top files ''' tops = collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in 
done[saltenv]: continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, render error' ':\n{1}').format(sls, exc)) done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders) def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: orders[saltenv][target]) for target in sorted_targets: 
sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return merged_tops, errors def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. '.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. 
Each git_pillar ' 'branch/tag must have its own top file.' ) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: 
nstate, mods, err = self.render_pstate( sub_sls, saltenv, mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. 
Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. 
if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = 
salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True 
except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy()
saltstack/salt
salt/pillar/__init__.py
Pillar.__gen_opts
python
def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts
The options need to be altered to conform to the file client
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L472-L510
[ "def __valid_on_demand_ext_pillar(self, opts):\n '''\n Check to see if the on demand external pillar is allowed\n '''\n if not isinstance(self.ext, dict):\n log.error(\n 'On-demand pillar %s is not formatted as a dictionary',\n self.ext\n )\n return False\n\n on_demand = opts.get('on_demand_ext_pillar', [])\n try:\n invalid_on_demand = set([x for x in self.ext if x not in on_demand])\n except TypeError:\n # Prevent traceback when on_demand_ext_pillar option is malformed\n log.error(\n 'The \\'on_demand_ext_pillar\\' configuration option is '\n 'malformed, it should be a list of ext_pillar module names'\n )\n return False\n\n if invalid_on_demand:\n log.error(\n 'The following ext_pillar modules are not allowed for '\n 'on-demand pillar data: %s. Valid on-demand ext_pillar '\n 'modules are: %s. The valid modules can be adjusted by '\n 'setting the \\'on_demand_ext_pillar\\' config option.',\n ', '.join(sorted(invalid_on_demand)),\n ', '.join(on_demand),\n )\n return False\n return True\n" ]
class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __valid_on_demand_ext_pillar(self, opts): ''' Check to see if the on demand external pillar is allowed ''' if not isinstance(self.ext, dict): log.error( 'On-demand 
pillar %s is not formatted as a dictionary', self.ext ) return False on_demand = opts.get('on_demand_ext_pillar', []) try: invalid_on_demand = set([x for x in self.ext if x not in on_demand]) except TypeError: # Prevent traceback when on_demand_ext_pillar option is malformed log.error( 'The \'on_demand_ext_pillar\' configuration option is ' 'malformed, it should be a list of ext_pillar module names' ) return False if invalid_on_demand: log.error( 'The following ext_pillar modules are not allowed for ' 'on-demand pillar data: %s. Valid on-demand ext_pillar ' 'modules are: %s. The valid modules can be adjusted by ' 'setting the \'on_demand_ext_pillar\' config option.', ', '.join(sorted(invalid_on_demand)), ', '.join(on_demand), ) return False return True def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs def get_tops(self): ''' Gather the top files ''' tops = collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. 
if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in done[saltenv]: continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, render error' ':\n{1}').format(sls, exc)) done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, 
targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders) def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: orders[saltenv][target]) for target in sorted_targets: sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return merged_tops, errors def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. 
Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. '.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. Each git_pillar ' 'branch/tag must have its own top file.' ) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. 
It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: nstate, mods, err = self.render_pstate( sub_sls, saltenv, mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if 
not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. 
Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. 
if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = 
salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True 
except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy()
saltstack/salt
salt/pillar/__init__.py
Pillar._get_envs
python
def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs
Pull the file server environments out of the master options
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L512-L519
null
class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __valid_on_demand_ext_pillar(self, opts): ''' Check to see if the on demand external pillar is allowed ''' if not isinstance(self.ext, dict): log.error( 'On-demand 
pillar %s is not formatted as a dictionary', self.ext ) return False on_demand = opts.get('on_demand_ext_pillar', []) try: invalid_on_demand = set([x for x in self.ext if x not in on_demand]) except TypeError: # Prevent traceback when on_demand_ext_pillar option is malformed log.error( 'The \'on_demand_ext_pillar\' configuration option is ' 'malformed, it should be a list of ext_pillar module names' ) return False if invalid_on_demand: log.error( 'The following ext_pillar modules are not allowed for ' 'on-demand pillar data: %s. Valid on-demand ext_pillar ' 'modules are: %s. The valid modules can be adjusted by ' 'setting the \'on_demand_ext_pillar\' config option.', ', '.join(sorted(invalid_on_demand)), ', '.join(on_demand), ) return False return True def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 
'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts def get_tops(self): ''' Gather the top files ''' tops = collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in done[saltenv]: 
continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, render error' ':\n{1}').format(sls, exc)) done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders) def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: orders[saltenv][target]) for target in sorted_targets: 
sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return merged_tops, errors def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. '.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. 
Each git_pillar ' 'branch/tag must have its own top file.' ) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: 
nstate, mods, err = self.render_pstate( sub_sls, saltenv, mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. 
Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. 
if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = 
salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True 
except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy()
saltstack/salt
salt/pillar/__init__.py
Pillar.get_tops
python
def get_tops(self): ''' Gather the top files ''' tops = collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in done[saltenv]: continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, 
render error' ':\n{1}').format(sls, exc)) done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors
Gather the top files
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L521-L608
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def compile_template(template,\n renderers,\n default,\n blacklist,\n whitelist,\n saltenv='base',\n sls='',\n input_data='',\n **kwargs):\n '''\n Take the path to a template and return the high data structure\n derived from the template.\n\n Helpers:\n\n :param mask_value:\n Mask value for debugging purposes (prevent sensitive information etc)\n example: \"mask_value=\"pass*\". All \"passwd\", \"password\", \"pass\" will\n be masked (as text).\n '''\n\n # if any error occurs, we return an empty dictionary\n ret = {}\n\n log.debug('compile template: %s', template)\n\n if 'env' in kwargs:\n # \"env\" is not supported; Use \"saltenv\".\n kwargs.pop('env')\n\n if template != ':string:':\n # Template was specified incorrectly\n if not isinstance(template, six.string_types):\n log.error('Template was specified incorrectly: %s', template)\n return ret\n # Template does not exist\n if not os.path.isfile(template):\n log.error('Template does not exist: %s', template)\n return ret\n # Template is an empty file\n if salt.utils.files.is_empty(template):\n log.debug('Template is an empty file: %s', template)\n return ret\n\n with codecs.open(template, encoding=SLS_ENCODING) as ifile:\n # data input to the first render function in the pipe\n input_data = ifile.read()\n if not input_data.strip():\n # Template is nothing but whitespace\n log.error('Template is nothing but whitespace: %s', template)\n return ret\n\n # Get the list of render funcs in the render pipe line.\n render_pipe = template_shebang(template, renderers, default, blacklist, whitelist, input_data)\n\n windows_newline = '\\r\\n' in input_data\n\n input_data = StringIO(input_data)\n for render, argline in render_pipe:\n if salt.utils.stringio.is_readable(input_data):\n input_data.seek(0) # pylint: disable=no-member\n render_kwargs = dict(renderers=renderers, tmplpath=template)\n render_kwargs.update(kwargs)\n if argline:\n render_kwargs['argline'] = 
argline\n start = time.time()\n ret = render(input_data, saltenv, sls, **render_kwargs)\n log.profile(\n 'Time (in seconds) to render \\'%s\\' using \\'%s\\' renderer: %s',\n template,\n render.__module__.split('.')[-1],\n time.time() - start\n )\n if ret is None:\n # The file is empty or is being written elsewhere\n time.sleep(0.01)\n ret = render(input_data, saltenv, sls, **render_kwargs)\n input_data = ret\n if log.isEnabledFor(logging.GARBAGE): # pylint: disable=no-member\n # If ret is not a StringIO (which means it was rendered using\n # yaml, mako, or another engine which renders to a data\n # structure) we don't want to log this.\n if salt.utils.stringio.is_readable(ret):\n log.debug('Rendered data from file: %s:\\n%s', template,\n salt.utils.sanitizers.mask_args_value(salt.utils.data.decode(ret.read()),\n kwargs.get('mask_value'))) # pylint: disable=no-member\n ret.seek(0) # pylint: disable=no-member\n\n # Preserve newlines from original template\n if windows_newline:\n if salt.utils.stringio.is_readable(ret):\n is_stringio = True\n contents = ret.read()\n else:\n is_stringio = False\n contents = ret\n\n if isinstance(contents, six.string_types):\n if '\\r\\n' not in contents:\n contents = contents.replace('\\n', '\\r\\n')\n ret = StringIO(contents) if is_stringio else contents\n else:\n if is_stringio:\n ret.seek(0)\n return ret\n", "def _get_envs(self):\n '''\n Pull the file server environments out of the master options\n '''\n envs = set(['base'])\n if 'pillar_roots' in self.opts:\n envs.update(list(self.opts['pillar_roots']))\n return envs\n" ]
class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __valid_on_demand_ext_pillar(self, opts): ''' Check to see if the on demand external pillar is allowed ''' if not isinstance(self.ext, dict): log.error( 'On-demand 
pillar %s is not formatted as a dictionary', self.ext ) return False on_demand = opts.get('on_demand_ext_pillar', []) try: invalid_on_demand = set([x for x in self.ext if x not in on_demand]) except TypeError: # Prevent traceback when on_demand_ext_pillar option is malformed log.error( 'The \'on_demand_ext_pillar\' configuration option is ' 'malformed, it should be a list of ext_pillar module names' ) return False if invalid_on_demand: log.error( 'The following ext_pillar modules are not allowed for ' 'on-demand pillar data: %s. Valid on-demand ext_pillar ' 'modules are: %s. The valid modules can be adjusted by ' 'setting the \'on_demand_ext_pillar\' config option.', ', '.join(sorted(invalid_on_demand)), ', '.join(on_demand), ) return False return True def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 
'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders) def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: 
orders[saltenv][target]) for target in sorted_targets: sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return merged_tops, errors def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. 
'.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. Each git_pillar ' 'branch/tag must have its own top file.' ) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. 
Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: nstate, mods, err = self.render_pstate( sub_sls, saltenv, mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. 
include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. 
Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. 
if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = 
salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True 
except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy()
saltstack/salt
salt/pillar/__init__.py
Pillar.merge_tops
python
def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders)
Cleanly merge the top files
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L610-L651
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def itervalues(d, **kw):\n return d.itervalues(**kw)\n", "def keys(self):\n 'od.keys() -> list of keys in od'\n return list(self)\n", "def sort_top_targets(self, top, orders):\n '''\n Returns the sorted high data from the merged top files\n '''\n sorted_top = collections.defaultdict(OrderedDict)\n # pylint: disable=cell-var-from-loop\n for saltenv, targets in six.iteritems(top):\n sorted_targets = sorted(targets,\n key=lambda target: orders[saltenv][target])\n for target in sorted_targets:\n sorted_top[saltenv][target] = targets[target]\n # pylint: enable=cell-var-from-loop\n return sorted_top\n" ]
class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __valid_on_demand_ext_pillar(self, opts): ''' Check to see if the on demand external pillar is allowed ''' if not isinstance(self.ext, dict): log.error( 'On-demand 
pillar %s is not formatted as a dictionary', self.ext ) return False on_demand = opts.get('on_demand_ext_pillar', []) try: invalid_on_demand = set([x for x in self.ext if x not in on_demand]) except TypeError: # Prevent traceback when on_demand_ext_pillar option is malformed log.error( 'The \'on_demand_ext_pillar\' configuration option is ' 'malformed, it should be a list of ext_pillar module names' ) return False if invalid_on_demand: log.error( 'The following ext_pillar modules are not allowed for ' 'on-demand pillar data: %s. Valid on-demand ext_pillar ' 'modules are: %s. The valid modules can be adjusted by ' 'setting the \'on_demand_ext_pillar\' config option.', ', '.join(sorted(invalid_on_demand)), ', '.join(on_demand), ) return False return True def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 
'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs def get_tops(self): ''' Gather the top files ''' tops = collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and 
pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in done[saltenv]: continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, render error' ':\n{1}').format(sls, exc)) done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: orders[saltenv][target]) for target in sorted_targets: sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return merged_tops, errors def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. 
Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. '.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. Each git_pillar ' 'branch/tag must have its own top file.' ) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. 
It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: nstate, mods, err = self.render_pstate( sub_sls, saltenv, mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if 
not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. 
Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. 
if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = 
salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True 
except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy()
saltstack/salt
salt/pillar/__init__.py
Pillar.sort_top_targets
python
def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: orders[saltenv][target]) for target in sorted_targets: sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top
Returns the sorted high data from the merged top files
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L653-L665
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n" ]
class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __valid_on_demand_ext_pillar(self, opts): ''' Check to see if the on demand external pillar is allowed ''' if not isinstance(self.ext, dict): log.error( 'On-demand 
pillar %s is not formatted as a dictionary', self.ext ) return False on_demand = opts.get('on_demand_ext_pillar', []) try: invalid_on_demand = set([x for x in self.ext if x not in on_demand]) except TypeError: # Prevent traceback when on_demand_ext_pillar option is malformed log.error( 'The \'on_demand_ext_pillar\' configuration option is ' 'malformed, it should be a list of ext_pillar module names' ) return False if invalid_on_demand: log.error( 'The following ext_pillar modules are not allowed for ' 'on-demand pillar data: %s. Valid on-demand ext_pillar ' 'modules are: %s. The valid modules can be adjusted by ' 'setting the \'on_demand_ext_pillar\' config option.', ', '.join(sorted(invalid_on_demand)), ', '.join(on_demand), ) return False return True def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 
'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs def get_tops(self): ''' Gather the top files ''' tops = collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and 
pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in done[saltenv]: continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, render error' ':\n{1}').format(sls, exc)) done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders) def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except 
TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return merged_tops, errors def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. '.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. Each git_pillar ' 'branch/tag must have its own top file.' ) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. 
It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: nstate, mods, err = self.render_pstate( sub_sls, saltenv, mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if 
not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. 
Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. 
if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = 
salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True 
except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy()
saltstack/salt
salt/pillar/__init__.py
Pillar.get_top
python
def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return merged_tops, errors
Returns the high data derived from the top file
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L667-L677
[ "def get_tops(self):\n '''\n Gather the top files\n '''\n tops = collections.defaultdict(list)\n include = collections.defaultdict(list)\n done = collections.defaultdict(list)\n errors = []\n # Gather initial top files\n try:\n saltenvs = set()\n if self.opts['pillarenv']:\n # If the specified pillarenv is not present in the available\n # pillar environments, do not cache the pillar top file.\n if self.opts['pillarenv'] not in self.opts['pillar_roots']:\n log.debug(\n 'pillarenv \\'%s\\' not found in the configured pillar '\n 'environments (%s)',\n self.opts['pillarenv'], ', '.join(self.opts['pillar_roots'])\n )\n else:\n saltenvs.add(self.opts['pillarenv'])\n else:\n saltenvs = self._get_envs()\n if self.opts.get('pillar_source_merging_strategy', None) == \"none\":\n saltenvs &= set([self.saltenv or 'base'])\n\n for saltenv in saltenvs:\n top = self.client.cache_file(self.opts['state_top'], saltenv)\n if top:\n tops[saltenv].append(compile_template(\n top,\n self.rend,\n self.opts['renderer'],\n self.opts['renderer_blacklist'],\n self.opts['renderer_whitelist'],\n saltenv=saltenv,\n _pillar_rend=True,\n ))\n except Exception as exc:\n errors.append(\n ('Rendering Primary Top file failed, render error:\\n{0}'\n .format(exc)))\n log.exception('Pillar rendering failed for minion %s', self.minion_id)\n\n # Search initial top files for includes\n for saltenv, ctops in six.iteritems(tops):\n for ctop in ctops:\n if 'include' not in ctop:\n continue\n for sls in ctop['include']:\n include[saltenv].append(sls)\n ctop.pop('include')\n # Go through the includes and pull out the extra tops and add them\n while include:\n pops = []\n for saltenv, states in six.iteritems(include):\n pops.append(saltenv)\n if not states:\n continue\n for sls in states:\n if sls in done[saltenv]:\n continue\n try:\n tops[saltenv].append(\n compile_template(\n self.client.get_state(\n sls,\n saltenv\n ).get('dest', False),\n self.rend,\n self.opts['renderer'],\n 
self.opts['renderer_blacklist'],\n self.opts['renderer_whitelist'],\n saltenv=saltenv,\n _pillar_rend=True,\n )\n )\n except Exception as exc:\n errors.append(\n ('Rendering Top file {0} failed, render error'\n ':\\n{1}').format(sls, exc))\n done[saltenv].append(sls)\n for saltenv in pops:\n if saltenv in include:\n include.pop(saltenv)\n\n return tops, errors\n", "def merge_tops(self, tops):\n '''\n Cleanly merge the top files\n '''\n top = collections.defaultdict(OrderedDict)\n orders = collections.defaultdict(OrderedDict)\n for ctops in six.itervalues(tops):\n for ctop in ctops:\n for saltenv, targets in six.iteritems(ctop):\n if saltenv == 'include':\n continue\n for tgt in targets:\n matches = []\n states = OrderedDict()\n orders[saltenv][tgt] = 0\n ignore_missing = False\n # handle a pillar sls target written in shorthand form\n if isinstance(ctop[saltenv][tgt], six.string_types):\n ctop[saltenv][tgt] = [ctop[saltenv][tgt]]\n for comp in ctop[saltenv][tgt]:\n if isinstance(comp, dict):\n if 'match' in comp:\n matches.append(comp)\n if 'order' in comp:\n order = comp['order']\n if not isinstance(order, int):\n try:\n order = int(order)\n except ValueError:\n order = 0\n orders[saltenv][tgt] = order\n if comp.get('ignore_missing', False):\n ignore_missing = True\n if isinstance(comp, six.string_types):\n states[comp] = True\n if ignore_missing:\n if saltenv not in self.ignored_pillars:\n self.ignored_pillars[saltenv] = []\n self.ignored_pillars[saltenv].extend(states.keys())\n top[saltenv][tgt] = matches\n top[saltenv][tgt].extend(states)\n return self.sort_top_targets(top, orders)\n" ]
class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __valid_on_demand_ext_pillar(self, opts): ''' Check to see if the on demand external pillar is allowed ''' if not isinstance(self.ext, dict): log.error( 'On-demand 
pillar %s is not formatted as a dictionary', self.ext ) return False on_demand = opts.get('on_demand_ext_pillar', []) try: invalid_on_demand = set([x for x in self.ext if x not in on_demand]) except TypeError: # Prevent traceback when on_demand_ext_pillar option is malformed log.error( 'The \'on_demand_ext_pillar\' configuration option is ' 'malformed, it should be a list of ext_pillar module names' ) return False if invalid_on_demand: log.error( 'The following ext_pillar modules are not allowed for ' 'on-demand pillar data: %s. Valid on-demand ext_pillar ' 'modules are: %s. The valid modules can be adjusted by ' 'setting the \'on_demand_ext_pillar\' config option.', ', '.join(sorted(invalid_on_demand)), ', '.join(on_demand), ) return False return True def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 
'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs def get_tops(self): ''' Gather the top files ''' tops = collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and 
pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in done[saltenv]: continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, render error' ':\n{1}').format(sls, exc)) done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders) def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # 
pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: orders[saltenv][target]) for target in sorted_targets: sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. '.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. Each git_pillar ' 'branch/tag must have its own top file.' 
) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: nstate, mods, err = self.render_pstate( sub_sls, saltenv, 
mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. 
Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. 
if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = 
salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True 
except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy()
saltstack/salt
salt/pillar/__init__.py
Pillar.top_matches
python
def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches
Search through the top high data for matches and return the states that this minion needs to execute. Returns: {'saltenv': ['state1', 'state2', ...]}
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L679-L705
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n" ]
class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __valid_on_demand_ext_pillar(self, opts): ''' Check to see if the on demand external pillar is allowed ''' if not isinstance(self.ext, dict): log.error( 'On-demand 
pillar %s is not formatted as a dictionary', self.ext ) return False on_demand = opts.get('on_demand_ext_pillar', []) try: invalid_on_demand = set([x for x in self.ext if x not in on_demand]) except TypeError: # Prevent traceback when on_demand_ext_pillar option is malformed log.error( 'The \'on_demand_ext_pillar\' configuration option is ' 'malformed, it should be a list of ext_pillar module names' ) return False if invalid_on_demand: log.error( 'The following ext_pillar modules are not allowed for ' 'on-demand pillar data: %s. Valid on-demand ext_pillar ' 'modules are: %s. The valid modules can be adjusted by ' 'setting the \'on_demand_ext_pillar\' config option.', ', '.join(sorted(invalid_on_demand)), ', '.join(on_demand), ) return False return True def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 
'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs def get_tops(self): ''' Gather the top files ''' tops = collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and 
pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in done[saltenv]: continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, render error' ':\n{1}').format(sls, exc)) done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders) def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # 
pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: orders[saltenv][target]) for target in sorted_targets: sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return merged_tops, errors def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. '.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. Each git_pillar ' 'branch/tag must have its own top file.' ) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. 
It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: nstate, mods, err = self.render_pstate( sub_sls, saltenv, mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if 
not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. 
Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. 
if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = 
salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True 
except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy()
saltstack/salt
salt/pillar/__init__.py
Pillar.render_pstate
python
def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. '.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. Each git_pillar ' 'branch/tag must have its own top file.' ) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. 
Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: nstate, mods, err = self.render_pstate( sub_sls, saltenv, mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. 
include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors
Collect a single pillar sls file and render it
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L707-L859
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def merge(obj_a, obj_b, strategy='smart', renderer='yaml', merge_lists=False):\n if strategy == 'smart':\n if renderer.split('|')[-1] == 'yamlex' or renderer.startswith('yamlex_'):\n strategy = 'aggregate'\n else:\n strategy = 'recurse'\n\n if strategy == 'list':\n merged = merge_list(obj_a, obj_b)\n elif strategy == 'recurse':\n merged = merge_recurse(obj_a, obj_b, merge_lists)\n elif strategy == 'aggregate':\n #: level = 1 merge at least root data\n merged = merge_aggregate(obj_a, obj_b)\n elif strategy == 'overwrite':\n merged = merge_overwrite(obj_a, obj_b, merge_lists)\n elif strategy == 'none':\n # If we do not want to merge, there is only one pillar passed, so we can safely use the default recurse,\n # we just do not want to log an error\n merged = merge_recurse(obj_a, obj_b)\n else:\n log.warning(\n 'Unknown merging strategy \\'%s\\', fallback to recurse',\n strategy\n )\n merged = merge_recurse(obj_a, obj_b)\n\n return merged\n", "def compile_template(template,\n renderers,\n default,\n blacklist,\n whitelist,\n saltenv='base',\n sls='',\n input_data='',\n **kwargs):\n '''\n Take the path to a template and return the high data structure\n derived from the template.\n\n Helpers:\n\n :param mask_value:\n Mask value for debugging purposes (prevent sensitive information etc)\n example: \"mask_value=\"pass*\". 
All \"passwd\", \"password\", \"pass\" will\n be masked (as text).\n '''\n\n # if any error occurs, we return an empty dictionary\n ret = {}\n\n log.debug('compile template: %s', template)\n\n if 'env' in kwargs:\n # \"env\" is not supported; Use \"saltenv\".\n kwargs.pop('env')\n\n if template != ':string:':\n # Template was specified incorrectly\n if not isinstance(template, six.string_types):\n log.error('Template was specified incorrectly: %s', template)\n return ret\n # Template does not exist\n if not os.path.isfile(template):\n log.error('Template does not exist: %s', template)\n return ret\n # Template is an empty file\n if salt.utils.files.is_empty(template):\n log.debug('Template is an empty file: %s', template)\n return ret\n\n with codecs.open(template, encoding=SLS_ENCODING) as ifile:\n # data input to the first render function in the pipe\n input_data = ifile.read()\n if not input_data.strip():\n # Template is nothing but whitespace\n log.error('Template is nothing but whitespace: %s', template)\n return ret\n\n # Get the list of render funcs in the render pipe line.\n render_pipe = template_shebang(template, renderers, default, blacklist, whitelist, input_data)\n\n windows_newline = '\\r\\n' in input_data\n\n input_data = StringIO(input_data)\n for render, argline in render_pipe:\n if salt.utils.stringio.is_readable(input_data):\n input_data.seek(0) # pylint: disable=no-member\n render_kwargs = dict(renderers=renderers, tmplpath=template)\n render_kwargs.update(kwargs)\n if argline:\n render_kwargs['argline'] = argline\n start = time.time()\n ret = render(input_data, saltenv, sls, **render_kwargs)\n log.profile(\n 'Time (in seconds) to render \\'%s\\' using \\'%s\\' renderer: %s',\n template,\n render.__module__.split('.')[-1],\n time.time() - start\n )\n if ret is None:\n # The file is empty or is being written elsewhere\n time.sleep(0.01)\n ret = render(input_data, saltenv, sls, **render_kwargs)\n input_data = ret\n if 
log.isEnabledFor(logging.GARBAGE): # pylint: disable=no-member\n # If ret is not a StringIO (which means it was rendered using\n # yaml, mako, or another engine which renders to a data\n # structure) we don't want to log this.\n if salt.utils.stringio.is_readable(ret):\n log.debug('Rendered data from file: %s:\\n%s', template,\n salt.utils.sanitizers.mask_args_value(salt.utils.data.decode(ret.read()),\n kwargs.get('mask_value'))) # pylint: disable=no-member\n ret.seek(0) # pylint: disable=no-member\n\n # Preserve newlines from original template\n if windows_newline:\n if salt.utils.stringio.is_readable(ret):\n is_stringio = True\n contents = ret.read()\n else:\n is_stringio = False\n contents = ret\n\n if isinstance(contents, six.string_types):\n if '\\r\\n' not in contents:\n contents = contents.replace('\\n', '\\r\\n')\n ret = StringIO(contents) if is_stringio else contents\n else:\n if is_stringio:\n ret.seek(0)\n return ret\n", "def render_pstate(self, sls, saltenv, mods, defaults=None):\n '''\n Collect a single pillar sls file and render it\n '''\n if defaults is None:\n defaults = {}\n err = ''\n errors = []\n state_data = self.client.get_state(sls, saltenv)\n fn_ = state_data.get('dest', False)\n if not fn_:\n if sls in self.ignored_pillars.get(saltenv, []):\n log.debug('Skipping ignored and missing SLS \\'%s\\' in '\n 'environment \\'%s\\'', sls, saltenv)\n return None, mods, errors\n elif self.opts['pillar_roots'].get(saltenv):\n msg = ('Specified SLS \\'{0}\\' in environment \\'{1}\\' is not'\n ' available on the salt master').format(sls, saltenv)\n log.error(msg)\n errors.append(msg)\n else:\n msg = ('Specified SLS \\'{0}\\' in environment \\'{1}\\' was not '\n 'found. '.format(sls, saltenv))\n if self.opts.get('__git_pillar', False) is True:\n msg += (\n 'This is likely caused by a git_pillar top file '\n 'containing an environment other than the one for the '\n 'branch in which it resides. 
Each git_pillar '\n 'branch/tag must have its own top file.'\n )\n else:\n msg += (\n 'This could be because SLS \\'{0}\\' is in an '\n 'environment other than \\'{1}\\', but \\'{1}\\' is '\n 'included in that environment\\'s Pillar top file. It '\n 'could also be due to environment \\'{1}\\' not being '\n 'defined in \\'pillar_roots\\'.'.format(sls, saltenv)\n )\n log.debug(msg)\n # return state, mods, errors\n return None, mods, errors\n state = None\n try:\n state = compile_template(fn_,\n self.rend,\n self.opts['renderer'],\n self.opts['renderer_blacklist'],\n self.opts['renderer_whitelist'],\n saltenv,\n sls,\n _pillar_rend=True,\n **defaults)\n except Exception as exc:\n msg = 'Rendering SLS \\'{0}\\' failed, render error:\\n{1}'.format(\n sls, exc\n )\n log.critical(msg, exc_info=True)\n if self.opts.get('pillar_safe_render_error', True):\n errors.append(\n 'Rendering SLS \\'{0}\\' failed. Please see master log for '\n 'details.'.format(sls)\n )\n else:\n errors.append(msg)\n mods.add(sls)\n nstate = None\n if state:\n if not isinstance(state, dict):\n msg = 'SLS \\'{0}\\' does not render to a dictionary'.format(sls)\n log.error(msg)\n errors.append(msg)\n else:\n if 'include' in state:\n if not isinstance(state['include'], list):\n msg = ('Include Declaration in SLS \\'{0}\\' is not '\n 'formed as a list'.format(sls))\n log.error(msg)\n errors.append(msg)\n else:\n # render included state(s)\n include_states = []\n\n matched_pstates = []\n for sub_sls in state.pop('include'):\n if isinstance(sub_sls, dict):\n sub_sls, v = next(six.iteritems(sub_sls))\n defaults = v.get('defaults', {})\n key = v.get('key', None)\n else:\n key = None\n\n try:\n if sub_sls.startswith('.'):\n if state_data.get('source', '').endswith('/init.sls'):\n include_parts = sls.split('.')\n else:\n include_parts = sls.split('.')[:-1]\n sub_sls = '.'.join(include_parts+[sub_sls[1:]])\n matches = fnmatch.filter(\n self.avail[saltenv],\n sub_sls,\n )\n matched_pstates.extend(matches)\n 
except KeyError:\n errors.extend(\n ['No matching pillar environment for environment '\n '\\'{0}\\' found'.format(saltenv)]\n )\n\n for sub_sls in matched_pstates:\n if sub_sls not in mods:\n nstate, mods, err = self.render_pstate(\n sub_sls,\n saltenv,\n mods,\n defaults\n )\n if nstate:\n if key:\n # If key is x:y, convert it to {x: {y: nstate}}\n for key_fragment in reversed(key.split(\":\")):\n nstate = {\n key_fragment: nstate\n }\n if not self.opts.get('pillar_includes_override_sls', False):\n include_states.append(nstate)\n else:\n state = merge(\n state,\n nstate,\n self.merge_strategy,\n self.opts.get('renderer', 'yaml'),\n self.opts.get('pillar_merge_lists', False))\n if err:\n errors += err\n\n if not self.opts.get('pillar_includes_override_sls', False):\n # merge included state(s) with the current state\n # merged last to ensure that its values are\n # authoritative.\n include_states.append(state)\n state = None\n for s in include_states:\n if state is None:\n state = s\n else:\n state = merge(\n state,\n s,\n self.merge_strategy,\n self.opts.get('renderer', 'yaml'),\n self.opts.get('pillar_merge_lists', False))\n return state, mods, errors\n" ]
class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __valid_on_demand_ext_pillar(self, opts): ''' Check to see if the on demand external pillar is allowed ''' if not isinstance(self.ext, dict): log.error( 'On-demand 
pillar %s is not formatted as a dictionary', self.ext ) return False on_demand = opts.get('on_demand_ext_pillar', []) try: invalid_on_demand = set([x for x in self.ext if x not in on_demand]) except TypeError: # Prevent traceback when on_demand_ext_pillar option is malformed log.error( 'The \'on_demand_ext_pillar\' configuration option is ' 'malformed, it should be a list of ext_pillar module names' ) return False if invalid_on_demand: log.error( 'The following ext_pillar modules are not allowed for ' 'on-demand pillar data: %s. Valid on-demand ext_pillar ' 'modules are: %s. The valid modules can be adjusted by ' 'setting the \'on_demand_ext_pillar\' config option.', ', '.join(sorted(invalid_on_demand)), ', '.join(on_demand), ) return False return True def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 
'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs def get_tops(self): ''' Gather the top files ''' tops = collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and 
pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in done[saltenv]: continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, render error' ':\n{1}').format(sls, exc)) done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders) def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # 
pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: orders[saltenv][target]) for target in sorted_targets: sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return merged_tops, errors def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not 
isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. 
if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = 
salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True 
except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy()
saltstack/salt
salt/pillar/__init__.py
Pillar.render_pillar
python
def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors
Extract the sls pillar files from the matches and render them into the pillar
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L861-L910
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def merge(obj_a, obj_b, strategy='smart', renderer='yaml', merge_lists=False):\n if strategy == 'smart':\n if renderer.split('|')[-1] == 'yamlex' or renderer.startswith('yamlex_'):\n strategy = 'aggregate'\n else:\n strategy = 'recurse'\n\n if strategy == 'list':\n merged = merge_list(obj_a, obj_b)\n elif strategy == 'recurse':\n merged = merge_recurse(obj_a, obj_b, merge_lists)\n elif strategy == 'aggregate':\n #: level = 1 merge at least root data\n merged = merge_aggregate(obj_a, obj_b)\n elif strategy == 'overwrite':\n merged = merge_overwrite(obj_a, obj_b, merge_lists)\n elif strategy == 'none':\n # If we do not want to merge, there is only one pillar passed, so we can safely use the default recurse,\n # we just do not want to log an error\n merged = merge_recurse(obj_a, obj_b)\n else:\n log.warning(\n 'Unknown merging strategy \\'%s\\', fallback to recurse',\n strategy\n )\n merged = merge_recurse(obj_a, obj_b)\n\n return merged\n", "def render_pstate(self, sls, saltenv, mods, defaults=None):\n '''\n Collect a single pillar sls file and render it\n '''\n if defaults is None:\n defaults = {}\n err = ''\n errors = []\n state_data = self.client.get_state(sls, saltenv)\n fn_ = state_data.get('dest', False)\n if not fn_:\n if sls in self.ignored_pillars.get(saltenv, []):\n log.debug('Skipping ignored and missing SLS \\'%s\\' in '\n 'environment \\'%s\\'', sls, saltenv)\n return None, mods, errors\n elif self.opts['pillar_roots'].get(saltenv):\n msg = ('Specified SLS \\'{0}\\' in environment \\'{1}\\' is not'\n ' available on the salt master').format(sls, saltenv)\n log.error(msg)\n errors.append(msg)\n else:\n msg = ('Specified SLS \\'{0}\\' in environment \\'{1}\\' was not '\n 'found. 
'.format(sls, saltenv))\n if self.opts.get('__git_pillar', False) is True:\n msg += (\n 'This is likely caused by a git_pillar top file '\n 'containing an environment other than the one for the '\n 'branch in which it resides. Each git_pillar '\n 'branch/tag must have its own top file.'\n )\n else:\n msg += (\n 'This could be because SLS \\'{0}\\' is in an '\n 'environment other than \\'{1}\\', but \\'{1}\\' is '\n 'included in that environment\\'s Pillar top file. It '\n 'could also be due to environment \\'{1}\\' not being '\n 'defined in \\'pillar_roots\\'.'.format(sls, saltenv)\n )\n log.debug(msg)\n # return state, mods, errors\n return None, mods, errors\n state = None\n try:\n state = compile_template(fn_,\n self.rend,\n self.opts['renderer'],\n self.opts['renderer_blacklist'],\n self.opts['renderer_whitelist'],\n saltenv,\n sls,\n _pillar_rend=True,\n **defaults)\n except Exception as exc:\n msg = 'Rendering SLS \\'{0}\\' failed, render error:\\n{1}'.format(\n sls, exc\n )\n log.critical(msg, exc_info=True)\n if self.opts.get('pillar_safe_render_error', True):\n errors.append(\n 'Rendering SLS \\'{0}\\' failed. 
Please see master log for '\n 'details.'.format(sls)\n )\n else:\n errors.append(msg)\n mods.add(sls)\n nstate = None\n if state:\n if not isinstance(state, dict):\n msg = 'SLS \\'{0}\\' does not render to a dictionary'.format(sls)\n log.error(msg)\n errors.append(msg)\n else:\n if 'include' in state:\n if not isinstance(state['include'], list):\n msg = ('Include Declaration in SLS \\'{0}\\' is not '\n 'formed as a list'.format(sls))\n log.error(msg)\n errors.append(msg)\n else:\n # render included state(s)\n include_states = []\n\n matched_pstates = []\n for sub_sls in state.pop('include'):\n if isinstance(sub_sls, dict):\n sub_sls, v = next(six.iteritems(sub_sls))\n defaults = v.get('defaults', {})\n key = v.get('key', None)\n else:\n key = None\n\n try:\n if sub_sls.startswith('.'):\n if state_data.get('source', '').endswith('/init.sls'):\n include_parts = sls.split('.')\n else:\n include_parts = sls.split('.')[:-1]\n sub_sls = '.'.join(include_parts+[sub_sls[1:]])\n matches = fnmatch.filter(\n self.avail[saltenv],\n sub_sls,\n )\n matched_pstates.extend(matches)\n except KeyError:\n errors.extend(\n ['No matching pillar environment for environment '\n '\\'{0}\\' found'.format(saltenv)]\n )\n\n for sub_sls in matched_pstates:\n if sub_sls not in mods:\n nstate, mods, err = self.render_pstate(\n sub_sls,\n saltenv,\n mods,\n defaults\n )\n if nstate:\n if key:\n # If key is x:y, convert it to {x: {y: nstate}}\n for key_fragment in reversed(key.split(\":\")):\n nstate = {\n key_fragment: nstate\n }\n if not self.opts.get('pillar_includes_override_sls', False):\n include_states.append(nstate)\n else:\n state = merge(\n state,\n nstate,\n self.merge_strategy,\n self.opts.get('renderer', 'yaml'),\n self.opts.get('pillar_merge_lists', False))\n if err:\n errors += err\n\n if not self.opts.get('pillar_includes_override_sls', False):\n # merge included state(s) with the current state\n # merged last to ensure that its values are\n # authoritative.\n 
include_states.append(state)\n state = None\n for s in include_states:\n if state is None:\n state = s\n else:\n state = merge(\n state,\n s,\n self.merge_strategy,\n self.opts.get('renderer', 'yaml'),\n self.opts.get('pillar_merge_lists', False))\n return state, mods, errors\n" ]
class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __valid_on_demand_ext_pillar(self, opts): ''' Check to see if the on demand external pillar is allowed ''' if not isinstance(self.ext, dict): log.error( 'On-demand 
pillar %s is not formatted as a dictionary', self.ext ) return False on_demand = opts.get('on_demand_ext_pillar', []) try: invalid_on_demand = set([x for x in self.ext if x not in on_demand]) except TypeError: # Prevent traceback when on_demand_ext_pillar option is malformed log.error( 'The \'on_demand_ext_pillar\' configuration option is ' 'malformed, it should be a list of ext_pillar module names' ) return False if invalid_on_demand: log.error( 'The following ext_pillar modules are not allowed for ' 'on-demand pillar data: %s. Valid on-demand ext_pillar ' 'modules are: %s. The valid modules can be adjusted by ' 'setting the \'on_demand_ext_pillar\' config option.', ', '.join(sorted(invalid_on_demand)), ', '.join(on_demand), ) return False return True def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 
'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs def get_tops(self): ''' Gather the top files ''' tops = collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and 
pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in done[saltenv]: continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, render error' ':\n{1}').format(sls, exc)) done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders) def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # 
pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: orders[saltenv][target]) for target in sorted_targets: sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return merged_tops, errors def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. 
'.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. Each git_pillar ' 'branch/tag must have its own top file.' ) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. 
Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: nstate, mods, err = self.render_pstate( sub_sls, saltenv, mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. 
include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. 
if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = 
salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True 
except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy()
saltstack/salt
salt/pillar/__init__.py
Pillar._external_pillar_data
python
def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext
Builds actual pillar data structure and updates the ``pillar`` variable
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L912-L946
[ "def get_function_argspec(func, is_class_method=None):\n '''\n A small wrapper around getargspec that also supports callable classes\n :param is_class_method: Pass True if you are sure that the function being passed\n is a class method. The reason for this is that on Python 3\n ``inspect.ismethod`` only returns ``True`` for bound methods,\n while on Python 2, it returns ``True`` for bound and unbound\n methods. So, on Python 3, in case of a class method, you'd\n need the class to which the function belongs to be instantiated\n and this is not always wanted.\n '''\n if not callable(func):\n raise TypeError('{0} is not a callable'.format(func))\n\n if six.PY2:\n if is_class_method is True:\n aspec = inspect.getargspec(func)\n del aspec.args[0] # self\n elif inspect.isfunction(func):\n aspec = inspect.getargspec(func)\n elif inspect.ismethod(func):\n aspec = inspect.getargspec(func)\n del aspec.args[0] # self\n elif isinstance(func, object):\n aspec = inspect.getargspec(func.__call__)\n del aspec.args[0] # self\n else:\n raise TypeError(\n 'Cannot inspect argument list for \\'{0}\\''.format(func)\n )\n else:\n if is_class_method is True:\n aspec = _getargspec(func)\n del aspec.args[0] # self\n elif inspect.isfunction(func):\n aspec = _getargspec(func) # pylint: disable=redefined-variable-type\n elif inspect.ismethod(func):\n aspec = _getargspec(func)\n del aspec.args[0] # self\n elif isinstance(func, object):\n aspec = _getargspec(func.__call__)\n del aspec.args[0] # self\n else:\n raise TypeError(\n 'Cannot inspect argument list for \\'{0}\\''.format(func)\n )\n return aspec\n" ]
class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __valid_on_demand_ext_pillar(self, opts): ''' Check to see if the on demand external pillar is allowed ''' if not isinstance(self.ext, dict): log.error( 'On-demand 
pillar %s is not formatted as a dictionary', self.ext ) return False on_demand = opts.get('on_demand_ext_pillar', []) try: invalid_on_demand = set([x for x in self.ext if x not in on_demand]) except TypeError: # Prevent traceback when on_demand_ext_pillar option is malformed log.error( 'The \'on_demand_ext_pillar\' configuration option is ' 'malformed, it should be a list of ext_pillar module names' ) return False if invalid_on_demand: log.error( 'The following ext_pillar modules are not allowed for ' 'on-demand pillar data: %s. Valid on-demand ext_pillar ' 'modules are: %s. The valid modules can be adjusted by ' 'setting the \'on_demand_ext_pillar\' config option.', ', '.join(sorted(invalid_on_demand)), ', '.join(on_demand), ) return False return True def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 
'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs def get_tops(self): ''' Gather the top files ''' tops = collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and 
pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in done[saltenv]: continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, render error' ':\n{1}').format(sls, exc)) done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders) def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # 
pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: orders[saltenv][target]) for target in sorted_targets: sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return merged_tops, errors def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. 
'.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. Each git_pillar ' 'branch/tag must have its own top file.' ) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. 
Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: nstate, mods, err = self.render_pstate( sub_sls, saltenv, mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. 
include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. 
if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = 
salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True 
except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy()
saltstack/salt
salt/pillar/__init__.py
Pillar.ext_pillar
python
def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), 
self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors
Render the external pillar data
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L948-L1026
[ "def iterkeys(d, **kw):\n return d.iterkeys(**kw)\n", "def merge(obj_a, obj_b, strategy='smart', renderer='yaml', merge_lists=False):\n if strategy == 'smart':\n if renderer.split('|')[-1] == 'yamlex' or renderer.startswith('yamlex_'):\n strategy = 'aggregate'\n else:\n strategy = 'recurse'\n\n if strategy == 'list':\n merged = merge_list(obj_a, obj_b)\n elif strategy == 'recurse':\n merged = merge_recurse(obj_a, obj_b, merge_lists)\n elif strategy == 'aggregate':\n #: level = 1 merge at least root data\n merged = merge_aggregate(obj_a, obj_b)\n elif strategy == 'overwrite':\n merged = merge_overwrite(obj_a, obj_b, merge_lists)\n elif strategy == 'none':\n # If we do not want to merge, there is only one pillar passed, so we can safely use the default recurse,\n # we just do not want to log an error\n merged = merge_recurse(obj_a, obj_b)\n else:\n log.warning(\n 'Unknown merging strategy \\'%s\\', fallback to recurse',\n strategy\n )\n merged = merge_recurse(obj_a, obj_b)\n\n return merged\n", "def fetch_remotes(self, remotes=None):\n '''\n Fetch all remotes and return a boolean to let the calling function know\n whether or not any remotes were updated in the process of fetching\n '''\n if remotes is None:\n remotes = []\n elif not isinstance(remotes, list):\n log.error(\n 'Invalid \\'remotes\\' argument (%s) for fetch_remotes. '\n 'Must be a list of strings', remotes\n )\n remotes = []\n\n changed = False\n for repo in self.remotes:\n name = getattr(repo, 'name', None)\n if not remotes or (repo.id, name) in remotes:\n try:\n if repo.fetch():\n # We can't just use the return value from repo.fetch()\n # because the data could still have changed if old\n # remotes were cleared above. 
Additionally, we're\n # running this in a loop and later remotes without\n # changes would override this value and make it\n # incorrect.\n changed = True\n except Exception as exc:\n log.error(\n 'Exception caught while fetching %s remote \\'%s\\': %s',\n self.role, repo.id, exc,\n exc_info=True\n )\n return changed\n" ]
class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __valid_on_demand_ext_pillar(self, opts): ''' Check to see if the on demand external pillar is allowed ''' if not isinstance(self.ext, dict): log.error( 'On-demand 
pillar %s is not formatted as a dictionary', self.ext ) return False on_demand = opts.get('on_demand_ext_pillar', []) try: invalid_on_demand = set([x for x in self.ext if x not in on_demand]) except TypeError: # Prevent traceback when on_demand_ext_pillar option is malformed log.error( 'The \'on_demand_ext_pillar\' configuration option is ' 'malformed, it should be a list of ext_pillar module names' ) return False if invalid_on_demand: log.error( 'The following ext_pillar modules are not allowed for ' 'on-demand pillar data: %s. Valid on-demand ext_pillar ' 'modules are: %s. The valid modules can be adjusted by ' 'setting the \'on_demand_ext_pillar\' config option.', ', '.join(sorted(invalid_on_demand)), ', '.join(on_demand), ) return False return True def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 
'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs def get_tops(self): ''' Gather the top files ''' tops = collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and 
pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in done[saltenv]: continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, render error' ':\n{1}').format(sls, exc)) done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders) def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # 
pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: orders[saltenv][target]) for target in sorted_targets: sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return merged_tops, errors def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. 
'.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. Each git_pillar ' 'branch/tag must have its own top file.' ) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. 
Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: nstate, mods, err = self.render_pstate( sub_sls, saltenv, mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. 
include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. 
Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if 
self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. 
parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy()
saltstack/salt
salt/pillar/__init__.py
Pillar.compile_pillar
python
def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar
Render the pillar data and return
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L1028-L1083
[ "def merge(obj_a, obj_b, strategy='smart', renderer='yaml', merge_lists=False):\n if strategy == 'smart':\n if renderer.split('|')[-1] == 'yamlex' or renderer.startswith('yamlex_'):\n strategy = 'aggregate'\n else:\n strategy = 'recurse'\n\n if strategy == 'list':\n merged = merge_list(obj_a, obj_b)\n elif strategy == 'recurse':\n merged = merge_recurse(obj_a, obj_b, merge_lists)\n elif strategy == 'aggregate':\n #: level = 1 merge at least root data\n merged = merge_aggregate(obj_a, obj_b)\n elif strategy == 'overwrite':\n merged = merge_overwrite(obj_a, obj_b, merge_lists)\n elif strategy == 'none':\n # If we do not want to merge, there is only one pillar passed, so we can safely use the default recurse,\n # we just do not want to log an error\n merged = merge_recurse(obj_a, obj_b)\n else:\n log.warning(\n 'Unknown merging strategy \\'%s\\', fallback to recurse',\n strategy\n )\n merged = merge_recurse(obj_a, obj_b)\n\n return merged\n", "def get_top(self):\n '''\n Returns the high data derived from the top file\n '''\n tops, errors = self.get_tops()\n try:\n merged_tops = self.merge_tops(tops)\n except TypeError as err:\n merged_tops = OrderedDict()\n errors.append('Error encountered while rendering pillar top file.')\n return merged_tops, errors\n", "def top_matches(self, top):\n '''\n Search through the top high data for matches and return the states\n that this minion needs to execute.\n\n Returns:\n {'saltenv': ['state1', 'state2', ...]}\n '''\n matches = {}\n for saltenv, body in six.iteritems(top):\n if self.opts['pillarenv']:\n if saltenv != self.opts['pillarenv']:\n continue\n for match, data in six.iteritems(body):\n if self.matchers['confirm_top.confirm_top'](\n match,\n data,\n self.opts.get('nodegroups', {}),\n ):\n if saltenv not in matches:\n matches[saltenv] = env_matches = []\n else:\n env_matches = matches[saltenv]\n for item in data:\n if isinstance(item, six.string_types) and item not in env_matches:\n env_matches.append(item)\n return 
matches\n", "def render_pillar(self, matches, errors=None):\n '''\n Extract the sls pillar files from the matches and render them into the\n pillar\n '''\n pillar = copy.copy(self.pillar_override)\n if errors is None:\n errors = []\n for saltenv, pstates in six.iteritems(matches):\n pstatefiles = []\n mods = set()\n for sls_match in pstates:\n matched_pstates = []\n try:\n matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match)\n except KeyError:\n errors.extend(\n ['No matching pillar environment for environment '\n '\\'{0}\\' found'.format(saltenv)]\n )\n if matched_pstates:\n pstatefiles.extend(matched_pstates)\n else:\n pstatefiles.append(sls_match)\n\n for sls in pstatefiles:\n pstate, mods, err = self.render_pstate(sls, saltenv, mods)\n\n if err:\n errors += err\n\n if pstate is not None:\n if not isinstance(pstate, dict):\n log.error(\n 'The rendered pillar sls file, \\'%s\\' state did '\n 'not return the expected data format. This is '\n 'a sign of a malformed pillar sls file. 
Returned '\n 'errors: %s',\n sls,\n ', '.join([\"'{0}'\".format(e) for e in errors])\n )\n continue\n pillar = merge(\n pillar,\n pstate,\n self.merge_strategy,\n self.opts.get('renderer', 'yaml'),\n self.opts.get('pillar_merge_lists', False))\n\n return pillar, errors\n", "def decrypt_pillar(self, pillar):\n '''\n Decrypt the specified pillar dictionary items, if configured to do so\n '''\n errors = []\n if self.opts.get('decrypt_pillar'):\n decrypt_pillar = self.opts['decrypt_pillar']\n if not isinstance(decrypt_pillar, dict):\n decrypt_pillar = \\\n salt.utils.data.repack_dictlist(self.opts['decrypt_pillar'])\n if not decrypt_pillar:\n errors.append('decrypt_pillar config option is malformed')\n for key, rend in six.iteritems(decrypt_pillar):\n ptr = salt.utils.data.traverse_dict(\n pillar,\n key,\n default=None,\n delimiter=self.opts['decrypt_pillar_delimiter'])\n if ptr is None:\n log.debug('Pillar key %s not present', key)\n continue\n try:\n hash(ptr)\n immutable = True\n except TypeError:\n immutable = False\n try:\n ret = salt.utils.crypt.decrypt(\n ptr,\n rend or self.opts['decrypt_pillar_default'],\n renderers=self.rend,\n opts=self.opts,\n valid_rend=self.opts['decrypt_pillar_renderers'])\n if immutable:\n # Since the key pointed to an immutable type, we need\n # to replace it in the pillar dict. First we will find\n # the parent, and then we will replace the child key\n # with the return data from the renderer.\n parent, _, child = key.rpartition(\n self.opts['decrypt_pillar_delimiter'])\n if not parent:\n # key is a top-level key, so the pointer to the\n # parent is the pillar dict itself.\n ptr = pillar\n else:\n ptr = salt.utils.data.traverse_dict(\n pillar,\n parent,\n default=None,\n delimiter=self.opts['decrypt_pillar_delimiter'])\n if ptr is not None:\n ptr[child] = ret\n except Exception as exc:\n msg = 'Failed to decrypt pillar key \\'{0}\\': {1}'.format(\n key, exc\n )\n errors.append(msg)\n log.error(msg, exc_info=True)\n return errors\n" ]
class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __valid_on_demand_ext_pillar(self, opts): ''' Check to see if the on demand external pillar is allowed ''' if not isinstance(self.ext, dict): log.error( 'On-demand 
pillar %s is not formatted as a dictionary', self.ext ) return False on_demand = opts.get('on_demand_ext_pillar', []) try: invalid_on_demand = set([x for x in self.ext if x not in on_demand]) except TypeError: # Prevent traceback when on_demand_ext_pillar option is malformed log.error( 'The \'on_demand_ext_pillar\' configuration option is ' 'malformed, it should be a list of ext_pillar module names' ) return False if invalid_on_demand: log.error( 'The following ext_pillar modules are not allowed for ' 'on-demand pillar data: %s. Valid on-demand ext_pillar ' 'modules are: %s. The valid modules can be adjusted by ' 'setting the \'on_demand_ext_pillar\' config option.', ', '.join(sorted(invalid_on_demand)), ', '.join(on_demand), ) return False return True def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 
'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs def get_tops(self): ''' Gather the top files ''' tops = collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and 
pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in done[saltenv]: continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, render error' ':\n{1}').format(sls, exc)) done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders) def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # 
pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: orders[saltenv][target]) for target in sorted_targets: sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return merged_tops, errors def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. 
'.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. Each git_pillar ' 'branch/tag must have its own top file.' ) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. 
Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: nstate, mods, err = self.render_pstate( sub_sls, saltenv, mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. 
include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. 
Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. 
if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ 
salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy()
saltstack/salt
salt/pillar/__init__.py
Pillar.decrypt_pillar
python
def decrypt_pillar(self, pillar): ''' Decrypt the specified pillar dictionary items, if configured to do so ''' errors = [] if self.opts.get('decrypt_pillar'): decrypt_pillar = self.opts['decrypt_pillar'] if not isinstance(decrypt_pillar, dict): decrypt_pillar = \ salt.utils.data.repack_dictlist(self.opts['decrypt_pillar']) if not decrypt_pillar: errors.append('decrypt_pillar config option is malformed') for key, rend in six.iteritems(decrypt_pillar): ptr = salt.utils.data.traverse_dict( pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is None: log.debug('Pillar key %s not present', key) continue try: hash(ptr) immutable = True except TypeError: immutable = False try: ret = salt.utils.crypt.decrypt( ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers']) if immutable: # Since the key pointed to an immutable type, we need # to replace it in the pillar dict. First we will find # the parent, and then we will replace the child key # with the return data from the renderer. parent, _, child = key.rpartition( self.opts['decrypt_pillar_delimiter']) if not parent: # key is a top-level key, so the pointer to the # parent is the pillar dict itself. ptr = pillar else: ptr = salt.utils.data.traverse_dict( pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter']) if ptr is not None: ptr[child] = ret except Exception as exc: msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format( key, exc ) errors.append(msg) log.error(msg, exc_info=True) return errors
Decrypt the specified pillar dictionary items, if configured to do so
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L1085-L1143
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def traverse_dict(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):\n '''\n Traverse a dict using a colon-delimited (or otherwise delimited, using the\n 'delimiter' param) target string. The target 'foo:bar:baz' will return\n data['foo']['bar']['baz'] if this value exists, and will otherwise return\n the dict in the default argument.\n '''\n ptr = data\n try:\n for each in key.split(delimiter):\n ptr = ptr[each]\n except (KeyError, IndexError, TypeError):\n # Encountered a non-indexable value in the middle of traversing\n return default\n return ptr\n", "def decrypt(data,\n rend,\n translate_newlines=False,\n renderers=None,\n opts=None,\n valid_rend=None):\n '''\n .. versionadded:: 2017.7.0\n\n Decrypt a data structure using the specified renderer. Written originally\n as a common codebase to handle decryption of encrypted elements within\n Pillar data, but should be flexible enough for other uses as well.\n\n Returns the decrypted result, but any decryption renderer should be\n recursively decrypting mutable types in-place, so any data structure passed\n should be automagically decrypted using this function. Immutable types\n obviously won't, so it's a good idea to check if ``data`` is hashable in\n the calling function, and replace the original value with the decrypted\n result if that is not the case. For an example of this, see\n salt.pillar.Pillar.decrypt_pillar().\n\n data\n The data to be decrypted. This can be a string of ciphertext or a data\n structure. 
If it is a data structure, the items in the data structure\n will be recursively decrypted.\n\n rend\n The renderer used to decrypt\n\n translate_newlines : False\n If True, then the renderer will convert a literal backslash followed by\n an 'n' into a newline before performing the decryption.\n\n renderers\n Optionally pass a loader instance containing loaded renderer functions.\n If not passed, then the ``opts`` will be required and will be used to\n invoke the loader to get the available renderers. Where possible,\n renderers should be passed to avoid the overhead of loading them here.\n\n opts\n The master/minion configuration opts. Used only if renderers are not\n passed.\n\n valid_rend\n A list containing valid renderers, used to restrict the renderers which\n this function will be allowed to use. If not passed, no restriction\n will be made.\n '''\n try:\n if valid_rend and rend not in valid_rend:\n raise SaltInvocationError(\n '\\'{0}\\' is not a valid decryption renderer. Valid choices '\n 'are: {1}'.format(rend, ', '.join(valid_rend))\n )\n except TypeError as exc:\n # SaltInvocationError inherits TypeError, so check for it first and\n # raise if needed.\n if isinstance(exc, SaltInvocationError):\n raise\n # 'valid' argument is not iterable\n log.error('Non-iterable value %s passed for valid_rend', valid_rend)\n\n if renderers is None:\n if opts is None:\n raise TypeError('opts are required')\n renderers = salt.loader.render(opts, {})\n\n rend_func = renderers.get(rend)\n if rend_func is None:\n raise SaltInvocationError(\n 'Decryption renderer \\'{0}\\' is not available'.format(rend)\n )\n\n return rend_func(data, translate_newlines=translate_newlines)\n" ]
class Pillar(object): ''' Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: if opts.get('pillarenv_from_saltenv', False): opts['pillarenv'] = saltenv # use the local file client self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv) self.saltenv = saltenv self.client = salt.fileclient.get_file_client(self.opts, True) self.avail = self.__gather_avail() if opts.get('file_client', '') == 'local': opts['grains'] = grains # if we didn't pass in functions, lets load them if functions is None: utils = salt.loader.utils(opts) if opts.get('file_client', '') == 'local': self.functions = salt.loader.minion_mods(opts, utils=utils) else: self.functions = salt.loader.minion_mods(self.opts, utils=utils) else: self.functions = functions self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) # Keep the incoming opts ID intact, ie, the master id if 'id' in opts: ext_pillar_opts['id'] = opts['id'] self.merge_strategy = 'smart' if opts.get('pillar_source_merging_strategy'): self.merge_strategy = opts['pillar_source_merging_strategy'] self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions) self.ignored_pillars = {} self.pillar_override = pillar_override or {} if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') self.extra_minion_data = extra_minion_data or {} if not isinstance(self.extra_minion_data, dict): self.extra_minion_data = {} log.error('Extra minion data must be a dictionary') self._closing = False def __valid_on_demand_ext_pillar(self, opts): ''' Check to see if the on demand external pillar is allowed ''' if not isinstance(self.ext, dict): log.error( 'On-demand 
pillar %s is not formatted as a dictionary', self.ext ) return False on_demand = opts.get('on_demand_ext_pillar', []) try: invalid_on_demand = set([x for x in self.ext if x not in on_demand]) except TypeError: # Prevent traceback when on_demand_ext_pillar option is malformed log.error( 'The \'on_demand_ext_pillar\' configuration option is ' 'malformed, it should be a list of ext_pillar module names' ) return False if invalid_on_demand: log.error( 'The following ext_pillar modules are not allowed for ' 'on-demand pillar data: %s. Valid on-demand ext_pillar ' 'modules are: %s. The valid modules can be adjusted by ' 'setting the \'on_demand_ext_pillar\' config option.', ', '.join(sorted(invalid_on_demand)), ', '.join(on_demand), ) return False return True def __gather_avail(self): ''' Gather the lists of available sls data from the master ''' avail = {} for saltenv in self._get_envs(): avail[saltenv] = self.client.list_states(saltenv) return avail def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None): ''' The options need to be altered to conform to the file client ''' opts = copy.deepcopy(opts_in) opts['file_client'] = 'local' if not grains: opts['grains'] = {} else: opts['grains'] = grains # Allow minion/CLI saltenv/pillarenv to take precedence over master opts['saltenv'] = saltenv \ if saltenv is not None \ else opts.get('saltenv') opts['pillarenv'] = pillarenv \ if pillarenv is not None \ else opts.get('pillarenv') opts['id'] = self.minion_id if opts['state_top'].startswith('salt://'): opts['state_top'] = opts['state_top'] elif opts['state_top'].startswith('/'): opts['state_top'] = salt.utils.url.create(opts['state_top'][1:]) else: opts['state_top'] = salt.utils.url.create(opts['state_top']) if self.ext and self.__valid_on_demand_ext_pillar(opts): if 'ext_pillar' in opts: opts['ext_pillar'].append(self.ext) else: opts['ext_pillar'] = [self.ext] if '__env__' in opts['pillar_roots']: env = opts.get('pillarenv') or opts.get('saltenv') or 
'base' if env not in opts['pillar_roots']: log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env) opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__') else: log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)", env) opts['pillar_roots'].pop('__env__') return opts def _get_envs(self): ''' Pull the file server environments out of the master options ''' envs = set(['base']) if 'pillar_roots' in self.opts: envs.update(list(self.opts['pillar_roots'])) return envs def get_tops(self): ''' Gather the top files ''' tops = collections.defaultdict(list) include = collections.defaultdict(list) done = collections.defaultdict(list) errors = [] # Gather initial top files try: saltenvs = set() if self.opts['pillarenv']: # If the specified pillarenv is not present in the available # pillar environments, do not cache the pillar top file. if self.opts['pillarenv'] not in self.opts['pillar_roots']: log.debug( 'pillarenv \'%s\' not found in the configured pillar ' 'environments (%s)', self.opts['pillarenv'], ', '.join(self.opts['pillar_roots']) ) else: saltenvs.add(self.opts['pillarenv']) else: saltenvs = self._get_envs() if self.opts.get('pillar_source_merging_strategy', None) == "none": saltenvs &= set([self.saltenv or 'base']) for saltenv in saltenvs: top = self.client.cache_file(self.opts['state_top'], saltenv) if top: tops[saltenv].append(compile_template( top, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, )) except Exception as exc: errors.append( ('Rendering Primary Top file failed, render error:\n{0}' .format(exc))) log.exception('Pillar rendering failed for minion %s', self.minion_id) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: if 'include' not in ctop: continue for sls in ctop['include']: include[saltenv].append(sls) ctop.pop('include') # Go through the includes and 
pull out the extra tops and add them while include: pops = [] for saltenv, states in six.iteritems(include): pops.append(saltenv) if not states: continue for sls in states: if sls in done[saltenv]: continue try: tops[saltenv].append( compile_template( self.client.get_state( sls, saltenv ).get('dest', False), self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv=saltenv, _pillar_rend=True, ) ) except Exception as exc: errors.append( ('Rendering Top file {0} failed, render error' ':\n{1}').format(sls, exc)) done[saltenv].append(sls) for saltenv in pops: if saltenv in include: include.pop(saltenv) return tops, errors def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders) def sort_top_targets(self, top, orders): ''' Returns the sorted high data from the merged top files ''' sorted_top = collections.defaultdict(OrderedDict) # 
pylint: disable=cell-var-from-loop for saltenv, targets in six.iteritems(top): sorted_targets = sorted(targets, key=lambda target: orders[saltenv][target]) for target in sorted_targets: sorted_top[saltenv][target] = targets[target] # pylint: enable=cell-var-from-loop return sorted_top def get_top(self): ''' Returns the high data derived from the top file ''' tops, errors = self.get_tops() try: merged_tops = self.merge_tops(tops) except TypeError as err: merged_tops = OrderedDict() errors.append('Error encountered while rendering pillar top file.') return merged_tops, errors def top_matches(self, top): ''' Search through the top high data for matches and return the states that this minion needs to execute. Returns: {'saltenv': ['state1', 'state2', ...]} ''' matches = {} for saltenv, body in six.iteritems(top): if self.opts['pillarenv']: if saltenv != self.opts['pillarenv']: continue for match, data in six.iteritems(body): if self.matchers['confirm_top.confirm_top']( match, data, self.opts.get('nodegroups', {}), ): if saltenv not in matches: matches[saltenv] = env_matches = [] else: env_matches = matches[saltenv] for item in data: if isinstance(item, six.string_types) and item not in env_matches: env_matches.append(item) return matches def render_pstate(self, sls, saltenv, mods, defaults=None): ''' Collect a single pillar sls file and render it ''' if defaults is None: defaults = {} err = '' errors = [] state_data = self.client.get_state(sls, saltenv) fn_ = state_data.get('dest', False) if not fn_: if sls in self.ignored_pillars.get(saltenv, []): log.debug('Skipping ignored and missing SLS \'%s\' in ' 'environment \'%s\'', sls, saltenv) return None, mods, errors elif self.opts['pillar_roots'].get(saltenv): msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not' ' available on the salt master').format(sls, saltenv) log.error(msg) errors.append(msg) else: msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not ' 'found. 
'.format(sls, saltenv)) if self.opts.get('__git_pillar', False) is True: msg += ( 'This is likely caused by a git_pillar top file ' 'containing an environment other than the one for the ' 'branch in which it resides. Each git_pillar ' 'branch/tag must have its own top file.' ) else: msg += ( 'This could be because SLS \'{0}\' is in an ' 'environment other than \'{1}\', but \'{1}\' is ' 'included in that environment\'s Pillar top file. It ' 'could also be due to environment \'{1}\' not being ' 'defined in \'pillar_roots\'.'.format(sls, saltenv) ) log.debug(msg) # return state, mods, errors return None, mods, errors state = None try: state = compile_template(fn_, self.rend, self.opts['renderer'], self.opts['renderer_blacklist'], self.opts['renderer_whitelist'], saltenv, sls, _pillar_rend=True, **defaults) except Exception as exc: msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format( sls, exc ) log.critical(msg, exc_info=True) if self.opts.get('pillar_safe_render_error', True): errors.append( 'Rendering SLS \'{0}\' failed. 
Please see master log for ' 'details.'.format(sls) ) else: errors.append(msg) mods.add(sls) nstate = None if state: if not isinstance(state, dict): msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls) log.error(msg) errors.append(msg) else: if 'include' in state: if not isinstance(state['include'], list): msg = ('Include Declaration in SLS \'{0}\' is not ' 'formed as a list'.format(sls)) log.error(msg) errors.append(msg) else: # render included state(s) include_states = [] matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) defaults = v.get('defaults', {}) key = v.get('key', None) else: key = None try: if sub_sls.startswith('.'): if state_data.get('source', '').endswith('/init.sls'): include_parts = sls.split('.') else: include_parts = sls.split('.')[:-1] sub_sls = '.'.join(include_parts+[sub_sls[1:]]) matches = fnmatch.filter( self.avail[saltenv], sub_sls, ) matched_pstates.extend(matches) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) for sub_sls in matched_pstates: if sub_sls not in mods: nstate, mods, err = self.render_pstate( sub_sls, saltenv, mods, defaults ) if nstate: if key: # If key is x:y, convert it to {x: {y: nstate}} for key_fragment in reversed(key.split(":")): nstate = { key_fragment: nstate } if not self.opts.get('pillar_includes_override_sls', False): include_states.append(nstate) else: state = merge( state, nstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if err: errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are # authoritative. 
include_states.append(state) state = None for s in include_states: if state is None: state = s else: state = merge( state, s, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return state, mods, errors def render_pillar(self, matches, errors=None): ''' Extract the sls pillar files from the matches and render them into the pillar ''' pillar = copy.copy(self.pillar_override) if errors is None: errors = [] for saltenv, pstates in six.iteritems(matches): pstatefiles = [] mods = set() for sls_match in pstates: matched_pstates = [] try: matched_pstates = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: errors.extend( ['No matching pillar environment for environment ' '\'{0}\' found'.format(saltenv)] ) if matched_pstates: pstatefiles.extend(matched_pstates) else: pstatefiles.append(sls_match) for sls in pstatefiles: pstate, mods, err = self.render_pstate(sls, saltenv, mods) if err: errors += err if pstate is not None: if not isinstance(pstate, dict): log.error( 'The rendered pillar sls file, \'%s\' state did ' 'not return the expected data format. This is ' 'a sign of a malformed pillar sls file. 
Returned ' 'errors: %s', sls, ', '.join(["'{0}'".format(e) for e in errors]) ) continue pillar = merge( pillar, pstate, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) return pillar, errors def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, *val) else: if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, val, extra_minion_data=self.extra_minion_data) else: ext = self.ext_pillars[key](self.minion_id, pillar, val) return ext def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' if errors is None: errors = [] try: # Make sure that on-demand git_pillar is fetched before we try to # compile the pillar data. git_pillar will fetch a remote when # the git ext_pillar() func is run, but only for masterless. 
if self.ext and 'git' in self.ext \ and self.opts.get('__role') != 'minion': # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar git_pillar = salt.utils.gitfs.GitPillar( self.opts, self.ext['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar pass if 'ext_pillar' not in self.opts: return pillar, errors if not isinstance(self.opts['ext_pillar'], list): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return pillar, errors ext = None # Bring in CLI pillar data if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) for run in self.opts['ext_pillar']: if not isinstance(run, dict): errors.append('The "ext_pillar" option is malformed') log.critical(errors[-1]) return {}, errors if next(six.iterkeys(run)) in self.opts.get('exclude_ext_pillar', []): continue for key, val in six.iteritems(run): if key not in self.ext_pillars: log.critical( 'Specified ext_pillar interface %s is unavailable', key ) continue try: ext = self._external_pillar_data(pillar, val, key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( key, exc.__str__(), ) ) log.error( 'Exception caught loading ext_pillar \'%s\':\n%s', key, ''.join(traceback.format_tb(sys.exc_info()[2])) ) if ext: pillar = merge( pillar, ext, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) ext = None return pillar, errors def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = 
salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) errors.extend(top_errors) if self.opts.get('pillar_opts', False): mopts = dict(self.opts) if 'grains' in mopts: mopts.pop('grains') mopts['saltversion'] = __version__ pillar['master'] = mopts if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False): pillar = merge( self.opts['pillar'], pillar, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) if errors: for error in errors: log.critical('Pillar render error: %s', error) pillar['_errors'] = errors if self.pillar_override: pillar = merge( pillar, self.pillar_override, self.merge_strategy, self.opts.get('renderer', 'yaml'), self.opts.get('pillar_merge_lists', False)) decrypt_errors = self.decrypt_pillar(pillar) if decrypt_errors: pillar.setdefault('_errors', []).extend(decrypt_errors) return pillar def destroy(self): ''' This method exist in order to be API compatible with RemotePillar ''' if self._closing: return self._closing = True def __del__(self): self.destroy()
saltstack/salt
salt/modules/win_pki.py
_cmd_run
python
def _cmd_run(cmd, as_json=False): ''' Ensure that the Pki module is loaded, and convert to and extract data from Json as needed. ''' cmd_full = ['Import-Module -Name PKI; '] if as_json: cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject ' r'@({0})'.format(cmd)) else: cmd_full.append(cmd) cmd_ret = __salt__['cmd.run_all']( six.text_type().join(cmd_full), shell='powershell', python_shell=True) if cmd_ret['retcode'] != 0: _LOG.error('Unable to execute command: %s\nError: %s', cmd, cmd_ret['stderr']) if as_json: try: items = salt.utils.json.loads(cmd_ret['stdout'], strict=False) return items except ValueError: _LOG.error('Unable to parse return data as Json.') return cmd_ret['stdout']
Ensure that the Pki module is loaded, and convert to and extract data from Json as needed.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_pki.py#L65-L91
[ "def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n" ]
# -*- coding: utf-8 -*- ''' Microsoft certificate management via the PKI Client PowerShell module. https://technet.microsoft.com/en-us/itpro/powershell/windows/pkiclient/pkiclient The PKI Client PowerShell module is only available on Windows 8+ and Windows Server 2012+. https://technet.microsoft.com/en-us/library/hh848636(v=wps.620).aspx :platform: Windows :depends: - PowerShell 4 - PKI Client Module (Windows 8+ / Windows Server 2012+) .. versionadded:: 2016.11.0 ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function import ast import logging import os # Import salt libs import salt.utils.json import salt.utils.platform import salt.utils.powershell import salt.utils.versions from salt.exceptions import SaltInvocationError # Import 3rd party libs from salt.ext import six _DEFAULT_CONTEXT = 'LocalMachine' _DEFAULT_FORMAT = 'cer' _DEFAULT_STORE = 'My' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_pki' def __virtual__(): ''' Requires Windows Requires Windows 8+ / Windows Server 2012+ Requires PowerShell Requires PKI Client PowerShell module installed. ''' if not salt.utils.platform.is_windows(): return False, 'Only available on Windows Systems' if salt.utils.versions.version_cmp(__grains__['osversion'], '6.2.9200') == -1: return False, 'Only available on Windows 8+ / Windows Server 2012 +' if not __salt__['cmd.shell_info']('powershell')['installed']: return False, 'Powershell not available' if not salt.utils.powershell.module_exists('PKI'): return False, 'PowerShell PKI module not available' return __virtualname__ def _validate_cert_path(name): ''' Ensure that the certificate path, as determind from user input, is valid. 
''' cmd = r"Test-Path -Path '{0}'".format(name) if not ast.literal_eval(_cmd_run(cmd=cmd)): raise SaltInvocationError(r"Invalid path specified: {0}".format(name)) def _validate_cert_format(name): ''' Ensure that the certificate format, as determind from user input, is valid. ''' cert_formats = ['cer', 'pfx'] if name not in cert_formats: message = ("Invalid certificate format '{0}' specified. Valid formats:" ' {1}').format(name, cert_formats) raise SaltInvocationError(message) def get_stores(): ''' Get the certificate location contexts and their corresponding stores. :return: A dictionary of the certificate location contexts and stores. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_stores ''' ret = dict() cmd = r"Get-ChildItem -Path 'Cert:\' | " \ r"Select-Object LocationName, StoreNames" items = _cmd_run(cmd=cmd, as_json=True) for item in items: ret[item['LocationName']] = list() for store in item['StoreNames']: ret[item['LocationName']].append(store) return ret def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Get the available certificates in the given store. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. 
code-block:: bash salt '*' win_pki.get_certs ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] store_path = r'Cert:\{0}\{1}'.format(context, store) _validate_cert_path(name=store_path) cmd.append(r"Get-ChildItem -Path '{0}' | Select-Object".format(store_path)) cmd.append(' DnsNameList, SerialNumber, Subject, Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: cert_info = dict() for key in item: if key not in blacklist_keys: cert_info[key.lower()] = item[key] names = item.get('DnsNameList', None) if isinstance(names, list): cert_info['dnsnames'] = [name.get('Unicode') for name in names] else: cert_info['dnsnames'] = [] ret[item['Thumbprint']] = cert_info return ret def get_cert_file(name, cert_format=_DEFAULT_FORMAT, password=''): ''' Get the details of the certificate file. :param str name: The filesystem path of the certificate file. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. 
code-block:: bash salt '*' win_pki.get_cert_file name='C:\\certs\\example.cer' ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] cert_format = cert_format.lower() _validate_cert_format(name=cert_format) if not name or not os.path.isfile(name): _LOG.error('Path is not present: %s', name) return ret if cert_format == 'pfx': if password: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'".format(name)) cmd.append(",'{0}'".format(password)) cmd.append(",'DefaultKeySet') ; $CertObject") cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append(r"Get-PfxCertificate -FilePath '{0}'".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'); $CertObject".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: for key in item: if key not in blacklist_keys: ret[key.lower()] = item[key] ret['dnsnames'] = [name['Unicode'] for name in item['DnsNameList']] if ret: _LOG.debug('Certificate thumbprint obtained successfully: %s', name) else: _LOG.error('Unable to obtain certificate thumbprint: %s', name) return ret def import_cert(name, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, exportable=True, password='', saltenv='base'): ''' Import the certificate file into the given certificate store. :param str name: The path of the certificate file to import. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. 
:param str store: The name of the certificate store. :param bool exportable: Mark the certificate as exportable. Only applicable to pfx format. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :param str saltenv: The environment the file resides in. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.import_cert name='salt://cert.cer' ''' cmd = list() thumbprint = None store_path = r'Cert:\{0}\{1}'.format(context, store) cert_format = cert_format.lower() _validate_cert_format(name=cert_format) cached_source_path = __salt__['cp.cache_file'](name, saltenv) if not cached_source_path: _LOG.error('Unable to get cached copy of file: %s', name) return False if password: cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format, password=password) else: cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format) current_certs = get_certs(context=context, store=store) if cert_props['thumbprint'] in current_certs: _LOG.debug("Certificate thumbprint '%s' already present in store: %s", cert_props['thumbprint'], store_path) return True if cert_format == 'pfx': # In instances where an empty password is needed, we use a # System.Security.SecureString object since ConvertTo-SecureString will # not convert an empty string. 
if password: cmd.append(r"$Password = ConvertTo-SecureString " r"-String '{0}'".format(password)) cmd.append(' -AsPlainText -Force; ') else: cmd.append('$Password = New-Object System.Security.SecureString; ') cmd.append(r"Import-PfxCertificate " r"-FilePath '{0}'".format(cached_source_path)) cmd.append(r" -CertStoreLocation '{0}'".format(store_path)) cmd.append(r" -Password $Password") if exportable: cmd.append(' -Exportable') else: cmd.append(r"Import-Certificate " r"-FilePath '{0}'".format(cached_source_path)) cmd.append(r" -CertStoreLocation '{0}'".format(store_path)) _cmd_run(cmd=six.text_type().join(cmd)) new_certs = get_certs(context=context, store=store) for new_cert in new_certs: if new_cert not in current_certs: thumbprint = new_cert if thumbprint: _LOG.debug('Certificate imported successfully: %s', name) return True _LOG.error('Unable to import certificate: %s', name) return False def export_cert(name, thumbprint, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, password=''): ''' Export the certificate to a file from the given certificate store. :param str name: The destination path for the exported certificate file. :param str thumbprint: The thumbprint value of the target certificate. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_pki.export_cert name='C:\\certs\\example.cer' thumbprint='AAA000' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cert_format = cert_format.lower() _validate_cert_path(name=cert_path) _validate_cert_format(name=cert_format) if cert_format == 'pfx': # In instances where an empty password is needed, we use a # System.Security.SecureString object since ConvertTo-SecureString will # not convert an empty string. if password: cmd.append(r"$Password = ConvertTo-SecureString " r"-String '{0}'".format(password)) cmd.append(' -AsPlainText -Force; ') else: cmd.append('$Password = New-Object System.Security.SecureString; ') cmd.append(r"Export-PfxCertificate " r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name)) cmd.append(r" -Password $Password") else: cmd.append(r"Export-Certificate " r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name)) cmd.append(r" | Out-Null; Test-Path -Path '{0}'".format(name)) ret = ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd))) if ret: _LOG.debug('Certificate exported successfully: %s', name) else: _LOG.error('Unable to export certificate: %s', name) return ret def test_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, untrusted_root=False, dns_name='', eku=''): ''' Check the certificate for validity. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param bool untrusted_root: Whether the root certificate is required to be trusted in chain building. :param str dns_name: The DNS name to verify as valid for the certificate. :param str eku: The enhanced key usage object identifiers to verify for the certificate chain. :return: A boolean representing whether the certificate was considered valid. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_pki.test_cert thumbprint='AAA000' dns_name='example.test' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cmd.append(r"Test-Certificate -Cert '{0}'".format(cert_path)) _validate_cert_path(name=cert_path) if untrusted_root: cmd.append(' -AllowUntrustedRoot') if dns_name: cmd.append(" -DnsName '{0}'".format(dns_name)) if eku: cmd.append(" -EKU '{0}'".format(eku)) cmd.append(' -ErrorAction SilentlyContinue') return ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd))) def remove_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Remove the certificate from the given certificate store. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.remove_cert thumbprint='AAA000' ''' thumbprint = thumbprint.upper() store_path = r'Cert:\{0}\{1}'.format(context, store) cert_path = r'{0}\{1}'.format(store_path, thumbprint) cmd = r"Remove-Item -Path '{0}'".format(cert_path) current_certs = get_certs(context=context, store=store) if thumbprint not in current_certs: _LOG.debug("Certificate '%s' already absent in store: %s", thumbprint, store_path) return True _validate_cert_path(name=cert_path) _cmd_run(cmd=cmd) new_certs = get_certs(context=context, store=store) if thumbprint in new_certs: _LOG.error('Unable to remove certificate: %s', cert_path) return False _LOG.debug('Certificate removed successfully: %s', cert_path) return True
saltstack/salt
salt/modules/win_pki.py
_validate_cert_path
python
def _validate_cert_path(name):
    '''
    Ensure that the certificate path, as determined from user input, is valid.

    :param str name: The certificate path to check, e.g.
        ``Cert:\\LocalMachine\\My\\AAA000``.

    :raises SaltInvocationError: If the path does not exist in the
        certificate store.
    '''
    # Test-Path prints 'True'/'False'; literal_eval turns that text into a bool.
    cmd = r"Test-Path -Path '{0}'".format(name)

    if not ast.literal_eval(_cmd_run(cmd=cmd)):
        raise SaltInvocationError(r"Invalid path specified: {0}".format(name))
Ensure that the certificate path, as determined from user input, is valid.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_pki.py#L94-L101
[ "def _cmd_run(cmd, as_json=False):\n '''\n Ensure that the Pki module is loaded, and convert to and extract data from\n Json as needed.\n '''\n cmd_full = ['Import-Module -Name PKI; ']\n\n if as_json:\n cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject '\n r'@({0})'.format(cmd))\n else:\n cmd_full.append(cmd)\n cmd_ret = __salt__['cmd.run_all'](\n six.text_type().join(cmd_full), shell='powershell', python_shell=True)\n\n if cmd_ret['retcode'] != 0:\n _LOG.error('Unable to execute command: %s\\nError: %s', cmd,\n cmd_ret['stderr'])\n\n if as_json:\n try:\n items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n return items\n except ValueError:\n _LOG.error('Unable to parse return data as Json.')\n\n return cmd_ret['stdout']\n" ]
# -*- coding: utf-8 -*- ''' Microsoft certificate management via the PKI Client PowerShell module. https://technet.microsoft.com/en-us/itpro/powershell/windows/pkiclient/pkiclient The PKI Client PowerShell module is only available on Windows 8+ and Windows Server 2012+. https://technet.microsoft.com/en-us/library/hh848636(v=wps.620).aspx :platform: Windows :depends: - PowerShell 4 - PKI Client Module (Windows 8+ / Windows Server 2012+) .. versionadded:: 2016.11.0 ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function import ast import logging import os # Import salt libs import salt.utils.json import salt.utils.platform import salt.utils.powershell import salt.utils.versions from salt.exceptions import SaltInvocationError # Import 3rd party libs from salt.ext import six _DEFAULT_CONTEXT = 'LocalMachine' _DEFAULT_FORMAT = 'cer' _DEFAULT_STORE = 'My' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_pki' def __virtual__(): ''' Requires Windows Requires Windows 8+ / Windows Server 2012+ Requires PowerShell Requires PKI Client PowerShell module installed. ''' if not salt.utils.platform.is_windows(): return False, 'Only available on Windows Systems' if salt.utils.versions.version_cmp(__grains__['osversion'], '6.2.9200') == -1: return False, 'Only available on Windows 8+ / Windows Server 2012 +' if not __salt__['cmd.shell_info']('powershell')['installed']: return False, 'Powershell not available' if not salt.utils.powershell.module_exists('PKI'): return False, 'PowerShell PKI module not available' return __virtualname__ def _cmd_run(cmd, as_json=False): ''' Ensure that the Pki module is loaded, and convert to and extract data from Json as needed. 
''' cmd_full = ['Import-Module -Name PKI; '] if as_json: cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject ' r'@({0})'.format(cmd)) else: cmd_full.append(cmd) cmd_ret = __salt__['cmd.run_all']( six.text_type().join(cmd_full), shell='powershell', python_shell=True) if cmd_ret['retcode'] != 0: _LOG.error('Unable to execute command: %s\nError: %s', cmd, cmd_ret['stderr']) if as_json: try: items = salt.utils.json.loads(cmd_ret['stdout'], strict=False) return items except ValueError: _LOG.error('Unable to parse return data as Json.') return cmd_ret['stdout'] def _validate_cert_format(name): ''' Ensure that the certificate format, as determind from user input, is valid. ''' cert_formats = ['cer', 'pfx'] if name not in cert_formats: message = ("Invalid certificate format '{0}' specified. Valid formats:" ' {1}').format(name, cert_formats) raise SaltInvocationError(message) def get_stores(): ''' Get the certificate location contexts and their corresponding stores. :return: A dictionary of the certificate location contexts and stores. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_stores ''' ret = dict() cmd = r"Get-ChildItem -Path 'Cert:\' | " \ r"Select-Object LocationName, StoreNames" items = _cmd_run(cmd=cmd, as_json=True) for item in items: ret[item['LocationName']] = list() for store in item['StoreNames']: ret[item['LocationName']].append(store) return ret def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Get the available certificates in the given store. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. 
code-block:: bash salt '*' win_pki.get_certs ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] store_path = r'Cert:\{0}\{1}'.format(context, store) _validate_cert_path(name=store_path) cmd.append(r"Get-ChildItem -Path '{0}' | Select-Object".format(store_path)) cmd.append(' DnsNameList, SerialNumber, Subject, Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: cert_info = dict() for key in item: if key not in blacklist_keys: cert_info[key.lower()] = item[key] names = item.get('DnsNameList', None) if isinstance(names, list): cert_info['dnsnames'] = [name.get('Unicode') for name in names] else: cert_info['dnsnames'] = [] ret[item['Thumbprint']] = cert_info return ret def get_cert_file(name, cert_format=_DEFAULT_FORMAT, password=''): ''' Get the details of the certificate file. :param str name: The filesystem path of the certificate file. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. 
code-block:: bash salt '*' win_pki.get_cert_file name='C:\\certs\\example.cer' ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] cert_format = cert_format.lower() _validate_cert_format(name=cert_format) if not name or not os.path.isfile(name): _LOG.error('Path is not present: %s', name) return ret if cert_format == 'pfx': if password: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'".format(name)) cmd.append(",'{0}'".format(password)) cmd.append(",'DefaultKeySet') ; $CertObject") cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append(r"Get-PfxCertificate -FilePath '{0}'".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'); $CertObject".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: for key in item: if key not in blacklist_keys: ret[key.lower()] = item[key] ret['dnsnames'] = [name['Unicode'] for name in item['DnsNameList']] if ret: _LOG.debug('Certificate thumbprint obtained successfully: %s', name) else: _LOG.error('Unable to obtain certificate thumbprint: %s', name) return ret def import_cert(name, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, exportable=True, password='', saltenv='base'): ''' Import the certificate file into the given certificate store. :param str name: The path of the certificate file to import. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. 
:param str store: The name of the certificate store. :param bool exportable: Mark the certificate as exportable. Only applicable to pfx format. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :param str saltenv: The environment the file resides in. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.import_cert name='salt://cert.cer' ''' cmd = list() thumbprint = None store_path = r'Cert:\{0}\{1}'.format(context, store) cert_format = cert_format.lower() _validate_cert_format(name=cert_format) cached_source_path = __salt__['cp.cache_file'](name, saltenv) if not cached_source_path: _LOG.error('Unable to get cached copy of file: %s', name) return False if password: cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format, password=password) else: cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format) current_certs = get_certs(context=context, store=store) if cert_props['thumbprint'] in current_certs: _LOG.debug("Certificate thumbprint '%s' already present in store: %s", cert_props['thumbprint'], store_path) return True if cert_format == 'pfx': # In instances where an empty password is needed, we use a # System.Security.SecureString object since ConvertTo-SecureString will # not convert an empty string. 
if password: cmd.append(r"$Password = ConvertTo-SecureString " r"-String '{0}'".format(password)) cmd.append(' -AsPlainText -Force; ') else: cmd.append('$Password = New-Object System.Security.SecureString; ') cmd.append(r"Import-PfxCertificate " r"-FilePath '{0}'".format(cached_source_path)) cmd.append(r" -CertStoreLocation '{0}'".format(store_path)) cmd.append(r" -Password $Password") if exportable: cmd.append(' -Exportable') else: cmd.append(r"Import-Certificate " r"-FilePath '{0}'".format(cached_source_path)) cmd.append(r" -CertStoreLocation '{0}'".format(store_path)) _cmd_run(cmd=six.text_type().join(cmd)) new_certs = get_certs(context=context, store=store) for new_cert in new_certs: if new_cert not in current_certs: thumbprint = new_cert if thumbprint: _LOG.debug('Certificate imported successfully: %s', name) return True _LOG.error('Unable to import certificate: %s', name) return False def export_cert(name, thumbprint, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, password=''): ''' Export the certificate to a file from the given certificate store. :param str name: The destination path for the exported certificate file. :param str thumbprint: The thumbprint value of the target certificate. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_pki.export_cert name='C:\\certs\\example.cer' thumbprint='AAA000' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cert_format = cert_format.lower() _validate_cert_path(name=cert_path) _validate_cert_format(name=cert_format) if cert_format == 'pfx': # In instances where an empty password is needed, we use a # System.Security.SecureString object since ConvertTo-SecureString will # not convert an empty string. if password: cmd.append(r"$Password = ConvertTo-SecureString " r"-String '{0}'".format(password)) cmd.append(' -AsPlainText -Force; ') else: cmd.append('$Password = New-Object System.Security.SecureString; ') cmd.append(r"Export-PfxCertificate " r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name)) cmd.append(r" -Password $Password") else: cmd.append(r"Export-Certificate " r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name)) cmd.append(r" | Out-Null; Test-Path -Path '{0}'".format(name)) ret = ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd))) if ret: _LOG.debug('Certificate exported successfully: %s', name) else: _LOG.error('Unable to export certificate: %s', name) return ret def test_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, untrusted_root=False, dns_name='', eku=''): ''' Check the certificate for validity. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param bool untrusted_root: Whether the root certificate is required to be trusted in chain building. :param str dns_name: The DNS name to verify as valid for the certificate. :param str eku: The enhanced key usage object identifiers to verify for the certificate chain. :return: A boolean representing whether the certificate was considered valid. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_pki.test_cert thumbprint='AAA000' dns_name='example.test' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cmd.append(r"Test-Certificate -Cert '{0}'".format(cert_path)) _validate_cert_path(name=cert_path) if untrusted_root: cmd.append(' -AllowUntrustedRoot') if dns_name: cmd.append(" -DnsName '{0}'".format(dns_name)) if eku: cmd.append(" -EKU '{0}'".format(eku)) cmd.append(' -ErrorAction SilentlyContinue') return ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd))) def remove_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Remove the certificate from the given certificate store. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.remove_cert thumbprint='AAA000' ''' thumbprint = thumbprint.upper() store_path = r'Cert:\{0}\{1}'.format(context, store) cert_path = r'{0}\{1}'.format(store_path, thumbprint) cmd = r"Remove-Item -Path '{0}'".format(cert_path) current_certs = get_certs(context=context, store=store) if thumbprint not in current_certs: _LOG.debug("Certificate '%s' already absent in store: %s", thumbprint, store_path) return True _validate_cert_path(name=cert_path) _cmd_run(cmd=cmd) new_certs = get_certs(context=context, store=store) if thumbprint in new_certs: _LOG.error('Unable to remove certificate: %s', cert_path) return False _LOG.debug('Certificate removed successfully: %s', cert_path) return True
saltstack/salt
salt/modules/win_pki.py
_validate_cert_format
python
def _validate_cert_format(name): ''' Ensure that the certificate format, as determind from user input, is valid. ''' cert_formats = ['cer', 'pfx'] if name not in cert_formats: message = ("Invalid certificate format '{0}' specified. Valid formats:" ' {1}').format(name, cert_formats) raise SaltInvocationError(message)
Ensure that the certificate format, as determined from user input, is valid.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_pki.py#L104-L113
null
# -*- coding: utf-8 -*- ''' Microsoft certificate management via the PKI Client PowerShell module. https://technet.microsoft.com/en-us/itpro/powershell/windows/pkiclient/pkiclient The PKI Client PowerShell module is only available on Windows 8+ and Windows Server 2012+. https://technet.microsoft.com/en-us/library/hh848636(v=wps.620).aspx :platform: Windows :depends: - PowerShell 4 - PKI Client Module (Windows 8+ / Windows Server 2012+) .. versionadded:: 2016.11.0 ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function import ast import logging import os # Import salt libs import salt.utils.json import salt.utils.platform import salt.utils.powershell import salt.utils.versions from salt.exceptions import SaltInvocationError # Import 3rd party libs from salt.ext import six _DEFAULT_CONTEXT = 'LocalMachine' _DEFAULT_FORMAT = 'cer' _DEFAULT_STORE = 'My' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_pki' def __virtual__(): ''' Requires Windows Requires Windows 8+ / Windows Server 2012+ Requires PowerShell Requires PKI Client PowerShell module installed. ''' if not salt.utils.platform.is_windows(): return False, 'Only available on Windows Systems' if salt.utils.versions.version_cmp(__grains__['osversion'], '6.2.9200') == -1: return False, 'Only available on Windows 8+ / Windows Server 2012 +' if not __salt__['cmd.shell_info']('powershell')['installed']: return False, 'Powershell not available' if not salt.utils.powershell.module_exists('PKI'): return False, 'PowerShell PKI module not available' return __virtualname__ def _cmd_run(cmd, as_json=False): ''' Ensure that the Pki module is loaded, and convert to and extract data from Json as needed. 
''' cmd_full = ['Import-Module -Name PKI; '] if as_json: cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject ' r'@({0})'.format(cmd)) else: cmd_full.append(cmd) cmd_ret = __salt__['cmd.run_all']( six.text_type().join(cmd_full), shell='powershell', python_shell=True) if cmd_ret['retcode'] != 0: _LOG.error('Unable to execute command: %s\nError: %s', cmd, cmd_ret['stderr']) if as_json: try: items = salt.utils.json.loads(cmd_ret['stdout'], strict=False) return items except ValueError: _LOG.error('Unable to parse return data as Json.') return cmd_ret['stdout'] def _validate_cert_path(name): ''' Ensure that the certificate path, as determind from user input, is valid. ''' cmd = r"Test-Path -Path '{0}'".format(name) if not ast.literal_eval(_cmd_run(cmd=cmd)): raise SaltInvocationError(r"Invalid path specified: {0}".format(name)) def get_stores(): ''' Get the certificate location contexts and their corresponding stores. :return: A dictionary of the certificate location contexts and stores. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_stores ''' ret = dict() cmd = r"Get-ChildItem -Path 'Cert:\' | " \ r"Select-Object LocationName, StoreNames" items = _cmd_run(cmd=cmd, as_json=True) for item in items: ret[item['LocationName']] = list() for store in item['StoreNames']: ret[item['LocationName']].append(store) return ret def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Get the available certificates in the given store. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. 
code-block:: bash salt '*' win_pki.get_certs ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] store_path = r'Cert:\{0}\{1}'.format(context, store) _validate_cert_path(name=store_path) cmd.append(r"Get-ChildItem -Path '{0}' | Select-Object".format(store_path)) cmd.append(' DnsNameList, SerialNumber, Subject, Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: cert_info = dict() for key in item: if key not in blacklist_keys: cert_info[key.lower()] = item[key] names = item.get('DnsNameList', None) if isinstance(names, list): cert_info['dnsnames'] = [name.get('Unicode') for name in names] else: cert_info['dnsnames'] = [] ret[item['Thumbprint']] = cert_info return ret def get_cert_file(name, cert_format=_DEFAULT_FORMAT, password=''): ''' Get the details of the certificate file. :param str name: The filesystem path of the certificate file. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. 
code-block:: bash salt '*' win_pki.get_cert_file name='C:\\certs\\example.cer' ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] cert_format = cert_format.lower() _validate_cert_format(name=cert_format) if not name or not os.path.isfile(name): _LOG.error('Path is not present: %s', name) return ret if cert_format == 'pfx': if password: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'".format(name)) cmd.append(",'{0}'".format(password)) cmd.append(",'DefaultKeySet') ; $CertObject") cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append(r"Get-PfxCertificate -FilePath '{0}'".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'); $CertObject".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: for key in item: if key not in blacklist_keys: ret[key.lower()] = item[key] ret['dnsnames'] = [name['Unicode'] for name in item['DnsNameList']] if ret: _LOG.debug('Certificate thumbprint obtained successfully: %s', name) else: _LOG.error('Unable to obtain certificate thumbprint: %s', name) return ret def import_cert(name, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, exportable=True, password='', saltenv='base'): ''' Import the certificate file into the given certificate store. :param str name: The path of the certificate file to import. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. 
:param str store: The name of the certificate store. :param bool exportable: Mark the certificate as exportable. Only applicable to pfx format. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :param str saltenv: The environment the file resides in. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.import_cert name='salt://cert.cer' ''' cmd = list() thumbprint = None store_path = r'Cert:\{0}\{1}'.format(context, store) cert_format = cert_format.lower() _validate_cert_format(name=cert_format) cached_source_path = __salt__['cp.cache_file'](name, saltenv) if not cached_source_path: _LOG.error('Unable to get cached copy of file: %s', name) return False if password: cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format, password=password) else: cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format) current_certs = get_certs(context=context, store=store) if cert_props['thumbprint'] in current_certs: _LOG.debug("Certificate thumbprint '%s' already present in store: %s", cert_props['thumbprint'], store_path) return True if cert_format == 'pfx': # In instances where an empty password is needed, we use a # System.Security.SecureString object since ConvertTo-SecureString will # not convert an empty string. 
if password: cmd.append(r"$Password = ConvertTo-SecureString " r"-String '{0}'".format(password)) cmd.append(' -AsPlainText -Force; ') else: cmd.append('$Password = New-Object System.Security.SecureString; ') cmd.append(r"Import-PfxCertificate " r"-FilePath '{0}'".format(cached_source_path)) cmd.append(r" -CertStoreLocation '{0}'".format(store_path)) cmd.append(r" -Password $Password") if exportable: cmd.append(' -Exportable') else: cmd.append(r"Import-Certificate " r"-FilePath '{0}'".format(cached_source_path)) cmd.append(r" -CertStoreLocation '{0}'".format(store_path)) _cmd_run(cmd=six.text_type().join(cmd)) new_certs = get_certs(context=context, store=store) for new_cert in new_certs: if new_cert not in current_certs: thumbprint = new_cert if thumbprint: _LOG.debug('Certificate imported successfully: %s', name) return True _LOG.error('Unable to import certificate: %s', name) return False def export_cert(name, thumbprint, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, password=''): ''' Export the certificate to a file from the given certificate store. :param str name: The destination path for the exported certificate file. :param str thumbprint: The thumbprint value of the target certificate. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_pki.export_cert name='C:\\certs\\example.cer' thumbprint='AAA000' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cert_format = cert_format.lower() _validate_cert_path(name=cert_path) _validate_cert_format(name=cert_format) if cert_format == 'pfx': # In instances where an empty password is needed, we use a # System.Security.SecureString object since ConvertTo-SecureString will # not convert an empty string. if password: cmd.append(r"$Password = ConvertTo-SecureString " r"-String '{0}'".format(password)) cmd.append(' -AsPlainText -Force; ') else: cmd.append('$Password = New-Object System.Security.SecureString; ') cmd.append(r"Export-PfxCertificate " r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name)) cmd.append(r" -Password $Password") else: cmd.append(r"Export-Certificate " r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name)) cmd.append(r" | Out-Null; Test-Path -Path '{0}'".format(name)) ret = ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd))) if ret: _LOG.debug('Certificate exported successfully: %s', name) else: _LOG.error('Unable to export certificate: %s', name) return ret def test_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, untrusted_root=False, dns_name='', eku=''): ''' Check the certificate for validity. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param bool untrusted_root: Whether the root certificate is required to be trusted in chain building. :param str dns_name: The DNS name to verify as valid for the certificate. :param str eku: The enhanced key usage object identifiers to verify for the certificate chain. :return: A boolean representing whether the certificate was considered valid. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_pki.test_cert thumbprint='AAA000' dns_name='example.test' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cmd.append(r"Test-Certificate -Cert '{0}'".format(cert_path)) _validate_cert_path(name=cert_path) if untrusted_root: cmd.append(' -AllowUntrustedRoot') if dns_name: cmd.append(" -DnsName '{0}'".format(dns_name)) if eku: cmd.append(" -EKU '{0}'".format(eku)) cmd.append(' -ErrorAction SilentlyContinue') return ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd))) def remove_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Remove the certificate from the given certificate store. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.remove_cert thumbprint='AAA000' ''' thumbprint = thumbprint.upper() store_path = r'Cert:\{0}\{1}'.format(context, store) cert_path = r'{0}\{1}'.format(store_path, thumbprint) cmd = r"Remove-Item -Path '{0}'".format(cert_path) current_certs = get_certs(context=context, store=store) if thumbprint not in current_certs: _LOG.debug("Certificate '%s' already absent in store: %s", thumbprint, store_path) return True _validate_cert_path(name=cert_path) _cmd_run(cmd=cmd) new_certs = get_certs(context=context, store=store) if thumbprint in new_certs: _LOG.error('Unable to remove certificate: %s', cert_path) return False _LOG.debug('Certificate removed successfully: %s', cert_path) return True
saltstack/salt
salt/modules/win_pki.py
get_stores
python
def get_stores(): ''' Get the certificate location contexts and their corresponding stores. :return: A dictionary of the certificate location contexts and stores. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_stores ''' ret = dict() cmd = r"Get-ChildItem -Path 'Cert:\' | " \ r"Select-Object LocationName, StoreNames" items = _cmd_run(cmd=cmd, as_json=True) for item in items: ret[item['LocationName']] = list() for store in item['StoreNames']: ret[item['LocationName']].append(store) return ret
Get the certificate location contexts and their corresponding stores. :return: A dictionary of the certificate location contexts and stores. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_stores
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_pki.py#L116-L140
[ "def _cmd_run(cmd, as_json=False):\n '''\n Ensure that the Pki module is loaded, and convert to and extract data from\n Json as needed.\n '''\n cmd_full = ['Import-Module -Name PKI; ']\n\n if as_json:\n cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject '\n r'@({0})'.format(cmd))\n else:\n cmd_full.append(cmd)\n cmd_ret = __salt__['cmd.run_all'](\n six.text_type().join(cmd_full), shell='powershell', python_shell=True)\n\n if cmd_ret['retcode'] != 0:\n _LOG.error('Unable to execute command: %s\\nError: %s', cmd,\n cmd_ret['stderr'])\n\n if as_json:\n try:\n items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n return items\n except ValueError:\n _LOG.error('Unable to parse return data as Json.')\n\n return cmd_ret['stdout']\n" ]
# -*- coding: utf-8 -*- ''' Microsoft certificate management via the PKI Client PowerShell module. https://technet.microsoft.com/en-us/itpro/powershell/windows/pkiclient/pkiclient The PKI Client PowerShell module is only available on Windows 8+ and Windows Server 2012+. https://technet.microsoft.com/en-us/library/hh848636(v=wps.620).aspx :platform: Windows :depends: - PowerShell 4 - PKI Client Module (Windows 8+ / Windows Server 2012+) .. versionadded:: 2016.11.0 ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function import ast import logging import os # Import salt libs import salt.utils.json import salt.utils.platform import salt.utils.powershell import salt.utils.versions from salt.exceptions import SaltInvocationError # Import 3rd party libs from salt.ext import six _DEFAULT_CONTEXT = 'LocalMachine' _DEFAULT_FORMAT = 'cer' _DEFAULT_STORE = 'My' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_pki' def __virtual__(): ''' Requires Windows Requires Windows 8+ / Windows Server 2012+ Requires PowerShell Requires PKI Client PowerShell module installed. ''' if not salt.utils.platform.is_windows(): return False, 'Only available on Windows Systems' if salt.utils.versions.version_cmp(__grains__['osversion'], '6.2.9200') == -1: return False, 'Only available on Windows 8+ / Windows Server 2012 +' if not __salt__['cmd.shell_info']('powershell')['installed']: return False, 'Powershell not available' if not salt.utils.powershell.module_exists('PKI'): return False, 'PowerShell PKI module not available' return __virtualname__ def _cmd_run(cmd, as_json=False): ''' Ensure that the Pki module is loaded, and convert to and extract data from Json as needed. 
''' cmd_full = ['Import-Module -Name PKI; '] if as_json: cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject ' r'@({0})'.format(cmd)) else: cmd_full.append(cmd) cmd_ret = __salt__['cmd.run_all']( six.text_type().join(cmd_full), shell='powershell', python_shell=True) if cmd_ret['retcode'] != 0: _LOG.error('Unable to execute command: %s\nError: %s', cmd, cmd_ret['stderr']) if as_json: try: items = salt.utils.json.loads(cmd_ret['stdout'], strict=False) return items except ValueError: _LOG.error('Unable to parse return data as Json.') return cmd_ret['stdout'] def _validate_cert_path(name): ''' Ensure that the certificate path, as determind from user input, is valid. ''' cmd = r"Test-Path -Path '{0}'".format(name) if not ast.literal_eval(_cmd_run(cmd=cmd)): raise SaltInvocationError(r"Invalid path specified: {0}".format(name)) def _validate_cert_format(name): ''' Ensure that the certificate format, as determind from user input, is valid. ''' cert_formats = ['cer', 'pfx'] if name not in cert_formats: message = ("Invalid certificate format '{0}' specified. Valid formats:" ' {1}').format(name, cert_formats) raise SaltInvocationError(message) def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Get the available certificates in the given store. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. 
code-block:: bash salt '*' win_pki.get_certs ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] store_path = r'Cert:\{0}\{1}'.format(context, store) _validate_cert_path(name=store_path) cmd.append(r"Get-ChildItem -Path '{0}' | Select-Object".format(store_path)) cmd.append(' DnsNameList, SerialNumber, Subject, Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: cert_info = dict() for key in item: if key not in blacklist_keys: cert_info[key.lower()] = item[key] names = item.get('DnsNameList', None) if isinstance(names, list): cert_info['dnsnames'] = [name.get('Unicode') for name in names] else: cert_info['dnsnames'] = [] ret[item['Thumbprint']] = cert_info return ret def get_cert_file(name, cert_format=_DEFAULT_FORMAT, password=''): ''' Get the details of the certificate file. :param str name: The filesystem path of the certificate file. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. 
code-block:: bash salt '*' win_pki.get_cert_file name='C:\\certs\\example.cer' ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] cert_format = cert_format.lower() _validate_cert_format(name=cert_format) if not name or not os.path.isfile(name): _LOG.error('Path is not present: %s', name) return ret if cert_format == 'pfx': if password: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'".format(name)) cmd.append(",'{0}'".format(password)) cmd.append(",'DefaultKeySet') ; $CertObject") cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append(r"Get-PfxCertificate -FilePath '{0}'".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'); $CertObject".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: for key in item: if key not in blacklist_keys: ret[key.lower()] = item[key] ret['dnsnames'] = [name['Unicode'] for name in item['DnsNameList']] if ret: _LOG.debug('Certificate thumbprint obtained successfully: %s', name) else: _LOG.error('Unable to obtain certificate thumbprint: %s', name) return ret def import_cert(name, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, exportable=True, password='', saltenv='base'): ''' Import the certificate file into the given certificate store. :param str name: The path of the certificate file to import. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. 
:param str store: The name of the certificate store. :param bool exportable: Mark the certificate as exportable. Only applicable to pfx format. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :param str saltenv: The environment the file resides in. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.import_cert name='salt://cert.cer' ''' cmd = list() thumbprint = None store_path = r'Cert:\{0}\{1}'.format(context, store) cert_format = cert_format.lower() _validate_cert_format(name=cert_format) cached_source_path = __salt__['cp.cache_file'](name, saltenv) if not cached_source_path: _LOG.error('Unable to get cached copy of file: %s', name) return False if password: cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format, password=password) else: cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format) current_certs = get_certs(context=context, store=store) if cert_props['thumbprint'] in current_certs: _LOG.debug("Certificate thumbprint '%s' already present in store: %s", cert_props['thumbprint'], store_path) return True if cert_format == 'pfx': # In instances where an empty password is needed, we use a # System.Security.SecureString object since ConvertTo-SecureString will # not convert an empty string. 
if password: cmd.append(r"$Password = ConvertTo-SecureString " r"-String '{0}'".format(password)) cmd.append(' -AsPlainText -Force; ') else: cmd.append('$Password = New-Object System.Security.SecureString; ') cmd.append(r"Import-PfxCertificate " r"-FilePath '{0}'".format(cached_source_path)) cmd.append(r" -CertStoreLocation '{0}'".format(store_path)) cmd.append(r" -Password $Password") if exportable: cmd.append(' -Exportable') else: cmd.append(r"Import-Certificate " r"-FilePath '{0}'".format(cached_source_path)) cmd.append(r" -CertStoreLocation '{0}'".format(store_path)) _cmd_run(cmd=six.text_type().join(cmd)) new_certs = get_certs(context=context, store=store) for new_cert in new_certs: if new_cert not in current_certs: thumbprint = new_cert if thumbprint: _LOG.debug('Certificate imported successfully: %s', name) return True _LOG.error('Unable to import certificate: %s', name) return False def export_cert(name, thumbprint, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, password=''): ''' Export the certificate to a file from the given certificate store. :param str name: The destination path for the exported certificate file. :param str thumbprint: The thumbprint value of the target certificate. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_pki.export_cert name='C:\\certs\\example.cer' thumbprint='AAA000' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cert_format = cert_format.lower() _validate_cert_path(name=cert_path) _validate_cert_format(name=cert_format) if cert_format == 'pfx': # In instances where an empty password is needed, we use a # System.Security.SecureString object since ConvertTo-SecureString will # not convert an empty string. if password: cmd.append(r"$Password = ConvertTo-SecureString " r"-String '{0}'".format(password)) cmd.append(' -AsPlainText -Force; ') else: cmd.append('$Password = New-Object System.Security.SecureString; ') cmd.append(r"Export-PfxCertificate " r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name)) cmd.append(r" -Password $Password") else: cmd.append(r"Export-Certificate " r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name)) cmd.append(r" | Out-Null; Test-Path -Path '{0}'".format(name)) ret = ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd))) if ret: _LOG.debug('Certificate exported successfully: %s', name) else: _LOG.error('Unable to export certificate: %s', name) return ret def test_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, untrusted_root=False, dns_name='', eku=''): ''' Check the certificate for validity. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param bool untrusted_root: Whether the root certificate is required to be trusted in chain building. :param str dns_name: The DNS name to verify as valid for the certificate. :param str eku: The enhanced key usage object identifiers to verify for the certificate chain. :return: A boolean representing whether the certificate was considered valid. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_pki.test_cert thumbprint='AAA000' dns_name='example.test' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cmd.append(r"Test-Certificate -Cert '{0}'".format(cert_path)) _validate_cert_path(name=cert_path) if untrusted_root: cmd.append(' -AllowUntrustedRoot') if dns_name: cmd.append(" -DnsName '{0}'".format(dns_name)) if eku: cmd.append(" -EKU '{0}'".format(eku)) cmd.append(' -ErrorAction SilentlyContinue') return ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd))) def remove_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Remove the certificate from the given certificate store. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.remove_cert thumbprint='AAA000' ''' thumbprint = thumbprint.upper() store_path = r'Cert:\{0}\{1}'.format(context, store) cert_path = r'{0}\{1}'.format(store_path, thumbprint) cmd = r"Remove-Item -Path '{0}'".format(cert_path) current_certs = get_certs(context=context, store=store) if thumbprint not in current_certs: _LOG.debug("Certificate '%s' already absent in store: %s", thumbprint, store_path) return True _validate_cert_path(name=cert_path) _cmd_run(cmd=cmd) new_certs = get_certs(context=context, store=store) if thumbprint in new_certs: _LOG.error('Unable to remove certificate: %s', cert_path) return False _LOG.debug('Certificate removed successfully: %s', cert_path) return True
saltstack/salt
salt/modules/win_pki.py
get_certs
python
def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Get the available certificates in the given store. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_certs ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] store_path = r'Cert:\{0}\{1}'.format(context, store) _validate_cert_path(name=store_path) cmd.append(r"Get-ChildItem -Path '{0}' | Select-Object".format(store_path)) cmd.append(' DnsNameList, SerialNumber, Subject, Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: cert_info = dict() for key in item: if key not in blacklist_keys: cert_info[key.lower()] = item[key] names = item.get('DnsNameList', None) if isinstance(names, list): cert_info['dnsnames'] = [name.get('Unicode') for name in names] else: cert_info['dnsnames'] = [] ret[item['Thumbprint']] = cert_info return ret
Get the available certificates in the given store. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_certs
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_pki.py#L143-L183
[ "def _cmd_run(cmd, as_json=False):\n '''\n Ensure that the Pki module is loaded, and convert to and extract data from\n Json as needed.\n '''\n cmd_full = ['Import-Module -Name PKI; ']\n\n if as_json:\n cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject '\n r'@({0})'.format(cmd))\n else:\n cmd_full.append(cmd)\n cmd_ret = __salt__['cmd.run_all'](\n six.text_type().join(cmd_full), shell='powershell', python_shell=True)\n\n if cmd_ret['retcode'] != 0:\n _LOG.error('Unable to execute command: %s\\nError: %s', cmd,\n cmd_ret['stderr'])\n\n if as_json:\n try:\n items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n return items\n except ValueError:\n _LOG.error('Unable to parse return data as Json.')\n\n return cmd_ret['stdout']\n", "def _validate_cert_path(name):\n '''\n Ensure that the certificate path, as determind from user input, is valid.\n '''\n cmd = r\"Test-Path -Path '{0}'\".format(name)\n\n if not ast.literal_eval(_cmd_run(cmd=cmd)):\n raise SaltInvocationError(r\"Invalid path specified: {0}\".format(name))\n" ]
# -*- coding: utf-8 -*- ''' Microsoft certificate management via the PKI Client PowerShell module. https://technet.microsoft.com/en-us/itpro/powershell/windows/pkiclient/pkiclient The PKI Client PowerShell module is only available on Windows 8+ and Windows Server 2012+. https://technet.microsoft.com/en-us/library/hh848636(v=wps.620).aspx :platform: Windows :depends: - PowerShell 4 - PKI Client Module (Windows 8+ / Windows Server 2012+) .. versionadded:: 2016.11.0 ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function import ast import logging import os # Import salt libs import salt.utils.json import salt.utils.platform import salt.utils.powershell import salt.utils.versions from salt.exceptions import SaltInvocationError # Import 3rd party libs from salt.ext import six _DEFAULT_CONTEXT = 'LocalMachine' _DEFAULT_FORMAT = 'cer' _DEFAULT_STORE = 'My' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_pki' def __virtual__(): ''' Requires Windows Requires Windows 8+ / Windows Server 2012+ Requires PowerShell Requires PKI Client PowerShell module installed. ''' if not salt.utils.platform.is_windows(): return False, 'Only available on Windows Systems' if salt.utils.versions.version_cmp(__grains__['osversion'], '6.2.9200') == -1: return False, 'Only available on Windows 8+ / Windows Server 2012 +' if not __salt__['cmd.shell_info']('powershell')['installed']: return False, 'Powershell not available' if not salt.utils.powershell.module_exists('PKI'): return False, 'PowerShell PKI module not available' return __virtualname__ def _cmd_run(cmd, as_json=False): ''' Ensure that the Pki module is loaded, and convert to and extract data from Json as needed. 
''' cmd_full = ['Import-Module -Name PKI; '] if as_json: cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject ' r'@({0})'.format(cmd)) else: cmd_full.append(cmd) cmd_ret = __salt__['cmd.run_all']( six.text_type().join(cmd_full), shell='powershell', python_shell=True) if cmd_ret['retcode'] != 0: _LOG.error('Unable to execute command: %s\nError: %s', cmd, cmd_ret['stderr']) if as_json: try: items = salt.utils.json.loads(cmd_ret['stdout'], strict=False) return items except ValueError: _LOG.error('Unable to parse return data as Json.') return cmd_ret['stdout'] def _validate_cert_path(name): ''' Ensure that the certificate path, as determind from user input, is valid. ''' cmd = r"Test-Path -Path '{0}'".format(name) if not ast.literal_eval(_cmd_run(cmd=cmd)): raise SaltInvocationError(r"Invalid path specified: {0}".format(name)) def _validate_cert_format(name): ''' Ensure that the certificate format, as determind from user input, is valid. ''' cert_formats = ['cer', 'pfx'] if name not in cert_formats: message = ("Invalid certificate format '{0}' specified. Valid formats:" ' {1}').format(name, cert_formats) raise SaltInvocationError(message) def get_stores(): ''' Get the certificate location contexts and their corresponding stores. :return: A dictionary of the certificate location contexts and stores. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_stores ''' ret = dict() cmd = r"Get-ChildItem -Path 'Cert:\' | " \ r"Select-Object LocationName, StoreNames" items = _cmd_run(cmd=cmd, as_json=True) for item in items: ret[item['LocationName']] = list() for store in item['StoreNames']: ret[item['LocationName']].append(store) return ret def get_cert_file(name, cert_format=_DEFAULT_FORMAT, password=''): ''' Get the details of the certificate file. :param str name: The filesystem path of the certificate file. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. 
:param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_cert_file name='C:\\certs\\example.cer' ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] cert_format = cert_format.lower() _validate_cert_format(name=cert_format) if not name or not os.path.isfile(name): _LOG.error('Path is not present: %s', name) return ret if cert_format == 'pfx': if password: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'".format(name)) cmd.append(",'{0}'".format(password)) cmd.append(",'DefaultKeySet') ; $CertObject") cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append(r"Get-PfxCertificate -FilePath '{0}'".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'); $CertObject".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: for key in item: if key not in blacklist_keys: ret[key.lower()] = item[key] ret['dnsnames'] = [name['Unicode'] for name in item['DnsNameList']] if ret: _LOG.debug('Certificate thumbprint obtained successfully: %s', name) else: _LOG.error('Unable to obtain certificate thumbprint: %s', name) return ret def import_cert(name, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, exportable=True, password='', saltenv='base'): ''' 
Import the certificate file into the given certificate store. :param str name: The path of the certificate file to import. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param bool exportable: Mark the certificate as exportable. Only applicable to pfx format. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :param str saltenv: The environment the file resides in. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.import_cert name='salt://cert.cer' ''' cmd = list() thumbprint = None store_path = r'Cert:\{0}\{1}'.format(context, store) cert_format = cert_format.lower() _validate_cert_format(name=cert_format) cached_source_path = __salt__['cp.cache_file'](name, saltenv) if not cached_source_path: _LOG.error('Unable to get cached copy of file: %s', name) return False if password: cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format, password=password) else: cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format) current_certs = get_certs(context=context, store=store) if cert_props['thumbprint'] in current_certs: _LOG.debug("Certificate thumbprint '%s' already present in store: %s", cert_props['thumbprint'], store_path) return True if cert_format == 'pfx': # In instances where an empty password is needed, we use a # System.Security.SecureString object since ConvertTo-SecureString will # not convert an empty string. 
if password: cmd.append(r"$Password = ConvertTo-SecureString " r"-String '{0}'".format(password)) cmd.append(' -AsPlainText -Force; ') else: cmd.append('$Password = New-Object System.Security.SecureString; ') cmd.append(r"Import-PfxCertificate " r"-FilePath '{0}'".format(cached_source_path)) cmd.append(r" -CertStoreLocation '{0}'".format(store_path)) cmd.append(r" -Password $Password") if exportable: cmd.append(' -Exportable') else: cmd.append(r"Import-Certificate " r"-FilePath '{0}'".format(cached_source_path)) cmd.append(r" -CertStoreLocation '{0}'".format(store_path)) _cmd_run(cmd=six.text_type().join(cmd)) new_certs = get_certs(context=context, store=store) for new_cert in new_certs: if new_cert not in current_certs: thumbprint = new_cert if thumbprint: _LOG.debug('Certificate imported successfully: %s', name) return True _LOG.error('Unable to import certificate: %s', name) return False def export_cert(name, thumbprint, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, password=''): ''' Export the certificate to a file from the given certificate store. :param str name: The destination path for the exported certificate file. :param str thumbprint: The thumbprint value of the target certificate. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_pki.export_cert name='C:\\certs\\example.cer' thumbprint='AAA000' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cert_format = cert_format.lower() _validate_cert_path(name=cert_path) _validate_cert_format(name=cert_format) if cert_format == 'pfx': # In instances where an empty password is needed, we use a # System.Security.SecureString object since ConvertTo-SecureString will # not convert an empty string. if password: cmd.append(r"$Password = ConvertTo-SecureString " r"-String '{0}'".format(password)) cmd.append(' -AsPlainText -Force; ') else: cmd.append('$Password = New-Object System.Security.SecureString; ') cmd.append(r"Export-PfxCertificate " r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name)) cmd.append(r" -Password $Password") else: cmd.append(r"Export-Certificate " r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name)) cmd.append(r" | Out-Null; Test-Path -Path '{0}'".format(name)) ret = ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd))) if ret: _LOG.debug('Certificate exported successfully: %s', name) else: _LOG.error('Unable to export certificate: %s', name) return ret def test_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, untrusted_root=False, dns_name='', eku=''): ''' Check the certificate for validity. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param bool untrusted_root: Whether the root certificate is required to be trusted in chain building. :param str dns_name: The DNS name to verify as valid for the certificate. :param str eku: The enhanced key usage object identifiers to verify for the certificate chain. :return: A boolean representing whether the certificate was considered valid. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_pki.test_cert thumbprint='AAA000' dns_name='example.test' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cmd.append(r"Test-Certificate -Cert '{0}'".format(cert_path)) _validate_cert_path(name=cert_path) if untrusted_root: cmd.append(' -AllowUntrustedRoot') if dns_name: cmd.append(" -DnsName '{0}'".format(dns_name)) if eku: cmd.append(" -EKU '{0}'".format(eku)) cmd.append(' -ErrorAction SilentlyContinue') return ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd))) def remove_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Remove the certificate from the given certificate store. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.remove_cert thumbprint='AAA000' ''' thumbprint = thumbprint.upper() store_path = r'Cert:\{0}\{1}'.format(context, store) cert_path = r'{0}\{1}'.format(store_path, thumbprint) cmd = r"Remove-Item -Path '{0}'".format(cert_path) current_certs = get_certs(context=context, store=store) if thumbprint not in current_certs: _LOG.debug("Certificate '%s' already absent in store: %s", thumbprint, store_path) return True _validate_cert_path(name=cert_path) _cmd_run(cmd=cmd) new_certs = get_certs(context=context, store=store) if thumbprint in new_certs: _LOG.error('Unable to remove certificate: %s', cert_path) return False _LOG.debug('Certificate removed successfully: %s', cert_path) return True
saltstack/salt
salt/modules/win_pki.py
get_cert_file
python
def get_cert_file(name, cert_format=_DEFAULT_FORMAT, password=''):
    '''
    Get the details of the certificate file.

    :param str name: The filesystem path of the certificate file.
    :param str cert_format: The certificate format. Specify 'cer' for X.509, or
        'pfx' for PKCS #12.
    :param str password: The password of the certificate. Only applicable to pfx
        format. Note that if used interactively, the password will be seen by all minions.
        To protect the password, use a state and get the password from pillar.

    :return: A dictionary of the certificate thumbprints and properties.
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt '*' win_pki.get_cert_file name='C:\\certs\\example.cer'
    '''
    ret = dict()
    cmd = list()
    # Keys excluded from the flat result dict; DnsNameList is flattened
    # separately into 'dnsnames' below.
    blacklist_keys = ['DnsNameList']
    cert_format = cert_format.lower()

    _validate_cert_format(name=cert_format)

    if not name or not os.path.isfile(name):
        _LOG.error('Path is not present: %s', name)
        return ret

    if cert_format == 'pfx':
        if password:
            # Use the X509Certificate2 constructor path so the password can
            # be supplied to the import (Get-PfxCertificate prompts instead).
            cmd.append('$CertObject = New-Object')
            cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;')
            cmd.append(r" $CertObject.Import('{0}'".format(name))
            cmd.append(",'{0}'".format(password))
            cmd.append(",'DefaultKeySet') ; $CertObject")
            cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, '
                       'Thumbprint, Version')
        else:
            cmd.append(r"Get-PfxCertificate -FilePath '{0}'".format(name))
            cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, '
                       'Thumbprint, Version')
    else:
        cmd.append('$CertObject = New-Object')
        cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;')
        cmd.append(r" $CertObject.Import('{0}'); $CertObject".format(name))
        cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, '
                   'Thumbprint, Version')

    items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True)

    for item in items:
        for key in item:
            if key not in blacklist_keys:
                ret[key.lower()] = item[key]

        # Guard against a missing or non-list DnsNameList (consistent with
        # get_certs) instead of raising KeyError, and avoid shadowing the
        # 'name' parameter in the comprehension -- under Python 2 the loop
        # variable leaks and would corrupt the log messages below.
        dns_names = item.get('DnsNameList', None)
        if isinstance(dns_names, list):
            ret['dnsnames'] = [entry.get('Unicode') for entry in dns_names]
        else:
            ret['dnsnames'] = []

    if ret:
        _LOG.debug('Certificate thumbprint obtained successfully: %s', name)
    else:
        _LOG.error('Unable to obtain certificate thumbprint: %s', name)
    return ret
Get the details of the certificate file. :param str name: The filesystem path of the certificate file. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_cert_file name='C:\\certs\\example.cer'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_pki.py#L186-L250
[ "def _cmd_run(cmd, as_json=False):\n '''\n Ensure that the Pki module is loaded, and convert to and extract data from\n Json as needed.\n '''\n cmd_full = ['Import-Module -Name PKI; ']\n\n if as_json:\n cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject '\n r'@({0})'.format(cmd))\n else:\n cmd_full.append(cmd)\n cmd_ret = __salt__['cmd.run_all'](\n six.text_type().join(cmd_full), shell='powershell', python_shell=True)\n\n if cmd_ret['retcode'] != 0:\n _LOG.error('Unable to execute command: %s\\nError: %s', cmd,\n cmd_ret['stderr'])\n\n if as_json:\n try:\n items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n return items\n except ValueError:\n _LOG.error('Unable to parse return data as Json.')\n\n return cmd_ret['stdout']\n", "def _validate_cert_format(name):\n '''\n Ensure that the certificate format, as determind from user input, is valid.\n '''\n cert_formats = ['cer', 'pfx']\n\n if name not in cert_formats:\n message = (\"Invalid certificate format '{0}' specified. Valid formats:\"\n ' {1}').format(name, cert_formats)\n raise SaltInvocationError(message)\n" ]
# -*- coding: utf-8 -*- ''' Microsoft certificate management via the PKI Client PowerShell module. https://technet.microsoft.com/en-us/itpro/powershell/windows/pkiclient/pkiclient The PKI Client PowerShell module is only available on Windows 8+ and Windows Server 2012+. https://technet.microsoft.com/en-us/library/hh848636(v=wps.620).aspx :platform: Windows :depends: - PowerShell 4 - PKI Client Module (Windows 8+ / Windows Server 2012+) .. versionadded:: 2016.11.0 ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function import ast import logging import os # Import salt libs import salt.utils.json import salt.utils.platform import salt.utils.powershell import salt.utils.versions from salt.exceptions import SaltInvocationError # Import 3rd party libs from salt.ext import six _DEFAULT_CONTEXT = 'LocalMachine' _DEFAULT_FORMAT = 'cer' _DEFAULT_STORE = 'My' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_pki' def __virtual__(): ''' Requires Windows Requires Windows 8+ / Windows Server 2012+ Requires PowerShell Requires PKI Client PowerShell module installed. ''' if not salt.utils.platform.is_windows(): return False, 'Only available on Windows Systems' if salt.utils.versions.version_cmp(__grains__['osversion'], '6.2.9200') == -1: return False, 'Only available on Windows 8+ / Windows Server 2012 +' if not __salt__['cmd.shell_info']('powershell')['installed']: return False, 'Powershell not available' if not salt.utils.powershell.module_exists('PKI'): return False, 'PowerShell PKI module not available' return __virtualname__ def _cmd_run(cmd, as_json=False): ''' Ensure that the Pki module is loaded, and convert to and extract data from Json as needed. 
''' cmd_full = ['Import-Module -Name PKI; '] if as_json: cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject ' r'@({0})'.format(cmd)) else: cmd_full.append(cmd) cmd_ret = __salt__['cmd.run_all']( six.text_type().join(cmd_full), shell='powershell', python_shell=True) if cmd_ret['retcode'] != 0: _LOG.error('Unable to execute command: %s\nError: %s', cmd, cmd_ret['stderr']) if as_json: try: items = salt.utils.json.loads(cmd_ret['stdout'], strict=False) return items except ValueError: _LOG.error('Unable to parse return data as Json.') return cmd_ret['stdout'] def _validate_cert_path(name): ''' Ensure that the certificate path, as determind from user input, is valid. ''' cmd = r"Test-Path -Path '{0}'".format(name) if not ast.literal_eval(_cmd_run(cmd=cmd)): raise SaltInvocationError(r"Invalid path specified: {0}".format(name)) def _validate_cert_format(name): ''' Ensure that the certificate format, as determind from user input, is valid. ''' cert_formats = ['cer', 'pfx'] if name not in cert_formats: message = ("Invalid certificate format '{0}' specified. Valid formats:" ' {1}').format(name, cert_formats) raise SaltInvocationError(message) def get_stores(): ''' Get the certificate location contexts and their corresponding stores. :return: A dictionary of the certificate location contexts and stores. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_stores ''' ret = dict() cmd = r"Get-ChildItem -Path 'Cert:\' | " \ r"Select-Object LocationName, StoreNames" items = _cmd_run(cmd=cmd, as_json=True) for item in items: ret[item['LocationName']] = list() for store in item['StoreNames']: ret[item['LocationName']].append(store) return ret def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Get the available certificates in the given store. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A dictionary of the certificate thumbprints and properties. 
:rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_certs ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] store_path = r'Cert:\{0}\{1}'.format(context, store) _validate_cert_path(name=store_path) cmd.append(r"Get-ChildItem -Path '{0}' | Select-Object".format(store_path)) cmd.append(' DnsNameList, SerialNumber, Subject, Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: cert_info = dict() for key in item: if key not in blacklist_keys: cert_info[key.lower()] = item[key] names = item.get('DnsNameList', None) if isinstance(names, list): cert_info['dnsnames'] = [name.get('Unicode') for name in names] else: cert_info['dnsnames'] = [] ret[item['Thumbprint']] = cert_info return ret def import_cert(name, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, exportable=True, password='', saltenv='base'): ''' Import the certificate file into the given certificate store. :param str name: The path of the certificate file to import. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param bool exportable: Mark the certificate as exportable. Only applicable to pfx format. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :param str saltenv: The environment the file resides in. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_pki.import_cert name='salt://cert.cer' ''' cmd = list() thumbprint = None store_path = r'Cert:\{0}\{1}'.format(context, store) cert_format = cert_format.lower() _validate_cert_format(name=cert_format) cached_source_path = __salt__['cp.cache_file'](name, saltenv) if not cached_source_path: _LOG.error('Unable to get cached copy of file: %s', name) return False if password: cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format, password=password) else: cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format) current_certs = get_certs(context=context, store=store) if cert_props['thumbprint'] in current_certs: _LOG.debug("Certificate thumbprint '%s' already present in store: %s", cert_props['thumbprint'], store_path) return True if cert_format == 'pfx': # In instances where an empty password is needed, we use a # System.Security.SecureString object since ConvertTo-SecureString will # not convert an empty string. if password: cmd.append(r"$Password = ConvertTo-SecureString " r"-String '{0}'".format(password)) cmd.append(' -AsPlainText -Force; ') else: cmd.append('$Password = New-Object System.Security.SecureString; ') cmd.append(r"Import-PfxCertificate " r"-FilePath '{0}'".format(cached_source_path)) cmd.append(r" -CertStoreLocation '{0}'".format(store_path)) cmd.append(r" -Password $Password") if exportable: cmd.append(' -Exportable') else: cmd.append(r"Import-Certificate " r"-FilePath '{0}'".format(cached_source_path)) cmd.append(r" -CertStoreLocation '{0}'".format(store_path)) _cmd_run(cmd=six.text_type().join(cmd)) new_certs = get_certs(context=context, store=store) for new_cert in new_certs: if new_cert not in current_certs: thumbprint = new_cert if thumbprint: _LOG.debug('Certificate imported successfully: %s', name) return True _LOG.error('Unable to import certificate: %s', name) return False def export_cert(name, thumbprint, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, 
store=_DEFAULT_STORE, password=''): ''' Export the certificate to a file from the given certificate store. :param str name: The destination path for the exported certificate file. :param str thumbprint: The thumbprint value of the target certificate. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.export_cert name='C:\\certs\\example.cer' thumbprint='AAA000' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cert_format = cert_format.lower() _validate_cert_path(name=cert_path) _validate_cert_format(name=cert_format) if cert_format == 'pfx': # In instances where an empty password is needed, we use a # System.Security.SecureString object since ConvertTo-SecureString will # not convert an empty string. 
if password: cmd.append(r"$Password = ConvertTo-SecureString " r"-String '{0}'".format(password)) cmd.append(' -AsPlainText -Force; ') else: cmd.append('$Password = New-Object System.Security.SecureString; ') cmd.append(r"Export-PfxCertificate " r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name)) cmd.append(r" -Password $Password") else: cmd.append(r"Export-Certificate " r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name)) cmd.append(r" | Out-Null; Test-Path -Path '{0}'".format(name)) ret = ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd))) if ret: _LOG.debug('Certificate exported successfully: %s', name) else: _LOG.error('Unable to export certificate: %s', name) return ret def test_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, untrusted_root=False, dns_name='', eku=''): ''' Check the certificate for validity. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param bool untrusted_root: Whether the root certificate is required to be trusted in chain building. :param str dns_name: The DNS name to verify as valid for the certificate. :param str eku: The enhanced key usage object identifiers to verify for the certificate chain. :return: A boolean representing whether the certificate was considered valid. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_pki.test_cert thumbprint='AAA000' dns_name='example.test' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cmd.append(r"Test-Certificate -Cert '{0}'".format(cert_path)) _validate_cert_path(name=cert_path) if untrusted_root: cmd.append(' -AllowUntrustedRoot') if dns_name: cmd.append(" -DnsName '{0}'".format(dns_name)) if eku: cmd.append(" -EKU '{0}'".format(eku)) cmd.append(' -ErrorAction SilentlyContinue') return ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd))) def remove_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Remove the certificate from the given certificate store. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.remove_cert thumbprint='AAA000' ''' thumbprint = thumbprint.upper() store_path = r'Cert:\{0}\{1}'.format(context, store) cert_path = r'{0}\{1}'.format(store_path, thumbprint) cmd = r"Remove-Item -Path '{0}'".format(cert_path) current_certs = get_certs(context=context, store=store) if thumbprint not in current_certs: _LOG.debug("Certificate '%s' already absent in store: %s", thumbprint, store_path) return True _validate_cert_path(name=cert_path) _cmd_run(cmd=cmd) new_certs = get_certs(context=context, store=store) if thumbprint in new_certs: _LOG.error('Unable to remove certificate: %s', cert_path) return False _LOG.debug('Certificate removed successfully: %s', cert_path) return True
saltstack/salt
salt/modules/win_pki.py
import_cert
python
def import_cert(name, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT,
                store=_DEFAULT_STORE, exportable=True, password='', saltenv='base'):
    '''
    Import the certificate file into the given certificate store.

    :param str name: The path of the certificate file to import.
    :param str cert_format: The certificate format. Specify 'cer' for X.509, or
        'pfx' for PKCS #12.
    :param str context: The name of the certificate store location context.
    :param str store: The name of the certificate store.
    :param bool exportable: Mark the certificate as exportable. Only applicable
        to pfx format.
    :param str password: The password of the certificate. Only applicable to pfx
        format. Note that if used interactively, the password will be seen by all minions.
        To protect the password, use a state and get the password from pillar.
    :param str saltenv: The environment the file resides in.

    :return: A boolean representing whether all changes succeeded.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' win_pki.import_cert name='salt://cert.cer'
    '''
    cmd = list()
    thumbprint = None
    store_path = r'Cert:\{0}\{1}'.format(context, store)
    cert_format = cert_format.lower()

    _validate_cert_format(name=cert_format)

    cached_source_path = __salt__['cp.cache_file'](name, saltenv)

    if not cached_source_path:
        _LOG.error('Unable to get cached copy of file: %s', name)
        return False

    if password:
        cert_props = get_cert_file(name=cached_source_path,
                                   cert_format=cert_format,
                                   password=password)
    else:
        cert_props = get_cert_file(name=cached_source_path,
                                   cert_format=cert_format)

    # get_cert_file returns an empty dict on failure (bad password,
    # unreadable file); fail explicitly instead of raising KeyError on
    # the 'thumbprint' lookup below.
    if not cert_props or 'thumbprint' not in cert_props:
        _LOG.error('Unable to obtain certificate properties for file: %s', name)
        return False

    current_certs = get_certs(context=context, store=store)

    if cert_props['thumbprint'] in current_certs:
        _LOG.debug("Certificate thumbprint '%s' already present in store: %s",
                   cert_props['thumbprint'], store_path)
        return True

    if cert_format == 'pfx':
        # In instances where an empty password is needed, we use a
        # System.Security.SecureString object since ConvertTo-SecureString will
        # not convert an empty string.
        if password:
            cmd.append(r"$Password = ConvertTo-SecureString "
                       r"-String '{0}'".format(password))
            cmd.append(' -AsPlainText -Force; ')
        else:
            cmd.append('$Password = New-Object System.Security.SecureString; ')

        cmd.append(r"Import-PfxCertificate "
                   r"-FilePath '{0}'".format(cached_source_path))
        cmd.append(r" -CertStoreLocation '{0}'".format(store_path))
        cmd.append(r" -Password $Password")

        if exportable:
            cmd.append(' -Exportable')
    else:
        cmd.append(r"Import-Certificate "
                   r"-FilePath '{0}'".format(cached_source_path))
        cmd.append(r" -CertStoreLocation '{0}'".format(store_path))

    _cmd_run(cmd=six.text_type().join(cmd))

    # Identify the newly-added thumbprint by diffing the store contents.
    new_certs = get_certs(context=context, store=store)

    for new_cert in new_certs:
        if new_cert not in current_certs:
            thumbprint = new_cert

    if thumbprint:
        _LOG.debug('Certificate imported successfully: %s', name)
        return True
    _LOG.error('Unable to import certificate: %s', name)
    return False
Import the certificate file into the given certificate store. :param str name: The path of the certificate file to import. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param bool exportable: Mark the certificate as exportable. Only applicable to pfx format. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :param str saltenv: The environment the file resides in. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.import_cert name='salt://cert.cer'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_pki.py#L253-L344
[ "def _cmd_run(cmd, as_json=False):\n '''\n Ensure that the Pki module is loaded, and convert to and extract data from\n Json as needed.\n '''\n cmd_full = ['Import-Module -Name PKI; ']\n\n if as_json:\n cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject '\n r'@({0})'.format(cmd))\n else:\n cmd_full.append(cmd)\n cmd_ret = __salt__['cmd.run_all'](\n six.text_type().join(cmd_full), shell='powershell', python_shell=True)\n\n if cmd_ret['retcode'] != 0:\n _LOG.error('Unable to execute command: %s\\nError: %s', cmd,\n cmd_ret['stderr'])\n\n if as_json:\n try:\n items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n return items\n except ValueError:\n _LOG.error('Unable to parse return data as Json.')\n\n return cmd_ret['stdout']\n", "def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE):\n '''\n Get the available certificates in the given store.\n\n :param str context: The name of the certificate store location context.\n :param str store: The name of the certificate store.\n\n :return: A dictionary of the certificate thumbprints and properties.\n :rtype: dict\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' win_pki.get_certs\n '''\n ret = dict()\n cmd = list()\n blacklist_keys = ['DnsNameList']\n store_path = r'Cert:\\{0}\\{1}'.format(context, store)\n\n _validate_cert_path(name=store_path)\n\n cmd.append(r\"Get-ChildItem -Path '{0}' | Select-Object\".format(store_path))\n cmd.append(' DnsNameList, SerialNumber, Subject, Thumbprint, Version')\n\n items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True)\n\n for item in items:\n cert_info = dict()\n for key in item:\n if key not in blacklist_keys:\n cert_info[key.lower()] = item[key]\n\n names = item.get('DnsNameList', None)\n if isinstance(names, list):\n cert_info['dnsnames'] = [name.get('Unicode') for name in names]\n else:\n cert_info['dnsnames'] = []\n ret[item['Thumbprint']] = cert_info\n return ret\n", "def get_cert_file(name, cert_format=_DEFAULT_FORMAT, password=''):\n '''\n Get the details of the certificate file.\n\n :param str name: The filesystem path of the certificate file.\n :param str cert_format: The certificate format. Specify 'cer' for X.509, or\n 'pfx' for PKCS #12.\n :param str password: The password of the certificate. Only applicable to pfx\n format. Note that if used interactively, the password will be seen by all minions.\n To protect the password, use a state and get the password from pillar.\n\n :return: A dictionary of the certificate thumbprints and properties.\n :rtype: dict\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' win_pki.get_cert_file name='C:\\\\certs\\\\example.cer'\n '''\n ret = dict()\n cmd = list()\n blacklist_keys = ['DnsNameList']\n cert_format = cert_format.lower()\n\n _validate_cert_format(name=cert_format)\n\n if not name or not os.path.isfile(name):\n _LOG.error('Path is not present: %s', name)\n return ret\n\n if cert_format == 'pfx':\n if password:\n cmd.append('$CertObject = New-Object')\n cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;')\n cmd.append(r\" $CertObject.Import('{0}'\".format(name))\n cmd.append(\",'{0}'\".format(password))\n cmd.append(\",'DefaultKeySet') ; $CertObject\")\n cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, '\n 'Thumbprint, Version')\n else:\n cmd.append(r\"Get-PfxCertificate -FilePath '{0}'\".format(name))\n cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, '\n 'Thumbprint, Version')\n else:\n cmd.append('$CertObject = New-Object')\n cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;')\n cmd.append(r\" $CertObject.Import('{0}'); $CertObject\".format(name))\n cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, '\n 'Thumbprint, Version')\n\n items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True)\n\n for item in items:\n for key in item:\n if key not in blacklist_keys:\n ret[key.lower()] = item[key]\n\n ret['dnsnames'] = [name['Unicode'] for name in item['DnsNameList']]\n\n if ret:\n _LOG.debug('Certificate thumbprint obtained successfully: %s', name)\n else:\n _LOG.error('Unable to obtain certificate thumbprint: %s', name)\n return ret\n", "def _validate_cert_format(name):\n '''\n Ensure that the certificate format, as determind from user input, is valid.\n '''\n cert_formats = ['cer', 'pfx']\n\n if name not in cert_formats:\n message = (\"Invalid certificate format '{0}' specified. Valid formats:\"\n ' {1}').format(name, cert_formats)\n raise SaltInvocationError(message)\n" ]
# -*- coding: utf-8 -*- ''' Microsoft certificate management via the PKI Client PowerShell module. https://technet.microsoft.com/en-us/itpro/powershell/windows/pkiclient/pkiclient The PKI Client PowerShell module is only available on Windows 8+ and Windows Server 2012+. https://technet.microsoft.com/en-us/library/hh848636(v=wps.620).aspx :platform: Windows :depends: - PowerShell 4 - PKI Client Module (Windows 8+ / Windows Server 2012+) .. versionadded:: 2016.11.0 ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function import ast import logging import os # Import salt libs import salt.utils.json import salt.utils.platform import salt.utils.powershell import salt.utils.versions from salt.exceptions import SaltInvocationError # Import 3rd party libs from salt.ext import six _DEFAULT_CONTEXT = 'LocalMachine' _DEFAULT_FORMAT = 'cer' _DEFAULT_STORE = 'My' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_pki' def __virtual__(): ''' Requires Windows Requires Windows 8+ / Windows Server 2012+ Requires PowerShell Requires PKI Client PowerShell module installed. ''' if not salt.utils.platform.is_windows(): return False, 'Only available on Windows Systems' if salt.utils.versions.version_cmp(__grains__['osversion'], '6.2.9200') == -1: return False, 'Only available on Windows 8+ / Windows Server 2012 +' if not __salt__['cmd.shell_info']('powershell')['installed']: return False, 'Powershell not available' if not salt.utils.powershell.module_exists('PKI'): return False, 'PowerShell PKI module not available' return __virtualname__ def _cmd_run(cmd, as_json=False): ''' Ensure that the Pki module is loaded, and convert to and extract data from Json as needed. 
''' cmd_full = ['Import-Module -Name PKI; '] if as_json: cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject ' r'@({0})'.format(cmd)) else: cmd_full.append(cmd) cmd_ret = __salt__['cmd.run_all']( six.text_type().join(cmd_full), shell='powershell', python_shell=True) if cmd_ret['retcode'] != 0: _LOG.error('Unable to execute command: %s\nError: %s', cmd, cmd_ret['stderr']) if as_json: try: items = salt.utils.json.loads(cmd_ret['stdout'], strict=False) return items except ValueError: _LOG.error('Unable to parse return data as Json.') return cmd_ret['stdout'] def _validate_cert_path(name): ''' Ensure that the certificate path, as determind from user input, is valid. ''' cmd = r"Test-Path -Path '{0}'".format(name) if not ast.literal_eval(_cmd_run(cmd=cmd)): raise SaltInvocationError(r"Invalid path specified: {0}".format(name)) def _validate_cert_format(name): ''' Ensure that the certificate format, as determind from user input, is valid. ''' cert_formats = ['cer', 'pfx'] if name not in cert_formats: message = ("Invalid certificate format '{0}' specified. Valid formats:" ' {1}').format(name, cert_formats) raise SaltInvocationError(message) def get_stores(): ''' Get the certificate location contexts and their corresponding stores. :return: A dictionary of the certificate location contexts and stores. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_stores ''' ret = dict() cmd = r"Get-ChildItem -Path 'Cert:\' | " \ r"Select-Object LocationName, StoreNames" items = _cmd_run(cmd=cmd, as_json=True) for item in items: ret[item['LocationName']] = list() for store in item['StoreNames']: ret[item['LocationName']].append(store) return ret def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Get the available certificates in the given store. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A dictionary of the certificate thumbprints and properties. 
:rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_certs ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] store_path = r'Cert:\{0}\{1}'.format(context, store) _validate_cert_path(name=store_path) cmd.append(r"Get-ChildItem -Path '{0}' | Select-Object".format(store_path)) cmd.append(' DnsNameList, SerialNumber, Subject, Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: cert_info = dict() for key in item: if key not in blacklist_keys: cert_info[key.lower()] = item[key] names = item.get('DnsNameList', None) if isinstance(names, list): cert_info['dnsnames'] = [name.get('Unicode') for name in names] else: cert_info['dnsnames'] = [] ret[item['Thumbprint']] = cert_info return ret def get_cert_file(name, cert_format=_DEFAULT_FORMAT, password=''): ''' Get the details of the certificate file. :param str name: The filesystem path of the certificate file. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. 
code-block:: bash salt '*' win_pki.get_cert_file name='C:\\certs\\example.cer' ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] cert_format = cert_format.lower() _validate_cert_format(name=cert_format) if not name or not os.path.isfile(name): _LOG.error('Path is not present: %s', name) return ret if cert_format == 'pfx': if password: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'".format(name)) cmd.append(",'{0}'".format(password)) cmd.append(",'DefaultKeySet') ; $CertObject") cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append(r"Get-PfxCertificate -FilePath '{0}'".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'); $CertObject".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: for key in item: if key not in blacklist_keys: ret[key.lower()] = item[key] ret['dnsnames'] = [name['Unicode'] for name in item['DnsNameList']] if ret: _LOG.debug('Certificate thumbprint obtained successfully: %s', name) else: _LOG.error('Unable to obtain certificate thumbprint: %s', name) return ret def export_cert(name, thumbprint, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, password=''): ''' Export the certificate to a file from the given certificate store. :param str name: The destination path for the exported certificate file. :param str thumbprint: The thumbprint value of the target certificate. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. 
:param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.export_cert name='C:\\certs\\example.cer' thumbprint='AAA000' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cert_format = cert_format.lower() _validate_cert_path(name=cert_path) _validate_cert_format(name=cert_format) if cert_format == 'pfx': # In instances where an empty password is needed, we use a # System.Security.SecureString object since ConvertTo-SecureString will # not convert an empty string. if password: cmd.append(r"$Password = ConvertTo-SecureString " r"-String '{0}'".format(password)) cmd.append(' -AsPlainText -Force; ') else: cmd.append('$Password = New-Object System.Security.SecureString; ') cmd.append(r"Export-PfxCertificate " r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name)) cmd.append(r" -Password $Password") else: cmd.append(r"Export-Certificate " r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name)) cmd.append(r" | Out-Null; Test-Path -Path '{0}'".format(name)) ret = ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd))) if ret: _LOG.debug('Certificate exported successfully: %s', name) else: _LOG.error('Unable to export certificate: %s', name) return ret def test_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, untrusted_root=False, dns_name='', eku=''): ''' Check the certificate for validity. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. 
:param str store: The name of the certificate store. :param bool untrusted_root: Whether the root certificate is required to be trusted in chain building. :param str dns_name: The DNS name to verify as valid for the certificate. :param str eku: The enhanced key usage object identifiers to verify for the certificate chain. :return: A boolean representing whether the certificate was considered valid. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.test_cert thumbprint='AAA000' dns_name='example.test' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cmd.append(r"Test-Certificate -Cert '{0}'".format(cert_path)) _validate_cert_path(name=cert_path) if untrusted_root: cmd.append(' -AllowUntrustedRoot') if dns_name: cmd.append(" -DnsName '{0}'".format(dns_name)) if eku: cmd.append(" -EKU '{0}'".format(eku)) cmd.append(' -ErrorAction SilentlyContinue') return ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd))) def remove_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Remove the certificate from the given certificate store. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_pki.remove_cert thumbprint='AAA000' ''' thumbprint = thumbprint.upper() store_path = r'Cert:\{0}\{1}'.format(context, store) cert_path = r'{0}\{1}'.format(store_path, thumbprint) cmd = r"Remove-Item -Path '{0}'".format(cert_path) current_certs = get_certs(context=context, store=store) if thumbprint not in current_certs: _LOG.debug("Certificate '%s' already absent in store: %s", thumbprint, store_path) return True _validate_cert_path(name=cert_path) _cmd_run(cmd=cmd) new_certs = get_certs(context=context, store=store) if thumbprint in new_certs: _LOG.error('Unable to remove certificate: %s', cert_path) return False _LOG.debug('Certificate removed successfully: %s', cert_path) return True
saltstack/salt
salt/modules/win_pki.py
export_cert
python
def export_cert(name, thumbprint, cert_format=_DEFAULT_FORMAT,
                context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, password=''):
    '''
    Export a certificate from the given certificate store to a file.

    :param str name: The destination path for the exported certificate file.
    :param str thumbprint: The thumbprint value of the target certificate.
    :param str cert_format: The certificate format. Specify 'cer' for X.509,
        or 'pfx' for PKCS #12.
    :param str context: The name of the certificate store location context.
    :param str store: The name of the certificate store.
    :param str password: The password of the certificate. Only applicable to
        pfx format. Note that if used interactively, the password will be
        seen by all minions. To protect the password, use a state and get the
        password from pillar.

    :return: A boolean representing whether all changes succeeded.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' win_pki.export_cert name='C:\\certs\\example.cer' thumbprint='AAA000'
    '''
    thumbprint = thumbprint.upper()
    cert_format = cert_format.lower()
    cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint)

    _validate_cert_path(name=cert_path)
    _validate_cert_format(name=cert_format)

    segments = list()
    if cert_format == 'pfx':
        # ConvertTo-SecureString will not convert an empty string, so build a
        # bare System.Security.SecureString object when no password is given.
        if password:
            segments.append(r"$Password = ConvertTo-SecureString "
                            r"-String '{0}'".format(password))
            segments.append(' -AsPlainText -Force; ')
        else:
            segments.append('$Password = New-Object System.Security.SecureString; ')
        segments.append(r"Export-PfxCertificate "
                        r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name))
        segments.append(r" -Password $Password")
    else:
        segments.append(r"Export-Certificate "
                        r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name))
    # Discard the cmdlet's own output; report whether the target file exists.
    segments.append(r" | Out-Null; Test-Path -Path '{0}'".format(name))

    ret = ast.literal_eval(_cmd_run(cmd=six.text_type().join(segments)))

    if ret:
        _LOG.debug('Certificate exported successfully: %s', name)
    else:
        _LOG.error('Unable to export certificate: %s', name)
    return ret
Export the certificate to a file from the given certificate store. :param str name: The destination path for the exported certificate file. :param str thumbprint: The thumbprint value of the target certificate. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.export_cert name='C:\\certs\\example.cer' thumbprint='AAA000'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_pki.py#L347-L409
[ "def _cmd_run(cmd, as_json=False):\n '''\n Ensure that the Pki module is loaded, and convert to and extract data from\n Json as needed.\n '''\n cmd_full = ['Import-Module -Name PKI; ']\n\n if as_json:\n cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject '\n r'@({0})'.format(cmd))\n else:\n cmd_full.append(cmd)\n cmd_ret = __salt__['cmd.run_all'](\n six.text_type().join(cmd_full), shell='powershell', python_shell=True)\n\n if cmd_ret['retcode'] != 0:\n _LOG.error('Unable to execute command: %s\\nError: %s', cmd,\n cmd_ret['stderr'])\n\n if as_json:\n try:\n items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n return items\n except ValueError:\n _LOG.error('Unable to parse return data as Json.')\n\n return cmd_ret['stdout']\n", "def _validate_cert_path(name):\n '''\n Ensure that the certificate path, as determind from user input, is valid.\n '''\n cmd = r\"Test-Path -Path '{0}'\".format(name)\n\n if not ast.literal_eval(_cmd_run(cmd=cmd)):\n raise SaltInvocationError(r\"Invalid path specified: {0}\".format(name))\n", "def _validate_cert_format(name):\n '''\n Ensure that the certificate format, as determind from user input, is valid.\n '''\n cert_formats = ['cer', 'pfx']\n\n if name not in cert_formats:\n message = (\"Invalid certificate format '{0}' specified. Valid formats:\"\n ' {1}').format(name, cert_formats)\n raise SaltInvocationError(message)\n" ]
# -*- coding: utf-8 -*- ''' Microsoft certificate management via the PKI Client PowerShell module. https://technet.microsoft.com/en-us/itpro/powershell/windows/pkiclient/pkiclient The PKI Client PowerShell module is only available on Windows 8+ and Windows Server 2012+. https://technet.microsoft.com/en-us/library/hh848636(v=wps.620).aspx :platform: Windows :depends: - PowerShell 4 - PKI Client Module (Windows 8+ / Windows Server 2012+) .. versionadded:: 2016.11.0 ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function import ast import logging import os # Import salt libs import salt.utils.json import salt.utils.platform import salt.utils.powershell import salt.utils.versions from salt.exceptions import SaltInvocationError # Import 3rd party libs from salt.ext import six _DEFAULT_CONTEXT = 'LocalMachine' _DEFAULT_FORMAT = 'cer' _DEFAULT_STORE = 'My' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_pki' def __virtual__(): ''' Requires Windows Requires Windows 8+ / Windows Server 2012+ Requires PowerShell Requires PKI Client PowerShell module installed. ''' if not salt.utils.platform.is_windows(): return False, 'Only available on Windows Systems' if salt.utils.versions.version_cmp(__grains__['osversion'], '6.2.9200') == -1: return False, 'Only available on Windows 8+ / Windows Server 2012 +' if not __salt__['cmd.shell_info']('powershell')['installed']: return False, 'Powershell not available' if not salt.utils.powershell.module_exists('PKI'): return False, 'PowerShell PKI module not available' return __virtualname__ def _cmd_run(cmd, as_json=False): ''' Ensure that the Pki module is loaded, and convert to and extract data from Json as needed. 
''' cmd_full = ['Import-Module -Name PKI; '] if as_json: cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject ' r'@({0})'.format(cmd)) else: cmd_full.append(cmd) cmd_ret = __salt__['cmd.run_all']( six.text_type().join(cmd_full), shell='powershell', python_shell=True) if cmd_ret['retcode'] != 0: _LOG.error('Unable to execute command: %s\nError: %s', cmd, cmd_ret['stderr']) if as_json: try: items = salt.utils.json.loads(cmd_ret['stdout'], strict=False) return items except ValueError: _LOG.error('Unable to parse return data as Json.') return cmd_ret['stdout'] def _validate_cert_path(name): ''' Ensure that the certificate path, as determind from user input, is valid. ''' cmd = r"Test-Path -Path '{0}'".format(name) if not ast.literal_eval(_cmd_run(cmd=cmd)): raise SaltInvocationError(r"Invalid path specified: {0}".format(name)) def _validate_cert_format(name): ''' Ensure that the certificate format, as determind from user input, is valid. ''' cert_formats = ['cer', 'pfx'] if name not in cert_formats: message = ("Invalid certificate format '{0}' specified. Valid formats:" ' {1}').format(name, cert_formats) raise SaltInvocationError(message) def get_stores(): ''' Get the certificate location contexts and their corresponding stores. :return: A dictionary of the certificate location contexts and stores. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_stores ''' ret = dict() cmd = r"Get-ChildItem -Path 'Cert:\' | " \ r"Select-Object LocationName, StoreNames" items = _cmd_run(cmd=cmd, as_json=True) for item in items: ret[item['LocationName']] = list() for store in item['StoreNames']: ret[item['LocationName']].append(store) return ret def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Get the available certificates in the given store. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A dictionary of the certificate thumbprints and properties. 
:rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_certs ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] store_path = r'Cert:\{0}\{1}'.format(context, store) _validate_cert_path(name=store_path) cmd.append(r"Get-ChildItem -Path '{0}' | Select-Object".format(store_path)) cmd.append(' DnsNameList, SerialNumber, Subject, Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: cert_info = dict() for key in item: if key not in blacklist_keys: cert_info[key.lower()] = item[key] names = item.get('DnsNameList', None) if isinstance(names, list): cert_info['dnsnames'] = [name.get('Unicode') for name in names] else: cert_info['dnsnames'] = [] ret[item['Thumbprint']] = cert_info return ret def get_cert_file(name, cert_format=_DEFAULT_FORMAT, password=''): ''' Get the details of the certificate file. :param str name: The filesystem path of the certificate file. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. 
code-block:: bash salt '*' win_pki.get_cert_file name='C:\\certs\\example.cer' ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] cert_format = cert_format.lower() _validate_cert_format(name=cert_format) if not name or not os.path.isfile(name): _LOG.error('Path is not present: %s', name) return ret if cert_format == 'pfx': if password: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'".format(name)) cmd.append(",'{0}'".format(password)) cmd.append(",'DefaultKeySet') ; $CertObject") cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append(r"Get-PfxCertificate -FilePath '{0}'".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'); $CertObject".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: for key in item: if key not in blacklist_keys: ret[key.lower()] = item[key] ret['dnsnames'] = [name['Unicode'] for name in item['DnsNameList']] if ret: _LOG.debug('Certificate thumbprint obtained successfully: %s', name) else: _LOG.error('Unable to obtain certificate thumbprint: %s', name) return ret def import_cert(name, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, exportable=True, password='', saltenv='base'): ''' Import the certificate file into the given certificate store. :param str name: The path of the certificate file to import. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. 
:param str store: The name of the certificate store. :param bool exportable: Mark the certificate as exportable. Only applicable to pfx format. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :param str saltenv: The environment the file resides in. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.import_cert name='salt://cert.cer' ''' cmd = list() thumbprint = None store_path = r'Cert:\{0}\{1}'.format(context, store) cert_format = cert_format.lower() _validate_cert_format(name=cert_format) cached_source_path = __salt__['cp.cache_file'](name, saltenv) if not cached_source_path: _LOG.error('Unable to get cached copy of file: %s', name) return False if password: cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format, password=password) else: cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format) current_certs = get_certs(context=context, store=store) if cert_props['thumbprint'] in current_certs: _LOG.debug("Certificate thumbprint '%s' already present in store: %s", cert_props['thumbprint'], store_path) return True if cert_format == 'pfx': # In instances where an empty password is needed, we use a # System.Security.SecureString object since ConvertTo-SecureString will # not convert an empty string. 
if password: cmd.append(r"$Password = ConvertTo-SecureString " r"-String '{0}'".format(password)) cmd.append(' -AsPlainText -Force; ') else: cmd.append('$Password = New-Object System.Security.SecureString; ') cmd.append(r"Import-PfxCertificate " r"-FilePath '{0}'".format(cached_source_path)) cmd.append(r" -CertStoreLocation '{0}'".format(store_path)) cmd.append(r" -Password $Password") if exportable: cmd.append(' -Exportable') else: cmd.append(r"Import-Certificate " r"-FilePath '{0}'".format(cached_source_path)) cmd.append(r" -CertStoreLocation '{0}'".format(store_path)) _cmd_run(cmd=six.text_type().join(cmd)) new_certs = get_certs(context=context, store=store) for new_cert in new_certs: if new_cert not in current_certs: thumbprint = new_cert if thumbprint: _LOG.debug('Certificate imported successfully: %s', name) return True _LOG.error('Unable to import certificate: %s', name) return False def test_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, untrusted_root=False, dns_name='', eku=''): ''' Check the certificate for validity. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param bool untrusted_root: Whether the root certificate is required to be trusted in chain building. :param str dns_name: The DNS name to verify as valid for the certificate. :param str eku: The enhanced key usage object identifiers to verify for the certificate chain. :return: A boolean representing whether the certificate was considered valid. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_pki.test_cert thumbprint='AAA000' dns_name='example.test' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cmd.append(r"Test-Certificate -Cert '{0}'".format(cert_path)) _validate_cert_path(name=cert_path) if untrusted_root: cmd.append(' -AllowUntrustedRoot') if dns_name: cmd.append(" -DnsName '{0}'".format(dns_name)) if eku: cmd.append(" -EKU '{0}'".format(eku)) cmd.append(' -ErrorAction SilentlyContinue') return ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd))) def remove_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Remove the certificate from the given certificate store. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.remove_cert thumbprint='AAA000' ''' thumbprint = thumbprint.upper() store_path = r'Cert:\{0}\{1}'.format(context, store) cert_path = r'{0}\{1}'.format(store_path, thumbprint) cmd = r"Remove-Item -Path '{0}'".format(cert_path) current_certs = get_certs(context=context, store=store) if thumbprint not in current_certs: _LOG.debug("Certificate '%s' already absent in store: %s", thumbprint, store_path) return True _validate_cert_path(name=cert_path) _cmd_run(cmd=cmd) new_certs = get_certs(context=context, store=store) if thumbprint in new_certs: _LOG.error('Unable to remove certificate: %s', cert_path) return False _LOG.debug('Certificate removed successfully: %s', cert_path) return True
saltstack/salt
salt/modules/win_pki.py
remove_cert
python
def remove_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE):
    '''
    Remove the certificate from the given certificate store.

    :param str thumbprint: The thumbprint value of the target certificate.
    :param str context: The name of the certificate store location context.
    :param str store: The name of the certificate store.

    :return: A boolean representing whether all changes succeeded.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' win_pki.remove_cert thumbprint='AAA000'
    '''
    thumbprint = thumbprint.upper()
    store_path = r'Cert:\{0}\{1}'.format(context, store)
    cert_path = r'{0}\{1}'.format(store_path, thumbprint)

    # Nothing to do when the certificate is not in the store to begin with.
    if thumbprint not in get_certs(context=context, store=store):
        _LOG.debug("Certificate '%s' already absent in store: %s", thumbprint,
                   store_path)
        return True

    _validate_cert_path(name=cert_path)
    _cmd_run(cmd=r"Remove-Item -Path '{0}'".format(cert_path))

    # Re-read the store to confirm the removal actually took effect.
    if thumbprint in get_certs(context=context, store=store):
        _LOG.error('Unable to remove certificate: %s', cert_path)
        return False
    _LOG.debug('Certificate removed successfully: %s', cert_path)
    return True
Remove the certificate from the given certificate store. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.remove_cert thumbprint='AAA000'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_pki.py#L459-L497
[ "def _cmd_run(cmd, as_json=False):\n '''\n Ensure that the Pki module is loaded, and convert to and extract data from\n Json as needed.\n '''\n cmd_full = ['Import-Module -Name PKI; ']\n\n if as_json:\n cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject '\n r'@({0})'.format(cmd))\n else:\n cmd_full.append(cmd)\n cmd_ret = __salt__['cmd.run_all'](\n six.text_type().join(cmd_full), shell='powershell', python_shell=True)\n\n if cmd_ret['retcode'] != 0:\n _LOG.error('Unable to execute command: %s\\nError: %s', cmd,\n cmd_ret['stderr'])\n\n if as_json:\n try:\n items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n return items\n except ValueError:\n _LOG.error('Unable to parse return data as Json.')\n\n return cmd_ret['stdout']\n", "def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE):\n '''\n Get the available certificates in the given store.\n\n :param str context: The name of the certificate store location context.\n :param str store: The name of the certificate store.\n\n :return: A dictionary of the certificate thumbprints and properties.\n :rtype: dict\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' win_pki.get_certs\n '''\n ret = dict()\n cmd = list()\n blacklist_keys = ['DnsNameList']\n store_path = r'Cert:\\{0}\\{1}'.format(context, store)\n\n _validate_cert_path(name=store_path)\n\n cmd.append(r\"Get-ChildItem -Path '{0}' | Select-Object\".format(store_path))\n cmd.append(' DnsNameList, SerialNumber, Subject, Thumbprint, Version')\n\n items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True)\n\n for item in items:\n cert_info = dict()\n for key in item:\n if key not in blacklist_keys:\n cert_info[key.lower()] = item[key]\n\n names = item.get('DnsNameList', None)\n if isinstance(names, list):\n cert_info['dnsnames'] = [name.get('Unicode') for name in names]\n else:\n cert_info['dnsnames'] = []\n ret[item['Thumbprint']] = cert_info\n return ret\n", "def _validate_cert_path(name):\n '''\n Ensure that the certificate path, as determind from user input, is valid.\n '''\n cmd = r\"Test-Path -Path '{0}'\".format(name)\n\n if not ast.literal_eval(_cmd_run(cmd=cmd)):\n raise SaltInvocationError(r\"Invalid path specified: {0}\".format(name))\n" ]
# -*- coding: utf-8 -*- ''' Microsoft certificate management via the PKI Client PowerShell module. https://technet.microsoft.com/en-us/itpro/powershell/windows/pkiclient/pkiclient The PKI Client PowerShell module is only available on Windows 8+ and Windows Server 2012+. https://technet.microsoft.com/en-us/library/hh848636(v=wps.620).aspx :platform: Windows :depends: - PowerShell 4 - PKI Client Module (Windows 8+ / Windows Server 2012+) .. versionadded:: 2016.11.0 ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function import ast import logging import os # Import salt libs import salt.utils.json import salt.utils.platform import salt.utils.powershell import salt.utils.versions from salt.exceptions import SaltInvocationError # Import 3rd party libs from salt.ext import six _DEFAULT_CONTEXT = 'LocalMachine' _DEFAULT_FORMAT = 'cer' _DEFAULT_STORE = 'My' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_pki' def __virtual__(): ''' Requires Windows Requires Windows 8+ / Windows Server 2012+ Requires PowerShell Requires PKI Client PowerShell module installed. ''' if not salt.utils.platform.is_windows(): return False, 'Only available on Windows Systems' if salt.utils.versions.version_cmp(__grains__['osversion'], '6.2.9200') == -1: return False, 'Only available on Windows 8+ / Windows Server 2012 +' if not __salt__['cmd.shell_info']('powershell')['installed']: return False, 'Powershell not available' if not salt.utils.powershell.module_exists('PKI'): return False, 'PowerShell PKI module not available' return __virtualname__ def _cmd_run(cmd, as_json=False): ''' Ensure that the Pki module is loaded, and convert to and extract data from Json as needed. 
''' cmd_full = ['Import-Module -Name PKI; '] if as_json: cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject ' r'@({0})'.format(cmd)) else: cmd_full.append(cmd) cmd_ret = __salt__['cmd.run_all']( six.text_type().join(cmd_full), shell='powershell', python_shell=True) if cmd_ret['retcode'] != 0: _LOG.error('Unable to execute command: %s\nError: %s', cmd, cmd_ret['stderr']) if as_json: try: items = salt.utils.json.loads(cmd_ret['stdout'], strict=False) return items except ValueError: _LOG.error('Unable to parse return data as Json.') return cmd_ret['stdout'] def _validate_cert_path(name): ''' Ensure that the certificate path, as determind from user input, is valid. ''' cmd = r"Test-Path -Path '{0}'".format(name) if not ast.literal_eval(_cmd_run(cmd=cmd)): raise SaltInvocationError(r"Invalid path specified: {0}".format(name)) def _validate_cert_format(name): ''' Ensure that the certificate format, as determind from user input, is valid. ''' cert_formats = ['cer', 'pfx'] if name not in cert_formats: message = ("Invalid certificate format '{0}' specified. Valid formats:" ' {1}').format(name, cert_formats) raise SaltInvocationError(message) def get_stores(): ''' Get the certificate location contexts and their corresponding stores. :return: A dictionary of the certificate location contexts and stores. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_stores ''' ret = dict() cmd = r"Get-ChildItem -Path 'Cert:\' | " \ r"Select-Object LocationName, StoreNames" items = _cmd_run(cmd=cmd, as_json=True) for item in items: ret[item['LocationName']] = list() for store in item['StoreNames']: ret[item['LocationName']].append(store) return ret def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): ''' Get the available certificates in the given store. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :return: A dictionary of the certificate thumbprints and properties. 
:rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_certs ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] store_path = r'Cert:\{0}\{1}'.format(context, store) _validate_cert_path(name=store_path) cmd.append(r"Get-ChildItem -Path '{0}' | Select-Object".format(store_path)) cmd.append(' DnsNameList, SerialNumber, Subject, Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: cert_info = dict() for key in item: if key not in blacklist_keys: cert_info[key.lower()] = item[key] names = item.get('DnsNameList', None) if isinstance(names, list): cert_info['dnsnames'] = [name.get('Unicode') for name in names] else: cert_info['dnsnames'] = [] ret[item['Thumbprint']] = cert_info return ret def get_cert_file(name, cert_format=_DEFAULT_FORMAT, password=''): ''' Get the details of the certificate file. :param str name: The filesystem path of the certificate file. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. 
code-block:: bash salt '*' win_pki.get_cert_file name='C:\\certs\\example.cer' ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] cert_format = cert_format.lower() _validate_cert_format(name=cert_format) if not name or not os.path.isfile(name): _LOG.error('Path is not present: %s', name) return ret if cert_format == 'pfx': if password: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'".format(name)) cmd.append(",'{0}'".format(password)) cmd.append(",'DefaultKeySet') ; $CertObject") cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append(r"Get-PfxCertificate -FilePath '{0}'".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'); $CertObject".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: for key in item: if key not in blacklist_keys: ret[key.lower()] = item[key] ret['dnsnames'] = [name['Unicode'] for name in item['DnsNameList']] if ret: _LOG.debug('Certificate thumbprint obtained successfully: %s', name) else: _LOG.error('Unable to obtain certificate thumbprint: %s', name) return ret def import_cert(name, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, exportable=True, password='', saltenv='base'): ''' Import the certificate file into the given certificate store. :param str name: The path of the certificate file to import. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. 
:param str store: The name of the certificate store. :param bool exportable: Mark the certificate as exportable. Only applicable to pfx format. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :param str saltenv: The environment the file resides in. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_pki.import_cert name='salt://cert.cer' ''' cmd = list() thumbprint = None store_path = r'Cert:\{0}\{1}'.format(context, store) cert_format = cert_format.lower() _validate_cert_format(name=cert_format) cached_source_path = __salt__['cp.cache_file'](name, saltenv) if not cached_source_path: _LOG.error('Unable to get cached copy of file: %s', name) return False if password: cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format, password=password) else: cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format) current_certs = get_certs(context=context, store=store) if cert_props['thumbprint'] in current_certs: _LOG.debug("Certificate thumbprint '%s' already present in store: %s", cert_props['thumbprint'], store_path) return True if cert_format == 'pfx': # In instances where an empty password is needed, we use a # System.Security.SecureString object since ConvertTo-SecureString will # not convert an empty string. 
if password: cmd.append(r"$Password = ConvertTo-SecureString " r"-String '{0}'".format(password)) cmd.append(' -AsPlainText -Force; ') else: cmd.append('$Password = New-Object System.Security.SecureString; ') cmd.append(r"Import-PfxCertificate " r"-FilePath '{0}'".format(cached_source_path)) cmd.append(r" -CertStoreLocation '{0}'".format(store_path)) cmd.append(r" -Password $Password") if exportable: cmd.append(' -Exportable') else: cmd.append(r"Import-Certificate " r"-FilePath '{0}'".format(cached_source_path)) cmd.append(r" -CertStoreLocation '{0}'".format(store_path)) _cmd_run(cmd=six.text_type().join(cmd)) new_certs = get_certs(context=context, store=store) for new_cert in new_certs: if new_cert not in current_certs: thumbprint = new_cert if thumbprint: _LOG.debug('Certificate imported successfully: %s', name) return True _LOG.error('Unable to import certificate: %s', name) return False def export_cert(name, thumbprint, cert_format=_DEFAULT_FORMAT, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, password=''): ''' Export the certificate to a file from the given certificate store. :param str name: The destination path for the exported certificate file. :param str thumbprint: The thumbprint value of the target certificate. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_pki.export_cert name='C:\\certs\\example.cer' thumbprint='AAA000' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cert_format = cert_format.lower() _validate_cert_path(name=cert_path) _validate_cert_format(name=cert_format) if cert_format == 'pfx': # In instances where an empty password is needed, we use a # System.Security.SecureString object since ConvertTo-SecureString will # not convert an empty string. if password: cmd.append(r"$Password = ConvertTo-SecureString " r"-String '{0}'".format(password)) cmd.append(' -AsPlainText -Force; ') else: cmd.append('$Password = New-Object System.Security.SecureString; ') cmd.append(r"Export-PfxCertificate " r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name)) cmd.append(r" -Password $Password") else: cmd.append(r"Export-Certificate " r"-Cert '{0}' -FilePath '{1}'".format(cert_path, name)) cmd.append(r" | Out-Null; Test-Path -Path '{0}'".format(name)) ret = ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd))) if ret: _LOG.debug('Certificate exported successfully: %s', name) else: _LOG.error('Unable to export certificate: %s', name) return ret def test_cert(thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE, untrusted_root=False, dns_name='', eku=''): ''' Check the certificate for validity. :param str thumbprint: The thumbprint value of the target certificate. :param str context: The name of the certificate store location context. :param str store: The name of the certificate store. :param bool untrusted_root: Whether the root certificate is required to be trusted in chain building. :param str dns_name: The DNS name to verify as valid for the certificate. :param str eku: The enhanced key usage object identifiers to verify for the certificate chain. :return: A boolean representing whether the certificate was considered valid. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_pki.test_cert thumbprint='AAA000' dns_name='example.test' ''' cmd = list() thumbprint = thumbprint.upper() cert_path = r'Cert:\{0}\{1}\{2}'.format(context, store, thumbprint) cmd.append(r"Test-Certificate -Cert '{0}'".format(cert_path)) _validate_cert_path(name=cert_path) if untrusted_root: cmd.append(' -AllowUntrustedRoot') if dns_name: cmd.append(" -DnsName '{0}'".format(dns_name)) if eku: cmd.append(" -EKU '{0}'".format(eku)) cmd.append(' -ErrorAction SilentlyContinue') return ast.literal_eval(_cmd_run(cmd=six.text_type().join(cmd)))
saltstack/salt
salt/states/postgres_schema.py
present
python
def present(dbname, name, owner=None, user=None, db_user=None, db_password=None, db_host=None, db_port=None): ''' Ensure that the named schema is present in the database. dbname The database's name will work on name The name of the schema to manage user system user all operations should be performed on behalf of db_user database username if different from config or default db_password user password if any password for a specified user db_host Database host if different from config or default db_port Database port if different from config or default ''' ret = {'dbname': dbname, 'name': name, 'changes': {}, 'result': True, 'comment': 'Schema {0} is already present in ' 'database {1}'.format(name, dbname)} db_args = { 'db_user': db_user, 'db_password': db_password, 'db_host': db_host, 'db_port': db_port, 'user': user } # check if schema exists schema_attr = __salt__['postgres.schema_get'](dbname, name, **db_args) cret = None # The schema is not present, make it! if schema_attr is None: if __opts__['test']: ret['result'] = None ret['comment'] = 'Schema {0} is set to be created' \ ' in database {1}.'.format(name, dbname) return ret cret = __salt__['postgres.schema_create'](dbname, name, owner=owner, **db_args) else: msg = 'Schema {0} already exists in database {1}' cret = None if cret: msg = 'Schema {0} has been created in database {1}' ret['result'] = True ret['changes'][name] = 'Present' elif cret is not None: msg = 'Failed to create schema {0} in database {1}' ret['result'] = False else: msg = 'Schema {0} already exists in database {1}' ret['result'] = True ret['comment'] = msg.format(name, dbname) return ret
Ensure that the named schema is present in the database. dbname The database's name will work on name The name of the schema to manage user system user all operations should be performed on behalf of db_user database username if different from config or default db_password user password if any password for a specified user db_host Database host if different from config or default db_port Database port if different from config or default
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/postgres_schema.py#L31-L106
null
# -*- coding: utf-8 -*- ''' Management of PostgreSQL schemas ================================ The postgres_schemas module is used to create and manage Postgres schemas. .. code-block:: yaml public: postgres_schema.present 'dbname' 'name' ''' from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging log = logging.getLogger(__name__) def __virtual__(): ''' Only load if the postgres module is present ''' if 'postgres.schema_exists' not in __salt__: return (False, 'Unable to load postgres module. Make sure `postgres.bins_dir` is set.') return True def absent(dbname, name, user=None, db_user=None, db_password=None, db_host=None, db_port=None): ''' Ensure that the named schema is absent. dbname The database's name will work on name The name of the schema to remove user system user all operations should be performed on behalf of db_user database username if different from config or default db_password user password if any password for a specified user db_host Database host if different from config or default db_port Database port if different from config or default ''' ret = {'name': name, 'dbname': dbname, 'changes': {}, 'result': True, 'comment': ''} db_args = { 'db_user': db_user, 'db_password': db_password, 'db_host': db_host, 'db_port': db_port, 'user': user } # check if schema exists and remove it if __salt__['postgres.schema_exists'](dbname, name, **db_args): if __opts__['test']: ret['result'] = None ret['comment'] = 'Schema {0} is set to be removed' \ ' from database {1}'.format(name, dbname) return ret elif __salt__['postgres.schema_remove'](dbname, name, **db_args): ret['comment'] = 'Schema {0} has been removed' \ ' from database {1}'.format(name, dbname) ret['changes'][name] = 'Absent' return ret else: ret['result'] = False ret['comment'] = 'Schema {0} failed to be removed'.format(name) return ret else: ret['comment'] = 'Schema {0} is not present in database {1},' \ ' so it cannot be removed'.format(name, 
dbname) return ret
saltstack/salt
salt/returners/memcache_return.py
_get_serv
python
def _get_serv(ret): ''' Return a memcache server object ''' _options = _get_options(ret) host = _options.get('host') port = _options.get('port') log.debug('memcache server: %s:%s', host, port) if not host or not port: log.error('Host or port not defined in salt config') return # Combine host and port to conform syntax of python memcache client memcacheoptions = (host, port) return memcache.Client(['{0}:{1}'.format(*memcacheoptions)], debug=0)
Return a memcache server object
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/memcache_return.py#L93-L110
[ "def _get_options(ret=None):\n '''\n Get the memcache options from salt.\n '''\n attrs = {'host': 'host',\n 'port': 'port'}\n\n _options = salt.returners.get_returner_options(__virtualname__,\n ret,\n attrs,\n __salt__=__salt__,\n __opts__=__opts__)\n return _options\n" ]
# -*- coding: utf-8 -*- ''' Return data to a memcache server To enable this returner the minion will need the python client for memcache installed and the following values configured in the minion or master config, these are the defaults. .. code-block:: yaml memcache.host: 'localhost' memcache.port: '11211' Alternative configuration values can be used by prefacing the configuration. Any values not found in the alternative configuration will be pulled from the default location. .. code-block:: yaml alternative.memcache.host: 'localhost' alternative.memcache.port: '11211' python2-memcache uses 'localhost' and '11211' as syntax on connection. To use the memcache returner, append '--return memcache' to the salt command. .. code-block:: bash salt '*' test.ping --return memcache To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return memcache --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. code-block:: bash salt '*' test.ping --return memcache --return_kwargs '{"host": "hostname.domain.com"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import salt.utils.jid import salt.utils.json import salt.returners from salt.ext import six log = logging.getLogger(__name__) # Import third party libs try: import memcache HAS_MEMCACHE = True except ImportError: HAS_MEMCACHE = False # Define the module's virtual name __virtualname__ = 'memcache' def __virtual__(): if not HAS_MEMCACHE: return False, 'Could not import memcache returner; ' \ 'memcache python client is not installed.' return __virtualname__ def _get_options(ret=None): ''' Get the memcache options from salt. 
''' attrs = {'host': 'host', 'port': 'port'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__) return _options # # TODO: make memcacheoptions cluster aware # Servers can be passed in two forms: # 1. Strings of the form C{"host:port"}, which implies a default weight of 1 # 2. Tuples of the form C{("host:port", weight)}, where C{weight} is # an integer weight value. def _get_list(serv, key): value = serv.get(key) if value: return value.strip(',').split(',') return [] def _append_list(serv, key, value): if value in _get_list(serv, key): return r = serv.append(key, '{0},'.format(value)) if not r: serv.add(key, '{0},'.format(value)) def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def returner(ret): ''' Return data to a memcache data store ''' serv = _get_serv(ret) minion = ret['id'] jid = ret['jid'] fun = ret['fun'] rets = salt.utils.json.dumps(ret) serv.set('{0}:{1}'.format(jid, minion), rets) # cache for get_jid serv.set('{0}:{1}'.format(fun, minion), rets) # cache for get_fun # The following operations are neither efficient nor atomic. # If there is a way to make them so, this should be updated. 
_append_list(serv, 'minions', minion) _append_list(serv, 'jids', jid) def save_load(jid, load, minions=None): ''' Save the load to the specified jid ''' serv = _get_serv(ret=None) serv.set(jid, salt.utils.json.dumps(load)) _append_list(serv, 'jids', jid) def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' serv = _get_serv(ret=None) data = serv.get(jid) if data: return salt.utils.json.loads(data) return {} def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' serv = _get_serv(ret=None) minions = _get_list(serv, 'minions') returns = serv.get_multi(minions, key_prefix='{0}:'.format(jid)) # returns = {minion: return, minion: return, ...} ret = {} for minion, data in six.iteritems(returns): ret[minion] = salt.utils.json.loads(data) return ret def get_fun(fun): ''' Return a dict of the last function called for all minions ''' serv = _get_serv(ret=None) minions = _get_list(serv, 'minions') returns = serv.get_multi(minions, key_prefix='{0}:'.format(fun)) # returns = {minion: return, minion: return, ...} ret = {} for minion, data in six.iteritems(returns): ret[minion] = salt.utils.json.loads(data) return ret def get_jids(): ''' Return a list of all job ids ''' serv = _get_serv(ret=None) jids = _get_list(serv, 'jids') loads = serv.get_multi(jids) # {jid: load, jid: load, ...} ret = {} for jid, load in six.iteritems(loads): ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load)) return ret def get_minions(): ''' Return a list of minions ''' serv = _get_serv(ret=None) return _get_list(serv, 'minions')
saltstack/salt
salt/returners/memcache_return.py
returner
python
def returner(ret): ''' Return data to a memcache data store ''' serv = _get_serv(ret) minion = ret['id'] jid = ret['jid'] fun = ret['fun'] rets = salt.utils.json.dumps(ret) serv.set('{0}:{1}'.format(jid, minion), rets) # cache for get_jid serv.set('{0}:{1}'.format(fun, minion), rets) # cache for get_fun # The following operations are neither efficient nor atomic. # If there is a way to make them so, this should be updated. _append_list(serv, 'minions', minion) _append_list(serv, 'jids', jid)
Return data to a memcache data store
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/memcache_return.py#L140-L155
[ "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n", "def _get_serv(ret):\n '''\n Return a memcache server object\n '''\n\n _options = _get_options(ret)\n host = _options.get('host')\n port = _options.get('port')\n\n log.debug('memcache server: %s:%s', host, port)\n if not host or not port:\n log.error('Host or port not defined in salt config')\n return\n\n # Combine host and port to conform syntax of python memcache client\n memcacheoptions = (host, port)\n\n return memcache.Client(['{0}:{1}'.format(*memcacheoptions)], debug=0)\n", "def _append_list(serv, key, value):\n if value in _get_list(serv, key):\n return\n r = serv.append(key, '{0},'.format(value))\n if not r:\n serv.add(key, '{0},'.format(value))\n" ]
# -*- coding: utf-8 -*- ''' Return data to a memcache server To enable this returner the minion will need the python client for memcache installed and the following values configured in the minion or master config, these are the defaults. .. code-block:: yaml memcache.host: 'localhost' memcache.port: '11211' Alternative configuration values can be used by prefacing the configuration. Any values not found in the alternative configuration will be pulled from the default location. .. code-block:: yaml alternative.memcache.host: 'localhost' alternative.memcache.port: '11211' python2-memcache uses 'localhost' and '11211' as syntax on connection. To use the memcache returner, append '--return memcache' to the salt command. .. code-block:: bash salt '*' test.ping --return memcache To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return memcache --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. code-block:: bash salt '*' test.ping --return memcache --return_kwargs '{"host": "hostname.domain.com"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import salt.utils.jid import salt.utils.json import salt.returners from salt.ext import six log = logging.getLogger(__name__) # Import third party libs try: import memcache HAS_MEMCACHE = True except ImportError: HAS_MEMCACHE = False # Define the module's virtual name __virtualname__ = 'memcache' def __virtual__(): if not HAS_MEMCACHE: return False, 'Could not import memcache returner; ' \ 'memcache python client is not installed.' return __virtualname__ def _get_options(ret=None): ''' Get the memcache options from salt. 
''' attrs = {'host': 'host', 'port': 'port'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__) return _options def _get_serv(ret): ''' Return a memcache server object ''' _options = _get_options(ret) host = _options.get('host') port = _options.get('port') log.debug('memcache server: %s:%s', host, port) if not host or not port: log.error('Host or port not defined in salt config') return # Combine host and port to conform syntax of python memcache client memcacheoptions = (host, port) return memcache.Client(['{0}:{1}'.format(*memcacheoptions)], debug=0) # # TODO: make memcacheoptions cluster aware # Servers can be passed in two forms: # 1. Strings of the form C{"host:port"}, which implies a default weight of 1 # 2. Tuples of the form C{("host:port", weight)}, where C{weight} is # an integer weight value. def _get_list(serv, key): value = serv.get(key) if value: return value.strip(',').split(',') return [] def _append_list(serv, key, value): if value in _get_list(serv, key): return r = serv.append(key, '{0},'.format(value)) if not r: serv.add(key, '{0},'.format(value)) def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def save_load(jid, load, minions=None): ''' Save the load to the specified jid ''' serv = _get_serv(ret=None) serv.set(jid, salt.utils.json.dumps(load)) _append_list(serv, 'jids', jid) def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' serv = _get_serv(ret=None) data = serv.get(jid) if data: return salt.utils.json.loads(data) return {} def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' serv = _get_serv(ret=None) 
minions = _get_list(serv, 'minions') returns = serv.get_multi(minions, key_prefix='{0}:'.format(jid)) # returns = {minion: return, minion: return, ...} ret = {} for minion, data in six.iteritems(returns): ret[minion] = salt.utils.json.loads(data) return ret def get_fun(fun): ''' Return a dict of the last function called for all minions ''' serv = _get_serv(ret=None) minions = _get_list(serv, 'minions') returns = serv.get_multi(minions, key_prefix='{0}:'.format(fun)) # returns = {minion: return, minion: return, ...} ret = {} for minion, data in six.iteritems(returns): ret[minion] = salt.utils.json.loads(data) return ret def get_jids(): ''' Return a list of all job ids ''' serv = _get_serv(ret=None) jids = _get_list(serv, 'jids') loads = serv.get_multi(jids) # {jid: load, jid: load, ...} ret = {} for jid, load in six.iteritems(loads): ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load)) return ret def get_minions(): ''' Return a list of minions ''' serv = _get_serv(ret=None) return _get_list(serv, 'minions')
saltstack/salt
salt/returners/memcache_return.py
save_load
python
def save_load(jid, load, minions=None): ''' Save the load to the specified jid ''' serv = _get_serv(ret=None) serv.set(jid, salt.utils.json.dumps(load)) _append_list(serv, 'jids', jid)
Save the load to the specified jid
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/memcache_return.py#L158-L164
[ "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n", "def _get_serv(ret):\n '''\n Return a memcache server object\n '''\n\n _options = _get_options(ret)\n host = _options.get('host')\n port = _options.get('port')\n\n log.debug('memcache server: %s:%s', host, port)\n if not host or not port:\n log.error('Host or port not defined in salt config')\n return\n\n # Combine host and port to conform syntax of python memcache client\n memcacheoptions = (host, port)\n\n return memcache.Client(['{0}:{1}'.format(*memcacheoptions)], debug=0)\n", "def _append_list(serv, key, value):\n if value in _get_list(serv, key):\n return\n r = serv.append(key, '{0},'.format(value))\n if not r:\n serv.add(key, '{0},'.format(value))\n" ]
# -*- coding: utf-8 -*- ''' Return data to a memcache server To enable this returner the minion will need the python client for memcache installed and the following values configured in the minion or master config, these are the defaults. .. code-block:: yaml memcache.host: 'localhost' memcache.port: '11211' Alternative configuration values can be used by prefacing the configuration. Any values not found in the alternative configuration will be pulled from the default location. .. code-block:: yaml alternative.memcache.host: 'localhost' alternative.memcache.port: '11211' python2-memcache uses 'localhost' and '11211' as syntax on connection. To use the memcache returner, append '--return memcache' to the salt command. .. code-block:: bash salt '*' test.ping --return memcache To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return memcache --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. code-block:: bash salt '*' test.ping --return memcache --return_kwargs '{"host": "hostname.domain.com"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import salt.utils.jid import salt.utils.json import salt.returners from salt.ext import six log = logging.getLogger(__name__) # Import third party libs try: import memcache HAS_MEMCACHE = True except ImportError: HAS_MEMCACHE = False # Define the module's virtual name __virtualname__ = 'memcache' def __virtual__(): if not HAS_MEMCACHE: return False, 'Could not import memcache returner; ' \ 'memcache python client is not installed.' return __virtualname__ def _get_options(ret=None): ''' Get the memcache options from salt. 
''' attrs = {'host': 'host', 'port': 'port'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__) return _options def _get_serv(ret): ''' Return a memcache server object ''' _options = _get_options(ret) host = _options.get('host') port = _options.get('port') log.debug('memcache server: %s:%s', host, port) if not host or not port: log.error('Host or port not defined in salt config') return # Combine host and port to conform syntax of python memcache client memcacheoptions = (host, port) return memcache.Client(['{0}:{1}'.format(*memcacheoptions)], debug=0) # # TODO: make memcacheoptions cluster aware # Servers can be passed in two forms: # 1. Strings of the form C{"host:port"}, which implies a default weight of 1 # 2. Tuples of the form C{("host:port", weight)}, where C{weight} is # an integer weight value. def _get_list(serv, key): value = serv.get(key) if value: return value.strip(',').split(',') return [] def _append_list(serv, key, value): if value in _get_list(serv, key): return r = serv.append(key, '{0},'.format(value)) if not r: serv.add(key, '{0},'.format(value)) def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def returner(ret): ''' Return data to a memcache data store ''' serv = _get_serv(ret) minion = ret['id'] jid = ret['jid'] fun = ret['fun'] rets = salt.utils.json.dumps(ret) serv.set('{0}:{1}'.format(jid, minion), rets) # cache for get_jid serv.set('{0}:{1}'.format(fun, minion), rets) # cache for get_fun # The following operations are neither efficient nor atomic. # If there is a way to make them so, this should be updated. 
_append_list(serv, 'minions', minion) _append_list(serv, 'jids', jid) def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' serv = _get_serv(ret=None) data = serv.get(jid) if data: return salt.utils.json.loads(data) return {} def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' serv = _get_serv(ret=None) minions = _get_list(serv, 'minions') returns = serv.get_multi(minions, key_prefix='{0}:'.format(jid)) # returns = {minion: return, minion: return, ...} ret = {} for minion, data in six.iteritems(returns): ret[minion] = salt.utils.json.loads(data) return ret def get_fun(fun): ''' Return a dict of the last function called for all minions ''' serv = _get_serv(ret=None) minions = _get_list(serv, 'minions') returns = serv.get_multi(minions, key_prefix='{0}:'.format(fun)) # returns = {minion: return, minion: return, ...} ret = {} for minion, data in six.iteritems(returns): ret[minion] = salt.utils.json.loads(data) return ret def get_jids(): ''' Return a list of all job ids ''' serv = _get_serv(ret=None) jids = _get_list(serv, 'jids') loads = serv.get_multi(jids) # {jid: load, jid: load, ...} ret = {} for jid, load in six.iteritems(loads): ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load)) return ret def get_minions(): ''' Return a list of minions ''' serv = _get_serv(ret=None) return _get_list(serv, 'minions')
saltstack/salt
salt/returners/memcache_return.py
get_fun
python
def get_fun(fun): ''' Return a dict of the last function called for all minions ''' serv = _get_serv(ret=None) minions = _get_list(serv, 'minions') returns = serv.get_multi(minions, key_prefix='{0}:'.format(fun)) # returns = {minion: return, minion: return, ...} ret = {} for minion, data in six.iteritems(returns): ret[minion] = salt.utils.json.loads(data) return ret
Return a dict of the last function called for all minions
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/memcache_return.py#L199-L210
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n", "def _get_serv(ret):\n '''\n Return a memcache server object\n '''\n\n _options = _get_options(ret)\n host = _options.get('host')\n port = _options.get('port')\n\n log.debug('memcache server: %s:%s', host, port)\n if not host or not port:\n log.error('Host or port not defined in salt config')\n return\n\n # Combine host and port to conform syntax of python memcache client\n memcacheoptions = (host, port)\n\n return memcache.Client(['{0}:{1}'.format(*memcacheoptions)], debug=0)\n", "def _get_list(serv, key):\n value = serv.get(key)\n if value:\n return value.strip(',').split(',')\n return []\n" ]
# -*- coding: utf-8 -*- ''' Return data to a memcache server To enable this returner the minion will need the python client for memcache installed and the following values configured in the minion or master config, these are the defaults. .. code-block:: yaml memcache.host: 'localhost' memcache.port: '11211' Alternative configuration values can be used by prefacing the configuration. Any values not found in the alternative configuration will be pulled from the default location. .. code-block:: yaml alternative.memcache.host: 'localhost' alternative.memcache.port: '11211' python2-memcache uses 'localhost' and '11211' as syntax on connection. To use the memcache returner, append '--return memcache' to the salt command. .. code-block:: bash salt '*' test.ping --return memcache To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return memcache --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. code-block:: bash salt '*' test.ping --return memcache --return_kwargs '{"host": "hostname.domain.com"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import salt.utils.jid import salt.utils.json import salt.returners from salt.ext import six log = logging.getLogger(__name__) # Import third party libs try: import memcache HAS_MEMCACHE = True except ImportError: HAS_MEMCACHE = False # Define the module's virtual name __virtualname__ = 'memcache' def __virtual__(): if not HAS_MEMCACHE: return False, 'Could not import memcache returner; ' \ 'memcache python client is not installed.' return __virtualname__ def _get_options(ret=None): ''' Get the memcache options from salt. 
''' attrs = {'host': 'host', 'port': 'port'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__) return _options def _get_serv(ret): ''' Return a memcache server object ''' _options = _get_options(ret) host = _options.get('host') port = _options.get('port') log.debug('memcache server: %s:%s', host, port) if not host or not port: log.error('Host or port not defined in salt config') return # Combine host and port to conform syntax of python memcache client memcacheoptions = (host, port) return memcache.Client(['{0}:{1}'.format(*memcacheoptions)], debug=0) # # TODO: make memcacheoptions cluster aware # Servers can be passed in two forms: # 1. Strings of the form C{"host:port"}, which implies a default weight of 1 # 2. Tuples of the form C{("host:port", weight)}, where C{weight} is # an integer weight value. def _get_list(serv, key): value = serv.get(key) if value: return value.strip(',').split(',') return [] def _append_list(serv, key, value): if value in _get_list(serv, key): return r = serv.append(key, '{0},'.format(value)) if not r: serv.add(key, '{0},'.format(value)) def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def returner(ret): ''' Return data to a memcache data store ''' serv = _get_serv(ret) minion = ret['id'] jid = ret['jid'] fun = ret['fun'] rets = salt.utils.json.dumps(ret) serv.set('{0}:{1}'.format(jid, minion), rets) # cache for get_jid serv.set('{0}:{1}'.format(fun, minion), rets) # cache for get_fun # The following operations are neither efficient nor atomic. # If there is a way to make them so, this should be updated. 
_append_list(serv, 'minions', minion) _append_list(serv, 'jids', jid) def save_load(jid, load, minions=None): ''' Save the load to the specified jid ''' serv = _get_serv(ret=None) serv.set(jid, salt.utils.json.dumps(load)) _append_list(serv, 'jids', jid) def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' serv = _get_serv(ret=None) data = serv.get(jid) if data: return salt.utils.json.loads(data) return {} def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' serv = _get_serv(ret=None) minions = _get_list(serv, 'minions') returns = serv.get_multi(minions, key_prefix='{0}:'.format(jid)) # returns = {minion: return, minion: return, ...} ret = {} for minion, data in six.iteritems(returns): ret[minion] = salt.utils.json.loads(data) return ret def get_jids(): ''' Return a list of all job ids ''' serv = _get_serv(ret=None) jids = _get_list(serv, 'jids') loads = serv.get_multi(jids) # {jid: load, jid: load, ...} ret = {} for jid, load in six.iteritems(loads): ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load)) return ret def get_minions(): ''' Return a list of minions ''' serv = _get_serv(ret=None) return _get_list(serv, 'minions')
saltstack/salt
salt/returners/memcache_return.py
get_jids
python
def get_jids(): ''' Return a list of all job ids ''' serv = _get_serv(ret=None) jids = _get_list(serv, 'jids') loads = serv.get_multi(jids) # {jid: load, jid: load, ...} ret = {} for jid, load in six.iteritems(loads): ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load)) return ret
Return a list of all job ids
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/memcache_return.py#L213-L223
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n", "def format_jid_instance(jid, job):\n '''\n Format the jid correctly\n '''\n ret = format_job_instance(job)\n ret.update({'StartTime': jid_to_time(jid)})\n return ret\n", "def _get_serv(ret):\n '''\n Return a memcache server object\n '''\n\n _options = _get_options(ret)\n host = _options.get('host')\n port = _options.get('port')\n\n log.debug('memcache server: %s:%s', host, port)\n if not host or not port:\n log.error('Host or port not defined in salt config')\n return\n\n # Combine host and port to conform syntax of python memcache client\n memcacheoptions = (host, port)\n\n return memcache.Client(['{0}:{1}'.format(*memcacheoptions)], debug=0)\n", "def _get_list(serv, key):\n value = serv.get(key)\n if value:\n return value.strip(',').split(',')\n return []\n" ]
# -*- coding: utf-8 -*- ''' Return data to a memcache server To enable this returner the minion will need the python client for memcache installed and the following values configured in the minion or master config, these are the defaults. .. code-block:: yaml memcache.host: 'localhost' memcache.port: '11211' Alternative configuration values can be used by prefacing the configuration. Any values not found in the alternative configuration will be pulled from the default location. .. code-block:: yaml alternative.memcache.host: 'localhost' alternative.memcache.port: '11211' python2-memcache uses 'localhost' and '11211' as syntax on connection. To use the memcache returner, append '--return memcache' to the salt command. .. code-block:: bash salt '*' test.ping --return memcache To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return memcache --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. code-block:: bash salt '*' test.ping --return memcache --return_kwargs '{"host": "hostname.domain.com"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import salt.utils.jid import salt.utils.json import salt.returners from salt.ext import six log = logging.getLogger(__name__) # Import third party libs try: import memcache HAS_MEMCACHE = True except ImportError: HAS_MEMCACHE = False # Define the module's virtual name __virtualname__ = 'memcache' def __virtual__(): if not HAS_MEMCACHE: return False, 'Could not import memcache returner; ' \ 'memcache python client is not installed.' return __virtualname__ def _get_options(ret=None): ''' Get the memcache options from salt. 
''' attrs = {'host': 'host', 'port': 'port'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__) return _options def _get_serv(ret): ''' Return a memcache server object ''' _options = _get_options(ret) host = _options.get('host') port = _options.get('port') log.debug('memcache server: %s:%s', host, port) if not host or not port: log.error('Host or port not defined in salt config') return # Combine host and port to conform syntax of python memcache client memcacheoptions = (host, port) return memcache.Client(['{0}:{1}'.format(*memcacheoptions)], debug=0) # # TODO: make memcacheoptions cluster aware # Servers can be passed in two forms: # 1. Strings of the form C{"host:port"}, which implies a default weight of 1 # 2. Tuples of the form C{("host:port", weight)}, where C{weight} is # an integer weight value. def _get_list(serv, key): value = serv.get(key) if value: return value.strip(',').split(',') return [] def _append_list(serv, key, value): if value in _get_list(serv, key): return r = serv.append(key, '{0},'.format(value)) if not r: serv.add(key, '{0},'.format(value)) def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def returner(ret): ''' Return data to a memcache data store ''' serv = _get_serv(ret) minion = ret['id'] jid = ret['jid'] fun = ret['fun'] rets = salt.utils.json.dumps(ret) serv.set('{0}:{1}'.format(jid, minion), rets) # cache for get_jid serv.set('{0}:{1}'.format(fun, minion), rets) # cache for get_fun # The following operations are neither efficient nor atomic. # If there is a way to make them so, this should be updated. 
_append_list(serv, 'minions', minion) _append_list(serv, 'jids', jid) def save_load(jid, load, minions=None): ''' Save the load to the specified jid ''' serv = _get_serv(ret=None) serv.set(jid, salt.utils.json.dumps(load)) _append_list(serv, 'jids', jid) def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' serv = _get_serv(ret=None) data = serv.get(jid) if data: return salt.utils.json.loads(data) return {} def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' serv = _get_serv(ret=None) minions = _get_list(serv, 'minions') returns = serv.get_multi(minions, key_prefix='{0}:'.format(jid)) # returns = {minion: return, minion: return, ...} ret = {} for minion, data in six.iteritems(returns): ret[minion] = salt.utils.json.loads(data) return ret def get_fun(fun): ''' Return a dict of the last function called for all minions ''' serv = _get_serv(ret=None) minions = _get_list(serv, 'minions') returns = serv.get_multi(minions, key_prefix='{0}:'.format(fun)) # returns = {minion: return, minion: return, ...} ret = {} for minion, data in six.iteritems(returns): ret[minion] = salt.utils.json.loads(data) return ret def get_minions(): ''' Return a list of minions ''' serv = _get_serv(ret=None) return _get_list(serv, 'minions')
saltstack/salt
salt/states/boto_elb.py
present
python
def present(name, listeners, availability_zones=None, subnets=None, subnet_names=None, security_groups=None, scheme='internet-facing', health_check=None, attributes=None, attributes_from_pillar="boto_elb_attributes", cnames=None, alarms=None, alarms_from_pillar="boto_elb_alarms", policies=None, policies_from_pillar="boto_elb_policies", backends=None, region=None, key=None, keyid=None, profile=None, wait_for_sync=True, tags=None, instance_ids=None, instance_names=None): ''' Ensure the ELB exists. name Name of the ELB. availability_zones A list of availability zones for this ELB. listeners A list of listener lists; example:: [ ['443', 'HTTPS', 'arn:aws:iam::1111111:server-certificate/mycert'], ['8443', '80', 'HTTPS', 'HTTP', 'arn:aws:iam::1111111:server-certificate/mycert'] ] subnets A list of subnet IDs in your VPC to attach to your LoadBalancer. subnet_names A list of subnet names in your VPC to attach to your LoadBalancer. security_groups The security groups assigned to your LoadBalancer within your VPC. Must be passed either as a list or a comma-separated string. For example, a list: .. code-block:: yaml - security_groups: - secgroup-one - secgroup-two Or as a comma-separated string: .. code-block:: yaml - security_groups: secgroup-one,secgroup-two scheme The type of a LoadBalancer, ``internet-facing`` or ``internal``. Once set, can not be modified. health_check A dict defining the health check for this ELB. attributes A dict defining the attributes to set on this ELB. Unknown keys will be silently ignored. See the :mod:`salt.modules.boto_elb.set_attributes` function for recognized attributes. attributes_from_pillar name of pillar dict that contains attributes. Attributes defined for this specific state will override those from pillar. cnames A list of cname dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. 
See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. the cnames dict will be passed to the state as kwargs. See the :mod:`salt.states.boto_route53` state for information about these attributes. alarms: a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ELB. All attributes should be specified except for dimension which will be automatically set to this ELB. See the :mod:`salt.states.boto_cloudwatch_alarm` state for information about these attributes. alarms_from_pillar: name of pillar dict that contains alarm settings. Alarms defined for this specific state will override those from pillar. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. wait_for_sync Wait for an INSYNC change status from Route53. tags dict of tags instance_ids list of instance ids. The state will ensure that these, and ONLY these, instances are registered with the ELB. This is additive with instance_names. instance_names list of instance names. The state will ensure that these, and ONLY these, instances are registered with the ELB. This is additive with instance_ids. 
''' # load data from attributes_from_pillar and merge with attributes tmp = __salt__['config.option'](attributes_from_pillar, {}) attributes = salt.utils.dictupdate.update(tmp, attributes) if attributes else tmp ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not isinstance(security_groups, (six.string_types, list, type(None))): msg = ("The 'security_group' parameter must be either a list or a " "comma-separated string.") log.error(msg) ret.update({'comment': msg, 'result': False}) return ret if isinstance(security_groups, six.string_types): security_groups = security_groups.split(',') _ret = _elb_present(name, availability_zones, listeners, subnets, subnet_names, security_groups, scheme, region, key, keyid, profile) ret.update({'changes': _ret['changes'], 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not exists and __opts__['test']: return ret if attributes: _ret = _attributes_present(name, attributes, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _health_check_present(name, health_check, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret if cnames: lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if lb: for cname in cnames: _ret = None dns_provider = 'boto_route53' cname.update({'record_type': 'CNAME', 'value': lb['dns_name']}) if 'provider' in cname: dns_provider = 
cname.pop('provider') if dns_provider == 'boto_route53': for p in ('profile', 'key', 'keyid', 'region', 'wait_for_sync'): cname[p] = locals().get(p) if p not in cname else cname[p] _ret = __states__['boto_route53.present'](**cname) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _policies_present(name, policies, policies_from_pillar, listeners, backends, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _tags_present(name, tags, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret if not instance_ids: instance_ids = [] if instance_names: # AWS borks on adding instances in "non-running" states, so filter 'em out. running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') for n in instance_names: instance_ids += __salt__['boto_ec2.find_instances']( name=n, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) # Backwards compat: Only touch attached instances if requested (e.g. if some are defined). 
if instance_ids: if __opts__['test']: if __salt__['boto_elb.set_instances']( name, instance_ids, True, region, key, keyid, profile): ret['comment'] += ' ELB {0} instances would be updated.'.format(name) ret['result'] = None else: success = __salt__['boto_elb.set_instances']( name, instance_ids, False, region, key, keyid, profile) if not success: ret['comment'] += "Failed to set requested instances." ret['result'] = False return ret
Ensure the ELB exists. name Name of the ELB. availability_zones A list of availability zones for this ELB. listeners A list of listener lists; example:: [ ['443', 'HTTPS', 'arn:aws:iam::1111111:server-certificate/mycert'], ['8443', '80', 'HTTPS', 'HTTP', 'arn:aws:iam::1111111:server-certificate/mycert'] ] subnets A list of subnet IDs in your VPC to attach to your LoadBalancer. subnet_names A list of subnet names in your VPC to attach to your LoadBalancer. security_groups The security groups assigned to your LoadBalancer within your VPC. Must be passed either as a list or a comma-separated string. For example, a list: .. code-block:: yaml - security_groups: - secgroup-one - secgroup-two Or as a comma-separated string: .. code-block:: yaml - security_groups: secgroup-one,secgroup-two scheme The type of a LoadBalancer, ``internet-facing`` or ``internal``. Once set, can not be modified. health_check A dict defining the health check for this ELB. attributes A dict defining the attributes to set on this ELB. Unknown keys will be silently ignored. See the :mod:`salt.modules.boto_elb.set_attributes` function for recognized attributes. attributes_from_pillar name of pillar dict that contains attributes. Attributes defined for this specific state will override those from pillar. cnames A list of cname dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. the cnames dict will be passed to the state as kwargs. See the :mod:`salt.states.boto_route53` state for information about these attributes. alarms: a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ELB. All attributes should be specified except for dimension which will be automatically set to this ELB. 
See the :mod:`salt.states.boto_cloudwatch_alarm` state for information about these attributes. alarms_from_pillar: name of pillar dict that contains alarm settings. Alarms defined for this specific state will override those from pillar. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. wait_for_sync Wait for an INSYNC change status from Route53. tags dict of tags instance_ids list of instance ids. The state will ensure that these, and ONLY these, instances are registered with the ELB. This is additive with instance_names. instance_names list of instance names. The state will ensure that these, and ONLY these, instances are registered with the ELB. This is additive with instance_ids.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_elb.py#L261-L485
[ "def update(dest, upd, recursive_update=True, merge_lists=False):\n '''\n Recursive version of the default dict.update\n\n Merges upd recursively into dest\n\n If recursive_update=False, will use the classic dict.update, or fall back\n on a manual merge (helpful for non-dict types like FunctionWrapper)\n\n If merge_lists=True, will aggregate list object types instead of replace.\n The list in ``upd`` is added to the list in ``dest``, so the resulting list\n is ``dest[key] + upd[key]``. This behavior is only activated when\n recursive_update=True. By default merge_lists=False.\n\n .. versionchanged: 2016.11.6\n When merging lists, duplicate values are removed. Values already\n present in the ``dest`` list are not added from the ``upd`` list.\n '''\n if (not isinstance(dest, Mapping)) \\\n or (not isinstance(upd, Mapping)):\n raise TypeError('Cannot update using non-dict types in dictupdate.update()')\n updkeys = list(upd.keys())\n if not set(list(dest.keys())) & set(updkeys):\n recursive_update = False\n if recursive_update:\n for key in updkeys:\n val = upd[key]\n try:\n dest_subkey = dest.get(key, None)\n except AttributeError:\n dest_subkey = None\n if isinstance(dest_subkey, Mapping) \\\n and isinstance(val, Mapping):\n ret = update(dest_subkey, val, merge_lists=merge_lists)\n dest[key] = ret\n elif isinstance(dest_subkey, list) and isinstance(val, list):\n if merge_lists:\n merged = copy.deepcopy(dest_subkey)\n merged.extend([x for x in val if x not in merged])\n dest[key] = merged\n else:\n dest[key] = upd[key]\n else:\n dest[key] = upd[key]\n return dest\n try:\n for k in upd:\n dest[k] = upd[k]\n except AttributeError:\n # this mapping is not a dict\n for k in upd:\n dest[k] = upd[k]\n return dest\n", "def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile):\n '''helper method for present. 
ensure that cloudwatch_alarms are set'''\n current = __salt__['config.option'](alarms_from_pillar, {})\n if alarms:\n current = salt.utils.dictupdate.update(current, alarms)\n ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}\n for _, info in six.iteritems(current):\n info[\"name\"] = name + \" \" + info[\"name\"]\n info[\"attributes\"][\"description\"] = name + \" \" + info[\"attributes\"][\"description\"]\n info[\"attributes\"][\"dimensions\"] = {\"LoadBalancerName\": [name]}\n kwargs = {\n \"name\": info[\"name\"],\n \"attributes\": info[\"attributes\"],\n \"region\": region,\n \"key\": key,\n \"keyid\": keyid,\n \"profile\": profile,\n }\n # No test=False cluase needed since the state handles that itself...\n results = __states__['boto_cloudwatch_alarm.present'](**kwargs)\n if not results.get('result'):\n ret[\"result\"] = results[\"result\"]\n if results.get(\"changes\", {}) != {}:\n ret[\"changes\"][info[\"name\"]] = results[\"changes\"]\n if \"comment\" in results:\n ret[\"comment\"] += results[\"comment\"]\n return ret\n", "def _elb_present(name, availability_zones, listeners, subnets, subnet_names,\n security_groups, scheme, region, key, keyid, profile):\n ret = {'result': True, 'comment': '', 'changes': {}}\n if not salt.utils.data.exactly_one((availability_zones, subnets, subnet_names)):\n raise SaltInvocationError('Exactly one of availability_zones, subnets, '\n 'subnet_names must be provided as arguments.')\n if not listeners:\n listeners = []\n for listener in listeners:\n if len(listener) < 3:\n raise SaltInvocationError('Listeners must have at minimum port,'\n ' instance_port and protocol values in'\n ' the provided list.')\n if 'elb_port' not in listener:\n raise SaltInvocationError('elb_port is a required value for'\n ' listeners.')\n if 'instance_port' not in listener:\n raise SaltInvocationError('instance_port is a required value for'\n ' listeners.')\n if 'elb_protocol' not in listener:\n raise SaltInvocationError('elb_protocol 
is a required value for'\n ' listeners.')\n listener['elb_protocol'] = listener['elb_protocol'].upper()\n if listener['elb_protocol'] == 'HTTPS' and 'certificate' not in listener:\n raise SaltInvocationError('certificate is a required value for'\n ' listeners if HTTPS is set for'\n ' elb_protocol.')\n\n # best attempt at principle of least surprise here:\n # only use the default pillar in cases where we don't explicitly\n # define policies OR policies_from_pillar on a listener\n policies = listener.setdefault('policies', [])\n policies_pillar = listener.get('policies_from_pillar', None)\n if not policies and policies_pillar is None:\n policies_pillar = DEFAULT_PILLAR_LISTENER_POLICY_KEY\n if policies_pillar:\n policies += __salt__['pillar.get'](policies_pillar, {}).get(listener['elb_protocol'], [])\n\n # Look up subnet ids from names if provided\n if subnet_names:\n subnets = []\n for i in subnet_names:\n r = __salt__['boto_vpc.get_resource_id']('subnet', name=i, region=region,\n key=key, keyid=keyid, profile=profile)\n if 'error' in r:\n ret['comment'] = 'Error looking up subnet ids: {0}'.format(r['error'])\n ret['result'] = False\n return ret\n if 'id' not in r:\n ret['comment'] = 'Subnet {0} does not exist.'.format(i)\n ret['result'] = False\n return ret\n subnets.append(r['id'])\n\n _security_groups = None\n if subnets:\n vpc_id = __salt__['boto_vpc.get_subnet_association'](subnets, region, key, keyid, profile)\n vpc_id = vpc_id.get('vpc_id')\n if not vpc_id:\n ret['comment'] = 'Subnets {0} do not map to a valid vpc id.'.format(subnets)\n ret['result'] = False\n return ret\n _security_groups = __salt__['boto_secgroup.convert_to_group_ids'](\n security_groups, vpc_id=vpc_id, region=region, key=key,\n keyid=keyid, profile=profile\n )\n if not _security_groups:\n ret['comment'] = 'Security groups {0} do not map to valid security group ids.'.format(security_groups)\n ret['result'] = False\n return ret\n exists = __salt__['boto_elb.exists'](name, region, key, keyid, 
profile)\n if not exists:\n if __opts__['test']:\n ret['comment'] = 'ELB {0} is set to be created.'.format(name)\n ret['result'] = None\n return ret\n created = __salt__['boto_elb.create'](name=name,\n availability_zones=availability_zones,\n listeners=listeners, subnets=subnets,\n security_groups=_security_groups,\n scheme=scheme, region=region, key=key,\n keyid=keyid, profile=profile)\n if created:\n ret['changes']['old'] = {'elb': None}\n ret['changes']['new'] = {'elb': name}\n ret['comment'] = 'ELB {0} created.'.format(name)\n else:\n ret['result'] = False\n ret['comment'] = 'Failed to create {0} ELB.'.format(name)\n else:\n ret['comment'] = 'ELB {0} present.'.format(name)\n _ret = _security_groups_present(name, _security_groups, region, key, keyid, profile)\n ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes'])\n ret['comment'] = ' '.join([ret['comment'], _ret['comment']])\n if not _ret['result']:\n ret['result'] = _ret['result']\n if ret['result'] is False:\n return ret\n _ret = _listeners_present(name, listeners, region, key, keyid, profile)\n ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes'])\n ret['comment'] = ' '.join([ret['comment'], _ret['comment']])\n if not _ret['result']:\n ret['result'] = _ret['result']\n if ret['result'] is False:\n return ret\n if availability_zones:\n _ret = _zones_present(name, availability_zones, region, key, keyid, profile)\n ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes'])\n ret['comment'] = ' '.join([ret['comment'], _ret['comment']])\n if not _ret['result']:\n ret['result'] = _ret['result']\n if ret['result'] is False:\n return ret\n elif subnets:\n _ret = _subnets_present(name, subnets, region, key, keyid, profile)\n ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes'])\n ret['comment'] = ' '.join([ret['comment'], _ret['comment']])\n if not _ret['result']:\n ret['result'] = _ret['result']\n return ret\n", "def 
_attributes_present(name, attributes, region, key, keyid, profile):\n ret = {'result': True, 'comment': '', 'changes': {}}\n _attributes = __salt__['boto_elb.get_attributes'](name, region, key, keyid,\n profile)\n if not _attributes:\n ret['result'] = False\n ret['comment'] = 'Failed to retrieve attributes for ELB {0}.'.format(name)\n return ret\n attrs_to_set = []\n if 'cross_zone_load_balancing' in attributes:\n czlb = attributes['cross_zone_load_balancing']\n _czlb = _attributes['cross_zone_load_balancing']\n if czlb['enabled'] != _czlb['enabled']:\n attrs_to_set.append('cross_zone_load_balancing')\n if 'connection_draining' in attributes:\n cd = attributes['connection_draining']\n _cd = _attributes['connection_draining']\n if (cd['enabled'] != _cd['enabled']\n or cd.get('timeout', 300) != _cd.get('timeout')):\n attrs_to_set.append('connection_draining')\n if 'connecting_settings' in attributes:\n cs = attributes['connecting_settings']\n _cs = _attributes['connecting_settings']\n if cs['idle_timeout'] != _cs['idle_timeout']:\n attrs_to_set.append('connecting_settings')\n if 'access_log' in attributes:\n for attr, val in six.iteritems(attributes['access_log']):\n if six.text_type(_attributes['access_log'][attr]) != six.text_type(val):\n attrs_to_set.append('access_log')\n if 's3_bucket_prefix' in attributes['access_log']:\n sbp = attributes['access_log']['s3_bucket_prefix']\n if sbp.startswith('/') or sbp.endswith('/'):\n raise SaltInvocationError('s3_bucket_prefix can not start or'\n ' end with /.')\n if attrs_to_set:\n if __opts__['test']:\n ret['comment'] = 'ELB {0} set to have attributes set.'.format(name)\n ret['result'] = None\n return ret\n was_set = __salt__['boto_elb.set_attributes'](name, attributes,\n region, key, keyid,\n profile)\n if was_set:\n ret['changes']['old'] = {'attributes': _attributes}\n ret['changes']['new'] = {'attributes': attributes}\n ret['comment'] = 'Set attributes on ELB {0}.'.format(name)\n else:\n ret['result'] = False\n 
ret['comment'] = 'Failed to set attributes on ELB {0}.'.format(name)\n else:\n ret['comment'] = 'Attributes already set on ELB {0}.'.format(name)\n return ret\n", "def _health_check_present(name, health_check, region, key, keyid, profile):\n ret = {'result': True, 'comment': '', 'changes': {}}\n if not health_check:\n health_check = {}\n _health_check = __salt__['boto_elb.get_health_check'](name, region, key,\n keyid, profile)\n if not _health_check:\n ret['result'] = False\n ret['comment'] = 'Failed to retrieve health_check for ELB {0}.'.format(name)\n return ret\n need_to_set = False\n for attr, val in six.iteritems(health_check):\n if six.text_type(_health_check[attr]) != six.text_type(val):\n need_to_set = True\n if need_to_set:\n if __opts__['test']:\n ret['comment'] = 'ELB {0} set to have health check set.'.format(name)\n ret['result'] = None\n return ret\n was_set = __salt__['boto_elb.set_health_check'](name, health_check,\n region, key, keyid,\n profile)\n if was_set:\n ret['changes']['old'] = {'health_check': _health_check}\n _health_check = __salt__['boto_elb.get_health_check'](name, region,\n key, keyid,\n profile)\n ret['changes']['new'] = {'health_check': _health_check}\n ret['comment'] = 'Set health check on ELB {0}.'.format(name)\n else:\n ret['result'] = False\n ret['comment'] = 'Failed to set health check on ELB {0}.'.format(name)\n else:\n ret['comment'] = 'Health check already set on ELB {0}.'.format(name)\n return ret\n", "def _policies_present(name, policies, policies_from_pillar, listeners, backends,\n region, key, keyid, profile):\n '''helper method for present. 
ensure that ELB policies are set'''\n if policies is None:\n policies = []\n pillar_policies = __salt__['config.option'](policies_from_pillar, [])\n policies = policies + pillar_policies\n if backends is None:\n backends = []\n\n # check for policy name uniqueness and correct type\n policy_names = set()\n for p in policies:\n if 'policy_name' not in p:\n raise SaltInvocationError('policy_name is a required value for '\n 'policies.')\n if 'policy_type' not in p:\n raise SaltInvocationError('policy_type is a required value for '\n 'policies.')\n if 'policy' not in p:\n raise SaltInvocationError('policy is a required value for '\n 'listeners.')\n # check for unique policy names\n if p['policy_name'] in policy_names:\n raise SaltInvocationError('Policy names must be unique: policy {0}'\n ' is declared twice.'.format(p['policy_name']))\n policy_names.add(p['policy_name'])\n\n # check that listeners refer to valid policy names\n for l in listeners:\n for p in l.get('policies', []):\n if p not in policy_names:\n raise SaltInvocationError('Listener {0} on ELB {1} refers to '\n 'undefined policy {2}.'.format(l['elb_port'], name, p))\n\n # check that backends refer to valid policy names\n for b in backends:\n for p in b.get('policies', []):\n if p not in policy_names:\n raise SaltInvocationError('Backend {0} on ELB {1} refers to '\n 'undefined policy '\n '{2}.'.format(b['instance_port'], name, p))\n\n ret = {'result': True, 'comment': '', 'changes': {}}\n\n lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile)\n if not lb:\n ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name)\n ret['result'] = False\n return ret\n\n # Policies have two names:\n # - a short name ('name') that's only the policy name (e.g. testpolicy)\n # - a canonical name ('cname') that contains the policy type and hash\n # (e.g. 
SSLNegotiationPolicy-testpolicy-14b32f668639cc8ea1391e062af98524)\n\n policies_by_cname = {}\n cnames_by_name = {}\n for p in policies:\n cname = _policy_cname(p)\n policies_by_cname[cname] = p\n cnames_by_name[p['policy_name']] = cname\n\n expected_policy_names = policies_by_cname.keys()\n actual_policy_names = lb['policies']\n\n # This is sadly a huge hack to get around the fact that AWS assigns a\n # default SSLNegotiationPolicyType policy (with the naming scheme\n # ELBSecurityPolicy-YYYY-MM) to all ELBs terminating SSL without an\n # explicit policy set. If we don't keep track of the default policies and\n # explicitly exclude them from deletion, orchestration will fail because we\n # attempt to delete the default policy that's being used by listeners that\n # were created with no explicit policy.\n default_aws_policies = set()\n\n expected_policies_by_listener = {}\n for l in listeners:\n expected_policies_by_listener[l['elb_port']] = set(\n [cnames_by_name[p] for p in l.get('policies', [])])\n\n actual_policies_by_listener = {}\n for l in lb['listeners']:\n listener_policies = set(l.get('policies', []))\n actual_policies_by_listener[l['elb_port']] = listener_policies\n # Determine if any actual listener policies look like default policies,\n # so we can exclude them from deletion below (see note about this hack\n # above).\n for p in listener_policies:\n if re.match(r'^ELBSecurityPolicy-\\d{4}-\\d{2}$', p):\n default_aws_policies.add(p)\n\n expected_policies_by_backend = {}\n for b in backends:\n expected_policies_by_backend[b['instance_port']] = set(\n [cnames_by_name[p] for p in b.get('policies', [])])\n\n actual_policies_by_backend = {}\n for b in lb['backends']:\n backend_policies = set(b.get('policies', []))\n actual_policies_by_backend[b['instance_port']] = backend_policies\n\n to_delete = []\n to_create = []\n\n for policy_name in expected_policy_names:\n if policy_name not in actual_policy_names:\n to_create.append(policy_name)\n for policy_name in 
actual_policy_names:\n if policy_name not in expected_policy_names:\n if policy_name not in default_aws_policies:\n to_delete.append(policy_name)\n\n listeners_to_update = set()\n for port, policies in six.iteritems(expected_policies_by_listener):\n if policies != actual_policies_by_listener.get(port, set()):\n listeners_to_update.add(port)\n for port, policies in six.iteritems(actual_policies_by_listener):\n if policies != expected_policies_by_listener.get(port, set()):\n listeners_to_update.add(port)\n\n backends_to_update = set()\n for port, policies in six.iteritems(expected_policies_by_backend):\n if policies != actual_policies_by_backend.get(port, set()):\n backends_to_update.add(port)\n for port, policies in six.iteritems(actual_policies_by_backend):\n if policies != expected_policies_by_backend.get(port, set()):\n backends_to_update.add(port)\n\n if __opts__['test']:\n msg = []\n if to_create or to_delete:\n msg.append('ELB {0} set to have policies modified:'.format(name))\n for policy in to_create:\n msg.append('Policy {0} added.'.format(policy))\n for policy in to_delete:\n msg.append('Policy {0} deleted.'.format(policy))\n ret['result'] = None\n else:\n msg.append('Policies already set on ELB {0}.'.format(name))\n for listener in listeners_to_update:\n msg.append('Listener {0} policies updated.'.format(listener))\n for backend in backends_to_update:\n msg.append('Backend {0} policies updated.'.format(backend))\n ret['comment'] = ' '.join(msg)\n return ret\n\n if to_create:\n for policy_name in to_create:\n created = __salt__['boto_elb.create_policy'](\n name=name,\n policy_name=policy_name,\n policy_type=policies_by_cname[policy_name]['policy_type'],\n policy=policies_by_cname[policy_name]['policy'],\n region=region,\n key=key,\n keyid=keyid,\n profile=profile)\n if created:\n ret['changes'].setdefault(policy_name, {})['new'] = policy_name\n comment = \"Policy {0} was created on ELB {1}\".format(\n policy_name, name)\n ret['comment'] = ' 
'.join([ret['comment'], comment])\n ret['result'] = True\n else:\n ret['result'] = False\n return ret\n\n for port in listeners_to_update:\n policy_set = __salt__['boto_elb.set_listener_policy'](\n name=name,\n port=port,\n policies=list(expected_policies_by_listener.get(port, [])),\n region=region,\n key=key,\n keyid=keyid,\n profile=profile)\n if policy_set:\n policy_key = 'listener_{0}_policy'.format(port)\n ret['changes'][policy_key] = {\n 'old': list(actual_policies_by_listener.get(port, [])),\n 'new': list(expected_policies_by_listener.get(port, [])),\n }\n comment = \"Policy {0} was created on ELB {1} listener {2}\".format(\n expected_policies_by_listener[port], name, port)\n ret['comment'] = ' '.join([ret['comment'], comment])\n ret['result'] = True\n else:\n ret['result'] = False\n return ret\n\n for port in backends_to_update:\n policy_set = __salt__['boto_elb.set_backend_policy'](\n name=name,\n port=port,\n policies=list(expected_policies_by_backend.get(port, [])),\n region=region,\n key=key,\n keyid=keyid,\n profile=profile)\n if policy_set:\n policy_key = 'backend_{0}_policy'.format(port)\n ret['changes'][policy_key] = {\n 'old': list(actual_policies_by_backend.get(port, [])),\n 'new': list(expected_policies_by_backend.get(port, [])),\n }\n comment = \"Policy {0} was created on ELB {1} backend {2}\".format(\n expected_policies_by_backend[port], name, port)\n ret['comment'] = ' '.join([ret['comment'], comment])\n ret['result'] = True\n else:\n ret['result'] = False\n return ret\n\n if to_delete:\n for policy_name in to_delete:\n deleted = __salt__['boto_elb.delete_policy'](\n name=name,\n policy_name=policy_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile)\n if deleted:\n ret['changes'].setdefault(policy_name, {})['old'] = policy_name\n comment = \"Policy {0} was deleted from ELB {1}\".format(\n policy_name, name)\n ret['comment'] = ' '.join([ret['comment'], comment])\n ret['result'] = True\n else:\n ret['result'] = False\n return 
ret\n return ret\n", "def _tags_present(name, tags, region, key, keyid, profile):\n '''\n helper function to validate tags on elb\n '''\n ret = {'result': True, 'comment': '', 'changes': {}}\n if tags:\n lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile)\n\n tags_to_add = tags\n tags_to_update = {}\n tags_to_remove = []\n if lb.get('tags'):\n for _tag in lb['tags']:\n if _tag not in tags.keys():\n if _tag not in tags_to_remove:\n tags_to_remove.append(_tag)\n else:\n if tags[_tag] != lb['tags'][_tag]:\n tags_to_update[_tag] = tags[_tag]\n tags_to_add.pop(_tag)\n if tags_to_remove:\n if __opts__['test']:\n msg = 'The following tag{0} set to be removed: {1}.'.format(\n ('s are' if len(tags_to_remove) > 1 else ' is'), ', '.join(tags_to_remove))\n ret['comment'] = ' '.join([ret['comment'], msg])\n ret['result'] = None\n else:\n _ret = __salt__['boto_elb.delete_tags'](\n name, tags_to_remove, region, key, keyid, profile)\n if not _ret:\n ret['result'] = False\n msg = 'Error attempting to delete tag {0}.'.format(tags_to_remove)\n ret['comment'] = ' '.join([ret['comment'], msg])\n return ret\n if 'old' not in ret['changes']:\n ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'old': {'tags': {}}})\n for _tag in tags_to_remove:\n ret['changes']['old']['tags'][_tag] = lb['tags'][_tag]\n if tags_to_add or tags_to_update:\n if __opts__['test']:\n if tags_to_add:\n msg = 'The following tag{0} set to be added: {1}.'.format(\n ('s are' if len(tags_to_add.keys()) > 1 else ' is'),\n ', '.join(tags_to_add.keys()))\n ret['comment'] = ' '. 
join([ret['comment'], msg])\n ret['result'] = None\n if tags_to_update:\n msg = 'The following tag {0} set to be updated: {1}.'.format(\n ('values are' if len(tags_to_update.keys()) > 1 else 'value is'),\n ', '.join(tags_to_update.keys()))\n ret['comment'] = ' '.join([ret['comment'], msg])\n else:\n all_tag_changes = salt.utils.dictupdate.update(tags_to_add, tags_to_update)\n _ret = __salt__['boto_elb.set_tags'](\n name, all_tag_changes, region, key, keyid, profile)\n if not _ret:\n ret['result'] = False\n msg = 'Error attempting to set tags.'\n ret['comment'] = ' '.join([ret['comment'], msg])\n return ret\n if 'old' not in ret['changes']:\n ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'old': {'tags': {}}})\n if 'new' not in ret['changes']:\n ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'new': {'tags': {}}})\n for tag in all_tag_changes:\n ret['changes']['new']['tags'][tag] = tags[tag]\n if 'tags' in lb:\n if lb['tags']:\n if tag in lb['tags']:\n ret['changes']['old']['tags'][tag] = lb['tags'][tag]\n if not tags_to_update and not tags_to_remove and not tags_to_add:\n msg = 'Tags are already set.'\n ret['comment'] = ' '.join([ret['comment'], msg])\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Manage ELBs .. versionadded:: 2014.7.0 Create and destroy ELBs. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses ``boto``, which can be installed via package, or pip. This module accepts explicit elb credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml elb.keyid: GKTADJGHEIQSXMKKRBJ08H elb.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - availability_zones: - us-east-1a - us-east-1c - us-east-1d - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs - listeners: - elb_port: 443 instance_port: 80 elb_protocol: HTTPS instance_protocol: HTTP certificate: 'arn:aws:iam::1111111:server-certificate/mycert' policies: - my-ssl-policy - cookie-policy - elb_port: 8210 instance_port: 8210 elb_protocol: TCP - backends: - instance_port: 80 policies: - enable-proxy-protocol - health_check: target: 'HTTP:80/' - attributes: cross_zone_load_balancing: enabled: true access_log: enabled: true s3_bucket_name: 'mybucket' s3_bucket_prefix: 'my-logs' emit_interval: 5 connecting_settings: idle_timeout: 60 - cnames: - name: mycname.example.com. zone: example.com. ttl: 60 - name: myothercname.example.com. zone: example.com. 
- security_groups: - my-security-group - policies: - policy_name: my-ssl-policy policy_type: SSLNegotiationPolicyType policy: Protocol-TLSv1.2: true Protocol-SSLv3: false Server-Defined-Cipher-Order: true ECDHE-ECDSA-AES128-GCM-SHA256: true - policy_name: cookie-policy policy_type: LBCookieStickinessPolicyType policy: {} # no policy means this is a session cookie - policy_name: enable-proxy-protocol policy_type: ProxyProtocolPolicyType policy: ProxyProtocol: true # Using a profile from pillars Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile # Passing in a profile Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's possible to specify attributes from pillars by specifying a pillar. You can override the values defined in the pillard by setting the attributes on the resource. The module will use the default pillar key 'boto_elb_attributes', which allows you to set default attributes for all ELB resources. Setting the attributes pillar: .. code-block:: yaml my_elb_attributes: cross_zone_load_balancing: enabled: true connection_draining: enabled: true timeout: 20 access_log: enabled: true s3_bucket_name: 'mybucket' s3_bucket_prefix: 'my-logs' emit_interval: 5 Overriding the attribute values on the resource: .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - attributes_from_pillar: my_elb_attributes # override cross_zone_load_balancing:enabled - attributes: cross_zone_load_balancing: enabled: false - profile: myelbprofile It's possible to specify cloudwatch alarms that will be setup along with the ELB. Note the alarm name will be defined by the name attribute provided, plus the ELB resource name. .. 
code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - alarms: UnHealthyHostCount: name: 'ELB UnHealthyHostCount **MANAGED BY SALT**' attributes: metric: UnHealthyHostCount namespace: AWS/ELB statistic: Average comparison: '>=' threshold: 1.0 period: 600 evaluation_periods: 6 unit: null description: ELB UnHealthyHostCount alarm_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] insufficient_data_actions: [] ok_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] You can also use alarms from pillars, and override values from the pillar alarms by setting overrides on the resource. Note that 'boto_elb_alarms' will be used as a default value for all resources, if defined and can be used to ensure alarms are always set for a resource. Setting the alarms in a pillar: .. code-block:: yaml my_elb_alarm: UnHealthyHostCount: name: 'ELB UnHealthyHostCount **MANAGED BY SALT**' attributes: metric: UnHealthyHostCount namespace: AWS/ELB statistic: Average comparison: '>=' threshold: 1.0 period: 600 evaluation_periods: 6 unit: null description: ELB UnHealthyHostCount alarm_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] insufficient_data_actions: [] ok_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] Overriding the alarm values on the resource: .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - alarms_from_pillar: my_elb_alarm # override UnHealthyHostCount:attributes:threshold - alarms: UnHealthyHostCount: attributes: threshold: 2.0 Tags can also be set: .. versionadded:: 2016.3.0 .. 
code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - tags: MyTag: 'My Tag Value' OtherTag: 'My Other Value' ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt Libs import hashlib import re import salt.utils.data import salt.utils.dictupdate import salt.utils.stringutils from salt.exceptions import SaltInvocationError from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' return 'boto_elb' if 'boto_elb.exists' in __salt__ else False def register_instances(name, instances, region=None, key=None, keyid=None, profile=None): ''' Add EC2 instance(s) to an Elastic Load Balancer. Removing an instance from the ``instances`` list does not remove it from the ELB. name The name of the Elastic Load Balancer to add EC2 instances to. instances A list of EC2 instance IDs that this Elastic Load Balancer should distribute traffic to. This state will only ever append new instances to the ELB. EC2 instances already associated with this ELB will not be removed if they are not in the ``instances`` list. .. versionadded:: 2015.8.0 .. 
code-block:: yaml add-instances: boto_elb.register_instances: - name: myloadbalancer - instances: - instance-id1 - instance-id2 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not lb: msg = 'Could not find lb {0}'.format(name) log.error(msg) ret.update({'comment': msg, 'result': False}) return ret health = __salt__['boto_elb.get_instance_health']( name, region, key, keyid, profile) nodes = [value['instance_id'] for value in health if value['description'] != 'Instance deregistration currently in progress.'] new = [value for value in instances if value not in nodes] if not new: msg = 'Instance/s {0} already exist.'.format(six.text_type(instances).strip('[]')) log.debug(msg) ret.update({'comment': msg}) return ret if __opts__['test']: ret['comment'] = 'ELB {0} is set to register : {1}.'.format(name, new) ret['result'] = None return ret state = __salt__['boto_elb.register_instances']( name, instances, region, key, keyid, profile) if state: msg = 'Load Balancer {0} has been changed'.format(name) log.info(msg) new = set().union(nodes, instances) ret.update({'comment': msg, 'changes': {'old': '\n'.join(nodes), 'new': '\n'.join(list(new))}}) else: msg = 'Load balancer {0} failed to add instances'.format(name) log.error(msg) ret.update({'comment': msg, 'result': False}) return ret DEFAULT_PILLAR_LISTENER_POLICY_KEY = 'boto_elb_listener_policies' def _elb_present(name, availability_zones, listeners, subnets, subnet_names, security_groups, scheme, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if not salt.utils.data.exactly_one((availability_zones, subnets, subnet_names)): raise SaltInvocationError('Exactly one of availability_zones, subnets, ' 'subnet_names must be provided as arguments.') if not listeners: listeners = [] for listener in listeners: if len(listener) < 3: raise SaltInvocationError('Listeners must have at minimum port,' ' 
instance_port and protocol values in' ' the provided list.') if 'elb_port' not in listener: raise SaltInvocationError('elb_port is a required value for' ' listeners.') if 'instance_port' not in listener: raise SaltInvocationError('instance_port is a required value for' ' listeners.') if 'elb_protocol' not in listener: raise SaltInvocationError('elb_protocol is a required value for' ' listeners.') listener['elb_protocol'] = listener['elb_protocol'].upper() if listener['elb_protocol'] == 'HTTPS' and 'certificate' not in listener: raise SaltInvocationError('certificate is a required value for' ' listeners if HTTPS is set for' ' elb_protocol.') # best attempt at principle of least surprise here: # only use the default pillar in cases where we don't explicitly # define policies OR policies_from_pillar on a listener policies = listener.setdefault('policies', []) policies_pillar = listener.get('policies_from_pillar', None) if not policies and policies_pillar is None: policies_pillar = DEFAULT_PILLAR_LISTENER_POLICY_KEY if policies_pillar: policies += __salt__['pillar.get'](policies_pillar, {}).get(listener['elb_protocol'], []) # Look up subnet ids from names if provided if subnet_names: subnets = [] for i in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', name=i, region=region, key=key, keyid=keyid, profile=profile) if 'error' in r: ret['comment'] = 'Error looking up subnet ids: {0}'.format(r['error']) ret['result'] = False return ret if 'id' not in r: ret['comment'] = 'Subnet {0} does not exist.'.format(i) ret['result'] = False return ret subnets.append(r['id']) _security_groups = None if subnets: vpc_id = __salt__['boto_vpc.get_subnet_association'](subnets, region, key, keyid, profile) vpc_id = vpc_id.get('vpc_id') if not vpc_id: ret['comment'] = 'Subnets {0} do not map to a valid vpc id.'.format(subnets) ret['result'] = False return ret _security_groups = __salt__['boto_secgroup.convert_to_group_ids']( security_groups, vpc_id=vpc_id, region=region, 
key=key, keyid=keyid, profile=profile ) if not _security_groups: ret['comment'] = 'Security groups {0} do not map to valid security group ids.'.format(security_groups) ret['result'] = False return ret exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not exists: if __opts__['test']: ret['comment'] = 'ELB {0} is set to be created.'.format(name) ret['result'] = None return ret created = __salt__['boto_elb.create'](name=name, availability_zones=availability_zones, listeners=listeners, subnets=subnets, security_groups=_security_groups, scheme=scheme, region=region, key=key, keyid=keyid, profile=profile) if created: ret['changes']['old'] = {'elb': None} ret['changes']['new'] = {'elb': name} ret['comment'] = 'ELB {0} created.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to create {0} ELB.'.format(name) else: ret['comment'] = 'ELB {0} present.'.format(name) _ret = _security_groups_present(name, _security_groups, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _listeners_present(name, listeners, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret if availability_zones: _ret = _zones_present(name, availability_zones, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret elif subnets: _ret = _subnets_present(name, subnets, region, key, keyid, profile) ret['changes'] = 
salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret def _listeners_present(name, listeners, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) ret['result'] = False return ret if not listeners: listeners = [] expected_listeners_by_tuple = {} for l in listeners: l_key = __salt__['boto_elb.listener_dict_to_tuple'](l) expected_listeners_by_tuple[l_key] = l actual_listeners_by_tuple = {} for l in lb['listeners']: l_key = __salt__['boto_elb.listener_dict_to_tuple'](l) actual_listeners_by_tuple[l_key] = l to_delete = [] to_create = [] for t, l in six.iteritems(expected_listeners_by_tuple): if t not in actual_listeners_by_tuple: to_create.append(l) for t, l in six.iteritems(actual_listeners_by_tuple): if t not in expected_listeners_by_tuple: to_delete.append(l) if __opts__['test']: msg = [] if to_create or to_delete: msg.append('ELB {0} set to have listeners modified:'.format(name)) for listener in to_create: msg.append('Listener {0} added.'.format( __salt__['boto_elb.listener_dict_to_tuple'](listener))) for listener in to_delete: msg.append('Listener {0} deleted.'.format( __salt__['boto_elb.listener_dict_to_tuple'](listener))) ret['result'] = None else: msg.append('Listeners already set on ELB {0}.'.format(name)) ret['comment'] = ' '.join(msg) return ret if to_delete: ports = [l['elb_port'] for l in to_delete] deleted = __salt__['boto_elb.delete_listeners'](name, ports, region, key, keyid, profile) if deleted: ret['comment'] = 'Deleted listeners on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to delete listeners on {0} ELB.'.format(name) ret['result'] = False if to_create: created = __salt__['boto_elb.create_listeners'](name, 
to_create, region, key, keyid, profile) if created: msg = 'Created listeners on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) else: msg = 'Failed to create listeners on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) ret['result'] = False if to_create or to_delete: ret['changes']['listeners'] = {} ret['changes']['listeners']['old'] = lb['listeners'] lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) ret['changes']['listeners']['new'] = lb['listeners'] else: ret['comment'] = 'Listeners already set on ELB {0}.'.format(name) return ret def _security_groups_present(name, security_groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) ret['result'] = False return ret if not security_groups: security_groups = [] change_needed = False if set(security_groups) != set(lb['security_groups']): change_needed = True if change_needed: if __opts__['test']: ret['comment'] = 'ELB {0} set to have security groups modified.'.format(name) ret['result'] = None return ret changed = __salt__['boto_elb.apply_security_groups']( name, security_groups, region, key, keyid, profile ) if changed: ret['comment'] = 'Modified security_groups on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to modify security_groups on {0} ELB.'.format(name) ret['result'] = False ret['changes']['old'] = {'security_groups': lb['security_groups']} ret['changes']['new'] = {'security_groups': security_groups} else: ret['comment'] = 'security_groups already set on ELB {0}.'.format(name) return ret def _attributes_present(name, attributes, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} _attributes = __salt__['boto_elb.get_attributes'](name, region, key, keyid, profile) if not _attributes: ret['result'] = 
False ret['comment'] = 'Failed to retrieve attributes for ELB {0}.'.format(name) return ret attrs_to_set = [] if 'cross_zone_load_balancing' in attributes: czlb = attributes['cross_zone_load_balancing'] _czlb = _attributes['cross_zone_load_balancing'] if czlb['enabled'] != _czlb['enabled']: attrs_to_set.append('cross_zone_load_balancing') if 'connection_draining' in attributes: cd = attributes['connection_draining'] _cd = _attributes['connection_draining'] if (cd['enabled'] != _cd['enabled'] or cd.get('timeout', 300) != _cd.get('timeout')): attrs_to_set.append('connection_draining') if 'connecting_settings' in attributes: cs = attributes['connecting_settings'] _cs = _attributes['connecting_settings'] if cs['idle_timeout'] != _cs['idle_timeout']: attrs_to_set.append('connecting_settings') if 'access_log' in attributes: for attr, val in six.iteritems(attributes['access_log']): if six.text_type(_attributes['access_log'][attr]) != six.text_type(val): attrs_to_set.append('access_log') if 's3_bucket_prefix' in attributes['access_log']: sbp = attributes['access_log']['s3_bucket_prefix'] if sbp.startswith('/') or sbp.endswith('/'): raise SaltInvocationError('s3_bucket_prefix can not start or' ' end with /.') if attrs_to_set: if __opts__['test']: ret['comment'] = 'ELB {0} set to have attributes set.'.format(name) ret['result'] = None return ret was_set = __salt__['boto_elb.set_attributes'](name, attributes, region, key, keyid, profile) if was_set: ret['changes']['old'] = {'attributes': _attributes} ret['changes']['new'] = {'attributes': attributes} ret['comment'] = 'Set attributes on ELB {0}.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to set attributes on ELB {0}.'.format(name) else: ret['comment'] = 'Attributes already set on ELB {0}.'.format(name) return ret def _health_check_present(name, health_check, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if not health_check: health_check = {} _health_check = 
__salt__['boto_elb.get_health_check'](name, region, key, keyid, profile) if not _health_check: ret['result'] = False ret['comment'] = 'Failed to retrieve health_check for ELB {0}.'.format(name) return ret need_to_set = False for attr, val in six.iteritems(health_check): if six.text_type(_health_check[attr]) != six.text_type(val): need_to_set = True if need_to_set: if __opts__['test']: ret['comment'] = 'ELB {0} set to have health check set.'.format(name) ret['result'] = None return ret was_set = __salt__['boto_elb.set_health_check'](name, health_check, region, key, keyid, profile) if was_set: ret['changes']['old'] = {'health_check': _health_check} _health_check = __salt__['boto_elb.get_health_check'](name, region, key, keyid, profile) ret['changes']['new'] = {'health_check': _health_check} ret['comment'] = 'Set health check on ELB {0}.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to set health check on ELB {0}.'.format(name) else: ret['comment'] = 'Health check already set on ELB {0}.'.format(name) return ret def _zones_present(name, availability_zones, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['result'] = False ret['comment'] = 'Failed to retrieve ELB {0}.'.format(name) return ret to_enable = [] to_disable = [] _zones = lb['availability_zones'] for zone in availability_zones: if zone not in _zones: to_enable.append(zone) for zone in _zones: if zone not in availability_zones: to_disable.append(zone) if to_enable or to_disable: if __opts__['test']: ret['comment'] = 'ELB {0} to have availability zones set.'.format(name) ret['result'] = None return ret if to_enable: enabled = __salt__['boto_elb.enable_availability_zones']( name, to_enable, region, key, keyid, profile) if enabled: ret['comment'] = 'Enabled availability zones on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to enable availability zones on {0} 
ELB.'.format(name) ret['result'] = False if to_disable: disabled = __salt__['boto_elb.disable_availability_zones']( name, to_disable, region, key, keyid, profile) if disabled: msg = 'Disabled availability zones on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) else: msg = 'Failed to disable availability zones on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) ret['result'] = False ret['changes']['old'] = {'availability_zones': lb['availability_zones']} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) ret['changes']['new'] = {'availability_zones': lb['availability_zones']} else: ret['comment'] = 'Availability zones already set on ELB {0}.'.format(name) return ret def _subnets_present(name, subnets, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if not subnets: subnets = [] lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['result'] = False ret['comment'] = 'Failed to retrieve ELB {0}.'.format(name) return ret to_enable = [] to_disable = [] _subnets = lb['subnets'] for subnet in subnets: if subnet not in _subnets: to_enable.append(subnet) for subnet in _subnets: if subnet not in subnets: to_disable.append(subnet) if to_enable or to_disable: if __opts__['test']: ret['comment'] = 'ELB {0} to have subnets set.'.format(name) ret['result'] = None return ret if to_enable: attached = __salt__['boto_elb.attach_subnets'](name, to_enable, region, key, keyid, profile) if attached: ret['comment'] = 'Attached subnets on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to attach subnets on {0} ELB.'.format(name) ret['result'] = False if to_disable: detached = __salt__['boto_elb.detach_subnets'](name, to_disable, region, key, keyid, profile) if detached: ret['comment'] = ' '.join([ ret['comment'], 'Detached subnets on {0} ELB.'.format(name) ]) else: ret['comment'] = ' '.join([ ret['comment'], 'Failed to detach subnets on {0} 
ELB.'.format(name) ]) ret['result'] = False ret['changes']['old'] = {'subnets': lb['subnets']} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) ret['changes']['new'] = {'subnets': lb['subnets']} else: ret['comment'] = 'Subnets already set on ELB {0}.'.format(name) return ret def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile): '''helper method for present. ensure that cloudwatch_alarms are set''' current = __salt__['config.option'](alarms_from_pillar, {}) if alarms: current = salt.utils.dictupdate.update(current, alarms) ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} for _, info in six.iteritems(current): info["name"] = name + " " + info["name"] info["attributes"]["description"] = name + " " + info["attributes"]["description"] info["attributes"]["dimensions"] = {"LoadBalancerName": [name]} kwargs = { "name": info["name"], "attributes": info["attributes"], "region": region, "key": key, "keyid": keyid, "profile": profile, } # No test=False cluase needed since the state handles that itself... results = __states__['boto_cloudwatch_alarm.present'](**kwargs) if not results.get('result'): ret["result"] = results["result"] if results.get("changes", {}) != {}: ret["changes"][info["name"]] = results["changes"] if "comment" in results: ret["comment"] += results["comment"] return ret def _policies_present(name, policies, policies_from_pillar, listeners, backends, region, key, keyid, profile): '''helper method for present. 
ensure that ELB policies are set''' if policies is None: policies = [] pillar_policies = __salt__['config.option'](policies_from_pillar, []) policies = policies + pillar_policies if backends is None: backends = [] # check for policy name uniqueness and correct type policy_names = set() for p in policies: if 'policy_name' not in p: raise SaltInvocationError('policy_name is a required value for ' 'policies.') if 'policy_type' not in p: raise SaltInvocationError('policy_type is a required value for ' 'policies.') if 'policy' not in p: raise SaltInvocationError('policy is a required value for ' 'listeners.') # check for unique policy names if p['policy_name'] in policy_names: raise SaltInvocationError('Policy names must be unique: policy {0}' ' is declared twice.'.format(p['policy_name'])) policy_names.add(p['policy_name']) # check that listeners refer to valid policy names for l in listeners: for p in l.get('policies', []): if p not in policy_names: raise SaltInvocationError('Listener {0} on ELB {1} refers to ' 'undefined policy {2}.'.format(l['elb_port'], name, p)) # check that backends refer to valid policy names for b in backends: for p in b.get('policies', []): if p not in policy_names: raise SaltInvocationError('Backend {0} on ELB {1} refers to ' 'undefined policy ' '{2}.'.format(b['instance_port'], name, p)) ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) ret['result'] = False return ret # Policies have two names: # - a short name ('name') that's only the policy name (e.g. testpolicy) # - a canonical name ('cname') that contains the policy type and hash # (e.g. 
SSLNegotiationPolicy-testpolicy-14b32f668639cc8ea1391e062af98524) policies_by_cname = {} cnames_by_name = {} for p in policies: cname = _policy_cname(p) policies_by_cname[cname] = p cnames_by_name[p['policy_name']] = cname expected_policy_names = policies_by_cname.keys() actual_policy_names = lb['policies'] # This is sadly a huge hack to get around the fact that AWS assigns a # default SSLNegotiationPolicyType policy (with the naming scheme # ELBSecurityPolicy-YYYY-MM) to all ELBs terminating SSL without an # explicit policy set. If we don't keep track of the default policies and # explicitly exclude them from deletion, orchestration will fail because we # attempt to delete the default policy that's being used by listeners that # were created with no explicit policy. default_aws_policies = set() expected_policies_by_listener = {} for l in listeners: expected_policies_by_listener[l['elb_port']] = set( [cnames_by_name[p] for p in l.get('policies', [])]) actual_policies_by_listener = {} for l in lb['listeners']: listener_policies = set(l.get('policies', [])) actual_policies_by_listener[l['elb_port']] = listener_policies # Determine if any actual listener policies look like default policies, # so we can exclude them from deletion below (see note about this hack # above). 
for p in listener_policies: if re.match(r'^ELBSecurityPolicy-\d{4}-\d{2}$', p): default_aws_policies.add(p) expected_policies_by_backend = {} for b in backends: expected_policies_by_backend[b['instance_port']] = set( [cnames_by_name[p] for p in b.get('policies', [])]) actual_policies_by_backend = {} for b in lb['backends']: backend_policies = set(b.get('policies', [])) actual_policies_by_backend[b['instance_port']] = backend_policies to_delete = [] to_create = [] for policy_name in expected_policy_names: if policy_name not in actual_policy_names: to_create.append(policy_name) for policy_name in actual_policy_names: if policy_name not in expected_policy_names: if policy_name not in default_aws_policies: to_delete.append(policy_name) listeners_to_update = set() for port, policies in six.iteritems(expected_policies_by_listener): if policies != actual_policies_by_listener.get(port, set()): listeners_to_update.add(port) for port, policies in six.iteritems(actual_policies_by_listener): if policies != expected_policies_by_listener.get(port, set()): listeners_to_update.add(port) backends_to_update = set() for port, policies in six.iteritems(expected_policies_by_backend): if policies != actual_policies_by_backend.get(port, set()): backends_to_update.add(port) for port, policies in six.iteritems(actual_policies_by_backend): if policies != expected_policies_by_backend.get(port, set()): backends_to_update.add(port) if __opts__['test']: msg = [] if to_create or to_delete: msg.append('ELB {0} set to have policies modified:'.format(name)) for policy in to_create: msg.append('Policy {0} added.'.format(policy)) for policy in to_delete: msg.append('Policy {0} deleted.'.format(policy)) ret['result'] = None else: msg.append('Policies already set on ELB {0}.'.format(name)) for listener in listeners_to_update: msg.append('Listener {0} policies updated.'.format(listener)) for backend in backends_to_update: msg.append('Backend {0} policies updated.'.format(backend)) ret['comment'] = ' 
'.join(msg) return ret if to_create: for policy_name in to_create: created = __salt__['boto_elb.create_policy']( name=name, policy_name=policy_name, policy_type=policies_by_cname[policy_name]['policy_type'], policy=policies_by_cname[policy_name]['policy'], region=region, key=key, keyid=keyid, profile=profile) if created: ret['changes'].setdefault(policy_name, {})['new'] = policy_name comment = "Policy {0} was created on ELB {1}".format( policy_name, name) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret for port in listeners_to_update: policy_set = __salt__['boto_elb.set_listener_policy']( name=name, port=port, policies=list(expected_policies_by_listener.get(port, [])), region=region, key=key, keyid=keyid, profile=profile) if policy_set: policy_key = 'listener_{0}_policy'.format(port) ret['changes'][policy_key] = { 'old': list(actual_policies_by_listener.get(port, [])), 'new': list(expected_policies_by_listener.get(port, [])), } comment = "Policy {0} was created on ELB {1} listener {2}".format( expected_policies_by_listener[port], name, port) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret for port in backends_to_update: policy_set = __salt__['boto_elb.set_backend_policy']( name=name, port=port, policies=list(expected_policies_by_backend.get(port, [])), region=region, key=key, keyid=keyid, profile=profile) if policy_set: policy_key = 'backend_{0}_policy'.format(port) ret['changes'][policy_key] = { 'old': list(actual_policies_by_backend.get(port, [])), 'new': list(expected_policies_by_backend.get(port, [])), } comment = "Policy {0} was created on ELB {1} backend {2}".format( expected_policies_by_backend[port], name, port) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret if to_delete: for policy_name in to_delete: deleted = __salt__['boto_elb.delete_policy']( name=name, 
policy_name=policy_name, region=region, key=key, keyid=keyid, profile=profile) if deleted: ret['changes'].setdefault(policy_name, {})['old'] = policy_name comment = "Policy {0} was deleted from ELB {1}".format( policy_name, name) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret return ret def _policy_cname(policy_dict): policy_name = policy_dict['policy_name'] policy_type = policy_dict['policy_type'] policy = policy_dict['policy'] canonical_policy_repr = six.text_type(sorted(list(six.iteritems(policy)), key=lambda x: six.text_type(x[0]))) policy_hash = hashlib.md5( salt.utils.stringutils.to_bytes(str(canonical_policy_repr))).hexdigest() # future lint: disable=blacklisted-function if policy_type.endswith('Type'): policy_type = policy_type[:-4] return "{0}-{1}-{2}".format(policy_type, policy_name, policy_hash) def absent(name, region=None, key=None, keyid=None, profile=None): ''' Ensure an ELB does not exist name name of the ELB ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'ELB {0} is set to be removed.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_elb.delete'](name, region, key, keyid, profile) if deleted: ret['changes']['old'] = {'elb': name} ret['changes']['new'] = {'elb': None} ret['comment'] = 'ELB {0} deleted.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to delete {0} ELB.'.format(name) else: ret['comment'] = '{0} ELB does not exist.'.format(name) return ret def _tags_present(name, tags, region, key, keyid, profile): ''' helper function to validate tags on elb ''' ret = {'result': True, 'comment': '', 'changes': {}} if tags: lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) tags_to_add = tags tags_to_update = {} tags_to_remove = [] if lb.get('tags'): for _tag in lb['tags']: if _tag 
not in tags.keys(): if _tag not in tags_to_remove: tags_to_remove.append(_tag) else: if tags[_tag] != lb['tags'][_tag]: tags_to_update[_tag] = tags[_tag] tags_to_add.pop(_tag) if tags_to_remove: if __opts__['test']: msg = 'The following tag{0} set to be removed: {1}.'.format( ('s are' if len(tags_to_remove) > 1 else ' is'), ', '.join(tags_to_remove)) ret['comment'] = ' '.join([ret['comment'], msg]) ret['result'] = None else: _ret = __salt__['boto_elb.delete_tags']( name, tags_to_remove, region, key, keyid, profile) if not _ret: ret['result'] = False msg = 'Error attempting to delete tag {0}.'.format(tags_to_remove) ret['comment'] = ' '.join([ret['comment'], msg]) return ret if 'old' not in ret['changes']: ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'old': {'tags': {}}}) for _tag in tags_to_remove: ret['changes']['old']['tags'][_tag] = lb['tags'][_tag] if tags_to_add or tags_to_update: if __opts__['test']: if tags_to_add: msg = 'The following tag{0} set to be added: {1}.'.format( ('s are' if len(tags_to_add.keys()) > 1 else ' is'), ', '.join(tags_to_add.keys())) ret['comment'] = ' '. join([ret['comment'], msg]) ret['result'] = None if tags_to_update: msg = 'The following tag {0} set to be updated: {1}.'.format( ('values are' if len(tags_to_update.keys()) > 1 else 'value is'), ', '.join(tags_to_update.keys())) ret['comment'] = ' '.join([ret['comment'], msg]) else: all_tag_changes = salt.utils.dictupdate.update(tags_to_add, tags_to_update) _ret = __salt__['boto_elb.set_tags']( name, all_tag_changes, region, key, keyid, profile) if not _ret: ret['result'] = False msg = 'Error attempting to set tags.' 
ret['comment'] = ' '.join([ret['comment'], msg]) return ret if 'old' not in ret['changes']: ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'old': {'tags': {}}}) if 'new' not in ret['changes']: ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'new': {'tags': {}}}) for tag in all_tag_changes: ret['changes']['new']['tags'][tag] = tags[tag] if 'tags' in lb: if lb['tags']: if tag in lb['tags']: ret['changes']['old']['tags'][tag] = lb['tags'][tag] if not tags_to_update and not tags_to_remove and not tags_to_add: msg = 'Tags are already set.' ret['comment'] = ' '.join([ret['comment'], msg]) return ret
saltstack/salt
salt/states/boto_elb.py
register_instances
python
def register_instances(name, instances, region=None, key=None, keyid=None,
                       profile=None):
    '''
    Add EC2 instance(s) to an Elastic Load Balancer. Removing an instance from
    the ``instances`` list does not remove it from the ELB.

    name
        The name of the Elastic Load Balancer to add EC2 instances to.

    instances
        A list of EC2 instance IDs that this Elastic Load Balancer should
        distribute traffic to. This state will only ever append new instances
        to the ELB. EC2 instances already associated with this ELB will not be
        removed if they are not in the ``instances`` list.

    .. versionadded:: 2015.8.0

    .. code-block:: yaml

        add-instances:
          boto_elb.register_instances:
            - name: myloadbalancer
            - instances:
              - instance-id1
              - instance-id2
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    lb = __salt__['boto_elb.exists'](name, region, key, keyid, profile)
    if not lb:
        msg = 'Could not find lb {0}'.format(name)
        log.error(msg)
        ret.update({'comment': msg, 'result': False})
        return ret
    health = __salt__['boto_elb.get_instance_health'](
        name, region, key, keyid, profile)
    # Instances mid-deregistration still appear in the health report but must
    # be treated as absent so they get re-registered.
    nodes = [value['instance_id'] for value in health
             if value['description'] != 'Instance deregistration currently in progress.']
    new = [value for value in instances if value not in nodes]
    if not new:
        msg = 'Instance/s {0} already exist.'.format(six.text_type(instances).strip('[]'))
        log.debug(msg)
        ret.update({'comment': msg})
        return ret

    if __opts__['test']:
        ret['comment'] = 'ELB {0} is set to register : {1}.'.format(name, new)
        ret['result'] = None
        return ret

    state = __salt__['boto_elb.register_instances'](
        name, instances, region, key, keyid, profile)
    if state:
        msg = 'Load Balancer {0} has been changed'.format(name)
        log.info(msg)
        # Sort the union so the reported 'new' changes are deterministic;
        # set iteration order is arbitrary and would make repeated runs
        # produce spurious diffs in the state output.
        new = sorted(set().union(nodes, instances))
        ret.update({'comment': msg,
                    'changes': {'old': '\n'.join(nodes),
                                'new': '\n'.join(new)}})
    else:
        msg = 'Load balancer {0} failed to add instances'.format(name)
        log.error(msg)
        ret.update({'comment': msg, 'result': False})
    return ret
Add EC2 instance(s) to an Elastic Load Balancer. Removing an instance from the ``instances`` list does not remove it from the ELB. name The name of the Elastic Load Balancer to add EC2 instances to. instances A list of EC2 instance IDs that this Elastic Load Balancer should distribute traffic to. This state will only ever append new instances to the ELB. EC2 instances already associated with this ELB will not be removed if they are not in the ``instances`` list. .. versionadded:: 2015.8.0 .. code-block:: yaml add-instances: boto_elb.register_instances: - name: myloadbalancer - instances: - instance-id1 - instance-id2
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_elb.py#L488-L550
null
# -*- coding: utf-8 -*- ''' Manage ELBs .. versionadded:: 2014.7.0 Create and destroy ELBs. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses ``boto``, which can be installed via package, or pip. This module accepts explicit elb credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml elb.keyid: GKTADJGHEIQSXMKKRBJ08H elb.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - availability_zones: - us-east-1a - us-east-1c - us-east-1d - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs - listeners: - elb_port: 443 instance_port: 80 elb_protocol: HTTPS instance_protocol: HTTP certificate: 'arn:aws:iam::1111111:server-certificate/mycert' policies: - my-ssl-policy - cookie-policy - elb_port: 8210 instance_port: 8210 elb_protocol: TCP - backends: - instance_port: 80 policies: - enable-proxy-protocol - health_check: target: 'HTTP:80/' - attributes: cross_zone_load_balancing: enabled: true access_log: enabled: true s3_bucket_name: 'mybucket' s3_bucket_prefix: 'my-logs' emit_interval: 5 connecting_settings: idle_timeout: 60 - cnames: - name: mycname.example.com. zone: example.com. ttl: 60 - name: myothercname.example.com. zone: example.com. 
- security_groups: - my-security-group - policies: - policy_name: my-ssl-policy policy_type: SSLNegotiationPolicyType policy: Protocol-TLSv1.2: true Protocol-SSLv3: false Server-Defined-Cipher-Order: true ECDHE-ECDSA-AES128-GCM-SHA256: true - policy_name: cookie-policy policy_type: LBCookieStickinessPolicyType policy: {} # no policy means this is a session cookie - policy_name: enable-proxy-protocol policy_type: ProxyProtocolPolicyType policy: ProxyProtocol: true # Using a profile from pillars Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile # Passing in a profile Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's possible to specify attributes from pillars by specifying a pillar. You can override the values defined in the pillard by setting the attributes on the resource. The module will use the default pillar key 'boto_elb_attributes', which allows you to set default attributes for all ELB resources. Setting the attributes pillar: .. code-block:: yaml my_elb_attributes: cross_zone_load_balancing: enabled: true connection_draining: enabled: true timeout: 20 access_log: enabled: true s3_bucket_name: 'mybucket' s3_bucket_prefix: 'my-logs' emit_interval: 5 Overriding the attribute values on the resource: .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - attributes_from_pillar: my_elb_attributes # override cross_zone_load_balancing:enabled - attributes: cross_zone_load_balancing: enabled: false - profile: myelbprofile It's possible to specify cloudwatch alarms that will be setup along with the ELB. Note the alarm name will be defined by the name attribute provided, plus the ELB resource name. .. 
code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - alarms: UnHealthyHostCount: name: 'ELB UnHealthyHostCount **MANAGED BY SALT**' attributes: metric: UnHealthyHostCount namespace: AWS/ELB statistic: Average comparison: '>=' threshold: 1.0 period: 600 evaluation_periods: 6 unit: null description: ELB UnHealthyHostCount alarm_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] insufficient_data_actions: [] ok_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] You can also use alarms from pillars, and override values from the pillar alarms by setting overrides on the resource. Note that 'boto_elb_alarms' will be used as a default value for all resources, if defined and can be used to ensure alarms are always set for a resource. Setting the alarms in a pillar: .. code-block:: yaml my_elb_alarm: UnHealthyHostCount: name: 'ELB UnHealthyHostCount **MANAGED BY SALT**' attributes: metric: UnHealthyHostCount namespace: AWS/ELB statistic: Average comparison: '>=' threshold: 1.0 period: 600 evaluation_periods: 6 unit: null description: ELB UnHealthyHostCount alarm_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] insufficient_data_actions: [] ok_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] Overriding the alarm values on the resource: .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - alarms_from_pillar: my_elb_alarm # override UnHealthyHostCount:attributes:threshold - alarms: UnHealthyHostCount: attributes: threshold: 2.0 Tags can also be set: .. versionadded:: 2016.3.0 .. 
code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - tags: MyTag: 'My Tag Value' OtherTag: 'My Other Value' ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt Libs import hashlib import re import salt.utils.data import salt.utils.dictupdate import salt.utils.stringutils from salt.exceptions import SaltInvocationError from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' return 'boto_elb' if 'boto_elb.exists' in __salt__ else False def present(name, listeners, availability_zones=None, subnets=None, subnet_names=None, security_groups=None, scheme='internet-facing', health_check=None, attributes=None, attributes_from_pillar="boto_elb_attributes", cnames=None, alarms=None, alarms_from_pillar="boto_elb_alarms", policies=None, policies_from_pillar="boto_elb_policies", backends=None, region=None, key=None, keyid=None, profile=None, wait_for_sync=True, tags=None, instance_ids=None, instance_names=None): ''' Ensure the ELB exists. name Name of the ELB. availability_zones A list of availability zones for this ELB. listeners A list of listener lists; example:: [ ['443', 'HTTPS', 'arn:aws:iam::1111111:server-certificate/mycert'], ['8443', '80', 'HTTPS', 'HTTP', 'arn:aws:iam::1111111:server-certificate/mycert'] ] subnets A list of subnet IDs in your VPC to attach to your LoadBalancer. subnet_names A list of subnet names in your VPC to attach to your LoadBalancer. security_groups The security groups assigned to your LoadBalancer within your VPC. Must be passed either as a list or a comma-separated string. For example, a list: .. code-block:: yaml - security_groups: - secgroup-one - secgroup-two Or as a comma-separated string: .. code-block:: yaml - security_groups: secgroup-one,secgroup-two scheme The type of a LoadBalancer, ``internet-facing`` or ``internal``. 
Once set, can not be modified. health_check A dict defining the health check for this ELB. attributes A dict defining the attributes to set on this ELB. Unknown keys will be silently ignored. See the :mod:`salt.modules.boto_elb.set_attributes` function for recognized attributes. attributes_from_pillar name of pillar dict that contains attributes. Attributes defined for this specific state will override those from pillar. cnames A list of cname dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. the cnames dict will be passed to the state as kwargs. See the :mod:`salt.states.boto_route53` state for information about these attributes. alarms: a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ELB. All attributes should be specified except for dimension which will be automatically set to this ELB. See the :mod:`salt.states.boto_cloudwatch_alarm` state for information about these attributes. alarms_from_pillar: name of pillar dict that contains alarm settings. Alarms defined for this specific state will override those from pillar. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. wait_for_sync Wait for an INSYNC change status from Route53. tags dict of tags instance_ids list of instance ids. The state will ensure that these, and ONLY these, instances are registered with the ELB. This is additive with instance_names. instance_names list of instance names. The state will ensure that these, and ONLY these, instances are registered with the ELB. This is additive with instance_ids. 
''' # load data from attributes_from_pillar and merge with attributes tmp = __salt__['config.option'](attributes_from_pillar, {}) attributes = salt.utils.dictupdate.update(tmp, attributes) if attributes else tmp ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not isinstance(security_groups, (six.string_types, list, type(None))): msg = ("The 'security_group' parameter must be either a list or a " "comma-separated string.") log.error(msg) ret.update({'comment': msg, 'result': False}) return ret if isinstance(security_groups, six.string_types): security_groups = security_groups.split(',') _ret = _elb_present(name, availability_zones, listeners, subnets, subnet_names, security_groups, scheme, region, key, keyid, profile) ret.update({'changes': _ret['changes'], 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not exists and __opts__['test']: return ret if attributes: _ret = _attributes_present(name, attributes, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _health_check_present(name, health_check, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret if cnames: lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if lb: for cname in cnames: _ret = None dns_provider = 'boto_route53' cname.update({'record_type': 'CNAME', 'value': lb['dns_name']}) if 'provider' in cname: dns_provider = 
cname.pop('provider') if dns_provider == 'boto_route53': for p in ('profile', 'key', 'keyid', 'region', 'wait_for_sync'): cname[p] = locals().get(p) if p not in cname else cname[p] _ret = __states__['boto_route53.present'](**cname) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _policies_present(name, policies, policies_from_pillar, listeners, backends, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _tags_present(name, tags, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret if not instance_ids: instance_ids = [] if instance_names: # AWS borks on adding instances in "non-running" states, so filter 'em out. running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') for n in instance_names: instance_ids += __salt__['boto_ec2.find_instances']( name=n, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) # Backwards compat: Only touch attached instances if requested (e.g. if some are defined). 
if instance_ids: if __opts__['test']: if __salt__['boto_elb.set_instances']( name, instance_ids, True, region, key, keyid, profile): ret['comment'] += ' ELB {0} instances would be updated.'.format(name) ret['result'] = None else: success = __salt__['boto_elb.set_instances']( name, instance_ids, False, region, key, keyid, profile) if not success: ret['comment'] += "Failed to set requested instances." ret['result'] = False return ret DEFAULT_PILLAR_LISTENER_POLICY_KEY = 'boto_elb_listener_policies' def _elb_present(name, availability_zones, listeners, subnets, subnet_names, security_groups, scheme, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if not salt.utils.data.exactly_one((availability_zones, subnets, subnet_names)): raise SaltInvocationError('Exactly one of availability_zones, subnets, ' 'subnet_names must be provided as arguments.') if not listeners: listeners = [] for listener in listeners: if len(listener) < 3: raise SaltInvocationError('Listeners must have at minimum port,' ' instance_port and protocol values in' ' the provided list.') if 'elb_port' not in listener: raise SaltInvocationError('elb_port is a required value for' ' listeners.') if 'instance_port' not in listener: raise SaltInvocationError('instance_port is a required value for' ' listeners.') if 'elb_protocol' not in listener: raise SaltInvocationError('elb_protocol is a required value for' ' listeners.') listener['elb_protocol'] = listener['elb_protocol'].upper() if listener['elb_protocol'] == 'HTTPS' and 'certificate' not in listener: raise SaltInvocationError('certificate is a required value for' ' listeners if HTTPS is set for' ' elb_protocol.') # best attempt at principle of least surprise here: # only use the default pillar in cases where we don't explicitly # define policies OR policies_from_pillar on a listener policies = listener.setdefault('policies', []) policies_pillar = listener.get('policies_from_pillar', None) if not policies and 
policies_pillar is None: policies_pillar = DEFAULT_PILLAR_LISTENER_POLICY_KEY if policies_pillar: policies += __salt__['pillar.get'](policies_pillar, {}).get(listener['elb_protocol'], []) # Look up subnet ids from names if provided if subnet_names: subnets = [] for i in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', name=i, region=region, key=key, keyid=keyid, profile=profile) if 'error' in r: ret['comment'] = 'Error looking up subnet ids: {0}'.format(r['error']) ret['result'] = False return ret if 'id' not in r: ret['comment'] = 'Subnet {0} does not exist.'.format(i) ret['result'] = False return ret subnets.append(r['id']) _security_groups = None if subnets: vpc_id = __salt__['boto_vpc.get_subnet_association'](subnets, region, key, keyid, profile) vpc_id = vpc_id.get('vpc_id') if not vpc_id: ret['comment'] = 'Subnets {0} do not map to a valid vpc id.'.format(subnets) ret['result'] = False return ret _security_groups = __salt__['boto_secgroup.convert_to_group_ids']( security_groups, vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile ) if not _security_groups: ret['comment'] = 'Security groups {0} do not map to valid security group ids.'.format(security_groups) ret['result'] = False return ret exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not exists: if __opts__['test']: ret['comment'] = 'ELB {0} is set to be created.'.format(name) ret['result'] = None return ret created = __salt__['boto_elb.create'](name=name, availability_zones=availability_zones, listeners=listeners, subnets=subnets, security_groups=_security_groups, scheme=scheme, region=region, key=key, keyid=keyid, profile=profile) if created: ret['changes']['old'] = {'elb': None} ret['changes']['new'] = {'elb': name} ret['comment'] = 'ELB {0} created.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to create {0} ELB.'.format(name) else: ret['comment'] = 'ELB {0} present.'.format(name) _ret = _security_groups_present(name, 
_security_groups, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _listeners_present(name, listeners, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret if availability_zones: _ret = _zones_present(name, availability_zones, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret elif subnets: _ret = _subnets_present(name, subnets, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret def _listeners_present(name, listeners, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) ret['result'] = False return ret if not listeners: listeners = [] expected_listeners_by_tuple = {} for l in listeners: l_key = __salt__['boto_elb.listener_dict_to_tuple'](l) expected_listeners_by_tuple[l_key] = l actual_listeners_by_tuple = {} for l in lb['listeners']: l_key = __salt__['boto_elb.listener_dict_to_tuple'](l) actual_listeners_by_tuple[l_key] = l to_delete = [] to_create = [] for t, l in six.iteritems(expected_listeners_by_tuple): if t not in actual_listeners_by_tuple: to_create.append(l) 
for t, l in six.iteritems(actual_listeners_by_tuple): if t not in expected_listeners_by_tuple: to_delete.append(l) if __opts__['test']: msg = [] if to_create or to_delete: msg.append('ELB {0} set to have listeners modified:'.format(name)) for listener in to_create: msg.append('Listener {0} added.'.format( __salt__['boto_elb.listener_dict_to_tuple'](listener))) for listener in to_delete: msg.append('Listener {0} deleted.'.format( __salt__['boto_elb.listener_dict_to_tuple'](listener))) ret['result'] = None else: msg.append('Listeners already set on ELB {0}.'.format(name)) ret['comment'] = ' '.join(msg) return ret if to_delete: ports = [l['elb_port'] for l in to_delete] deleted = __salt__['boto_elb.delete_listeners'](name, ports, region, key, keyid, profile) if deleted: ret['comment'] = 'Deleted listeners on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to delete listeners on {0} ELB.'.format(name) ret['result'] = False if to_create: created = __salt__['boto_elb.create_listeners'](name, to_create, region, key, keyid, profile) if created: msg = 'Created listeners on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) else: msg = 'Failed to create listeners on {0} ELB.' 
ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) ret['result'] = False if to_create or to_delete: ret['changes']['listeners'] = {} ret['changes']['listeners']['old'] = lb['listeners'] lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) ret['changes']['listeners']['new'] = lb['listeners'] else: ret['comment'] = 'Listeners already set on ELB {0}.'.format(name) return ret def _security_groups_present(name, security_groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) ret['result'] = False return ret if not security_groups: security_groups = [] change_needed = False if set(security_groups) != set(lb['security_groups']): change_needed = True if change_needed: if __opts__['test']: ret['comment'] = 'ELB {0} set to have security groups modified.'.format(name) ret['result'] = None return ret changed = __salt__['boto_elb.apply_security_groups']( name, security_groups, region, key, keyid, profile ) if changed: ret['comment'] = 'Modified security_groups on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to modify security_groups on {0} ELB.'.format(name) ret['result'] = False ret['changes']['old'] = {'security_groups': lb['security_groups']} ret['changes']['new'] = {'security_groups': security_groups} else: ret['comment'] = 'security_groups already set on ELB {0}.'.format(name) return ret def _attributes_present(name, attributes, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} _attributes = __salt__['boto_elb.get_attributes'](name, region, key, keyid, profile) if not _attributes: ret['result'] = False ret['comment'] = 'Failed to retrieve attributes for ELB {0}.'.format(name) return ret attrs_to_set = [] if 'cross_zone_load_balancing' in attributes: czlb = attributes['cross_zone_load_balancing'] _czlb 
= _attributes['cross_zone_load_balancing'] if czlb['enabled'] != _czlb['enabled']: attrs_to_set.append('cross_zone_load_balancing') if 'connection_draining' in attributes: cd = attributes['connection_draining'] _cd = _attributes['connection_draining'] if (cd['enabled'] != _cd['enabled'] or cd.get('timeout', 300) != _cd.get('timeout')): attrs_to_set.append('connection_draining') if 'connecting_settings' in attributes: cs = attributes['connecting_settings'] _cs = _attributes['connecting_settings'] if cs['idle_timeout'] != _cs['idle_timeout']: attrs_to_set.append('connecting_settings') if 'access_log' in attributes: for attr, val in six.iteritems(attributes['access_log']): if six.text_type(_attributes['access_log'][attr]) != six.text_type(val): attrs_to_set.append('access_log') if 's3_bucket_prefix' in attributes['access_log']: sbp = attributes['access_log']['s3_bucket_prefix'] if sbp.startswith('/') or sbp.endswith('/'): raise SaltInvocationError('s3_bucket_prefix can not start or' ' end with /.') if attrs_to_set: if __opts__['test']: ret['comment'] = 'ELB {0} set to have attributes set.'.format(name) ret['result'] = None return ret was_set = __salt__['boto_elb.set_attributes'](name, attributes, region, key, keyid, profile) if was_set: ret['changes']['old'] = {'attributes': _attributes} ret['changes']['new'] = {'attributes': attributes} ret['comment'] = 'Set attributes on ELB {0}.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to set attributes on ELB {0}.'.format(name) else: ret['comment'] = 'Attributes already set on ELB {0}.'.format(name) return ret def _health_check_present(name, health_check, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if not health_check: health_check = {} _health_check = __salt__['boto_elb.get_health_check'](name, region, key, keyid, profile) if not _health_check: ret['result'] = False ret['comment'] = 'Failed to retrieve health_check for ELB {0}.'.format(name) return ret need_to_set = 
False for attr, val in six.iteritems(health_check): if six.text_type(_health_check[attr]) != six.text_type(val): need_to_set = True if need_to_set: if __opts__['test']: ret['comment'] = 'ELB {0} set to have health check set.'.format(name) ret['result'] = None return ret was_set = __salt__['boto_elb.set_health_check'](name, health_check, region, key, keyid, profile) if was_set: ret['changes']['old'] = {'health_check': _health_check} _health_check = __salt__['boto_elb.get_health_check'](name, region, key, keyid, profile) ret['changes']['new'] = {'health_check': _health_check} ret['comment'] = 'Set health check on ELB {0}.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to set health check on ELB {0}.'.format(name) else: ret['comment'] = 'Health check already set on ELB {0}.'.format(name) return ret def _zones_present(name, availability_zones, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['result'] = False ret['comment'] = 'Failed to retrieve ELB {0}.'.format(name) return ret to_enable = [] to_disable = [] _zones = lb['availability_zones'] for zone in availability_zones: if zone not in _zones: to_enable.append(zone) for zone in _zones: if zone not in availability_zones: to_disable.append(zone) if to_enable or to_disable: if __opts__['test']: ret['comment'] = 'ELB {0} to have availability zones set.'.format(name) ret['result'] = None return ret if to_enable: enabled = __salt__['boto_elb.enable_availability_zones']( name, to_enable, region, key, keyid, profile) if enabled: ret['comment'] = 'Enabled availability zones on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to enable availability zones on {0} ELB.'.format(name) ret['result'] = False if to_disable: disabled = __salt__['boto_elb.disable_availability_zones']( name, to_disable, region, key, keyid, profile) if disabled: msg = 'Disabled availability zones on {0} 
ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) else: msg = 'Failed to disable availability zones on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) ret['result'] = False ret['changes']['old'] = {'availability_zones': lb['availability_zones']} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) ret['changes']['new'] = {'availability_zones': lb['availability_zones']} else: ret['comment'] = 'Availability zones already set on ELB {0}.'.format(name) return ret def _subnets_present(name, subnets, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if not subnets: subnets = [] lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['result'] = False ret['comment'] = 'Failed to retrieve ELB {0}.'.format(name) return ret to_enable = [] to_disable = [] _subnets = lb['subnets'] for subnet in subnets: if subnet not in _subnets: to_enable.append(subnet) for subnet in _subnets: if subnet not in subnets: to_disable.append(subnet) if to_enable or to_disable: if __opts__['test']: ret['comment'] = 'ELB {0} to have subnets set.'.format(name) ret['result'] = None return ret if to_enable: attached = __salt__['boto_elb.attach_subnets'](name, to_enable, region, key, keyid, profile) if attached: ret['comment'] = 'Attached subnets on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to attach subnets on {0} ELB.'.format(name) ret['result'] = False if to_disable: detached = __salt__['boto_elb.detach_subnets'](name, to_disable, region, key, keyid, profile) if detached: ret['comment'] = ' '.join([ ret['comment'], 'Detached subnets on {0} ELB.'.format(name) ]) else: ret['comment'] = ' '.join([ ret['comment'], 'Failed to detach subnets on {0} ELB.'.format(name) ]) ret['result'] = False ret['changes']['old'] = {'subnets': lb['subnets']} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) ret['changes']['new'] = {'subnets': 
lb['subnets']} else: ret['comment'] = 'Subnets already set on ELB {0}.'.format(name) return ret def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile): '''helper method for present. ensure that cloudwatch_alarms are set''' current = __salt__['config.option'](alarms_from_pillar, {}) if alarms: current = salt.utils.dictupdate.update(current, alarms) ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} for _, info in six.iteritems(current): info["name"] = name + " " + info["name"] info["attributes"]["description"] = name + " " + info["attributes"]["description"] info["attributes"]["dimensions"] = {"LoadBalancerName": [name]} kwargs = { "name": info["name"], "attributes": info["attributes"], "region": region, "key": key, "keyid": keyid, "profile": profile, } # No test=False clause needed since the state handles that itself... results = __states__['boto_cloudwatch_alarm.present'](**kwargs) if not results.get('result'): ret["result"] = results["result"] if results.get("changes", {}) != {}: ret["changes"][info["name"]] = results["changes"] if "comment" in results: ret["comment"] += results["comment"] return ret def _policies_present(name, policies, policies_from_pillar, listeners, backends, region, key, keyid, profile): '''helper method for present. 
ensure that ELB policies are set''' if policies is None: policies = [] pillar_policies = __salt__['config.option'](policies_from_pillar, []) policies = policies + pillar_policies if backends is None: backends = [] # check for policy name uniqueness and correct type policy_names = set() for p in policies: if 'policy_name' not in p: raise SaltInvocationError('policy_name is a required value for ' 'policies.') if 'policy_type' not in p: raise SaltInvocationError('policy_type is a required value for ' 'policies.') if 'policy' not in p: raise SaltInvocationError('policy is a required value for ' 'listeners.') # check for unique policy names if p['policy_name'] in policy_names: raise SaltInvocationError('Policy names must be unique: policy {0}' ' is declared twice.'.format(p['policy_name'])) policy_names.add(p['policy_name']) # check that listeners refer to valid policy names for l in listeners: for p in l.get('policies', []): if p not in policy_names: raise SaltInvocationError('Listener {0} on ELB {1} refers to ' 'undefined policy {2}.'.format(l['elb_port'], name, p)) # check that backends refer to valid policy names for b in backends: for p in b.get('policies', []): if p not in policy_names: raise SaltInvocationError('Backend {0} on ELB {1} refers to ' 'undefined policy ' '{2}.'.format(b['instance_port'], name, p)) ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) ret['result'] = False return ret # Policies have two names: # - a short name ('name') that's only the policy name (e.g. testpolicy) # - a canonical name ('cname') that contains the policy type and hash # (e.g. 
SSLNegotiationPolicy-testpolicy-14b32f668639cc8ea1391e062af98524) policies_by_cname = {} cnames_by_name = {} for p in policies: cname = _policy_cname(p) policies_by_cname[cname] = p cnames_by_name[p['policy_name']] = cname expected_policy_names = policies_by_cname.keys() actual_policy_names = lb['policies'] # This is sadly a huge hack to get around the fact that AWS assigns a # default SSLNegotiationPolicyType policy (with the naming scheme # ELBSecurityPolicy-YYYY-MM) to all ELBs terminating SSL without an # explicit policy set. If we don't keep track of the default policies and # explicitly exclude them from deletion, orchestration will fail because we # attempt to delete the default policy that's being used by listeners that # were created with no explicit policy. default_aws_policies = set() expected_policies_by_listener = {} for l in listeners: expected_policies_by_listener[l['elb_port']] = set( [cnames_by_name[p] for p in l.get('policies', [])]) actual_policies_by_listener = {} for l in lb['listeners']: listener_policies = set(l.get('policies', [])) actual_policies_by_listener[l['elb_port']] = listener_policies # Determine if any actual listener policies look like default policies, # so we can exclude them from deletion below (see note about this hack # above). 
for p in listener_policies: if re.match(r'^ELBSecurityPolicy-\d{4}-\d{2}$', p): default_aws_policies.add(p) expected_policies_by_backend = {} for b in backends: expected_policies_by_backend[b['instance_port']] = set( [cnames_by_name[p] for p in b.get('policies', [])]) actual_policies_by_backend = {} for b in lb['backends']: backend_policies = set(b.get('policies', [])) actual_policies_by_backend[b['instance_port']] = backend_policies to_delete = [] to_create = [] for policy_name in expected_policy_names: if policy_name not in actual_policy_names: to_create.append(policy_name) for policy_name in actual_policy_names: if policy_name not in expected_policy_names: if policy_name not in default_aws_policies: to_delete.append(policy_name) listeners_to_update = set() for port, policies in six.iteritems(expected_policies_by_listener): if policies != actual_policies_by_listener.get(port, set()): listeners_to_update.add(port) for port, policies in six.iteritems(actual_policies_by_listener): if policies != expected_policies_by_listener.get(port, set()): listeners_to_update.add(port) backends_to_update = set() for port, policies in six.iteritems(expected_policies_by_backend): if policies != actual_policies_by_backend.get(port, set()): backends_to_update.add(port) for port, policies in six.iteritems(actual_policies_by_backend): if policies != expected_policies_by_backend.get(port, set()): backends_to_update.add(port) if __opts__['test']: msg = [] if to_create or to_delete: msg.append('ELB {0} set to have policies modified:'.format(name)) for policy in to_create: msg.append('Policy {0} added.'.format(policy)) for policy in to_delete: msg.append('Policy {0} deleted.'.format(policy)) ret['result'] = None else: msg.append('Policies already set on ELB {0}.'.format(name)) for listener in listeners_to_update: msg.append('Listener {0} policies updated.'.format(listener)) for backend in backends_to_update: msg.append('Backend {0} policies updated.'.format(backend)) ret['comment'] = ' 
'.join(msg) return ret if to_create: for policy_name in to_create: created = __salt__['boto_elb.create_policy']( name=name, policy_name=policy_name, policy_type=policies_by_cname[policy_name]['policy_type'], policy=policies_by_cname[policy_name]['policy'], region=region, key=key, keyid=keyid, profile=profile) if created: ret['changes'].setdefault(policy_name, {})['new'] = policy_name comment = "Policy {0} was created on ELB {1}".format( policy_name, name) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret for port in listeners_to_update: policy_set = __salt__['boto_elb.set_listener_policy']( name=name, port=port, policies=list(expected_policies_by_listener.get(port, [])), region=region, key=key, keyid=keyid, profile=profile) if policy_set: policy_key = 'listener_{0}_policy'.format(port) ret['changes'][policy_key] = { 'old': list(actual_policies_by_listener.get(port, [])), 'new': list(expected_policies_by_listener.get(port, [])), } comment = "Policy {0} was created on ELB {1} listener {2}".format( expected_policies_by_listener[port], name, port) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret for port in backends_to_update: policy_set = __salt__['boto_elb.set_backend_policy']( name=name, port=port, policies=list(expected_policies_by_backend.get(port, [])), region=region, key=key, keyid=keyid, profile=profile) if policy_set: policy_key = 'backend_{0}_policy'.format(port) ret['changes'][policy_key] = { 'old': list(actual_policies_by_backend.get(port, [])), 'new': list(expected_policies_by_backend.get(port, [])), } comment = "Policy {0} was created on ELB {1} backend {2}".format( expected_policies_by_backend[port], name, port) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret if to_delete: for policy_name in to_delete: deleted = __salt__['boto_elb.delete_policy']( name=name, 
policy_name=policy_name, region=region, key=key, keyid=keyid, profile=profile) if deleted: ret['changes'].setdefault(policy_name, {})['old'] = policy_name comment = "Policy {0} was deleted from ELB {1}".format( policy_name, name) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret return ret def _policy_cname(policy_dict): policy_name = policy_dict['policy_name'] policy_type = policy_dict['policy_type'] policy = policy_dict['policy'] canonical_policy_repr = six.text_type(sorted(list(six.iteritems(policy)), key=lambda x: six.text_type(x[0]))) policy_hash = hashlib.md5( salt.utils.stringutils.to_bytes(str(canonical_policy_repr))).hexdigest() # future lint: disable=blacklisted-function if policy_type.endswith('Type'): policy_type = policy_type[:-4] return "{0}-{1}-{2}".format(policy_type, policy_name, policy_hash) def absent(name, region=None, key=None, keyid=None, profile=None): ''' Ensure an ELB does not exist name name of the ELB ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'ELB {0} is set to be removed.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_elb.delete'](name, region, key, keyid, profile) if deleted: ret['changes']['old'] = {'elb': name} ret['changes']['new'] = {'elb': None} ret['comment'] = 'ELB {0} deleted.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to delete {0} ELB.'.format(name) else: ret['comment'] = '{0} ELB does not exist.'.format(name) return ret def _tags_present(name, tags, region, key, keyid, profile): ''' helper function to validate tags on elb ''' ret = {'result': True, 'comment': '', 'changes': {}} if tags: lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) tags_to_add = tags tags_to_update = {} tags_to_remove = [] if lb.get('tags'): for _tag in lb['tags']: if _tag 
not in tags.keys(): if _tag not in tags_to_remove: tags_to_remove.append(_tag) else: if tags[_tag] != lb['tags'][_tag]: tags_to_update[_tag] = tags[_tag] tags_to_add.pop(_tag) if tags_to_remove: if __opts__['test']: msg = 'The following tag{0} set to be removed: {1}.'.format( ('s are' if len(tags_to_remove) > 1 else ' is'), ', '.join(tags_to_remove)) ret['comment'] = ' '.join([ret['comment'], msg]) ret['result'] = None else: _ret = __salt__['boto_elb.delete_tags']( name, tags_to_remove, region, key, keyid, profile) if not _ret: ret['result'] = False msg = 'Error attempting to delete tag {0}.'.format(tags_to_remove) ret['comment'] = ' '.join([ret['comment'], msg]) return ret if 'old' not in ret['changes']: ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'old': {'tags': {}}}) for _tag in tags_to_remove: ret['changes']['old']['tags'][_tag] = lb['tags'][_tag] if tags_to_add or tags_to_update: if __opts__['test']: if tags_to_add: msg = 'The following tag{0} set to be added: {1}.'.format( ('s are' if len(tags_to_add.keys()) > 1 else ' is'), ', '.join(tags_to_add.keys())) ret['comment'] = ' '. join([ret['comment'], msg]) ret['result'] = None if tags_to_update: msg = 'The following tag {0} set to be updated: {1}.'.format( ('values are' if len(tags_to_update.keys()) > 1 else 'value is'), ', '.join(tags_to_update.keys())) ret['comment'] = ' '.join([ret['comment'], msg]) else: all_tag_changes = salt.utils.dictupdate.update(tags_to_add, tags_to_update) _ret = __salt__['boto_elb.set_tags']( name, all_tag_changes, region, key, keyid, profile) if not _ret: ret['result'] = False msg = 'Error attempting to set tags.' 
ret['comment'] = ' '.join([ret['comment'], msg]) return ret if 'old' not in ret['changes']: ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'old': {'tags': {}}}) if 'new' not in ret['changes']: ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'new': {'tags': {}}}) for tag in all_tag_changes: ret['changes']['new']['tags'][tag] = tags[tag] if 'tags' in lb: if lb['tags']: if tag in lb['tags']: ret['changes']['old']['tags'][tag] = lb['tags'][tag] if not tags_to_update and not tags_to_remove and not tags_to_add: msg = 'Tags are already set.' ret['comment'] = ' '.join([ret['comment'], msg]) return ret
saltstack/salt
salt/states/boto_elb.py
_alarms_present
python
def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile): '''helper method for present. ensure that cloudwatch_alarms are set''' current = __salt__['config.option'](alarms_from_pillar, {}) if alarms: current = salt.utils.dictupdate.update(current, alarms) ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} for _, info in six.iteritems(current): info["name"] = name + " " + info["name"] info["attributes"]["description"] = name + " " + info["attributes"]["description"] info["attributes"]["dimensions"] = {"LoadBalancerName": [name]} kwargs = { "name": info["name"], "attributes": info["attributes"], "region": region, "key": key, "keyid": keyid, "profile": profile, } # No test=False clause needed since the state handles that itself... results = __states__['boto_cloudwatch_alarm.present'](**kwargs) if not results.get('result'): ret["result"] = results["result"] if results.get("changes", {}) != {}: ret["changes"][info["name"]] = results["changes"] if "comment" in results: ret["comment"] += results["comment"] return ret
helper method for present. ensure that cloudwatch_alarms are set
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_elb.py#L987-L1013
null
# -*- coding: utf-8 -*- ''' Manage ELBs .. versionadded:: 2014.7.0 Create and destroy ELBs. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses ``boto``, which can be installed via package, or pip. This module accepts explicit elb credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml elb.keyid: GKTADJGHEIQSXMKKRBJ08H elb.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - availability_zones: - us-east-1a - us-east-1c - us-east-1d - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs - listeners: - elb_port: 443 instance_port: 80 elb_protocol: HTTPS instance_protocol: HTTP certificate: 'arn:aws:iam::1111111:server-certificate/mycert' policies: - my-ssl-policy - cookie-policy - elb_port: 8210 instance_port: 8210 elb_protocol: TCP - backends: - instance_port: 80 policies: - enable-proxy-protocol - health_check: target: 'HTTP:80/' - attributes: cross_zone_load_balancing: enabled: true access_log: enabled: true s3_bucket_name: 'mybucket' s3_bucket_prefix: 'my-logs' emit_interval: 5 connecting_settings: idle_timeout: 60 - cnames: - name: mycname.example.com. zone: example.com. ttl: 60 - name: myothercname.example.com. zone: example.com. 
- security_groups: - my-security-group - policies: - policy_name: my-ssl-policy policy_type: SSLNegotiationPolicyType policy: Protocol-TLSv1.2: true Protocol-SSLv3: false Server-Defined-Cipher-Order: true ECDHE-ECDSA-AES128-GCM-SHA256: true - policy_name: cookie-policy policy_type: LBCookieStickinessPolicyType policy: {} # no policy means this is a session cookie - policy_name: enable-proxy-protocol policy_type: ProxyProtocolPolicyType policy: ProxyProtocol: true # Using a profile from pillars Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile # Passing in a profile Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's possible to specify attributes from pillars by specifying a pillar. You can override the values defined in the pillard by setting the attributes on the resource. The module will use the default pillar key 'boto_elb_attributes', which allows you to set default attributes for all ELB resources. Setting the attributes pillar: .. code-block:: yaml my_elb_attributes: cross_zone_load_balancing: enabled: true connection_draining: enabled: true timeout: 20 access_log: enabled: true s3_bucket_name: 'mybucket' s3_bucket_prefix: 'my-logs' emit_interval: 5 Overriding the attribute values on the resource: .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - attributes_from_pillar: my_elb_attributes # override cross_zone_load_balancing:enabled - attributes: cross_zone_load_balancing: enabled: false - profile: myelbprofile It's possible to specify cloudwatch alarms that will be setup along with the ELB. Note the alarm name will be defined by the name attribute provided, plus the ELB resource name. .. 
code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - alarms: UnHealthyHostCount: name: 'ELB UnHealthyHostCount **MANAGED BY SALT**' attributes: metric: UnHealthyHostCount namespace: AWS/ELB statistic: Average comparison: '>=' threshold: 1.0 period: 600 evaluation_periods: 6 unit: null description: ELB UnHealthyHostCount alarm_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] insufficient_data_actions: [] ok_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] You can also use alarms from pillars, and override values from the pillar alarms by setting overrides on the resource. Note that 'boto_elb_alarms' will be used as a default value for all resources, if defined and can be used to ensure alarms are always set for a resource. Setting the alarms in a pillar: .. code-block:: yaml my_elb_alarm: UnHealthyHostCount: name: 'ELB UnHealthyHostCount **MANAGED BY SALT**' attributes: metric: UnHealthyHostCount namespace: AWS/ELB statistic: Average comparison: '>=' threshold: 1.0 period: 600 evaluation_periods: 6 unit: null description: ELB UnHealthyHostCount alarm_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] insufficient_data_actions: [] ok_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] Overriding the alarm values on the resource: .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - alarms_from_pillar: my_elb_alarm # override UnHealthyHostCount:attributes:threshold - alarms: UnHealthyHostCount: attributes: threshold: 2.0 Tags can also be set: .. versionadded:: 2016.3.0 .. 
code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - tags: MyTag: 'My Tag Value' OtherTag: 'My Other Value' ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt Libs import hashlib import re import salt.utils.data import salt.utils.dictupdate import salt.utils.stringutils from salt.exceptions import SaltInvocationError from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' return 'boto_elb' if 'boto_elb.exists' in __salt__ else False def present(name, listeners, availability_zones=None, subnets=None, subnet_names=None, security_groups=None, scheme='internet-facing', health_check=None, attributes=None, attributes_from_pillar="boto_elb_attributes", cnames=None, alarms=None, alarms_from_pillar="boto_elb_alarms", policies=None, policies_from_pillar="boto_elb_policies", backends=None, region=None, key=None, keyid=None, profile=None, wait_for_sync=True, tags=None, instance_ids=None, instance_names=None): ''' Ensure the ELB exists. name Name of the ELB. availability_zones A list of availability zones for this ELB. listeners A list of listener lists; example:: [ ['443', 'HTTPS', 'arn:aws:iam::1111111:server-certificate/mycert'], ['8443', '80', 'HTTPS', 'HTTP', 'arn:aws:iam::1111111:server-certificate/mycert'] ] subnets A list of subnet IDs in your VPC to attach to your LoadBalancer. subnet_names A list of subnet names in your VPC to attach to your LoadBalancer. security_groups The security groups assigned to your LoadBalancer within your VPC. Must be passed either as a list or a comma-separated string. For example, a list: .. code-block:: yaml - security_groups: - secgroup-one - secgroup-two Or as a comma-separated string: .. code-block:: yaml - security_groups: secgroup-one,secgroup-two scheme The type of a LoadBalancer, ``internet-facing`` or ``internal``. 
Once set, can not be modified. health_check A dict defining the health check for this ELB. attributes A dict defining the attributes to set on this ELB. Unknown keys will be silently ignored. See the :mod:`salt.modules.boto_elb.set_attributes` function for recognized attributes. attributes_from_pillar name of pillar dict that contains attributes. Attributes defined for this specific state will override those from pillar. cnames A list of cname dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. the cnames dict will be passed to the state as kwargs. See the :mod:`salt.states.boto_route53` state for information about these attributes. alarms: a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ELB. All attributes should be specified except for dimension which will be automatically set to this ELB. See the :mod:`salt.states.boto_cloudwatch_alarm` state for information about these attributes. alarms_from_pillar: name of pillar dict that contains alarm settings. Alarms defined for this specific state will override those from pillar. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. wait_for_sync Wait for an INSYNC change status from Route53. tags dict of tags instance_ids list of instance ids. The state will ensure that these, and ONLY these, instances are registered with the ELB. This is additive with instance_names. instance_names list of instance names. The state will ensure that these, and ONLY these, instances are registered with the ELB. This is additive with instance_ids. 
''' # load data from attributes_from_pillar and merge with attributes tmp = __salt__['config.option'](attributes_from_pillar, {}) attributes = salt.utils.dictupdate.update(tmp, attributes) if attributes else tmp ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not isinstance(security_groups, (six.string_types, list, type(None))): msg = ("The 'security_group' parameter must be either a list or a " "comma-separated string.") log.error(msg) ret.update({'comment': msg, 'result': False}) return ret if isinstance(security_groups, six.string_types): security_groups = security_groups.split(',') _ret = _elb_present(name, availability_zones, listeners, subnets, subnet_names, security_groups, scheme, region, key, keyid, profile) ret.update({'changes': _ret['changes'], 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not exists and __opts__['test']: return ret if attributes: _ret = _attributes_present(name, attributes, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _health_check_present(name, health_check, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret if cnames: lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if lb: for cname in cnames: _ret = None dns_provider = 'boto_route53' cname.update({'record_type': 'CNAME', 'value': lb['dns_name']}) if 'provider' in cname: dns_provider = 
cname.pop('provider') if dns_provider == 'boto_route53': for p in ('profile', 'key', 'keyid', 'region', 'wait_for_sync'): cname[p] = locals().get(p) if p not in cname else cname[p] _ret = __states__['boto_route53.present'](**cname) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _policies_present(name, policies, policies_from_pillar, listeners, backends, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _tags_present(name, tags, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret if not instance_ids: instance_ids = [] if instance_names: # AWS borks on adding instances in "non-running" states, so filter 'em out. running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') for n in instance_names: instance_ids += __salt__['boto_ec2.find_instances']( name=n, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) # Backwards compat: Only touch attached instances if requested (e.g. if some are defined). 
if instance_ids: if __opts__['test']: if __salt__['boto_elb.set_instances']( name, instance_ids, True, region, key, keyid, profile): ret['comment'] += ' ELB {0} instances would be updated.'.format(name) ret['result'] = None else: success = __salt__['boto_elb.set_instances']( name, instance_ids, False, region, key, keyid, profile) if not success: ret['comment'] += "Failed to set requested instances." ret['result'] = False return ret def register_instances(name, instances, region=None, key=None, keyid=None, profile=None): ''' Add EC2 instance(s) to an Elastic Load Balancer. Removing an instance from the ``instances`` list does not remove it from the ELB. name The name of the Elastic Load Balancer to add EC2 instances to. instances A list of EC2 instance IDs that this Elastic Load Balancer should distribute traffic to. This state will only ever append new instances to the ELB. EC2 instances already associated with this ELB will not be removed if they are not in the ``instances`` list. .. versionadded:: 2015.8.0 .. 
code-block:: yaml add-instances: boto_elb.register_instances: - name: myloadbalancer - instances: - instance-id1 - instance-id2 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not lb: msg = 'Could not find lb {0}'.format(name) log.error(msg) ret.update({'comment': msg, 'result': False}) return ret health = __salt__['boto_elb.get_instance_health']( name, region, key, keyid, profile) nodes = [value['instance_id'] for value in health if value['description'] != 'Instance deregistration currently in progress.'] new = [value for value in instances if value not in nodes] if not new: msg = 'Instance/s {0} already exist.'.format(six.text_type(instances).strip('[]')) log.debug(msg) ret.update({'comment': msg}) return ret if __opts__['test']: ret['comment'] = 'ELB {0} is set to register : {1}.'.format(name, new) ret['result'] = None return ret state = __salt__['boto_elb.register_instances']( name, instances, region, key, keyid, profile) if state: msg = 'Load Balancer {0} has been changed'.format(name) log.info(msg) new = set().union(nodes, instances) ret.update({'comment': msg, 'changes': {'old': '\n'.join(nodes), 'new': '\n'.join(list(new))}}) else: msg = 'Load balancer {0} failed to add instances'.format(name) log.error(msg) ret.update({'comment': msg, 'result': False}) return ret DEFAULT_PILLAR_LISTENER_POLICY_KEY = 'boto_elb_listener_policies' def _elb_present(name, availability_zones, listeners, subnets, subnet_names, security_groups, scheme, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if not salt.utils.data.exactly_one((availability_zones, subnets, subnet_names)): raise SaltInvocationError('Exactly one of availability_zones, subnets, ' 'subnet_names must be provided as arguments.') if not listeners: listeners = [] for listener in listeners: if len(listener) < 3: raise SaltInvocationError('Listeners must have at minimum port,' ' 
instance_port and protocol values in' ' the provided list.') if 'elb_port' not in listener: raise SaltInvocationError('elb_port is a required value for' ' listeners.') if 'instance_port' not in listener: raise SaltInvocationError('instance_port is a required value for' ' listeners.') if 'elb_protocol' not in listener: raise SaltInvocationError('elb_protocol is a required value for' ' listeners.') listener['elb_protocol'] = listener['elb_protocol'].upper() if listener['elb_protocol'] == 'HTTPS' and 'certificate' not in listener: raise SaltInvocationError('certificate is a required value for' ' listeners if HTTPS is set for' ' elb_protocol.') # best attempt at principle of least surprise here: # only use the default pillar in cases where we don't explicitly # define policies OR policies_from_pillar on a listener policies = listener.setdefault('policies', []) policies_pillar = listener.get('policies_from_pillar', None) if not policies and policies_pillar is None: policies_pillar = DEFAULT_PILLAR_LISTENER_POLICY_KEY if policies_pillar: policies += __salt__['pillar.get'](policies_pillar, {}).get(listener['elb_protocol'], []) # Look up subnet ids from names if provided if subnet_names: subnets = [] for i in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', name=i, region=region, key=key, keyid=keyid, profile=profile) if 'error' in r: ret['comment'] = 'Error looking up subnet ids: {0}'.format(r['error']) ret['result'] = False return ret if 'id' not in r: ret['comment'] = 'Subnet {0} does not exist.'.format(i) ret['result'] = False return ret subnets.append(r['id']) _security_groups = None if subnets: vpc_id = __salt__['boto_vpc.get_subnet_association'](subnets, region, key, keyid, profile) vpc_id = vpc_id.get('vpc_id') if not vpc_id: ret['comment'] = 'Subnets {0} do not map to a valid vpc id.'.format(subnets) ret['result'] = False return ret _security_groups = __salt__['boto_secgroup.convert_to_group_ids']( security_groups, vpc_id=vpc_id, region=region, 
key=key, keyid=keyid, profile=profile ) if not _security_groups: ret['comment'] = 'Security groups {0} do not map to valid security group ids.'.format(security_groups) ret['result'] = False return ret exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not exists: if __opts__['test']: ret['comment'] = 'ELB {0} is set to be created.'.format(name) ret['result'] = None return ret created = __salt__['boto_elb.create'](name=name, availability_zones=availability_zones, listeners=listeners, subnets=subnets, security_groups=_security_groups, scheme=scheme, region=region, key=key, keyid=keyid, profile=profile) if created: ret['changes']['old'] = {'elb': None} ret['changes']['new'] = {'elb': name} ret['comment'] = 'ELB {0} created.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to create {0} ELB.'.format(name) else: ret['comment'] = 'ELB {0} present.'.format(name) _ret = _security_groups_present(name, _security_groups, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _listeners_present(name, listeners, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret if availability_zones: _ret = _zones_present(name, availability_zones, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret elif subnets: _ret = _subnets_present(name, subnets, region, key, keyid, profile) ret['changes'] = 
salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret def _listeners_present(name, listeners, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) ret['result'] = False return ret if not listeners: listeners = [] expected_listeners_by_tuple = {} for l in listeners: l_key = __salt__['boto_elb.listener_dict_to_tuple'](l) expected_listeners_by_tuple[l_key] = l actual_listeners_by_tuple = {} for l in lb['listeners']: l_key = __salt__['boto_elb.listener_dict_to_tuple'](l) actual_listeners_by_tuple[l_key] = l to_delete = [] to_create = [] for t, l in six.iteritems(expected_listeners_by_tuple): if t not in actual_listeners_by_tuple: to_create.append(l) for t, l in six.iteritems(actual_listeners_by_tuple): if t not in expected_listeners_by_tuple: to_delete.append(l) if __opts__['test']: msg = [] if to_create or to_delete: msg.append('ELB {0} set to have listeners modified:'.format(name)) for listener in to_create: msg.append('Listener {0} added.'.format( __salt__['boto_elb.listener_dict_to_tuple'](listener))) for listener in to_delete: msg.append('Listener {0} deleted.'.format( __salt__['boto_elb.listener_dict_to_tuple'](listener))) ret['result'] = None else: msg.append('Listeners already set on ELB {0}.'.format(name)) ret['comment'] = ' '.join(msg) return ret if to_delete: ports = [l['elb_port'] for l in to_delete] deleted = __salt__['boto_elb.delete_listeners'](name, ports, region, key, keyid, profile) if deleted: ret['comment'] = 'Deleted listeners on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to delete listeners on {0} ELB.'.format(name) ret['result'] = False if to_create: created = __salt__['boto_elb.create_listeners'](name, 
to_create, region, key, keyid, profile) if created: msg = 'Created listeners on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) else: msg = 'Failed to create listeners on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) ret['result'] = False if to_create or to_delete: ret['changes']['listeners'] = {} ret['changes']['listeners']['old'] = lb['listeners'] lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) ret['changes']['listeners']['new'] = lb['listeners'] else: ret['comment'] = 'Listeners already set on ELB {0}.'.format(name) return ret def _security_groups_present(name, security_groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) ret['result'] = False return ret if not security_groups: security_groups = [] change_needed = False if set(security_groups) != set(lb['security_groups']): change_needed = True if change_needed: if __opts__['test']: ret['comment'] = 'ELB {0} set to have security groups modified.'.format(name) ret['result'] = None return ret changed = __salt__['boto_elb.apply_security_groups']( name, security_groups, region, key, keyid, profile ) if changed: ret['comment'] = 'Modified security_groups on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to modify security_groups on {0} ELB.'.format(name) ret['result'] = False ret['changes']['old'] = {'security_groups': lb['security_groups']} ret['changes']['new'] = {'security_groups': security_groups} else: ret['comment'] = 'security_groups already set on ELB {0}.'.format(name) return ret def _attributes_present(name, attributes, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} _attributes = __salt__['boto_elb.get_attributes'](name, region, key, keyid, profile) if not _attributes: ret['result'] = 
False ret['comment'] = 'Failed to retrieve attributes for ELB {0}.'.format(name) return ret attrs_to_set = [] if 'cross_zone_load_balancing' in attributes: czlb = attributes['cross_zone_load_balancing'] _czlb = _attributes['cross_zone_load_balancing'] if czlb['enabled'] != _czlb['enabled']: attrs_to_set.append('cross_zone_load_balancing') if 'connection_draining' in attributes: cd = attributes['connection_draining'] _cd = _attributes['connection_draining'] if (cd['enabled'] != _cd['enabled'] or cd.get('timeout', 300) != _cd.get('timeout')): attrs_to_set.append('connection_draining') if 'connecting_settings' in attributes: cs = attributes['connecting_settings'] _cs = _attributes['connecting_settings'] if cs['idle_timeout'] != _cs['idle_timeout']: attrs_to_set.append('connecting_settings') if 'access_log' in attributes: for attr, val in six.iteritems(attributes['access_log']): if six.text_type(_attributes['access_log'][attr]) != six.text_type(val): attrs_to_set.append('access_log') if 's3_bucket_prefix' in attributes['access_log']: sbp = attributes['access_log']['s3_bucket_prefix'] if sbp.startswith('/') or sbp.endswith('/'): raise SaltInvocationError('s3_bucket_prefix can not start or' ' end with /.') if attrs_to_set: if __opts__['test']: ret['comment'] = 'ELB {0} set to have attributes set.'.format(name) ret['result'] = None return ret was_set = __salt__['boto_elb.set_attributes'](name, attributes, region, key, keyid, profile) if was_set: ret['changes']['old'] = {'attributes': _attributes} ret['changes']['new'] = {'attributes': attributes} ret['comment'] = 'Set attributes on ELB {0}.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to set attributes on ELB {0}.'.format(name) else: ret['comment'] = 'Attributes already set on ELB {0}.'.format(name) return ret def _health_check_present(name, health_check, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if not health_check: health_check = {} _health_check = 
__salt__['boto_elb.get_health_check'](name, region, key, keyid, profile) if not _health_check: ret['result'] = False ret['comment'] = 'Failed to retrieve health_check for ELB {0}.'.format(name) return ret need_to_set = False for attr, val in six.iteritems(health_check): if six.text_type(_health_check[attr]) != six.text_type(val): need_to_set = True if need_to_set: if __opts__['test']: ret['comment'] = 'ELB {0} set to have health check set.'.format(name) ret['result'] = None return ret was_set = __salt__['boto_elb.set_health_check'](name, health_check, region, key, keyid, profile) if was_set: ret['changes']['old'] = {'health_check': _health_check} _health_check = __salt__['boto_elb.get_health_check'](name, region, key, keyid, profile) ret['changes']['new'] = {'health_check': _health_check} ret['comment'] = 'Set health check on ELB {0}.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to set health check on ELB {0}.'.format(name) else: ret['comment'] = 'Health check already set on ELB {0}.'.format(name) return ret def _zones_present(name, availability_zones, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['result'] = False ret['comment'] = 'Failed to retrieve ELB {0}.'.format(name) return ret to_enable = [] to_disable = [] _zones = lb['availability_zones'] for zone in availability_zones: if zone not in _zones: to_enable.append(zone) for zone in _zones: if zone not in availability_zones: to_disable.append(zone) if to_enable or to_disable: if __opts__['test']: ret['comment'] = 'ELB {0} to have availability zones set.'.format(name) ret['result'] = None return ret if to_enable: enabled = __salt__['boto_elb.enable_availability_zones']( name, to_enable, region, key, keyid, profile) if enabled: ret['comment'] = 'Enabled availability zones on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to enable availability zones on {0} 
ELB.'.format(name) ret['result'] = False if to_disable: disabled = __salt__['boto_elb.disable_availability_zones']( name, to_disable, region, key, keyid, profile) if disabled: msg = 'Disabled availability zones on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) else: msg = 'Failed to disable availability zones on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) ret['result'] = False ret['changes']['old'] = {'availability_zones': lb['availability_zones']} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) ret['changes']['new'] = {'availability_zones': lb['availability_zones']} else: ret['comment'] = 'Availability zones already set on ELB {0}.'.format(name) return ret def _subnets_present(name, subnets, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if not subnets: subnets = [] lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['result'] = False ret['comment'] = 'Failed to retrieve ELB {0}.'.format(name) return ret to_enable = [] to_disable = [] _subnets = lb['subnets'] for subnet in subnets: if subnet not in _subnets: to_enable.append(subnet) for subnet in _subnets: if subnet not in subnets: to_disable.append(subnet) if to_enable or to_disable: if __opts__['test']: ret['comment'] = 'ELB {0} to have subnets set.'.format(name) ret['result'] = None return ret if to_enable: attached = __salt__['boto_elb.attach_subnets'](name, to_enable, region, key, keyid, profile) if attached: ret['comment'] = 'Attached subnets on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to attach subnets on {0} ELB.'.format(name) ret['result'] = False if to_disable: detached = __salt__['boto_elb.detach_subnets'](name, to_disable, region, key, keyid, profile) if detached: ret['comment'] = ' '.join([ ret['comment'], 'Detached subnets on {0} ELB.'.format(name) ]) else: ret['comment'] = ' '.join([ ret['comment'], 'Failed to detach subnets on {0} 
ELB.'.format(name) ]) ret['result'] = False ret['changes']['old'] = {'subnets': lb['subnets']} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) ret['changes']['new'] = {'subnets': lb['subnets']} else: ret['comment'] = 'Subnets already set on ELB {0}.'.format(name) return ret def _policies_present(name, policies, policies_from_pillar, listeners, backends, region, key, keyid, profile): '''helper method for present. ensure that ELB policies are set''' if policies is None: policies = [] pillar_policies = __salt__['config.option'](policies_from_pillar, []) policies = policies + pillar_policies if backends is None: backends = [] # check for policy name uniqueness and correct type policy_names = set() for p in policies: if 'policy_name' not in p: raise SaltInvocationError('policy_name is a required value for ' 'policies.') if 'policy_type' not in p: raise SaltInvocationError('policy_type is a required value for ' 'policies.') if 'policy' not in p: raise SaltInvocationError('policy is a required value for ' 'listeners.') # check for unique policy names if p['policy_name'] in policy_names: raise SaltInvocationError('Policy names must be unique: policy {0}' ' is declared twice.'.format(p['policy_name'])) policy_names.add(p['policy_name']) # check that listeners refer to valid policy names for l in listeners: for p in l.get('policies', []): if p not in policy_names: raise SaltInvocationError('Listener {0} on ELB {1} refers to ' 'undefined policy {2}.'.format(l['elb_port'], name, p)) # check that backends refer to valid policy names for b in backends: for p in b.get('policies', []): if p not in policy_names: raise SaltInvocationError('Backend {0} on ELB {1} refers to ' 'undefined policy ' '{2}.'.format(b['instance_port'], name, p)) ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) 
ret['result'] = False return ret # Policies have two names: # - a short name ('name') that's only the policy name (e.g. testpolicy) # - a canonical name ('cname') that contains the policy type and hash # (e.g. SSLNegotiationPolicy-testpolicy-14b32f668639cc8ea1391e062af98524) policies_by_cname = {} cnames_by_name = {} for p in policies: cname = _policy_cname(p) policies_by_cname[cname] = p cnames_by_name[p['policy_name']] = cname expected_policy_names = policies_by_cname.keys() actual_policy_names = lb['policies'] # This is sadly a huge hack to get around the fact that AWS assigns a # default SSLNegotiationPolicyType policy (with the naming scheme # ELBSecurityPolicy-YYYY-MM) to all ELBs terminating SSL without an # explicit policy set. If we don't keep track of the default policies and # explicitly exclude them from deletion, orchestration will fail because we # attempt to delete the default policy that's being used by listeners that # were created with no explicit policy. default_aws_policies = set() expected_policies_by_listener = {} for l in listeners: expected_policies_by_listener[l['elb_port']] = set( [cnames_by_name[p] for p in l.get('policies', [])]) actual_policies_by_listener = {} for l in lb['listeners']: listener_policies = set(l.get('policies', [])) actual_policies_by_listener[l['elb_port']] = listener_policies # Determine if any actual listener policies look like default policies, # so we can exclude them from deletion below (see note about this hack # above). 
for p in listener_policies: if re.match(r'^ELBSecurityPolicy-\d{4}-\d{2}$', p): default_aws_policies.add(p) expected_policies_by_backend = {} for b in backends: expected_policies_by_backend[b['instance_port']] = set( [cnames_by_name[p] for p in b.get('policies', [])]) actual_policies_by_backend = {} for b in lb['backends']: backend_policies = set(b.get('policies', [])) actual_policies_by_backend[b['instance_port']] = backend_policies to_delete = [] to_create = [] for policy_name in expected_policy_names: if policy_name not in actual_policy_names: to_create.append(policy_name) for policy_name in actual_policy_names: if policy_name not in expected_policy_names: if policy_name not in default_aws_policies: to_delete.append(policy_name) listeners_to_update = set() for port, policies in six.iteritems(expected_policies_by_listener): if policies != actual_policies_by_listener.get(port, set()): listeners_to_update.add(port) for port, policies in six.iteritems(actual_policies_by_listener): if policies != expected_policies_by_listener.get(port, set()): listeners_to_update.add(port) backends_to_update = set() for port, policies in six.iteritems(expected_policies_by_backend): if policies != actual_policies_by_backend.get(port, set()): backends_to_update.add(port) for port, policies in six.iteritems(actual_policies_by_backend): if policies != expected_policies_by_backend.get(port, set()): backends_to_update.add(port) if __opts__['test']: msg = [] if to_create or to_delete: msg.append('ELB {0} set to have policies modified:'.format(name)) for policy in to_create: msg.append('Policy {0} added.'.format(policy)) for policy in to_delete: msg.append('Policy {0} deleted.'.format(policy)) ret['result'] = None else: msg.append('Policies already set on ELB {0}.'.format(name)) for listener in listeners_to_update: msg.append('Listener {0} policies updated.'.format(listener)) for backend in backends_to_update: msg.append('Backend {0} policies updated.'.format(backend)) ret['comment'] = ' 
'.join(msg) return ret if to_create: for policy_name in to_create: created = __salt__['boto_elb.create_policy']( name=name, policy_name=policy_name, policy_type=policies_by_cname[policy_name]['policy_type'], policy=policies_by_cname[policy_name]['policy'], region=region, key=key, keyid=keyid, profile=profile) if created: ret['changes'].setdefault(policy_name, {})['new'] = policy_name comment = "Policy {0} was created on ELB {1}".format( policy_name, name) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret for port in listeners_to_update: policy_set = __salt__['boto_elb.set_listener_policy']( name=name, port=port, policies=list(expected_policies_by_listener.get(port, [])), region=region, key=key, keyid=keyid, profile=profile) if policy_set: policy_key = 'listener_{0}_policy'.format(port) ret['changes'][policy_key] = { 'old': list(actual_policies_by_listener.get(port, [])), 'new': list(expected_policies_by_listener.get(port, [])), } comment = "Policy {0} was created on ELB {1} listener {2}".format( expected_policies_by_listener[port], name, port) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret for port in backends_to_update: policy_set = __salt__['boto_elb.set_backend_policy']( name=name, port=port, policies=list(expected_policies_by_backend.get(port, [])), region=region, key=key, keyid=keyid, profile=profile) if policy_set: policy_key = 'backend_{0}_policy'.format(port) ret['changes'][policy_key] = { 'old': list(actual_policies_by_backend.get(port, [])), 'new': list(expected_policies_by_backend.get(port, [])), } comment = "Policy {0} was created on ELB {1} backend {2}".format( expected_policies_by_backend[port], name, port) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret if to_delete: for policy_name in to_delete: deleted = __salt__['boto_elb.delete_policy']( name=name, 
policy_name=policy_name, region=region, key=key, keyid=keyid, profile=profile) if deleted: ret['changes'].setdefault(policy_name, {})['old'] = policy_name comment = "Policy {0} was deleted from ELB {1}".format( policy_name, name) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret return ret def _policy_cname(policy_dict): policy_name = policy_dict['policy_name'] policy_type = policy_dict['policy_type'] policy = policy_dict['policy'] canonical_policy_repr = six.text_type(sorted(list(six.iteritems(policy)), key=lambda x: six.text_type(x[0]))) policy_hash = hashlib.md5( salt.utils.stringutils.to_bytes(str(canonical_policy_repr))).hexdigest() # future lint: disable=blacklisted-function if policy_type.endswith('Type'): policy_type = policy_type[:-4] return "{0}-{1}-{2}".format(policy_type, policy_name, policy_hash) def absent(name, region=None, key=None, keyid=None, profile=None): ''' Ensure an ELB does not exist name name of the ELB ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'ELB {0} is set to be removed.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_elb.delete'](name, region, key, keyid, profile) if deleted: ret['changes']['old'] = {'elb': name} ret['changes']['new'] = {'elb': None} ret['comment'] = 'ELB {0} deleted.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to delete {0} ELB.'.format(name) else: ret['comment'] = '{0} ELB does not exist.'.format(name) return ret def _tags_present(name, tags, region, key, keyid, profile): ''' helper function to validate tags on elb ''' ret = {'result': True, 'comment': '', 'changes': {}} if tags: lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) tags_to_add = tags tags_to_update = {} tags_to_remove = [] if lb.get('tags'): for _tag in lb['tags']: if _tag 
not in tags.keys(): if _tag not in tags_to_remove: tags_to_remove.append(_tag) else: if tags[_tag] != lb['tags'][_tag]: tags_to_update[_tag] = tags[_tag] tags_to_add.pop(_tag) if tags_to_remove: if __opts__['test']: msg = 'The following tag{0} set to be removed: {1}.'.format( ('s are' if len(tags_to_remove) > 1 else ' is'), ', '.join(tags_to_remove)) ret['comment'] = ' '.join([ret['comment'], msg]) ret['result'] = None else: _ret = __salt__['boto_elb.delete_tags']( name, tags_to_remove, region, key, keyid, profile) if not _ret: ret['result'] = False msg = 'Error attempting to delete tag {0}.'.format(tags_to_remove) ret['comment'] = ' '.join([ret['comment'], msg]) return ret if 'old' not in ret['changes']: ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'old': {'tags': {}}}) for _tag in tags_to_remove: ret['changes']['old']['tags'][_tag] = lb['tags'][_tag] if tags_to_add or tags_to_update: if __opts__['test']: if tags_to_add: msg = 'The following tag{0} set to be added: {1}.'.format( ('s are' if len(tags_to_add.keys()) > 1 else ' is'), ', '.join(tags_to_add.keys())) ret['comment'] = ' '. join([ret['comment'], msg]) ret['result'] = None if tags_to_update: msg = 'The following tag {0} set to be updated: {1}.'.format( ('values are' if len(tags_to_update.keys()) > 1 else 'value is'), ', '.join(tags_to_update.keys())) ret['comment'] = ' '.join([ret['comment'], msg]) else: all_tag_changes = salt.utils.dictupdate.update(tags_to_add, tags_to_update) _ret = __salt__['boto_elb.set_tags']( name, all_tag_changes, region, key, keyid, profile) if not _ret: ret['result'] = False msg = 'Error attempting to set tags.' 
ret['comment'] = ' '.join([ret['comment'], msg]) return ret if 'old' not in ret['changes']: ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'old': {'tags': {}}}) if 'new' not in ret['changes']: ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'new': {'tags': {}}}) for tag in all_tag_changes: ret['changes']['new']['tags'][tag] = tags[tag] if 'tags' in lb: if lb['tags']: if tag in lb['tags']: ret['changes']['old']['tags'][tag] = lb['tags'][tag] if not tags_to_update and not tags_to_remove and not tags_to_add: msg = 'Tags are already set.' ret['comment'] = ' '.join([ret['comment'], msg]) return ret
saltstack/salt
salt/states/boto_elb.py
_policies_present
python
def _policies_present(name, policies, policies_from_pillar, listeners,
                      backends, region, key, keyid, profile):
    '''
    Helper for ``present``: ensure the ELB's policies are set as declared.

    Merges ``policies`` with those pulled from the ``policies_from_pillar``
    config option, validates them (required keys, unique names, and that every
    listener/backend policy reference resolves), then creates missing
    policies, deletes unexpected ones (excluding AWS-assigned default SSL
    policies), and reassigns policy lists on listeners and backends whose
    actual set differs from the expected set.

    Returns a state-style dict with ``result``, ``comment`` and ``changes``.
    Raises ``SaltInvocationError`` on malformed or dangling policy
    definitions.
    '''
    if policies is None:
        policies = []
    pillar_policies = __salt__['config.option'](policies_from_pillar, [])
    policies = policies + pillar_policies
    if backends is None:
        backends = []

    # check for policy name uniqueness and correct type
    policy_names = set()
    for p in policies:
        if 'policy_name' not in p:
            raise SaltInvocationError('policy_name is a required value for '
                                      'policies.')
        if 'policy_type' not in p:
            raise SaltInvocationError('policy_type is a required value for '
                                      'policies.')
        if 'policy' not in p:
            raise SaltInvocationError('policy is a required value for '
                                      'listeners.')
        # check for unique policy names
        if p['policy_name'] in policy_names:
            raise SaltInvocationError('Policy names must be unique: policy {0}'
                                      ' is declared twice.'.format(p['policy_name']))
        policy_names.add(p['policy_name'])

    # check that listeners refer to valid policy names
    for l in listeners:
        for p in l.get('policies', []):
            if p not in policy_names:
                raise SaltInvocationError('Listener {0} on ELB {1} refers to '
                                          'undefined policy {2}.'.format(l['elb_port'], name, p))

    # check that backends refer to valid policy names
    for b in backends:
        for p in b.get('policies', []):
            if p not in policy_names:
                raise SaltInvocationError('Backend {0} on ELB {1} refers to '
                                          'undefined policy '
                                          '{2}.'.format(b['instance_port'], name, p))

    ret = {'result': True, 'comment': '', 'changes': {}}

    lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile)
    if not lb:
        ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name)
        ret['result'] = False
        return ret

    # Policies have two names:
    # - a short name ('name') that's only the policy name (e.g. testpolicy)
    # - a canonical name ('cname') that contains the policy type and hash
    #   (e.g. SSLNegotiationPolicy-testpolicy-14b32f668639cc8ea1391e062af98524)
    policies_by_cname = {}
    cnames_by_name = {}
    for p in policies:
        cname = _policy_cname(p)
        policies_by_cname[cname] = p
        cnames_by_name[p['policy_name']] = cname

    expected_policy_names = policies_by_cname.keys()
    actual_policy_names = lb['policies']

    # This is sadly a huge hack to get around the fact that AWS assigns a
    # default SSLNegotiationPolicyType policy (with the naming scheme
    # ELBSecurityPolicy-YYYY-MM) to all ELBs terminating SSL without an
    # explicit policy set. If we don't keep track of the default policies and
    # explicitly exclude them from deletion, orchestration will fail because we
    # attempt to delete the default policy that's being used by listeners that
    # were created with no explicit policy.
    default_aws_policies = set()

    expected_policies_by_listener = {}
    for l in listeners:
        expected_policies_by_listener[l['elb_port']] = set(
            [cnames_by_name[p] for p in l.get('policies', [])])

    actual_policies_by_listener = {}
    for l in lb['listeners']:
        listener_policies = set(l.get('policies', []))
        actual_policies_by_listener[l['elb_port']] = listener_policies
        # Determine if any actual listener policies look like default policies,
        # so we can exclude them from deletion below (see note about this hack
        # above).
        for p in listener_policies:
            if re.match(r'^ELBSecurityPolicy-\d{4}-\d{2}$', p):
                default_aws_policies.add(p)

    expected_policies_by_backend = {}
    for b in backends:
        expected_policies_by_backend[b['instance_port']] = set(
            [cnames_by_name[p] for p in b.get('policies', [])])

    actual_policies_by_backend = {}
    for b in lb['backends']:
        backend_policies = set(b.get('policies', []))
        actual_policies_by_backend[b['instance_port']] = backend_policies

    to_delete = []
    to_create = []

    for policy_name in expected_policy_names:
        if policy_name not in actual_policy_names:
            to_create.append(policy_name)
    for policy_name in actual_policy_names:
        if policy_name not in expected_policy_names:
            # Never delete AWS's implicit default SSL policies (see hack
            # note above).
            if policy_name not in default_aws_policies:
                to_delete.append(policy_name)

    # Symmetric diff of expected vs. actual policy cnames per port.  NOTE:
    # the loop variable is deliberately NOT named 'policies' -- the original
    # code clobbered the 'policies' argument here.
    listeners_to_update = set()
    for port, cname_set in six.iteritems(expected_policies_by_listener):
        if cname_set != actual_policies_by_listener.get(port, set()):
            listeners_to_update.add(port)
    for port, cname_set in six.iteritems(actual_policies_by_listener):
        if cname_set != expected_policies_by_listener.get(port, set()):
            listeners_to_update.add(port)

    backends_to_update = set()
    for port, cname_set in six.iteritems(expected_policies_by_backend):
        if cname_set != actual_policies_by_backend.get(port, set()):
            backends_to_update.add(port)
    for port, cname_set in six.iteritems(actual_policies_by_backend):
        if cname_set != expected_policies_by_backend.get(port, set()):
            backends_to_update.add(port)

    if __opts__['test']:
        # Dry run: describe the pending changes without touching AWS.
        msg = []
        if to_create or to_delete:
            msg.append('ELB {0} set to have policies modified:'.format(name))
            for policy in to_create:
                msg.append('Policy {0} added.'.format(policy))
            for policy in to_delete:
                msg.append('Policy {0} deleted.'.format(policy))
            ret['result'] = None
        else:
            msg.append('Policies already set on ELB {0}.'.format(name))
        for listener in listeners_to_update:
            msg.append('Listener {0} policies updated.'.format(listener))
        for backend in backends_to_update:
            msg.append('Backend {0} policies updated.'.format(backend))
        ret['comment'] = ' '.join(msg)
        return ret

    if to_create:
        for policy_name in to_create:
            created = __salt__['boto_elb.create_policy'](
                name=name,
                policy_name=policy_name,
                policy_type=policies_by_cname[policy_name]['policy_type'],
                policy=policies_by_cname[policy_name]['policy'],
                region=region,
                key=key,
                keyid=keyid,
                profile=profile)
            if created:
                ret['changes'].setdefault(policy_name, {})['new'] = policy_name
                comment = "Policy {0} was created on ELB {1}".format(
                    policy_name, name)
                ret['comment'] = ' '.join([ret['comment'], comment])
                ret['result'] = True
            else:
                ret['result'] = False
                return ret

    for port in listeners_to_update:
        policy_set = __salt__['boto_elb.set_listener_policy'](
            name=name,
            port=port,
            policies=list(expected_policies_by_listener.get(port, [])),
            region=region,
            key=key,
            keyid=keyid,
            profile=profile)
        if policy_set:
            policy_key = 'listener_{0}_policy'.format(port)
            ret['changes'][policy_key] = {
                'old': list(actual_policies_by_listener.get(port, [])),
                'new': list(expected_policies_by_listener.get(port, [])),
            }
            comment = "Policy {0} was created on ELB {1} listener {2}".format(
                expected_policies_by_listener[port], name, port)
            ret['comment'] = ' '.join([ret['comment'], comment])
            ret['result'] = True
        else:
            ret['result'] = False
            return ret

    for port in backends_to_update:
        policy_set = __salt__['boto_elb.set_backend_policy'](
            name=name,
            port=port,
            policies=list(expected_policies_by_backend.get(port, [])),
            region=region,
            key=key,
            keyid=keyid,
            profile=profile)
        if policy_set:
            policy_key = 'backend_{0}_policy'.format(port)
            ret['changes'][policy_key] = {
                'old': list(actual_policies_by_backend.get(port, [])),
                'new': list(expected_policies_by_backend.get(port, [])),
            }
            comment = "Policy {0} was created on ELB {1} backend {2}".format(
                expected_policies_by_backend[port], name, port)
            ret['comment'] = ' '.join([ret['comment'], comment])
            ret['result'] = True
        else:
            ret['result'] = False
            return ret

    if to_delete:
        for policy_name in to_delete:
            deleted = __salt__['boto_elb.delete_policy'](
                name=name,
                policy_name=policy_name,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile)
            if deleted:
                ret['changes'].setdefault(policy_name, {})['old'] = policy_name
                comment = "Policy {0} was deleted from ELB {1}".format(
                    policy_name, name)
                ret['comment'] = ' '.join([ret['comment'], comment])
                ret['result'] = True
            else:
                ret['result'] = False
                return ret
    return ret
Helper method for present. Ensure that ELB policies are set.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_elb.py#L1016-L1247
null
# -*- coding: utf-8 -*- ''' Manage ELBs .. versionadded:: 2014.7.0 Create and destroy ELBs. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses ``boto``, which can be installed via package, or pip. This module accepts explicit elb credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml elb.keyid: GKTADJGHEIQSXMKKRBJ08H elb.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - availability_zones: - us-east-1a - us-east-1c - us-east-1d - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs - listeners: - elb_port: 443 instance_port: 80 elb_protocol: HTTPS instance_protocol: HTTP certificate: 'arn:aws:iam::1111111:server-certificate/mycert' policies: - my-ssl-policy - cookie-policy - elb_port: 8210 instance_port: 8210 elb_protocol: TCP - backends: - instance_port: 80 policies: - enable-proxy-protocol - health_check: target: 'HTTP:80/' - attributes: cross_zone_load_balancing: enabled: true access_log: enabled: true s3_bucket_name: 'mybucket' s3_bucket_prefix: 'my-logs' emit_interval: 5 connecting_settings: idle_timeout: 60 - cnames: - name: mycname.example.com. zone: example.com. ttl: 60 - name: myothercname.example.com. zone: example.com. 
- security_groups: - my-security-group - policies: - policy_name: my-ssl-policy policy_type: SSLNegotiationPolicyType policy: Protocol-TLSv1.2: true Protocol-SSLv3: false Server-Defined-Cipher-Order: true ECDHE-ECDSA-AES128-GCM-SHA256: true - policy_name: cookie-policy policy_type: LBCookieStickinessPolicyType policy: {} # no policy means this is a session cookie - policy_name: enable-proxy-protocol policy_type: ProxyProtocolPolicyType policy: ProxyProtocol: true # Using a profile from pillars Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile # Passing in a profile Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's possible to specify attributes from pillars by specifying a pillar. You can override the values defined in the pillard by setting the attributes on the resource. The module will use the default pillar key 'boto_elb_attributes', which allows you to set default attributes for all ELB resources. Setting the attributes pillar: .. code-block:: yaml my_elb_attributes: cross_zone_load_balancing: enabled: true connection_draining: enabled: true timeout: 20 access_log: enabled: true s3_bucket_name: 'mybucket' s3_bucket_prefix: 'my-logs' emit_interval: 5 Overriding the attribute values on the resource: .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - attributes_from_pillar: my_elb_attributes # override cross_zone_load_balancing:enabled - attributes: cross_zone_load_balancing: enabled: false - profile: myelbprofile It's possible to specify cloudwatch alarms that will be setup along with the ELB. Note the alarm name will be defined by the name attribute provided, plus the ELB resource name. .. 
code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - alarms: UnHealthyHostCount: name: 'ELB UnHealthyHostCount **MANAGED BY SALT**' attributes: metric: UnHealthyHostCount namespace: AWS/ELB statistic: Average comparison: '>=' threshold: 1.0 period: 600 evaluation_periods: 6 unit: null description: ELB UnHealthyHostCount alarm_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] insufficient_data_actions: [] ok_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] You can also use alarms from pillars, and override values from the pillar alarms by setting overrides on the resource. Note that 'boto_elb_alarms' will be used as a default value for all resources, if defined and can be used to ensure alarms are always set for a resource. Setting the alarms in a pillar: .. code-block:: yaml my_elb_alarm: UnHealthyHostCount: name: 'ELB UnHealthyHostCount **MANAGED BY SALT**' attributes: metric: UnHealthyHostCount namespace: AWS/ELB statistic: Average comparison: '>=' threshold: 1.0 period: 600 evaluation_periods: 6 unit: null description: ELB UnHealthyHostCount alarm_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] insufficient_data_actions: [] ok_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] Overriding the alarm values on the resource: .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - alarms_from_pillar: my_elb_alarm # override UnHealthyHostCount:attributes:threshold - alarms: UnHealthyHostCount: attributes: threshold: 2.0 Tags can also be set: .. versionadded:: 2016.3.0 .. 
code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - tags: MyTag: 'My Tag Value' OtherTag: 'My Other Value' ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt Libs import hashlib import re import salt.utils.data import salt.utils.dictupdate import salt.utils.stringutils from salt.exceptions import SaltInvocationError from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' return 'boto_elb' if 'boto_elb.exists' in __salt__ else False def present(name, listeners, availability_zones=None, subnets=None, subnet_names=None, security_groups=None, scheme='internet-facing', health_check=None, attributes=None, attributes_from_pillar="boto_elb_attributes", cnames=None, alarms=None, alarms_from_pillar="boto_elb_alarms", policies=None, policies_from_pillar="boto_elb_policies", backends=None, region=None, key=None, keyid=None, profile=None, wait_for_sync=True, tags=None, instance_ids=None, instance_names=None): ''' Ensure the ELB exists. name Name of the ELB. availability_zones A list of availability zones for this ELB. listeners A list of listener lists; example:: [ ['443', 'HTTPS', 'arn:aws:iam::1111111:server-certificate/mycert'], ['8443', '80', 'HTTPS', 'HTTP', 'arn:aws:iam::1111111:server-certificate/mycert'] ] subnets A list of subnet IDs in your VPC to attach to your LoadBalancer. subnet_names A list of subnet names in your VPC to attach to your LoadBalancer. security_groups The security groups assigned to your LoadBalancer within your VPC. Must be passed either as a list or a comma-separated string. For example, a list: .. code-block:: yaml - security_groups: - secgroup-one - secgroup-two Or as a comma-separated string: .. code-block:: yaml - security_groups: secgroup-one,secgroup-two scheme The type of a LoadBalancer, ``internet-facing`` or ``internal``. 
Once set, can not be modified. health_check A dict defining the health check for this ELB. attributes A dict defining the attributes to set on this ELB. Unknown keys will be silently ignored. See the :mod:`salt.modules.boto_elb.set_attributes` function for recognized attributes. attributes_from_pillar name of pillar dict that contains attributes. Attributes defined for this specific state will override those from pillar. cnames A list of cname dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. the cnames dict will be passed to the state as kwargs. See the :mod:`salt.states.boto_route53` state for information about these attributes. alarms: a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ELB. All attributes should be specified except for dimension which will be automatically set to this ELB. See the :mod:`salt.states.boto_cloudwatch_alarm` state for information about these attributes. alarms_from_pillar: name of pillar dict that contains alarm settings. Alarms defined for this specific state will override those from pillar. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. wait_for_sync Wait for an INSYNC change status from Route53. tags dict of tags instance_ids list of instance ids. The state will ensure that these, and ONLY these, instances are registered with the ELB. This is additive with instance_names. instance_names list of instance names. The state will ensure that these, and ONLY these, instances are registered with the ELB. This is additive with instance_ids. 
''' # load data from attributes_from_pillar and merge with attributes tmp = __salt__['config.option'](attributes_from_pillar, {}) attributes = salt.utils.dictupdate.update(tmp, attributes) if attributes else tmp ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not isinstance(security_groups, (six.string_types, list, type(None))): msg = ("The 'security_group' parameter must be either a list or a " "comma-separated string.") log.error(msg) ret.update({'comment': msg, 'result': False}) return ret if isinstance(security_groups, six.string_types): security_groups = security_groups.split(',') _ret = _elb_present(name, availability_zones, listeners, subnets, subnet_names, security_groups, scheme, region, key, keyid, profile) ret.update({'changes': _ret['changes'], 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not exists and __opts__['test']: return ret if attributes: _ret = _attributes_present(name, attributes, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _health_check_present(name, health_check, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret if cnames: lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if lb: for cname in cnames: _ret = None dns_provider = 'boto_route53' cname.update({'record_type': 'CNAME', 'value': lb['dns_name']}) if 'provider' in cname: dns_provider = 
cname.pop('provider') if dns_provider == 'boto_route53': for p in ('profile', 'key', 'keyid', 'region', 'wait_for_sync'): cname[p] = locals().get(p) if p not in cname else cname[p] _ret = __states__['boto_route53.present'](**cname) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _policies_present(name, policies, policies_from_pillar, listeners, backends, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _tags_present(name, tags, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret if not instance_ids: instance_ids = [] if instance_names: # AWS borks on adding instances in "non-running" states, so filter 'em out. running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') for n in instance_names: instance_ids += __salt__['boto_ec2.find_instances']( name=n, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) # Backwards compat: Only touch attached instances if requested (e.g. if some are defined). 
if instance_ids: if __opts__['test']: if __salt__['boto_elb.set_instances']( name, instance_ids, True, region, key, keyid, profile): ret['comment'] += ' ELB {0} instances would be updated.'.format(name) ret['result'] = None else: success = __salt__['boto_elb.set_instances']( name, instance_ids, False, region, key, keyid, profile) if not success: ret['comment'] += "Failed to set requested instances." ret['result'] = False return ret def register_instances(name, instances, region=None, key=None, keyid=None, profile=None): ''' Add EC2 instance(s) to an Elastic Load Balancer. Removing an instance from the ``instances`` list does not remove it from the ELB. name The name of the Elastic Load Balancer to add EC2 instances to. instances A list of EC2 instance IDs that this Elastic Load Balancer should distribute traffic to. This state will only ever append new instances to the ELB. EC2 instances already associated with this ELB will not be removed if they are not in the ``instances`` list. .. versionadded:: 2015.8.0 .. 
code-block:: yaml add-instances: boto_elb.register_instances: - name: myloadbalancer - instances: - instance-id1 - instance-id2 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not lb: msg = 'Could not find lb {0}'.format(name) log.error(msg) ret.update({'comment': msg, 'result': False}) return ret health = __salt__['boto_elb.get_instance_health']( name, region, key, keyid, profile) nodes = [value['instance_id'] for value in health if value['description'] != 'Instance deregistration currently in progress.'] new = [value for value in instances if value not in nodes] if not new: msg = 'Instance/s {0} already exist.'.format(six.text_type(instances).strip('[]')) log.debug(msg) ret.update({'comment': msg}) return ret if __opts__['test']: ret['comment'] = 'ELB {0} is set to register : {1}.'.format(name, new) ret['result'] = None return ret state = __salt__['boto_elb.register_instances']( name, instances, region, key, keyid, profile) if state: msg = 'Load Balancer {0} has been changed'.format(name) log.info(msg) new = set().union(nodes, instances) ret.update({'comment': msg, 'changes': {'old': '\n'.join(nodes), 'new': '\n'.join(list(new))}}) else: msg = 'Load balancer {0} failed to add instances'.format(name) log.error(msg) ret.update({'comment': msg, 'result': False}) return ret DEFAULT_PILLAR_LISTENER_POLICY_KEY = 'boto_elb_listener_policies' def _elb_present(name, availability_zones, listeners, subnets, subnet_names, security_groups, scheme, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if not salt.utils.data.exactly_one((availability_zones, subnets, subnet_names)): raise SaltInvocationError('Exactly one of availability_zones, subnets, ' 'subnet_names must be provided as arguments.') if not listeners: listeners = [] for listener in listeners: if len(listener) < 3: raise SaltInvocationError('Listeners must have at minimum port,' ' 
instance_port and protocol values in' ' the provided list.') if 'elb_port' not in listener: raise SaltInvocationError('elb_port is a required value for' ' listeners.') if 'instance_port' not in listener: raise SaltInvocationError('instance_port is a required value for' ' listeners.') if 'elb_protocol' not in listener: raise SaltInvocationError('elb_protocol is a required value for' ' listeners.') listener['elb_protocol'] = listener['elb_protocol'].upper() if listener['elb_protocol'] == 'HTTPS' and 'certificate' not in listener: raise SaltInvocationError('certificate is a required value for' ' listeners if HTTPS is set for' ' elb_protocol.') # best attempt at principle of least surprise here: # only use the default pillar in cases where we don't explicitly # define policies OR policies_from_pillar on a listener policies = listener.setdefault('policies', []) policies_pillar = listener.get('policies_from_pillar', None) if not policies and policies_pillar is None: policies_pillar = DEFAULT_PILLAR_LISTENER_POLICY_KEY if policies_pillar: policies += __salt__['pillar.get'](policies_pillar, {}).get(listener['elb_protocol'], []) # Look up subnet ids from names if provided if subnet_names: subnets = [] for i in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', name=i, region=region, key=key, keyid=keyid, profile=profile) if 'error' in r: ret['comment'] = 'Error looking up subnet ids: {0}'.format(r['error']) ret['result'] = False return ret if 'id' not in r: ret['comment'] = 'Subnet {0} does not exist.'.format(i) ret['result'] = False return ret subnets.append(r['id']) _security_groups = None if subnets: vpc_id = __salt__['boto_vpc.get_subnet_association'](subnets, region, key, keyid, profile) vpc_id = vpc_id.get('vpc_id') if not vpc_id: ret['comment'] = 'Subnets {0} do not map to a valid vpc id.'.format(subnets) ret['result'] = False return ret _security_groups = __salt__['boto_secgroup.convert_to_group_ids']( security_groups, vpc_id=vpc_id, region=region, 
key=key, keyid=keyid, profile=profile ) if not _security_groups: ret['comment'] = 'Security groups {0} do not map to valid security group ids.'.format(security_groups) ret['result'] = False return ret exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not exists: if __opts__['test']: ret['comment'] = 'ELB {0} is set to be created.'.format(name) ret['result'] = None return ret created = __salt__['boto_elb.create'](name=name, availability_zones=availability_zones, listeners=listeners, subnets=subnets, security_groups=_security_groups, scheme=scheme, region=region, key=key, keyid=keyid, profile=profile) if created: ret['changes']['old'] = {'elb': None} ret['changes']['new'] = {'elb': name} ret['comment'] = 'ELB {0} created.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to create {0} ELB.'.format(name) else: ret['comment'] = 'ELB {0} present.'.format(name) _ret = _security_groups_present(name, _security_groups, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _listeners_present(name, listeners, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret if availability_zones: _ret = _zones_present(name, availability_zones, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret elif subnets: _ret = _subnets_present(name, subnets, region, key, keyid, profile) ret['changes'] = 
salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret def _listeners_present(name, listeners, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) ret['result'] = False return ret if not listeners: listeners = [] expected_listeners_by_tuple = {} for l in listeners: l_key = __salt__['boto_elb.listener_dict_to_tuple'](l) expected_listeners_by_tuple[l_key] = l actual_listeners_by_tuple = {} for l in lb['listeners']: l_key = __salt__['boto_elb.listener_dict_to_tuple'](l) actual_listeners_by_tuple[l_key] = l to_delete = [] to_create = [] for t, l in six.iteritems(expected_listeners_by_tuple): if t not in actual_listeners_by_tuple: to_create.append(l) for t, l in six.iteritems(actual_listeners_by_tuple): if t not in expected_listeners_by_tuple: to_delete.append(l) if __opts__['test']: msg = [] if to_create or to_delete: msg.append('ELB {0} set to have listeners modified:'.format(name)) for listener in to_create: msg.append('Listener {0} added.'.format( __salt__['boto_elb.listener_dict_to_tuple'](listener))) for listener in to_delete: msg.append('Listener {0} deleted.'.format( __salt__['boto_elb.listener_dict_to_tuple'](listener))) ret['result'] = None else: msg.append('Listeners already set on ELB {0}.'.format(name)) ret['comment'] = ' '.join(msg) return ret if to_delete: ports = [l['elb_port'] for l in to_delete] deleted = __salt__['boto_elb.delete_listeners'](name, ports, region, key, keyid, profile) if deleted: ret['comment'] = 'Deleted listeners on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to delete listeners on {0} ELB.'.format(name) ret['result'] = False if to_create: created = __salt__['boto_elb.create_listeners'](name, 
to_create, region, key, keyid, profile) if created: msg = 'Created listeners on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) else: msg = 'Failed to create listeners on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) ret['result'] = False if to_create or to_delete: ret['changes']['listeners'] = {} ret['changes']['listeners']['old'] = lb['listeners'] lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) ret['changes']['listeners']['new'] = lb['listeners'] else: ret['comment'] = 'Listeners already set on ELB {0}.'.format(name) return ret def _security_groups_present(name, security_groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) ret['result'] = False return ret if not security_groups: security_groups = [] change_needed = False if set(security_groups) != set(lb['security_groups']): change_needed = True if change_needed: if __opts__['test']: ret['comment'] = 'ELB {0} set to have security groups modified.'.format(name) ret['result'] = None return ret changed = __salt__['boto_elb.apply_security_groups']( name, security_groups, region, key, keyid, profile ) if changed: ret['comment'] = 'Modified security_groups on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to modify security_groups on {0} ELB.'.format(name) ret['result'] = False ret['changes']['old'] = {'security_groups': lb['security_groups']} ret['changes']['new'] = {'security_groups': security_groups} else: ret['comment'] = 'security_groups already set on ELB {0}.'.format(name) return ret def _attributes_present(name, attributes, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} _attributes = __salt__['boto_elb.get_attributes'](name, region, key, keyid, profile) if not _attributes: ret['result'] = 
def _attributes_present(name, attributes, region, key, keyid, profile):
    '''
    Helper for ``present``: push ELB attributes (cross-zone load balancing,
    connection draining, connecting settings, access log) when the desired
    values differ from those currently configured.

    Raises SaltInvocationError when ``access_log.s3_bucket_prefix`` starts
    or ends with ``/``.
    '''
    ret = {'result': True, 'comment': '', 'changes': {}}
    _attributes = __salt__['boto_elb.get_attributes'](name, region, key,
                                                      keyid, profile)
    if not _attributes:
        ret['result'] = False
        ret['comment'] = 'Failed to retrieve attributes for ELB {0}.'.format(name)
        return ret

    pending = []  # names of attribute groups that need to be (re)applied

    if 'cross_zone_load_balancing' in attributes:
        want = attributes['cross_zone_load_balancing']
        have = _attributes['cross_zone_load_balancing']
        if want['enabled'] != have['enabled']:
            pending.append('cross_zone_load_balancing')

    if 'connection_draining' in attributes:
        want = attributes['connection_draining']
        have = _attributes['connection_draining']
        # Desired timeout defaults to 300 when omitted.
        if (want['enabled'] != have['enabled']
                or want.get('timeout', 300) != have.get('timeout')):
            pending.append('connection_draining')

    if 'connecting_settings' in attributes:
        want = attributes['connecting_settings']
        have = _attributes['connecting_settings']
        if want['idle_timeout'] != have['idle_timeout']:
            pending.append('connecting_settings')

    if 'access_log' in attributes:
        # Compare as text since boto may return non-string values.
        for attr, val in six.iteritems(attributes['access_log']):
            if six.text_type(_attributes['access_log'][attr]) != six.text_type(val):
                pending.append('access_log')
        if 's3_bucket_prefix' in attributes['access_log']:
            sbp = attributes['access_log']['s3_bucket_prefix']
            if sbp.startswith('/') or sbp.endswith('/'):
                raise SaltInvocationError('s3_bucket_prefix can not start or'
                                          ' end with /.')

    if not pending:
        ret['comment'] = 'Attributes already set on ELB {0}.'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'ELB {0} set to have attributes set.'.format(name)
        ret['result'] = None
        return ret

    was_set = __salt__['boto_elb.set_attributes'](name, attributes,
                                                  region, key, keyid,
                                                  profile)
    if was_set:
        ret['changes']['old'] = {'attributes': _attributes}
        ret['changes']['new'] = {'attributes': attributes}
        ret['comment'] = 'Set attributes on ELB {0}.'.format(name)
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to set attributes on ELB {0}.'.format(name)
    return ret
def _health_check_present(name, health_check, region, key, keyid, profile):
    '''
    Helper for ``present``: apply the desired ``health_check`` settings to
    the ELB when any of them differ from the current configuration.
    '''
    ret = {'result': True, 'comment': '', 'changes': {}}
    if not health_check:
        health_check = {}
    _health_check = __salt__['boto_elb.get_health_check'](name, region, key,
                                                          keyid, profile)
    if not _health_check:
        ret['result'] = False
        ret['comment'] = 'Failed to retrieve health_check for ELB {0}.'.format(name)
        return ret

    # Compare as text since boto may return non-string values.
    need_to_set = any(
        six.text_type(_health_check[attr]) != six.text_type(val)
        for attr, val in six.iteritems(health_check)
    )
    if not need_to_set:
        ret['comment'] = 'Health check already set on ELB {0}.'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'ELB {0} set to have health check set.'.format(name)
        ret['result'] = None
        return ret

    was_set = __salt__['boto_elb.set_health_check'](name, health_check,
                                                    region, key, keyid,
                                                    profile)
    if was_set:
        ret['changes']['old'] = {'health_check': _health_check}
        # Re-fetch so 'new' reflects what the service actually applied.
        _health_check = __salt__['boto_elb.get_health_check'](name, region,
                                                              key, keyid,
                                                              profile)
        ret['changes']['new'] = {'health_check': _health_check}
        ret['comment'] = 'Set health check on ELB {0}.'.format(name)
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to set health check on ELB {0}.'.format(name)
    return ret
def _zones_present(name, availability_zones, region, key, keyid, profile):
    '''
    Helper for ``present``: enable the requested availability zones on the
    ELB and disable any that are attached but not requested.
    '''
    ret = {'result': True, 'comment': '', 'changes': {}}
    lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile)
    if not lb:
        ret['result'] = False
        ret['comment'] = 'Failed to retrieve ELB {0}.'.format(name)
        return ret

    current = lb['availability_zones']
    to_enable = [zone for zone in availability_zones if zone not in current]
    to_disable = [zone for zone in current if zone not in availability_zones]

    if not (to_enable or to_disable):
        ret['comment'] = 'Availability zones already set on ELB {0}.'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'ELB {0} to have availability zones set.'.format(name)
        ret['result'] = None
        return ret

    if to_enable:
        enabled = __salt__['boto_elb.enable_availability_zones'](
            name, to_enable, region, key, keyid, profile)
        if enabled:
            ret['comment'] = 'Enabled availability zones on {0} ELB.'.format(name)
        else:
            ret['comment'] = 'Failed to enable availability zones on {0} ELB.'.format(name)
            ret['result'] = False

    if to_disable:
        disabled = __salt__['boto_elb.disable_availability_zones'](
            name, to_disable, region, key, keyid, profile)
        if disabled:
            msg = 'Disabled availability zones on {0} ELB.'
            ret['comment'] = ' '.join([ret['comment'], msg.format(name)])
        else:
            msg = 'Failed to disable availability zones on {0} ELB.'
            ret['comment'] = ' '.join([ret['comment'], msg.format(name)])
            ret['result'] = False

    # Re-fetch so 'new' reflects what the service actually applied.
    ret['changes']['old'] = {'availability_zones': lb['availability_zones']}
    lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile)
    ret['changes']['new'] = {'availability_zones': lb['availability_zones']}
    return ret
def _subnets_present(name, subnets, region, key, keyid, profile):
    '''
    Helper for ``present``: attach the requested VPC subnets to the ELB and
    detach any attached subnets that are not requested.
    '''
    ret = {'result': True, 'comment': '', 'changes': {}}
    desired = subnets or []
    lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile)
    if not lb:
        ret['result'] = False
        ret['comment'] = 'Failed to retrieve ELB {0}.'.format(name)
        return ret

    current = lb['subnets']
    to_enable = [subnet for subnet in desired if subnet not in current]
    to_disable = [subnet for subnet in current if subnet not in desired]

    if not (to_enable or to_disable):
        ret['comment'] = 'Subnets already set on ELB {0}.'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'ELB {0} to have subnets set.'.format(name)
        ret['result'] = None
        return ret

    if to_enable:
        attached = __salt__['boto_elb.attach_subnets'](name, to_enable,
                                                       region, key, keyid,
                                                       profile)
        if attached:
            ret['comment'] = 'Attached subnets on {0} ELB.'.format(name)
        else:
            ret['comment'] = 'Failed to attach subnets on {0} ELB.'.format(name)
            ret['result'] = False

    if to_disable:
        detached = __salt__['boto_elb.detach_subnets'](name, to_disable,
                                                       region, key, keyid,
                                                       profile)
        if detached:
            ret['comment'] = ' '.join([
                ret['comment'],
                'Detached subnets on {0} ELB.'.format(name)
            ])
        else:
            ret['comment'] = ' '.join([
                ret['comment'],
                'Failed to detach subnets on {0} ELB.'.format(name)
            ])
            ret['result'] = False

    # Re-fetch so 'new' reflects what the service actually applied.
    ret['changes']['old'] = {'subnets': lb['subnets']}
    lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile)
    ret['changes']['new'] = {'subnets': lb['subnets']}
    return ret
def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid,
                    profile):
    '''
    Helper for ``present``: ensure the configured CloudWatch alarms exist,
    namespaced to this ELB. Pillar-sourced alarms are merged with (and
    overridden by) alarms given directly on the state.
    '''
    merged = __salt__['config.option'](alarms_from_pillar, {})
    if alarms:
        merged = salt.utils.dictupdate.update(merged, alarms)
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    for _, info in six.iteritems(merged):
        # Prefix name/description with the ELB name and pin the dimension
        # to this load balancer.
        info["name"] = name + " " + info["name"]
        info["attributes"]["description"] = name + " " + info["attributes"]["description"]
        info["attributes"]["dimensions"] = {"LoadBalancerName": [name]}
        kwargs = {
            "name": info["name"],
            "attributes": info["attributes"],
            "region": region,
            "key": key,
            "keyid": keyid,
            "profile": profile,
        }
        # No test=False clause needed since the state handles that itself.
        results = __states__['boto_cloudwatch_alarm.present'](**kwargs)
        if not results.get('result'):
            ret["result"] = results["result"]
        if results.get("changes", {}) != {}:
            ret["changes"][info["name"]] = results["changes"]
        if "comment" in results:
            ret["comment"] += results["comment"]
    return ret


def _policy_cname(policy_dict):
    '''
    Build a canonical, content-addressed name for an ELB policy so that a
    changed policy body yields a different name.
    '''
    policy_name = policy_dict['policy_name']
    policy_type = policy_dict['policy_type']
    policy = policy_dict['policy']
    # Sort items by key so the repr (and hence the hash) is stable.
    canonical_policy_repr = six.text_type(
        sorted(list(six.iteritems(policy)),
               key=lambda x: six.text_type(x[0])))
    policy_hash = hashlib.md5(
        salt.utils.stringutils.to_bytes(str(canonical_policy_repr))).hexdigest()  # future lint: disable=blacklisted-function
    if policy_type.endswith('Type'):
        policy_type = policy_type[:-4]
    return "{0}-{1}-{2}".format(policy_type, policy_name, policy_hash)


def absent(name, region=None, key=None, keyid=None, profile=None):
    '''
    Ensure an ELB does not exist

    name
        name of the ELB
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    if not __salt__['boto_elb.exists'](name, region, key, keyid, profile):
        ret['comment'] = '{0} ELB does not exist.'.format(name)
        return ret
    if __opts__['test']:
        ret['comment'] = 'ELB {0} is set to be removed.'.format(name)
        ret['result'] = None
        return ret
    if __salt__['boto_elb.delete'](name, region, key, keyid, profile):
        ret['changes']['old'] = {'elb': name}
        ret['changes']['new'] = {'elb': None}
        ret['comment'] = 'ELB {0} deleted.'.format(name)
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to delete {0} ELB.'.format(name)
    return ret
def _tags_present(name, tags, region, key, keyid, profile):
    '''
    Helper for ``present``: ensure the ELB carries exactly the given tags,
    adding missing tags, updating changed values and removing tags that are
    present on the ELB but not in ``tags``.

    name
        Name of the ELB.
    tags
        Dict of desired tag name -> value. Not mutated by this function.

    Returns a standard partial state dict (``result``/``comment``/``changes``).
    '''
    ret = {'result': True, 'comment': '', 'changes': {}}
    if tags:
        lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile)
        # Bug fix: work on a shallow copy — the original aliased the
        # caller's ``tags`` dict, so the pop() below mutated the state
        # arguments as a side effect.
        tags_to_add = dict(tags)
        tags_to_update = {}
        tags_to_remove = []
        if lb.get('tags'):
            for _tag in lb['tags']:
                if _tag not in tags.keys():
                    if _tag not in tags_to_remove:
                        tags_to_remove.append(_tag)
                else:
                    if tags[_tag] != lb['tags'][_tag]:
                        tags_to_update[_tag] = tags[_tag]
                    # Already present on the ELB; not an addition.
                    tags_to_add.pop(_tag)
        if tags_to_remove:
            if __opts__['test']:
                msg = 'The following tag{0} set to be removed: {1}.'.format(
                    ('s are' if len(tags_to_remove) > 1 else ' is'),
                    ', '.join(tags_to_remove))
                ret['comment'] = ' '.join([ret['comment'], msg])
                ret['result'] = None
            else:
                _ret = __salt__['boto_elb.delete_tags'](
                    name, tags_to_remove, region, key, keyid, profile)
                if not _ret:
                    ret['result'] = False
                    msg = 'Error attempting to delete tag {0}.'.format(tags_to_remove)
                    ret['comment'] = ' '.join([ret['comment'], msg])
                    return ret
                if 'old' not in ret['changes']:
                    ret['changes'] = salt.utils.dictupdate.update(
                        ret['changes'], {'old': {'tags': {}}})
                for _tag in tags_to_remove:
                    ret['changes']['old']['tags'][_tag] = lb['tags'][_tag]
        if tags_to_add or tags_to_update:
            if __opts__['test']:
                if tags_to_add:
                    msg = 'The following tag{0} set to be added: {1}.'.format(
                        ('s are' if len(tags_to_add.keys()) > 1 else ' is'),
                        ', '.join(tags_to_add.keys()))
                    ret['comment'] = ' '.join([ret['comment'], msg])
                    ret['result'] = None
                if tags_to_update:
                    msg = 'The following tag {0} set to be updated: {1}.'.format(
                        ('values are' if len(tags_to_update.keys()) > 1
                         else 'value is'),
                        ', '.join(tags_to_update.keys()))
                    ret['comment'] = ' '.join([ret['comment'], msg])
                    # Bug fix: a pending update is still a pending change, so
                    # test mode must report result None here as well.
                    ret['result'] = None
            else:
                all_tag_changes = salt.utils.dictupdate.update(
                    tags_to_add, tags_to_update)
                _ret = __salt__['boto_elb.set_tags'](
                    name, all_tag_changes, region, key, keyid, profile)
                if not _ret:
                    ret['result'] = False
                    msg = 'Error attempting to set tags.'
                    ret['comment'] = ' '.join([ret['comment'], msg])
                    return ret
                if 'old' not in ret['changes']:
                    ret['changes'] = salt.utils.dictupdate.update(
                        ret['changes'], {'old': {'tags': {}}})
                if 'new' not in ret['changes']:
                    ret['changes'] = salt.utils.dictupdate.update(
                        ret['changes'], {'new': {'tags': {}}})
                for tag in all_tag_changes:
                    ret['changes']['new']['tags'][tag] = tags[tag]
                    if 'tags' in lb:
                        if lb['tags']:
                            if tag in lb['tags']:
                                ret['changes']['old']['tags'][tag] = lb['tags'][tag]
        if not tags_to_update and not tags_to_remove and not tags_to_add:
            msg = 'Tags are already set.'
            ret['comment'] = ' '.join([ret['comment'], msg])
    return ret
saltstack/salt
salt/states/boto_elb.py
absent
python
def absent(name, region=None, key=None, keyid=None, profile=None): ''' Ensure an ELB does not exist name name of the ELB ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'ELB {0} is set to be removed.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_elb.delete'](name, region, key, keyid, profile) if deleted: ret['changes']['old'] = {'elb': name} ret['changes']['new'] = {'elb': None} ret['comment'] = 'ELB {0} deleted.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to delete {0} ELB.'.format(name) else: ret['comment'] = '{0} ELB does not exist.'.format(name) return ret
Ensure an ELB does not exist name name of the ELB
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_elb.py#L1262-L1288
null
# -*- coding: utf-8 -*- ''' Manage ELBs .. versionadded:: 2014.7.0 Create and destroy ELBs. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses ``boto``, which can be installed via package, or pip. This module accepts explicit elb credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml elb.keyid: GKTADJGHEIQSXMKKRBJ08H elb.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - availability_zones: - us-east-1a - us-east-1c - us-east-1d - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs - listeners: - elb_port: 443 instance_port: 80 elb_protocol: HTTPS instance_protocol: HTTP certificate: 'arn:aws:iam::1111111:server-certificate/mycert' policies: - my-ssl-policy - cookie-policy - elb_port: 8210 instance_port: 8210 elb_protocol: TCP - backends: - instance_port: 80 policies: - enable-proxy-protocol - health_check: target: 'HTTP:80/' - attributes: cross_zone_load_balancing: enabled: true access_log: enabled: true s3_bucket_name: 'mybucket' s3_bucket_prefix: 'my-logs' emit_interval: 5 connecting_settings: idle_timeout: 60 - cnames: - name: mycname.example.com. zone: example.com. ttl: 60 - name: myothercname.example.com. zone: example.com. 
- security_groups: - my-security-group - policies: - policy_name: my-ssl-policy policy_type: SSLNegotiationPolicyType policy: Protocol-TLSv1.2: true Protocol-SSLv3: false Server-Defined-Cipher-Order: true ECDHE-ECDSA-AES128-GCM-SHA256: true - policy_name: cookie-policy policy_type: LBCookieStickinessPolicyType policy: {} # no policy means this is a session cookie - policy_name: enable-proxy-protocol policy_type: ProxyProtocolPolicyType policy: ProxyProtocol: true # Using a profile from pillars Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile # Passing in a profile Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's possible to specify attributes from pillars by specifying a pillar. You can override the values defined in the pillard by setting the attributes on the resource. The module will use the default pillar key 'boto_elb_attributes', which allows you to set default attributes for all ELB resources. Setting the attributes pillar: .. code-block:: yaml my_elb_attributes: cross_zone_load_balancing: enabled: true connection_draining: enabled: true timeout: 20 access_log: enabled: true s3_bucket_name: 'mybucket' s3_bucket_prefix: 'my-logs' emit_interval: 5 Overriding the attribute values on the resource: .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - attributes_from_pillar: my_elb_attributes # override cross_zone_load_balancing:enabled - attributes: cross_zone_load_balancing: enabled: false - profile: myelbprofile It's possible to specify cloudwatch alarms that will be setup along with the ELB. Note the alarm name will be defined by the name attribute provided, plus the ELB resource name. .. 
code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - alarms: UnHealthyHostCount: name: 'ELB UnHealthyHostCount **MANAGED BY SALT**' attributes: metric: UnHealthyHostCount namespace: AWS/ELB statistic: Average comparison: '>=' threshold: 1.0 period: 600 evaluation_periods: 6 unit: null description: ELB UnHealthyHostCount alarm_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] insufficient_data_actions: [] ok_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] You can also use alarms from pillars, and override values from the pillar alarms by setting overrides on the resource. Note that 'boto_elb_alarms' will be used as a default value for all resources, if defined and can be used to ensure alarms are always set for a resource. Setting the alarms in a pillar: .. code-block:: yaml my_elb_alarm: UnHealthyHostCount: name: 'ELB UnHealthyHostCount **MANAGED BY SALT**' attributes: metric: UnHealthyHostCount namespace: AWS/ELB statistic: Average comparison: '>=' threshold: 1.0 period: 600 evaluation_periods: 6 unit: null description: ELB UnHealthyHostCount alarm_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] insufficient_data_actions: [] ok_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] Overriding the alarm values on the resource: .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - alarms_from_pillar: my_elb_alarm # override UnHealthyHostCount:attributes:threshold - alarms: UnHealthyHostCount: attributes: threshold: 2.0 Tags can also be set: .. versionadded:: 2016.3.0 .. 
code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - tags: MyTag: 'My Tag Value' OtherTag: 'My Other Value' ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt Libs import hashlib import re import salt.utils.data import salt.utils.dictupdate import salt.utils.stringutils from salt.exceptions import SaltInvocationError from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' return 'boto_elb' if 'boto_elb.exists' in __salt__ else False def present(name, listeners, availability_zones=None, subnets=None, subnet_names=None, security_groups=None, scheme='internet-facing', health_check=None, attributes=None, attributes_from_pillar="boto_elb_attributes", cnames=None, alarms=None, alarms_from_pillar="boto_elb_alarms", policies=None, policies_from_pillar="boto_elb_policies", backends=None, region=None, key=None, keyid=None, profile=None, wait_for_sync=True, tags=None, instance_ids=None, instance_names=None): ''' Ensure the ELB exists. name Name of the ELB. availability_zones A list of availability zones for this ELB. listeners A list of listener lists; example:: [ ['443', 'HTTPS', 'arn:aws:iam::1111111:server-certificate/mycert'], ['8443', '80', 'HTTPS', 'HTTP', 'arn:aws:iam::1111111:server-certificate/mycert'] ] subnets A list of subnet IDs in your VPC to attach to your LoadBalancer. subnet_names A list of subnet names in your VPC to attach to your LoadBalancer. security_groups The security groups assigned to your LoadBalancer within your VPC. Must be passed either as a list or a comma-separated string. For example, a list: .. code-block:: yaml - security_groups: - secgroup-one - secgroup-two Or as a comma-separated string: .. code-block:: yaml - security_groups: secgroup-one,secgroup-two scheme The type of a LoadBalancer, ``internet-facing`` or ``internal``. 
Once set, can not be modified. health_check A dict defining the health check for this ELB. attributes A dict defining the attributes to set on this ELB. Unknown keys will be silently ignored. See the :mod:`salt.modules.boto_elb.set_attributes` function for recognized attributes. attributes_from_pillar name of pillar dict that contains attributes. Attributes defined for this specific state will override those from pillar. cnames A list of cname dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. the cnames dict will be passed to the state as kwargs. See the :mod:`salt.states.boto_route53` state for information about these attributes. alarms: a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ELB. All attributes should be specified except for dimension which will be automatically set to this ELB. See the :mod:`salt.states.boto_cloudwatch_alarm` state for information about these attributes. alarms_from_pillar: name of pillar dict that contains alarm settings. Alarms defined for this specific state will override those from pillar. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. wait_for_sync Wait for an INSYNC change status from Route53. tags dict of tags instance_ids list of instance ids. The state will ensure that these, and ONLY these, instances are registered with the ELB. This is additive with instance_names. instance_names list of instance names. The state will ensure that these, and ONLY these, instances are registered with the ELB. This is additive with instance_ids. 
''' # load data from attributes_from_pillar and merge with attributes tmp = __salt__['config.option'](attributes_from_pillar, {}) attributes = salt.utils.dictupdate.update(tmp, attributes) if attributes else tmp ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not isinstance(security_groups, (six.string_types, list, type(None))): msg = ("The 'security_group' parameter must be either a list or a " "comma-separated string.") log.error(msg) ret.update({'comment': msg, 'result': False}) return ret if isinstance(security_groups, six.string_types): security_groups = security_groups.split(',') _ret = _elb_present(name, availability_zones, listeners, subnets, subnet_names, security_groups, scheme, region, key, keyid, profile) ret.update({'changes': _ret['changes'], 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not exists and __opts__['test']: return ret if attributes: _ret = _attributes_present(name, attributes, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _health_check_present(name, health_check, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret if cnames: lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if lb: for cname in cnames: _ret = None dns_provider = 'boto_route53' cname.update({'record_type': 'CNAME', 'value': lb['dns_name']}) if 'provider' in cname: dns_provider = 
cname.pop('provider') if dns_provider == 'boto_route53': for p in ('profile', 'key', 'keyid', 'region', 'wait_for_sync'): cname[p] = locals().get(p) if p not in cname else cname[p] _ret = __states__['boto_route53.present'](**cname) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _policies_present(name, policies, policies_from_pillar, listeners, backends, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _tags_present(name, tags, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret if not instance_ids: instance_ids = [] if instance_names: # AWS borks on adding instances in "non-running" states, so filter 'em out. running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') for n in instance_names: instance_ids += __salt__['boto_ec2.find_instances']( name=n, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) # Backwards compat: Only touch attached instances if requested (e.g. if some are defined). 
if instance_ids: if __opts__['test']: if __salt__['boto_elb.set_instances']( name, instance_ids, True, region, key, keyid, profile): ret['comment'] += ' ELB {0} instances would be updated.'.format(name) ret['result'] = None else: success = __salt__['boto_elb.set_instances']( name, instance_ids, False, region, key, keyid, profile) if not success: ret['comment'] += "Failed to set requested instances." ret['result'] = False return ret def register_instances(name, instances, region=None, key=None, keyid=None, profile=None): ''' Add EC2 instance(s) to an Elastic Load Balancer. Removing an instance from the ``instances`` list does not remove it from the ELB. name The name of the Elastic Load Balancer to add EC2 instances to. instances A list of EC2 instance IDs that this Elastic Load Balancer should distribute traffic to. This state will only ever append new instances to the ELB. EC2 instances already associated with this ELB will not be removed if they are not in the ``instances`` list. .. versionadded:: 2015.8.0 .. 
code-block:: yaml add-instances: boto_elb.register_instances: - name: myloadbalancer - instances: - instance-id1 - instance-id2 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not lb: msg = 'Could not find lb {0}'.format(name) log.error(msg) ret.update({'comment': msg, 'result': False}) return ret health = __salt__['boto_elb.get_instance_health']( name, region, key, keyid, profile) nodes = [value['instance_id'] for value in health if value['description'] != 'Instance deregistration currently in progress.'] new = [value for value in instances if value not in nodes] if not new: msg = 'Instance/s {0} already exist.'.format(six.text_type(instances).strip('[]')) log.debug(msg) ret.update({'comment': msg}) return ret if __opts__['test']: ret['comment'] = 'ELB {0} is set to register : {1}.'.format(name, new) ret['result'] = None return ret state = __salt__['boto_elb.register_instances']( name, instances, region, key, keyid, profile) if state: msg = 'Load Balancer {0} has been changed'.format(name) log.info(msg) new = set().union(nodes, instances) ret.update({'comment': msg, 'changes': {'old': '\n'.join(nodes), 'new': '\n'.join(list(new))}}) else: msg = 'Load balancer {0} failed to add instances'.format(name) log.error(msg) ret.update({'comment': msg, 'result': False}) return ret DEFAULT_PILLAR_LISTENER_POLICY_KEY = 'boto_elb_listener_policies' def _elb_present(name, availability_zones, listeners, subnets, subnet_names, security_groups, scheme, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if not salt.utils.data.exactly_one((availability_zones, subnets, subnet_names)): raise SaltInvocationError('Exactly one of availability_zones, subnets, ' 'subnet_names must be provided as arguments.') if not listeners: listeners = [] for listener in listeners: if len(listener) < 3: raise SaltInvocationError('Listeners must have at minimum port,' ' 
instance_port and protocol values in' ' the provided list.') if 'elb_port' not in listener: raise SaltInvocationError('elb_port is a required value for' ' listeners.') if 'instance_port' not in listener: raise SaltInvocationError('instance_port is a required value for' ' listeners.') if 'elb_protocol' not in listener: raise SaltInvocationError('elb_protocol is a required value for' ' listeners.') listener['elb_protocol'] = listener['elb_protocol'].upper() if listener['elb_protocol'] == 'HTTPS' and 'certificate' not in listener: raise SaltInvocationError('certificate is a required value for' ' listeners if HTTPS is set for' ' elb_protocol.') # best attempt at principle of least surprise here: # only use the default pillar in cases where we don't explicitly # define policies OR policies_from_pillar on a listener policies = listener.setdefault('policies', []) policies_pillar = listener.get('policies_from_pillar', None) if not policies and policies_pillar is None: policies_pillar = DEFAULT_PILLAR_LISTENER_POLICY_KEY if policies_pillar: policies += __salt__['pillar.get'](policies_pillar, {}).get(listener['elb_protocol'], []) # Look up subnet ids from names if provided if subnet_names: subnets = [] for i in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', name=i, region=region, key=key, keyid=keyid, profile=profile) if 'error' in r: ret['comment'] = 'Error looking up subnet ids: {0}'.format(r['error']) ret['result'] = False return ret if 'id' not in r: ret['comment'] = 'Subnet {0} does not exist.'.format(i) ret['result'] = False return ret subnets.append(r['id']) _security_groups = None if subnets: vpc_id = __salt__['boto_vpc.get_subnet_association'](subnets, region, key, keyid, profile) vpc_id = vpc_id.get('vpc_id') if not vpc_id: ret['comment'] = 'Subnets {0} do not map to a valid vpc id.'.format(subnets) ret['result'] = False return ret _security_groups = __salt__['boto_secgroup.convert_to_group_ids']( security_groups, vpc_id=vpc_id, region=region, 
key=key, keyid=keyid, profile=profile ) if not _security_groups: ret['comment'] = 'Security groups {0} do not map to valid security group ids.'.format(security_groups) ret['result'] = False return ret exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not exists: if __opts__['test']: ret['comment'] = 'ELB {0} is set to be created.'.format(name) ret['result'] = None return ret created = __salt__['boto_elb.create'](name=name, availability_zones=availability_zones, listeners=listeners, subnets=subnets, security_groups=_security_groups, scheme=scheme, region=region, key=key, keyid=keyid, profile=profile) if created: ret['changes']['old'] = {'elb': None} ret['changes']['new'] = {'elb': name} ret['comment'] = 'ELB {0} created.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to create {0} ELB.'.format(name) else: ret['comment'] = 'ELB {0} present.'.format(name) _ret = _security_groups_present(name, _security_groups, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _listeners_present(name, listeners, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret if availability_zones: _ret = _zones_present(name, availability_zones, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret elif subnets: _ret = _subnets_present(name, subnets, region, key, keyid, profile) ret['changes'] = 
salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret def _listeners_present(name, listeners, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) ret['result'] = False return ret if not listeners: listeners = [] expected_listeners_by_tuple = {} for l in listeners: l_key = __salt__['boto_elb.listener_dict_to_tuple'](l) expected_listeners_by_tuple[l_key] = l actual_listeners_by_tuple = {} for l in lb['listeners']: l_key = __salt__['boto_elb.listener_dict_to_tuple'](l) actual_listeners_by_tuple[l_key] = l to_delete = [] to_create = [] for t, l in six.iteritems(expected_listeners_by_tuple): if t not in actual_listeners_by_tuple: to_create.append(l) for t, l in six.iteritems(actual_listeners_by_tuple): if t not in expected_listeners_by_tuple: to_delete.append(l) if __opts__['test']: msg = [] if to_create or to_delete: msg.append('ELB {0} set to have listeners modified:'.format(name)) for listener in to_create: msg.append('Listener {0} added.'.format( __salt__['boto_elb.listener_dict_to_tuple'](listener))) for listener in to_delete: msg.append('Listener {0} deleted.'.format( __salt__['boto_elb.listener_dict_to_tuple'](listener))) ret['result'] = None else: msg.append('Listeners already set on ELB {0}.'.format(name)) ret['comment'] = ' '.join(msg) return ret if to_delete: ports = [l['elb_port'] for l in to_delete] deleted = __salt__['boto_elb.delete_listeners'](name, ports, region, key, keyid, profile) if deleted: ret['comment'] = 'Deleted listeners on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to delete listeners on {0} ELB.'.format(name) ret['result'] = False if to_create: created = __salt__['boto_elb.create_listeners'](name, 
to_create, region, key, keyid, profile) if created: msg = 'Created listeners on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) else: msg = 'Failed to create listeners on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) ret['result'] = False if to_create or to_delete: ret['changes']['listeners'] = {} ret['changes']['listeners']['old'] = lb['listeners'] lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) ret['changes']['listeners']['new'] = lb['listeners'] else: ret['comment'] = 'Listeners already set on ELB {0}.'.format(name) return ret def _security_groups_present(name, security_groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) ret['result'] = False return ret if not security_groups: security_groups = [] change_needed = False if set(security_groups) != set(lb['security_groups']): change_needed = True if change_needed: if __opts__['test']: ret['comment'] = 'ELB {0} set to have security groups modified.'.format(name) ret['result'] = None return ret changed = __salt__['boto_elb.apply_security_groups']( name, security_groups, region, key, keyid, profile ) if changed: ret['comment'] = 'Modified security_groups on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to modify security_groups on {0} ELB.'.format(name) ret['result'] = False ret['changes']['old'] = {'security_groups': lb['security_groups']} ret['changes']['new'] = {'security_groups': security_groups} else: ret['comment'] = 'security_groups already set on ELB {0}.'.format(name) return ret def _attributes_present(name, attributes, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} _attributes = __salt__['boto_elb.get_attributes'](name, region, key, keyid, profile) if not _attributes: ret['result'] = 
False ret['comment'] = 'Failed to retrieve attributes for ELB {0}.'.format(name) return ret attrs_to_set = [] if 'cross_zone_load_balancing' in attributes: czlb = attributes['cross_zone_load_balancing'] _czlb = _attributes['cross_zone_load_balancing'] if czlb['enabled'] != _czlb['enabled']: attrs_to_set.append('cross_zone_load_balancing') if 'connection_draining' in attributes: cd = attributes['connection_draining'] _cd = _attributes['connection_draining'] if (cd['enabled'] != _cd['enabled'] or cd.get('timeout', 300) != _cd.get('timeout')): attrs_to_set.append('connection_draining') if 'connecting_settings' in attributes: cs = attributes['connecting_settings'] _cs = _attributes['connecting_settings'] if cs['idle_timeout'] != _cs['idle_timeout']: attrs_to_set.append('connecting_settings') if 'access_log' in attributes: for attr, val in six.iteritems(attributes['access_log']): if six.text_type(_attributes['access_log'][attr]) != six.text_type(val): attrs_to_set.append('access_log') if 's3_bucket_prefix' in attributes['access_log']: sbp = attributes['access_log']['s3_bucket_prefix'] if sbp.startswith('/') or sbp.endswith('/'): raise SaltInvocationError('s3_bucket_prefix can not start or' ' end with /.') if attrs_to_set: if __opts__['test']: ret['comment'] = 'ELB {0} set to have attributes set.'.format(name) ret['result'] = None return ret was_set = __salt__['boto_elb.set_attributes'](name, attributes, region, key, keyid, profile) if was_set: ret['changes']['old'] = {'attributes': _attributes} ret['changes']['new'] = {'attributes': attributes} ret['comment'] = 'Set attributes on ELB {0}.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to set attributes on ELB {0}.'.format(name) else: ret['comment'] = 'Attributes already set on ELB {0}.'.format(name) return ret def _health_check_present(name, health_check, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if not health_check: health_check = {} _health_check = 
__salt__['boto_elb.get_health_check'](name, region, key, keyid, profile) if not _health_check: ret['result'] = False ret['comment'] = 'Failed to retrieve health_check for ELB {0}.'.format(name) return ret need_to_set = False for attr, val in six.iteritems(health_check): if six.text_type(_health_check[attr]) != six.text_type(val): need_to_set = True if need_to_set: if __opts__['test']: ret['comment'] = 'ELB {0} set to have health check set.'.format(name) ret['result'] = None return ret was_set = __salt__['boto_elb.set_health_check'](name, health_check, region, key, keyid, profile) if was_set: ret['changes']['old'] = {'health_check': _health_check} _health_check = __salt__['boto_elb.get_health_check'](name, region, key, keyid, profile) ret['changes']['new'] = {'health_check': _health_check} ret['comment'] = 'Set health check on ELB {0}.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to set health check on ELB {0}.'.format(name) else: ret['comment'] = 'Health check already set on ELB {0}.'.format(name) return ret def _zones_present(name, availability_zones, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['result'] = False ret['comment'] = 'Failed to retrieve ELB {0}.'.format(name) return ret to_enable = [] to_disable = [] _zones = lb['availability_zones'] for zone in availability_zones: if zone not in _zones: to_enable.append(zone) for zone in _zones: if zone not in availability_zones: to_disable.append(zone) if to_enable or to_disable: if __opts__['test']: ret['comment'] = 'ELB {0} to have availability zones set.'.format(name) ret['result'] = None return ret if to_enable: enabled = __salt__['boto_elb.enable_availability_zones']( name, to_enable, region, key, keyid, profile) if enabled: ret['comment'] = 'Enabled availability zones on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to enable availability zones on {0} 
ELB.'.format(name) ret['result'] = False if to_disable: disabled = __salt__['boto_elb.disable_availability_zones']( name, to_disable, region, key, keyid, profile) if disabled: msg = 'Disabled availability zones on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) else: msg = 'Failed to disable availability zones on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) ret['result'] = False ret['changes']['old'] = {'availability_zones': lb['availability_zones']} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) ret['changes']['new'] = {'availability_zones': lb['availability_zones']} else: ret['comment'] = 'Availability zones already set on ELB {0}.'.format(name) return ret def _subnets_present(name, subnets, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if not subnets: subnets = [] lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['result'] = False ret['comment'] = 'Failed to retrieve ELB {0}.'.format(name) return ret to_enable = [] to_disable = [] _subnets = lb['subnets'] for subnet in subnets: if subnet not in _subnets: to_enable.append(subnet) for subnet in _subnets: if subnet not in subnets: to_disable.append(subnet) if to_enable or to_disable: if __opts__['test']: ret['comment'] = 'ELB {0} to have subnets set.'.format(name) ret['result'] = None return ret if to_enable: attached = __salt__['boto_elb.attach_subnets'](name, to_enable, region, key, keyid, profile) if attached: ret['comment'] = 'Attached subnets on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to attach subnets on {0} ELB.'.format(name) ret['result'] = False if to_disable: detached = __salt__['boto_elb.detach_subnets'](name, to_disable, region, key, keyid, profile) if detached: ret['comment'] = ' '.join([ ret['comment'], 'Detached subnets on {0} ELB.'.format(name) ]) else: ret['comment'] = ' '.join([ ret['comment'], 'Failed to detach subnets on {0} 
ELB.'.format(name) ]) ret['result'] = False ret['changes']['old'] = {'subnets': lb['subnets']} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) ret['changes']['new'] = {'subnets': lb['subnets']} else: ret['comment'] = 'Subnets already set on ELB {0}.'.format(name) return ret def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile): '''helper method for present. ensure that cloudwatch_alarms are set''' current = __salt__['config.option'](alarms_from_pillar, {}) if alarms: current = salt.utils.dictupdate.update(current, alarms) ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} for _, info in six.iteritems(current): info["name"] = name + " " + info["name"] info["attributes"]["description"] = name + " " + info["attributes"]["description"] info["attributes"]["dimensions"] = {"LoadBalancerName": [name]} kwargs = { "name": info["name"], "attributes": info["attributes"], "region": region, "key": key, "keyid": keyid, "profile": profile, } # No test=False cluase needed since the state handles that itself... results = __states__['boto_cloudwatch_alarm.present'](**kwargs) if not results.get('result'): ret["result"] = results["result"] if results.get("changes", {}) != {}: ret["changes"][info["name"]] = results["changes"] if "comment" in results: ret["comment"] += results["comment"] return ret def _policies_present(name, policies, policies_from_pillar, listeners, backends, region, key, keyid, profile): '''helper method for present. 
ensure that ELB policies are set''' if policies is None: policies = [] pillar_policies = __salt__['config.option'](policies_from_pillar, []) policies = policies + pillar_policies if backends is None: backends = [] # check for policy name uniqueness and correct type policy_names = set() for p in policies: if 'policy_name' not in p: raise SaltInvocationError('policy_name is a required value for ' 'policies.') if 'policy_type' not in p: raise SaltInvocationError('policy_type is a required value for ' 'policies.') if 'policy' not in p: raise SaltInvocationError('policy is a required value for ' 'listeners.') # check for unique policy names if p['policy_name'] in policy_names: raise SaltInvocationError('Policy names must be unique: policy {0}' ' is declared twice.'.format(p['policy_name'])) policy_names.add(p['policy_name']) # check that listeners refer to valid policy names for l in listeners: for p in l.get('policies', []): if p not in policy_names: raise SaltInvocationError('Listener {0} on ELB {1} refers to ' 'undefined policy {2}.'.format(l['elb_port'], name, p)) # check that backends refer to valid policy names for b in backends: for p in b.get('policies', []): if p not in policy_names: raise SaltInvocationError('Backend {0} on ELB {1} refers to ' 'undefined policy ' '{2}.'.format(b['instance_port'], name, p)) ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) ret['result'] = False return ret # Policies have two names: # - a short name ('name') that's only the policy name (e.g. testpolicy) # - a canonical name ('cname') that contains the policy type and hash # (e.g. 
SSLNegotiationPolicy-testpolicy-14b32f668639cc8ea1391e062af98524) policies_by_cname = {} cnames_by_name = {} for p in policies: cname = _policy_cname(p) policies_by_cname[cname] = p cnames_by_name[p['policy_name']] = cname expected_policy_names = policies_by_cname.keys() actual_policy_names = lb['policies'] # This is sadly a huge hack to get around the fact that AWS assigns a # default SSLNegotiationPolicyType policy (with the naming scheme # ELBSecurityPolicy-YYYY-MM) to all ELBs terminating SSL without an # explicit policy set. If we don't keep track of the default policies and # explicitly exclude them from deletion, orchestration will fail because we # attempt to delete the default policy that's being used by listeners that # were created with no explicit policy. default_aws_policies = set() expected_policies_by_listener = {} for l in listeners: expected_policies_by_listener[l['elb_port']] = set( [cnames_by_name[p] for p in l.get('policies', [])]) actual_policies_by_listener = {} for l in lb['listeners']: listener_policies = set(l.get('policies', [])) actual_policies_by_listener[l['elb_port']] = listener_policies # Determine if any actual listener policies look like default policies, # so we can exclude them from deletion below (see note about this hack # above). 
for p in listener_policies: if re.match(r'^ELBSecurityPolicy-\d{4}-\d{2}$', p): default_aws_policies.add(p) expected_policies_by_backend = {} for b in backends: expected_policies_by_backend[b['instance_port']] = set( [cnames_by_name[p] for p in b.get('policies', [])]) actual_policies_by_backend = {} for b in lb['backends']: backend_policies = set(b.get('policies', [])) actual_policies_by_backend[b['instance_port']] = backend_policies to_delete = [] to_create = [] for policy_name in expected_policy_names: if policy_name not in actual_policy_names: to_create.append(policy_name) for policy_name in actual_policy_names: if policy_name not in expected_policy_names: if policy_name not in default_aws_policies: to_delete.append(policy_name) listeners_to_update = set() for port, policies in six.iteritems(expected_policies_by_listener): if policies != actual_policies_by_listener.get(port, set()): listeners_to_update.add(port) for port, policies in six.iteritems(actual_policies_by_listener): if policies != expected_policies_by_listener.get(port, set()): listeners_to_update.add(port) backends_to_update = set() for port, policies in six.iteritems(expected_policies_by_backend): if policies != actual_policies_by_backend.get(port, set()): backends_to_update.add(port) for port, policies in six.iteritems(actual_policies_by_backend): if policies != expected_policies_by_backend.get(port, set()): backends_to_update.add(port) if __opts__['test']: msg = [] if to_create or to_delete: msg.append('ELB {0} set to have policies modified:'.format(name)) for policy in to_create: msg.append('Policy {0} added.'.format(policy)) for policy in to_delete: msg.append('Policy {0} deleted.'.format(policy)) ret['result'] = None else: msg.append('Policies already set on ELB {0}.'.format(name)) for listener in listeners_to_update: msg.append('Listener {0} policies updated.'.format(listener)) for backend in backends_to_update: msg.append('Backend {0} policies updated.'.format(backend)) ret['comment'] = ' 
'.join(msg) return ret if to_create: for policy_name in to_create: created = __salt__['boto_elb.create_policy']( name=name, policy_name=policy_name, policy_type=policies_by_cname[policy_name]['policy_type'], policy=policies_by_cname[policy_name]['policy'], region=region, key=key, keyid=keyid, profile=profile) if created: ret['changes'].setdefault(policy_name, {})['new'] = policy_name comment = "Policy {0} was created on ELB {1}".format( policy_name, name) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret for port in listeners_to_update: policy_set = __salt__['boto_elb.set_listener_policy']( name=name, port=port, policies=list(expected_policies_by_listener.get(port, [])), region=region, key=key, keyid=keyid, profile=profile) if policy_set: policy_key = 'listener_{0}_policy'.format(port) ret['changes'][policy_key] = { 'old': list(actual_policies_by_listener.get(port, [])), 'new': list(expected_policies_by_listener.get(port, [])), } comment = "Policy {0} was created on ELB {1} listener {2}".format( expected_policies_by_listener[port], name, port) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret for port in backends_to_update: policy_set = __salt__['boto_elb.set_backend_policy']( name=name, port=port, policies=list(expected_policies_by_backend.get(port, [])), region=region, key=key, keyid=keyid, profile=profile) if policy_set: policy_key = 'backend_{0}_policy'.format(port) ret['changes'][policy_key] = { 'old': list(actual_policies_by_backend.get(port, [])), 'new': list(expected_policies_by_backend.get(port, [])), } comment = "Policy {0} was created on ELB {1} backend {2}".format( expected_policies_by_backend[port], name, port) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret if to_delete: for policy_name in to_delete: deleted = __salt__['boto_elb.delete_policy']( name=name, 
policy_name=policy_name, region=region, key=key, keyid=keyid, profile=profile) if deleted: ret['changes'].setdefault(policy_name, {})['old'] = policy_name comment = "Policy {0} was deleted from ELB {1}".format( policy_name, name) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret return ret def _policy_cname(policy_dict): policy_name = policy_dict['policy_name'] policy_type = policy_dict['policy_type'] policy = policy_dict['policy'] canonical_policy_repr = six.text_type(sorted(list(six.iteritems(policy)), key=lambda x: six.text_type(x[0]))) policy_hash = hashlib.md5( salt.utils.stringutils.to_bytes(str(canonical_policy_repr))).hexdigest() # future lint: disable=blacklisted-function if policy_type.endswith('Type'): policy_type = policy_type[:-4] return "{0}-{1}-{2}".format(policy_type, policy_name, policy_hash) def _tags_present(name, tags, region, key, keyid, profile): ''' helper function to validate tags on elb ''' ret = {'result': True, 'comment': '', 'changes': {}} if tags: lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) tags_to_add = tags tags_to_update = {} tags_to_remove = [] if lb.get('tags'): for _tag in lb['tags']: if _tag not in tags.keys(): if _tag not in tags_to_remove: tags_to_remove.append(_tag) else: if tags[_tag] != lb['tags'][_tag]: tags_to_update[_tag] = tags[_tag] tags_to_add.pop(_tag) if tags_to_remove: if __opts__['test']: msg = 'The following tag{0} set to be removed: {1}.'.format( ('s are' if len(tags_to_remove) > 1 else ' is'), ', '.join(tags_to_remove)) ret['comment'] = ' '.join([ret['comment'], msg]) ret['result'] = None else: _ret = __salt__['boto_elb.delete_tags']( name, tags_to_remove, region, key, keyid, profile) if not _ret: ret['result'] = False msg = 'Error attempting to delete tag {0}.'.format(tags_to_remove) ret['comment'] = ' '.join([ret['comment'], msg]) return ret if 'old' not in ret['changes']: ret['changes'] = 
salt.utils.dictupdate.update(ret['changes'], {'old': {'tags': {}}}) for _tag in tags_to_remove: ret['changes']['old']['tags'][_tag] = lb['tags'][_tag] if tags_to_add or tags_to_update: if __opts__['test']: if tags_to_add: msg = 'The following tag{0} set to be added: {1}.'.format( ('s are' if len(tags_to_add.keys()) > 1 else ' is'), ', '.join(tags_to_add.keys())) ret['comment'] = ' '. join([ret['comment'], msg]) ret['result'] = None if tags_to_update: msg = 'The following tag {0} set to be updated: {1}.'.format( ('values are' if len(tags_to_update.keys()) > 1 else 'value is'), ', '.join(tags_to_update.keys())) ret['comment'] = ' '.join([ret['comment'], msg]) else: all_tag_changes = salt.utils.dictupdate.update(tags_to_add, tags_to_update) _ret = __salt__['boto_elb.set_tags']( name, all_tag_changes, region, key, keyid, profile) if not _ret: ret['result'] = False msg = 'Error attempting to set tags.' ret['comment'] = ' '.join([ret['comment'], msg]) return ret if 'old' not in ret['changes']: ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'old': {'tags': {}}}) if 'new' not in ret['changes']: ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'new': {'tags': {}}}) for tag in all_tag_changes: ret['changes']['new']['tags'][tag] = tags[tag] if 'tags' in lb: if lb['tags']: if tag in lb['tags']: ret['changes']['old']['tags'][tag] = lb['tags'][tag] if not tags_to_update and not tags_to_remove and not tags_to_add: msg = 'Tags are already set.' ret['comment'] = ' '.join([ret['comment'], msg]) return ret
saltstack/salt
salt/states/boto_elb.py
_tags_present
python
def _tags_present(name, tags, region, key, keyid, profile): ''' helper function to validate tags on elb ''' ret = {'result': True, 'comment': '', 'changes': {}} if tags: lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) tags_to_add = tags tags_to_update = {} tags_to_remove = [] if lb.get('tags'): for _tag in lb['tags']: if _tag not in tags.keys(): if _tag not in tags_to_remove: tags_to_remove.append(_tag) else: if tags[_tag] != lb['tags'][_tag]: tags_to_update[_tag] = tags[_tag] tags_to_add.pop(_tag) if tags_to_remove: if __opts__['test']: msg = 'The following tag{0} set to be removed: {1}.'.format( ('s are' if len(tags_to_remove) > 1 else ' is'), ', '.join(tags_to_remove)) ret['comment'] = ' '.join([ret['comment'], msg]) ret['result'] = None else: _ret = __salt__['boto_elb.delete_tags']( name, tags_to_remove, region, key, keyid, profile) if not _ret: ret['result'] = False msg = 'Error attempting to delete tag {0}.'.format(tags_to_remove) ret['comment'] = ' '.join([ret['comment'], msg]) return ret if 'old' not in ret['changes']: ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'old': {'tags': {}}}) for _tag in tags_to_remove: ret['changes']['old']['tags'][_tag] = lb['tags'][_tag] if tags_to_add or tags_to_update: if __opts__['test']: if tags_to_add: msg = 'The following tag{0} set to be added: {1}.'.format( ('s are' if len(tags_to_add.keys()) > 1 else ' is'), ', '.join(tags_to_add.keys())) ret['comment'] = ' '. join([ret['comment'], msg]) ret['result'] = None if tags_to_update: msg = 'The following tag {0} set to be updated: {1}.'.format( ('values are' if len(tags_to_update.keys()) > 1 else 'value is'), ', '.join(tags_to_update.keys())) ret['comment'] = ' '.join([ret['comment'], msg]) else: all_tag_changes = salt.utils.dictupdate.update(tags_to_add, tags_to_update) _ret = __salt__['boto_elb.set_tags']( name, all_tag_changes, region, key, keyid, profile) if not _ret: ret['result'] = False msg = 'Error attempting to set tags.' 
ret['comment'] = ' '.join([ret['comment'], msg]) return ret if 'old' not in ret['changes']: ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'old': {'tags': {}}}) if 'new' not in ret['changes']: ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'new': {'tags': {}}}) for tag in all_tag_changes: ret['changes']['new']['tags'][tag] = tags[tag] if 'tags' in lb: if lb['tags']: if tag in lb['tags']: ret['changes']['old']['tags'][tag] = lb['tags'][tag] if not tags_to_update and not tags_to_remove and not tags_to_add: msg = 'Tags are already set.' ret['comment'] = ' '.join([ret['comment'], msg]) return ret
helper function to validate tags on elb
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_elb.py#L1291-L1364
null
# -*- coding: utf-8 -*- ''' Manage ELBs .. versionadded:: 2014.7.0 Create and destroy ELBs. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses ``boto``, which can be installed via package, or pip. This module accepts explicit elb credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml elb.keyid: GKTADJGHEIQSXMKKRBJ08H elb.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - availability_zones: - us-east-1a - us-east-1c - us-east-1d - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs - listeners: - elb_port: 443 instance_port: 80 elb_protocol: HTTPS instance_protocol: HTTP certificate: 'arn:aws:iam::1111111:server-certificate/mycert' policies: - my-ssl-policy - cookie-policy - elb_port: 8210 instance_port: 8210 elb_protocol: TCP - backends: - instance_port: 80 policies: - enable-proxy-protocol - health_check: target: 'HTTP:80/' - attributes: cross_zone_load_balancing: enabled: true access_log: enabled: true s3_bucket_name: 'mybucket' s3_bucket_prefix: 'my-logs' emit_interval: 5 connecting_settings: idle_timeout: 60 - cnames: - name: mycname.example.com. zone: example.com. ttl: 60 - name: myothercname.example.com. zone: example.com. 
- security_groups: - my-security-group - policies: - policy_name: my-ssl-policy policy_type: SSLNegotiationPolicyType policy: Protocol-TLSv1.2: true Protocol-SSLv3: false Server-Defined-Cipher-Order: true ECDHE-ECDSA-AES128-GCM-SHA256: true - policy_name: cookie-policy policy_type: LBCookieStickinessPolicyType policy: {} # no policy means this is a session cookie - policy_name: enable-proxy-protocol policy_type: ProxyProtocolPolicyType policy: ProxyProtocol: true # Using a profile from pillars Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile # Passing in a profile Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's possible to specify attributes from pillars by specifying a pillar. You can override the values defined in the pillard by setting the attributes on the resource. The module will use the default pillar key 'boto_elb_attributes', which allows you to set default attributes for all ELB resources. Setting the attributes pillar: .. code-block:: yaml my_elb_attributes: cross_zone_load_balancing: enabled: true connection_draining: enabled: true timeout: 20 access_log: enabled: true s3_bucket_name: 'mybucket' s3_bucket_prefix: 'my-logs' emit_interval: 5 Overriding the attribute values on the resource: .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - attributes_from_pillar: my_elb_attributes # override cross_zone_load_balancing:enabled - attributes: cross_zone_load_balancing: enabled: false - profile: myelbprofile It's possible to specify cloudwatch alarms that will be setup along with the ELB. Note the alarm name will be defined by the name attribute provided, plus the ELB resource name. .. 
code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - alarms: UnHealthyHostCount: name: 'ELB UnHealthyHostCount **MANAGED BY SALT**' attributes: metric: UnHealthyHostCount namespace: AWS/ELB statistic: Average comparison: '>=' threshold: 1.0 period: 600 evaluation_periods: 6 unit: null description: ELB UnHealthyHostCount alarm_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] insufficient_data_actions: [] ok_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] You can also use alarms from pillars, and override values from the pillar alarms by setting overrides on the resource. Note that 'boto_elb_alarms' will be used as a default value for all resources, if defined and can be used to ensure alarms are always set for a resource. Setting the alarms in a pillar: .. code-block:: yaml my_elb_alarm: UnHealthyHostCount: name: 'ELB UnHealthyHostCount **MANAGED BY SALT**' attributes: metric: UnHealthyHostCount namespace: AWS/ELB statistic: Average comparison: '>=' threshold: 1.0 period: 600 evaluation_periods: 6 unit: null description: ELB UnHealthyHostCount alarm_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] insufficient_data_actions: [] ok_actions: ['arn:aws:sns:us-east-1:12345:myalarm'] Overriding the alarm values on the resource: .. code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - alarms_from_pillar: my_elb_alarm # override UnHealthyHostCount:attributes:threshold - alarms: UnHealthyHostCount: attributes: threshold: 2.0 Tags can also be set: .. versionadded:: 2016.3.0 .. 
code-block:: yaml Ensure myelb ELB exists: boto_elb.present: - name: myelb - region: us-east-1 - profile: myelbprofile - tags: MyTag: 'My Tag Value' OtherTag: 'My Other Value' ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt Libs import hashlib import re import salt.utils.data import salt.utils.dictupdate import salt.utils.stringutils from salt.exceptions import SaltInvocationError from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' return 'boto_elb' if 'boto_elb.exists' in __salt__ else False def present(name, listeners, availability_zones=None, subnets=None, subnet_names=None, security_groups=None, scheme='internet-facing', health_check=None, attributes=None, attributes_from_pillar="boto_elb_attributes", cnames=None, alarms=None, alarms_from_pillar="boto_elb_alarms", policies=None, policies_from_pillar="boto_elb_policies", backends=None, region=None, key=None, keyid=None, profile=None, wait_for_sync=True, tags=None, instance_ids=None, instance_names=None): ''' Ensure the ELB exists. name Name of the ELB. availability_zones A list of availability zones for this ELB. listeners A list of listener lists; example:: [ ['443', 'HTTPS', 'arn:aws:iam::1111111:server-certificate/mycert'], ['8443', '80', 'HTTPS', 'HTTP', 'arn:aws:iam::1111111:server-certificate/mycert'] ] subnets A list of subnet IDs in your VPC to attach to your LoadBalancer. subnet_names A list of subnet names in your VPC to attach to your LoadBalancer. security_groups The security groups assigned to your LoadBalancer within your VPC. Must be passed either as a list or a comma-separated string. For example, a list: .. code-block:: yaml - security_groups: - secgroup-one - secgroup-two Or as a comma-separated string: .. code-block:: yaml - security_groups: secgroup-one,secgroup-two scheme The type of a LoadBalancer, ``internet-facing`` or ``internal``. 
Once set, can not be modified. health_check A dict defining the health check for this ELB. attributes A dict defining the attributes to set on this ELB. Unknown keys will be silently ignored. See the :mod:`salt.modules.boto_elb.set_attributes` function for recognized attributes. attributes_from_pillar name of pillar dict that contains attributes. Attributes defined for this specific state will override those from pillar. cnames A list of cname dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. the cnames dict will be passed to the state as kwargs. See the :mod:`salt.states.boto_route53` state for information about these attributes. alarms: a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ELB. All attributes should be specified except for dimension which will be automatically set to this ELB. See the :mod:`salt.states.boto_cloudwatch_alarm` state for information about these attributes. alarms_from_pillar: name of pillar dict that contains alarm settings. Alarms defined for this specific state will override those from pillar. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. wait_for_sync Wait for an INSYNC change status from Route53. tags dict of tags instance_ids list of instance ids. The state will ensure that these, and ONLY these, instances are registered with the ELB. This is additive with instance_names. instance_names list of instance names. The state will ensure that these, and ONLY these, instances are registered with the ELB. This is additive with instance_ids. 
''' # load data from attributes_from_pillar and merge with attributes tmp = __salt__['config.option'](attributes_from_pillar, {}) attributes = salt.utils.dictupdate.update(tmp, attributes) if attributes else tmp ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not isinstance(security_groups, (six.string_types, list, type(None))): msg = ("The 'security_group' parameter must be either a list or a " "comma-separated string.") log.error(msg) ret.update({'comment': msg, 'result': False}) return ret if isinstance(security_groups, six.string_types): security_groups = security_groups.split(',') _ret = _elb_present(name, availability_zones, listeners, subnets, subnet_names, security_groups, scheme, region, key, keyid, profile) ret.update({'changes': _ret['changes'], 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not exists and __opts__['test']: return ret if attributes: _ret = _attributes_present(name, attributes, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _health_check_present(name, health_check, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret if cnames: lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if lb: for cname in cnames: _ret = None dns_provider = 'boto_route53' cname.update({'record_type': 'CNAME', 'value': lb['dns_name']}) if 'provider' in cname: dns_provider = 
cname.pop('provider') if dns_provider == 'boto_route53': for p in ('profile', 'key', 'keyid', 'region', 'wait_for_sync'): cname[p] = locals().get(p) if p not in cname else cname[p] _ret = __states__['boto_route53.present'](**cname) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _policies_present(name, policies, policies_from_pillar, listeners, backends, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret _ret = _tags_present(name, tags, region, key, keyid, profile) ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']), 'comment': ' '.join([ret['comment'], _ret['comment']])}) ret['result'] = ret['result'] if _ret['result'] else _ret['result'] if ret['result'] is False: return ret if not instance_ids: instance_ids = [] if instance_names: # AWS borks on adding instances in "non-running" states, so filter 'em out. running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') for n in instance_names: instance_ids += __salt__['boto_ec2.find_instances']( name=n, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) # Backwards compat: Only touch attached instances if requested (e.g. if some are defined). 
if instance_ids: if __opts__['test']: if __salt__['boto_elb.set_instances']( name, instance_ids, True, region, key, keyid, profile): ret['comment'] += ' ELB {0} instances would be updated.'.format(name) ret['result'] = None else: success = __salt__['boto_elb.set_instances']( name, instance_ids, False, region, key, keyid, profile) if not success: ret['comment'] += "Failed to set requested instances." ret['result'] = False return ret def register_instances(name, instances, region=None, key=None, keyid=None, profile=None): ''' Add EC2 instance(s) to an Elastic Load Balancer. Removing an instance from the ``instances`` list does not remove it from the ELB. name The name of the Elastic Load Balancer to add EC2 instances to. instances A list of EC2 instance IDs that this Elastic Load Balancer should distribute traffic to. This state will only ever append new instances to the ELB. EC2 instances already associated with this ELB will not be removed if they are not in the ``instances`` list. .. versionadded:: 2015.8.0 .. 
code-block:: yaml add-instances: boto_elb.register_instances: - name: myloadbalancer - instances: - instance-id1 - instance-id2 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not lb: msg = 'Could not find lb {0}'.format(name) log.error(msg) ret.update({'comment': msg, 'result': False}) return ret health = __salt__['boto_elb.get_instance_health']( name, region, key, keyid, profile) nodes = [value['instance_id'] for value in health if value['description'] != 'Instance deregistration currently in progress.'] new = [value for value in instances if value not in nodes] if not new: msg = 'Instance/s {0} already exist.'.format(six.text_type(instances).strip('[]')) log.debug(msg) ret.update({'comment': msg}) return ret if __opts__['test']: ret['comment'] = 'ELB {0} is set to register : {1}.'.format(name, new) ret['result'] = None return ret state = __salt__['boto_elb.register_instances']( name, instances, region, key, keyid, profile) if state: msg = 'Load Balancer {0} has been changed'.format(name) log.info(msg) new = set().union(nodes, instances) ret.update({'comment': msg, 'changes': {'old': '\n'.join(nodes), 'new': '\n'.join(list(new))}}) else: msg = 'Load balancer {0} failed to add instances'.format(name) log.error(msg) ret.update({'comment': msg, 'result': False}) return ret DEFAULT_PILLAR_LISTENER_POLICY_KEY = 'boto_elb_listener_policies' def _elb_present(name, availability_zones, listeners, subnets, subnet_names, security_groups, scheme, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if not salt.utils.data.exactly_one((availability_zones, subnets, subnet_names)): raise SaltInvocationError('Exactly one of availability_zones, subnets, ' 'subnet_names must be provided as arguments.') if not listeners: listeners = [] for listener in listeners: if len(listener) < 3: raise SaltInvocationError('Listeners must have at minimum port,' ' 
instance_port and protocol values in' ' the provided list.') if 'elb_port' not in listener: raise SaltInvocationError('elb_port is a required value for' ' listeners.') if 'instance_port' not in listener: raise SaltInvocationError('instance_port is a required value for' ' listeners.') if 'elb_protocol' not in listener: raise SaltInvocationError('elb_protocol is a required value for' ' listeners.') listener['elb_protocol'] = listener['elb_protocol'].upper() if listener['elb_protocol'] == 'HTTPS' and 'certificate' not in listener: raise SaltInvocationError('certificate is a required value for' ' listeners if HTTPS is set for' ' elb_protocol.') # best attempt at principle of least surprise here: # only use the default pillar in cases where we don't explicitly # define policies OR policies_from_pillar on a listener policies = listener.setdefault('policies', []) policies_pillar = listener.get('policies_from_pillar', None) if not policies and policies_pillar is None: policies_pillar = DEFAULT_PILLAR_LISTENER_POLICY_KEY if policies_pillar: policies += __salt__['pillar.get'](policies_pillar, {}).get(listener['elb_protocol'], []) # Look up subnet ids from names if provided if subnet_names: subnets = [] for i in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', name=i, region=region, key=key, keyid=keyid, profile=profile) if 'error' in r: ret['comment'] = 'Error looking up subnet ids: {0}'.format(r['error']) ret['result'] = False return ret if 'id' not in r: ret['comment'] = 'Subnet {0} does not exist.'.format(i) ret['result'] = False return ret subnets.append(r['id']) _security_groups = None if subnets: vpc_id = __salt__['boto_vpc.get_subnet_association'](subnets, region, key, keyid, profile) vpc_id = vpc_id.get('vpc_id') if not vpc_id: ret['comment'] = 'Subnets {0} do not map to a valid vpc id.'.format(subnets) ret['result'] = False return ret _security_groups = __salt__['boto_secgroup.convert_to_group_ids']( security_groups, vpc_id=vpc_id, region=region, 
key=key, keyid=keyid, profile=profile ) if not _security_groups: ret['comment'] = 'Security groups {0} do not map to valid security group ids.'.format(security_groups) ret['result'] = False return ret exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if not exists: if __opts__['test']: ret['comment'] = 'ELB {0} is set to be created.'.format(name) ret['result'] = None return ret created = __salt__['boto_elb.create'](name=name, availability_zones=availability_zones, listeners=listeners, subnets=subnets, security_groups=_security_groups, scheme=scheme, region=region, key=key, keyid=keyid, profile=profile) if created: ret['changes']['old'] = {'elb': None} ret['changes']['new'] = {'elb': name} ret['comment'] = 'ELB {0} created.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to create {0} ELB.'.format(name) else: ret['comment'] = 'ELB {0} present.'.format(name) _ret = _security_groups_present(name, _security_groups, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _listeners_present(name, listeners, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret if availability_zones: _ret = _zones_present(name, availability_zones, region, key, keyid, profile) ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret elif subnets: _ret = _subnets_present(name, subnets, region, key, keyid, profile) ret['changes'] = 
salt.utils.dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret def _listeners_present(name, listeners, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) ret['result'] = False return ret if not listeners: listeners = [] expected_listeners_by_tuple = {} for l in listeners: l_key = __salt__['boto_elb.listener_dict_to_tuple'](l) expected_listeners_by_tuple[l_key] = l actual_listeners_by_tuple = {} for l in lb['listeners']: l_key = __salt__['boto_elb.listener_dict_to_tuple'](l) actual_listeners_by_tuple[l_key] = l to_delete = [] to_create = [] for t, l in six.iteritems(expected_listeners_by_tuple): if t not in actual_listeners_by_tuple: to_create.append(l) for t, l in six.iteritems(actual_listeners_by_tuple): if t not in expected_listeners_by_tuple: to_delete.append(l) if __opts__['test']: msg = [] if to_create or to_delete: msg.append('ELB {0} set to have listeners modified:'.format(name)) for listener in to_create: msg.append('Listener {0} added.'.format( __salt__['boto_elb.listener_dict_to_tuple'](listener))) for listener in to_delete: msg.append('Listener {0} deleted.'.format( __salt__['boto_elb.listener_dict_to_tuple'](listener))) ret['result'] = None else: msg.append('Listeners already set on ELB {0}.'.format(name)) ret['comment'] = ' '.join(msg) return ret if to_delete: ports = [l['elb_port'] for l in to_delete] deleted = __salt__['boto_elb.delete_listeners'](name, ports, region, key, keyid, profile) if deleted: ret['comment'] = 'Deleted listeners on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to delete listeners on {0} ELB.'.format(name) ret['result'] = False if to_create: created = __salt__['boto_elb.create_listeners'](name, 
to_create, region, key, keyid, profile) if created: msg = 'Created listeners on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) else: msg = 'Failed to create listeners on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) ret['result'] = False if to_create or to_delete: ret['changes']['listeners'] = {} ret['changes']['listeners']['old'] = lb['listeners'] lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) ret['changes']['listeners']['new'] = lb['listeners'] else: ret['comment'] = 'Listeners already set on ELB {0}.'.format(name) return ret def _security_groups_present(name, security_groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) ret['result'] = False return ret if not security_groups: security_groups = [] change_needed = False if set(security_groups) != set(lb['security_groups']): change_needed = True if change_needed: if __opts__['test']: ret['comment'] = 'ELB {0} set to have security groups modified.'.format(name) ret['result'] = None return ret changed = __salt__['boto_elb.apply_security_groups']( name, security_groups, region, key, keyid, profile ) if changed: ret['comment'] = 'Modified security_groups on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to modify security_groups on {0} ELB.'.format(name) ret['result'] = False ret['changes']['old'] = {'security_groups': lb['security_groups']} ret['changes']['new'] = {'security_groups': security_groups} else: ret['comment'] = 'security_groups already set on ELB {0}.'.format(name) return ret def _attributes_present(name, attributes, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} _attributes = __salt__['boto_elb.get_attributes'](name, region, key, keyid, profile) if not _attributes: ret['result'] = 
False ret['comment'] = 'Failed to retrieve attributes for ELB {0}.'.format(name) return ret attrs_to_set = [] if 'cross_zone_load_balancing' in attributes: czlb = attributes['cross_zone_load_balancing'] _czlb = _attributes['cross_zone_load_balancing'] if czlb['enabled'] != _czlb['enabled']: attrs_to_set.append('cross_zone_load_balancing') if 'connection_draining' in attributes: cd = attributes['connection_draining'] _cd = _attributes['connection_draining'] if (cd['enabled'] != _cd['enabled'] or cd.get('timeout', 300) != _cd.get('timeout')): attrs_to_set.append('connection_draining') if 'connecting_settings' in attributes: cs = attributes['connecting_settings'] _cs = _attributes['connecting_settings'] if cs['idle_timeout'] != _cs['idle_timeout']: attrs_to_set.append('connecting_settings') if 'access_log' in attributes: for attr, val in six.iteritems(attributes['access_log']): if six.text_type(_attributes['access_log'][attr]) != six.text_type(val): attrs_to_set.append('access_log') if 's3_bucket_prefix' in attributes['access_log']: sbp = attributes['access_log']['s3_bucket_prefix'] if sbp.startswith('/') or sbp.endswith('/'): raise SaltInvocationError('s3_bucket_prefix can not start or' ' end with /.') if attrs_to_set: if __opts__['test']: ret['comment'] = 'ELB {0} set to have attributes set.'.format(name) ret['result'] = None return ret was_set = __salt__['boto_elb.set_attributes'](name, attributes, region, key, keyid, profile) if was_set: ret['changes']['old'] = {'attributes': _attributes} ret['changes']['new'] = {'attributes': attributes} ret['comment'] = 'Set attributes on ELB {0}.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to set attributes on ELB {0}.'.format(name) else: ret['comment'] = 'Attributes already set on ELB {0}.'.format(name) return ret def _health_check_present(name, health_check, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if not health_check: health_check = {} _health_check = 
__salt__['boto_elb.get_health_check'](name, region, key, keyid, profile) if not _health_check: ret['result'] = False ret['comment'] = 'Failed to retrieve health_check for ELB {0}.'.format(name) return ret need_to_set = False for attr, val in six.iteritems(health_check): if six.text_type(_health_check[attr]) != six.text_type(val): need_to_set = True if need_to_set: if __opts__['test']: ret['comment'] = 'ELB {0} set to have health check set.'.format(name) ret['result'] = None return ret was_set = __salt__['boto_elb.set_health_check'](name, health_check, region, key, keyid, profile) if was_set: ret['changes']['old'] = {'health_check': _health_check} _health_check = __salt__['boto_elb.get_health_check'](name, region, key, keyid, profile) ret['changes']['new'] = {'health_check': _health_check} ret['comment'] = 'Set health check on ELB {0}.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to set health check on ELB {0}.'.format(name) else: ret['comment'] = 'Health check already set on ELB {0}.'.format(name) return ret def _zones_present(name, availability_zones, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['result'] = False ret['comment'] = 'Failed to retrieve ELB {0}.'.format(name) return ret to_enable = [] to_disable = [] _zones = lb['availability_zones'] for zone in availability_zones: if zone not in _zones: to_enable.append(zone) for zone in _zones: if zone not in availability_zones: to_disable.append(zone) if to_enable or to_disable: if __opts__['test']: ret['comment'] = 'ELB {0} to have availability zones set.'.format(name) ret['result'] = None return ret if to_enable: enabled = __salt__['boto_elb.enable_availability_zones']( name, to_enable, region, key, keyid, profile) if enabled: ret['comment'] = 'Enabled availability zones on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to enable availability zones on {0} 
ELB.'.format(name) ret['result'] = False if to_disable: disabled = __salt__['boto_elb.disable_availability_zones']( name, to_disable, region, key, keyid, profile) if disabled: msg = 'Disabled availability zones on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) else: msg = 'Failed to disable availability zones on {0} ELB.' ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) ret['result'] = False ret['changes']['old'] = {'availability_zones': lb['availability_zones']} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) ret['changes']['new'] = {'availability_zones': lb['availability_zones']} else: ret['comment'] = 'Availability zones already set on ELB {0}.'.format(name) return ret def _subnets_present(name, subnets, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if not subnets: subnets = [] lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['result'] = False ret['comment'] = 'Failed to retrieve ELB {0}.'.format(name) return ret to_enable = [] to_disable = [] _subnets = lb['subnets'] for subnet in subnets: if subnet not in _subnets: to_enable.append(subnet) for subnet in _subnets: if subnet not in subnets: to_disable.append(subnet) if to_enable or to_disable: if __opts__['test']: ret['comment'] = 'ELB {0} to have subnets set.'.format(name) ret['result'] = None return ret if to_enable: attached = __salt__['boto_elb.attach_subnets'](name, to_enable, region, key, keyid, profile) if attached: ret['comment'] = 'Attached subnets on {0} ELB.'.format(name) else: ret['comment'] = 'Failed to attach subnets on {0} ELB.'.format(name) ret['result'] = False if to_disable: detached = __salt__['boto_elb.detach_subnets'](name, to_disable, region, key, keyid, profile) if detached: ret['comment'] = ' '.join([ ret['comment'], 'Detached subnets on {0} ELB.'.format(name) ]) else: ret['comment'] = ' '.join([ ret['comment'], 'Failed to detach subnets on {0} 
ELB.'.format(name) ]) ret['result'] = False ret['changes']['old'] = {'subnets': lb['subnets']} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) ret['changes']['new'] = {'subnets': lb['subnets']} else: ret['comment'] = 'Subnets already set on ELB {0}.'.format(name) return ret def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile): '''helper method for present. ensure that cloudwatch_alarms are set''' current = __salt__['config.option'](alarms_from_pillar, {}) if alarms: current = salt.utils.dictupdate.update(current, alarms) ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} for _, info in six.iteritems(current): info["name"] = name + " " + info["name"] info["attributes"]["description"] = name + " " + info["attributes"]["description"] info["attributes"]["dimensions"] = {"LoadBalancerName": [name]} kwargs = { "name": info["name"], "attributes": info["attributes"], "region": region, "key": key, "keyid": keyid, "profile": profile, } # No test=False cluase needed since the state handles that itself... results = __states__['boto_cloudwatch_alarm.present'](**kwargs) if not results.get('result'): ret["result"] = results["result"] if results.get("changes", {}) != {}: ret["changes"][info["name"]] = results["changes"] if "comment" in results: ret["comment"] += results["comment"] return ret def _policies_present(name, policies, policies_from_pillar, listeners, backends, region, key, keyid, profile): '''helper method for present. 
ensure that ELB policies are set''' if policies is None: policies = [] pillar_policies = __salt__['config.option'](policies_from_pillar, []) policies = policies + pillar_policies if backends is None: backends = [] # check for policy name uniqueness and correct type policy_names = set() for p in policies: if 'policy_name' not in p: raise SaltInvocationError('policy_name is a required value for ' 'policies.') if 'policy_type' not in p: raise SaltInvocationError('policy_type is a required value for ' 'policies.') if 'policy' not in p: raise SaltInvocationError('policy is a required value for ' 'listeners.') # check for unique policy names if p['policy_name'] in policy_names: raise SaltInvocationError('Policy names must be unique: policy {0}' ' is declared twice.'.format(p['policy_name'])) policy_names.add(p['policy_name']) # check that listeners refer to valid policy names for l in listeners: for p in l.get('policies', []): if p not in policy_names: raise SaltInvocationError('Listener {0} on ELB {1} refers to ' 'undefined policy {2}.'.format(l['elb_port'], name, p)) # check that backends refer to valid policy names for b in backends: for p in b.get('policies', []): if p not in policy_names: raise SaltInvocationError('Backend {0} on ELB {1} refers to ' 'undefined policy ' '{2}.'.format(b['instance_port'], name, p)) ret = {'result': True, 'comment': '', 'changes': {}} lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile) if not lb: ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name) ret['result'] = False return ret # Policies have two names: # - a short name ('name') that's only the policy name (e.g. testpolicy) # - a canonical name ('cname') that contains the policy type and hash # (e.g. 
SSLNegotiationPolicy-testpolicy-14b32f668639cc8ea1391e062af98524) policies_by_cname = {} cnames_by_name = {} for p in policies: cname = _policy_cname(p) policies_by_cname[cname] = p cnames_by_name[p['policy_name']] = cname expected_policy_names = policies_by_cname.keys() actual_policy_names = lb['policies'] # This is sadly a huge hack to get around the fact that AWS assigns a # default SSLNegotiationPolicyType policy (with the naming scheme # ELBSecurityPolicy-YYYY-MM) to all ELBs terminating SSL without an # explicit policy set. If we don't keep track of the default policies and # explicitly exclude them from deletion, orchestration will fail because we # attempt to delete the default policy that's being used by listeners that # were created with no explicit policy. default_aws_policies = set() expected_policies_by_listener = {} for l in listeners: expected_policies_by_listener[l['elb_port']] = set( [cnames_by_name[p] for p in l.get('policies', [])]) actual_policies_by_listener = {} for l in lb['listeners']: listener_policies = set(l.get('policies', [])) actual_policies_by_listener[l['elb_port']] = listener_policies # Determine if any actual listener policies look like default policies, # so we can exclude them from deletion below (see note about this hack # above). 
for p in listener_policies: if re.match(r'^ELBSecurityPolicy-\d{4}-\d{2}$', p): default_aws_policies.add(p) expected_policies_by_backend = {} for b in backends: expected_policies_by_backend[b['instance_port']] = set( [cnames_by_name[p] for p in b.get('policies', [])]) actual_policies_by_backend = {} for b in lb['backends']: backend_policies = set(b.get('policies', [])) actual_policies_by_backend[b['instance_port']] = backend_policies to_delete = [] to_create = [] for policy_name in expected_policy_names: if policy_name not in actual_policy_names: to_create.append(policy_name) for policy_name in actual_policy_names: if policy_name not in expected_policy_names: if policy_name not in default_aws_policies: to_delete.append(policy_name) listeners_to_update = set() for port, policies in six.iteritems(expected_policies_by_listener): if policies != actual_policies_by_listener.get(port, set()): listeners_to_update.add(port) for port, policies in six.iteritems(actual_policies_by_listener): if policies != expected_policies_by_listener.get(port, set()): listeners_to_update.add(port) backends_to_update = set() for port, policies in six.iteritems(expected_policies_by_backend): if policies != actual_policies_by_backend.get(port, set()): backends_to_update.add(port) for port, policies in six.iteritems(actual_policies_by_backend): if policies != expected_policies_by_backend.get(port, set()): backends_to_update.add(port) if __opts__['test']: msg = [] if to_create or to_delete: msg.append('ELB {0} set to have policies modified:'.format(name)) for policy in to_create: msg.append('Policy {0} added.'.format(policy)) for policy in to_delete: msg.append('Policy {0} deleted.'.format(policy)) ret['result'] = None else: msg.append('Policies already set on ELB {0}.'.format(name)) for listener in listeners_to_update: msg.append('Listener {0} policies updated.'.format(listener)) for backend in backends_to_update: msg.append('Backend {0} policies updated.'.format(backend)) ret['comment'] = ' 
'.join(msg) return ret if to_create: for policy_name in to_create: created = __salt__['boto_elb.create_policy']( name=name, policy_name=policy_name, policy_type=policies_by_cname[policy_name]['policy_type'], policy=policies_by_cname[policy_name]['policy'], region=region, key=key, keyid=keyid, profile=profile) if created: ret['changes'].setdefault(policy_name, {})['new'] = policy_name comment = "Policy {0} was created on ELB {1}".format( policy_name, name) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret for port in listeners_to_update: policy_set = __salt__['boto_elb.set_listener_policy']( name=name, port=port, policies=list(expected_policies_by_listener.get(port, [])), region=region, key=key, keyid=keyid, profile=profile) if policy_set: policy_key = 'listener_{0}_policy'.format(port) ret['changes'][policy_key] = { 'old': list(actual_policies_by_listener.get(port, [])), 'new': list(expected_policies_by_listener.get(port, [])), } comment = "Policy {0} was created on ELB {1} listener {2}".format( expected_policies_by_listener[port], name, port) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret for port in backends_to_update: policy_set = __salt__['boto_elb.set_backend_policy']( name=name, port=port, policies=list(expected_policies_by_backend.get(port, [])), region=region, key=key, keyid=keyid, profile=profile) if policy_set: policy_key = 'backend_{0}_policy'.format(port) ret['changes'][policy_key] = { 'old': list(actual_policies_by_backend.get(port, [])), 'new': list(expected_policies_by_backend.get(port, [])), } comment = "Policy {0} was created on ELB {1} backend {2}".format( expected_policies_by_backend[port], name, port) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret if to_delete: for policy_name in to_delete: deleted = __salt__['boto_elb.delete_policy']( name=name, 
policy_name=policy_name, region=region, key=key, keyid=keyid, profile=profile) if deleted: ret['changes'].setdefault(policy_name, {})['old'] = policy_name comment = "Policy {0} was deleted from ELB {1}".format( policy_name, name) ret['comment'] = ' '.join([ret['comment'], comment]) ret['result'] = True else: ret['result'] = False return ret return ret def _policy_cname(policy_dict): policy_name = policy_dict['policy_name'] policy_type = policy_dict['policy_type'] policy = policy_dict['policy'] canonical_policy_repr = six.text_type(sorted(list(six.iteritems(policy)), key=lambda x: six.text_type(x[0]))) policy_hash = hashlib.md5( salt.utils.stringutils.to_bytes(str(canonical_policy_repr))).hexdigest() # future lint: disable=blacklisted-function if policy_type.endswith('Type'): policy_type = policy_type[:-4] return "{0}-{1}-{2}".format(policy_type, policy_name, policy_hash) def absent(name, region=None, key=None, keyid=None, profile=None): ''' Ensure an ELB does not exist name name of the ELB ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'ELB {0} is set to be removed.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_elb.delete'](name, region, key, keyid, profile) if deleted: ret['changes']['old'] = {'elb': name} ret['changes']['new'] = {'elb': None} ret['comment'] = 'ELB {0} deleted.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to delete {0} ELB.'.format(name) else: ret['comment'] = '{0} ELB does not exist.'.format(name) return ret
saltstack/salt
salt/utils/thread_local_proxy.py
ThreadLocalProxy.get_reference
python
def get_reference(proxy):
    '''
    Return the object the specified proxy currently points to.

    The lookup is thread-local: each thread of execution may have bound a
    different reference to the same proxy.  When the current thread has not
    bound a reference yet, the ``_fallback_to_shared`` flag chosen at proxy
    creation decides what happens: if it is set, the reference most recently
    bound by *any* thread is adopted (and silently recorded for the current
    thread); otherwise an ``AttributeError`` is raised.

    If the referenced object is itself a proxy, that proxy is returned as-is;
    use ``unproxy`` to unwrap repeatedly until a non-proxy object is found.

    proxy:
        proxy object whose reference shall be returned.  Passing anything
        that is not a `ThreadLocalProxy` results in unspecified behavior,
        typically an ``AttributeError``.
    '''
    # object.__getattribute__ bypasses the proxy's own attribute delegation.
    local_state = object.__getattribute__(proxy, '_thread_local')
    try:
        return local_state.reference
    except AttributeError:
        if object.__getattribute__(proxy, '_fallback_to_shared'):
            # Adopt the most recent reference set by any thread, and cache
            # it for this thread so subsequent lookups stay consistent.
            shared = object.__getattribute__(proxy, '_last_reference')
            ThreadLocalProxy.set_reference(proxy, shared)
            return shared
        # Raising (instead of returning None) surfaces the missing binding
        # at the point of use, which makes the problem far easier to debug.
        raise AttributeError(
            'The proxy object has not been bound to a reference in this thread of execution.')
Return the object that is referenced by the specified proxy. If the proxy has not been bound to a reference for the current thread, the behavior depends on the ``fallback_to_shared`` flag that has been specified when creating the proxy. If the flag has been set, the last reference that has been set by any thread is returned (and silently set as the reference for the current thread). If the flag has not been set, an ``AttributeError`` is raised. If the object referenced by this proxy is itself a proxy, that proxy is returned. Use ``unproxy`` for unwrapping the referenced object until it is not a proxy. proxy: proxy object for which the reference shall be returned. If the specified object is not an instance of `ThreadLocalProxy`, the behavior is unspecified. Typically, an ``AttributeError`` is going to be raised.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thread_local_proxy.py#L47-L92
[ "def set_reference(proxy, new_reference):\n '''\n Set the reference to be used the current thread of execution.\n\n After calling this function, the specified proxy will act like it was\n the referenced object.\n\n proxy:\n proxy object for which the reference shall be set. If the specified\n object is not an instance of `ThreadLocalProxy`, the behavior is\n unspecified. Typically, an ``AttributeError`` is going to be\n raised.\n\n new_reference:\n reference the proxy should point to for the current thread after\n calling this function.\n '''\n # If the new reference is itself a proxy, we have to ensure that it does\n # not refer to this proxy. If it does, we simply return because updating\n # the reference would result in an inifite loop when trying to use the\n # proxy.\n possible_proxy = new_reference\n while isinstance(possible_proxy, ThreadLocalProxy):\n if possible_proxy is proxy:\n return\n possible_proxy = ThreadLocalProxy.get_reference(possible_proxy)\n thread_local = object.__getattribute__(proxy, '_thread_local')\n thread_local.reference = new_reference\n object.__setattr__(proxy, '_last_reference', new_reference)\n" ]
class ThreadLocalProxy(object): ''' Proxy that delegates all operations to its referenced object. The referenced object is hold through a thread-local variable, so that this proxy may refer to different objects in different threads of execution. For all practical purposes (operators, attributes, `isinstance`), the proxy acts like the referenced object. Thus, code receiving the proxy object instead of the reference object typically does not have to be changed. The only exception is code that explicitly uses the ``type()`` function for checking the proxy's type. While `isinstance(proxy, ...)` will yield the expected results (based on the actual type of the referenced object), using something like ``issubclass(type(proxy), ...)`` will not work, because these tests will be made on the type of the proxy object instead of the type of the referenced object. In order to avoid this, such code must be changed to use ``issubclass(type(ThreadLocalProxy.unproxy(proxy)), ...)``. If an instance of this class is created with the ``fallback_to_shared`` flag set and a thread uses the instance without setting the reference explicitly, the reference for this thread is initialized with the latest reference set by any thread. This class has primarily been designed for use by the Salt loader, but it might also be useful in other places. ''' __slots__ = ['_thread_local', '_last_reference', '_fallback_to_shared'] @staticmethod @staticmethod def set_reference(proxy, new_reference): ''' Set the reference to be used the current thread of execution. After calling this function, the specified proxy will act like it was the referenced object. proxy: proxy object for which the reference shall be set. If the specified object is not an instance of `ThreadLocalProxy`, the behavior is unspecified. Typically, an ``AttributeError`` is going to be raised. new_reference: reference the proxy should point to for the current thread after calling this function. 
''' # If the new reference is itself a proxy, we have to ensure that it does # not refer to this proxy. If it does, we simply return because updating # the reference would result in an inifite loop when trying to use the # proxy. possible_proxy = new_reference while isinstance(possible_proxy, ThreadLocalProxy): if possible_proxy is proxy: return possible_proxy = ThreadLocalProxy.get_reference(possible_proxy) thread_local = object.__getattribute__(proxy, '_thread_local') thread_local.reference = new_reference object.__setattr__(proxy, '_last_reference', new_reference) @staticmethod def unset_reference(proxy): ''' Unset the reference to be used by the current thread of execution. After calling this function, the specified proxy will act like the reference had never been set for the current thread. proxy: proxy object for which the reference shall be unset. If the specified object is not an instance of `ThreadLocalProxy`, the behavior is unspecified. Typically, an ``AttributeError`` is going to be raised. ''' thread_local = object.__getattribute__(proxy, '_thread_local') del thread_local.reference @staticmethod def unproxy(possible_proxy): ''' Unwrap and return the object referenced by a proxy. This function is very similar to :func:`get_reference`, but works for both proxies and regular objects. If the specified object is a proxy, its reference is extracted with ``get_reference`` and returned. If it is not a proxy, it is returned as is. If the object references by the proxy is itself a proxy, the unwrapping is repeated until a regular (non-proxy) object is found. possible_proxy: object that might or might not be a proxy. ''' while isinstance(possible_proxy, ThreadLocalProxy): possible_proxy = ThreadLocalProxy.get_reference(possible_proxy) return possible_proxy def __init__(self, initial_reference, fallback_to_shared=False): ''' Create a proxy object that references the specified object. 
initial_reference: object this proxy should initially reference (for the current thread of execution). The :func:`set_reference` function is called for the newly created proxy, passing this object. fallback_to_shared: flag indicating what should happen when the proxy is used in a thread where the reference has not been set explicitly. If ``True``, the thread's reference is silently initialized to use the reference last set by any thread. If ``False`` (the default), an exception is raised when the proxy is used in a thread without first initializing the reference in this thread. ''' object.__setattr__(self, '_thread_local', threading.local()) object.__setattr__(self, '_fallback_to_shared', fallback_to_shared) ThreadLocalProxy.set_reference(self, initial_reference) def __repr__(self): reference = ThreadLocalProxy.get_reference(self) return repr(reference) def __str__(self): reference = ThreadLocalProxy.get_reference(self) return str(reference) def __lt__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference < other def __le__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference <= other def __eq__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference == other def __ne__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference != other def __gt__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference > other def __ge__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference >= other def __hash__(self): reference = ThreadLocalProxy.get_reference(self) return hash(reference) def __nonzero__(self): reference = ThreadLocalProxy.get_reference(self) return bool(reference) def 
__getattr__(self, name): reference = ThreadLocalProxy.get_reference(self) # Old-style classes might not have a __getattr__ method, but using # getattr(...) will still work. try: original_method = reference.__getattr__ except AttributeError: return getattr(reference, name) return reference.__getattr__(name) def __setattr__(self, name, value): reference = ThreadLocalProxy.get_reference(self) reference.__setattr__(name, value) def __delattr__(self, name): reference = ThreadLocalProxy.get_reference(self) reference.__delattr__(name) def __getattribute__(self, name): reference = ThreadLocalProxy.get_reference(self) return reference.__getattribute__(name) def __call__(self, *args, **kwargs): reference = ThreadLocalProxy.get_reference(self) return reference(*args, **kwargs) def __len__(self): reference = ThreadLocalProxy.get_reference(self) return len(reference) def __getitem__(self, key): reference = ThreadLocalProxy.get_reference(self) return reference[key] def __setitem__(self, key, value): reference = ThreadLocalProxy.get_reference(self) reference[key] = value def __delitem__(self, key): reference = ThreadLocalProxy.get_reference(self) del reference[key] def __iter__(self): reference = ThreadLocalProxy.get_reference(self) return reference.__iter__() def __reversed__(self): reference = ThreadLocalProxy.get_reference(self) return reversed(reference) def __contains__(self, item): reference = ThreadLocalProxy.get_reference(self) return item in reference def __add__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference + other def __sub__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference - other def __mul__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference * other def __floordiv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = 
ThreadLocalProxy.unproxy(other) return reference // other def __mod__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference % other def __divmod__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return divmod(reference, other) def __pow__(self, other, modulo=None): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) modulo = ThreadLocalProxy.unproxy(modulo) if modulo is None: return pow(reference, other) else: return pow(reference, other, modulo) def __lshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference << other def __rshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference >> other def __and__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference & other def __xor__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference ^ other def __or__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference | other def __div__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__div__ except AttributeError: return NotImplemented return func(other) def __truediv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__truediv__ except AttributeError: return NotImplemented return func(other) def __radd__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other + reference def __rsub__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) 
return other - reference def __rmul__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other * reference def __rdiv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__rdiv__ except AttributeError: return NotImplemented return func(other) def __rtruediv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__rtruediv__ except AttributeError: return NotImplemented return func(other) def __rfloordiv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other // reference def __rmod__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other % reference def __rdivmod__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return divmod(other, reference) def __rpow__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other ** reference def __rlshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other << reference def __rrshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other >> reference def __rand__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other & reference def __rxor__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other ^ reference def __ror__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other | reference def __iadd__(self, other): reference = ThreadLocalProxy.get_reference(self) other = 
ThreadLocalProxy.unproxy(other) reference += other ThreadLocalProxy.set_reference(self, reference) return reference def __isub__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference -= other ThreadLocalProxy.set_reference(self, reference) return reference def __imul__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference *= other ThreadLocalProxy.set_reference(self, reference) return reference def __idiv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__idiv__ except AttributeError: return NotImplemented reference = func(other) ThreadLocalProxy.set_reference(self, reference) return reference def __itruediv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__itruediv__ except AttributeError: return NotImplemented reference = func(other) ThreadLocalProxy.set_reference(self, reference) return reference def __ifloordiv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference //= other ThreadLocalProxy.set_reference(self, reference) return reference def __imod__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference %= other ThreadLocalProxy.set_reference(self, reference) return reference def __ipow__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference **= other ThreadLocalProxy.set_reference(self, reference) return reference def __ilshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference <<= other ThreadLocalProxy.set_reference(self, reference) return reference def __irshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = 
ThreadLocalProxy.unproxy(other) reference >>= other ThreadLocalProxy.set_reference(self, reference) return reference def __iand__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference &= other ThreadLocalProxy.set_reference(self, reference) return reference def __ixor__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference ^= other ThreadLocalProxy.set_reference(self, reference) return reference def __ior__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference |= other ThreadLocalProxy.set_reference(self, reference) return reference def __neg__(self): reference = ThreadLocalProxy.get_reference(self) return - reference def __pos__(self): reference = ThreadLocalProxy.get_reference(self) return + reference def __abs__(self): reference = ThreadLocalProxy.get_reference(self) return abs(reference) def __invert__(self): reference = ThreadLocalProxy.get_reference(self) return ~ reference def __complex__(self): reference = ThreadLocalProxy.get_reference(self) return complex(reference) def __int__(self): reference = ThreadLocalProxy.get_reference(self) return int(reference) def __float__(self): reference = ThreadLocalProxy.get_reference(self) return float(reference) def __oct__(self): reference = ThreadLocalProxy.get_reference(self) return oct(reference) def __hex__(self): reference = ThreadLocalProxy.get_reference(self) return hex(reference) def __index__(self): reference = ThreadLocalProxy.get_reference(self) try: func = reference.__index__ except AttributeError: return NotImplemented return func() def __coerce__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return coerce(reference, other) if six.PY2: # pylint: disable=incompatible-py3-code def __unicode__(self): reference = ThreadLocalProxy.get_reference(self) return unicode(reference) def 
__long__(self): reference = ThreadLocalProxy.get_reference(self) return long(reference)
saltstack/salt
salt/utils/thread_local_proxy.py
ThreadLocalProxy.set_reference
python
def set_reference(proxy, new_reference):
    '''
    Bind *proxy* to *new_reference* for the current thread of execution.

    After this call the proxy acts like the referenced object whenever it
    is used from the current thread.  The reference is also remembered as
    the "last reference set by any thread", which is what threads that use
    the ``fallback_to_shared`` behavior pick up.

    proxy:
        proxy object whose reference shall be set.  Passing anything that
        is not a `ThreadLocalProxy` results in unspecified behavior,
        typically an ``AttributeError``.

    new_reference:
        object the proxy should point to for the current thread once this
        function returns.
    '''
    # Walk any chain of proxies starting at the new reference.  If that
    # chain leads back to this very proxy, binding it would create a cycle
    # that loops forever when the proxy is used, so we bail out silently
    # without updating anything.
    candidate = new_reference
    while isinstance(candidate, ThreadLocalProxy):
        if candidate is proxy:
            return
        candidate = ThreadLocalProxy.get_reference(candidate)
    # object.__getattribute__ / object.__setattr__ bypass the proxy's own
    # delegation so we touch the proxy's internal slots, not the target.
    object.__getattribute__(proxy, '_thread_local').reference = new_reference
    object.__setattr__(proxy, '_last_reference', new_reference)
Set the reference to be used by the current thread of execution. After calling this function, the specified proxy will act like it was the referenced object. proxy: proxy object for which the reference shall be set. If the specified object is not an instance of `ThreadLocalProxy`, the behavior is unspecified. Typically, an ``AttributeError`` is going to be raised. new_reference: reference the proxy should point to for the current thread after calling this function.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thread_local_proxy.py#L95-L123
null
class ThreadLocalProxy(object): ''' Proxy that delegates all operations to its referenced object. The referenced object is hold through a thread-local variable, so that this proxy may refer to different objects in different threads of execution. For all practical purposes (operators, attributes, `isinstance`), the proxy acts like the referenced object. Thus, code receiving the proxy object instead of the reference object typically does not have to be changed. The only exception is code that explicitly uses the ``type()`` function for checking the proxy's type. While `isinstance(proxy, ...)` will yield the expected results (based on the actual type of the referenced object), using something like ``issubclass(type(proxy), ...)`` will not work, because these tests will be made on the type of the proxy object instead of the type of the referenced object. In order to avoid this, such code must be changed to use ``issubclass(type(ThreadLocalProxy.unproxy(proxy)), ...)``. If an instance of this class is created with the ``fallback_to_shared`` flag set and a thread uses the instance without setting the reference explicitly, the reference for this thread is initialized with the latest reference set by any thread. This class has primarily been designed for use by the Salt loader, but it might also be useful in other places. ''' __slots__ = ['_thread_local', '_last_reference', '_fallback_to_shared'] @staticmethod def get_reference(proxy): ''' Return the object that is referenced by the specified proxy. If the proxy has not been bound to a reference for the current thread, the behavior depends on th the ``fallback_to_shared`` flag that has been specified when creating the proxy. If the flag has been set, the last reference that has been set by any thread is returned (and silently set as the reference for the current thread). If the flag has not been set, an ``AttributeError`` is raised. If the object references by this proxy is itself a proxy, that proxy is returned. 
Use ``unproxy`` for unwrapping the referenced object until it is not a proxy. proxy: proxy object for which the reference shall be returned. If the specified object is not an instance of `ThreadLocalProxy`, the behavior is unspecified. Typically, an ``AttributeError`` is going to be raised. ''' thread_local = object.__getattribute__(proxy, '_thread_local') try: return thread_local.reference except AttributeError: fallback_to_shared = object.__getattribute__( proxy, '_fallback_to_shared') if fallback_to_shared: # If the reference has never been set in the current thread of # execution, we use the reference that has been last set by any # thread. reference = object.__getattribute__(proxy, '_last_reference') # We save the reference in the thread local so that future # calls to get_reference will have consistent results. ThreadLocalProxy.set_reference(proxy, reference) return reference else: # We could simply return None, but this would make it hard to # debug situations where the reference has not been set (the # problem might go unnoticed until some code tries to do # something with the returned object and it might not be easy to # find out from where the None value originates). # For this reason, we raise an AttributeError with an error # message explaining the problem. raise AttributeError( 'The proxy object has not been bound to a reference in this thread of execution.') @staticmethod @staticmethod def unset_reference(proxy): ''' Unset the reference to be used by the current thread of execution. After calling this function, the specified proxy will act like the reference had never been set for the current thread. proxy: proxy object for which the reference shall be unset. If the specified object is not an instance of `ThreadLocalProxy`, the behavior is unspecified. Typically, an ``AttributeError`` is going to be raised. 
''' thread_local = object.__getattribute__(proxy, '_thread_local') del thread_local.reference @staticmethod def unproxy(possible_proxy): ''' Unwrap and return the object referenced by a proxy. This function is very similar to :func:`get_reference`, but works for both proxies and regular objects. If the specified object is a proxy, its reference is extracted with ``get_reference`` and returned. If it is not a proxy, it is returned as is. If the object references by the proxy is itself a proxy, the unwrapping is repeated until a regular (non-proxy) object is found. possible_proxy: object that might or might not be a proxy. ''' while isinstance(possible_proxy, ThreadLocalProxy): possible_proxy = ThreadLocalProxy.get_reference(possible_proxy) return possible_proxy def __init__(self, initial_reference, fallback_to_shared=False): ''' Create a proxy object that references the specified object. initial_reference: object this proxy should initially reference (for the current thread of execution). The :func:`set_reference` function is called for the newly created proxy, passing this object. fallback_to_shared: flag indicating what should happen when the proxy is used in a thread where the reference has not been set explicitly. If ``True``, the thread's reference is silently initialized to use the reference last set by any thread. If ``False`` (the default), an exception is raised when the proxy is used in a thread without first initializing the reference in this thread. 
''' object.__setattr__(self, '_thread_local', threading.local()) object.__setattr__(self, '_fallback_to_shared', fallback_to_shared) ThreadLocalProxy.set_reference(self, initial_reference) def __repr__(self): reference = ThreadLocalProxy.get_reference(self) return repr(reference) def __str__(self): reference = ThreadLocalProxy.get_reference(self) return str(reference) def __lt__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference < other def __le__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference <= other def __eq__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference == other def __ne__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference != other def __gt__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference > other def __ge__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference >= other def __hash__(self): reference = ThreadLocalProxy.get_reference(self) return hash(reference) def __nonzero__(self): reference = ThreadLocalProxy.get_reference(self) return bool(reference) def __getattr__(self, name): reference = ThreadLocalProxy.get_reference(self) # Old-style classes might not have a __getattr__ method, but using # getattr(...) will still work. 
try: original_method = reference.__getattr__ except AttributeError: return getattr(reference, name) return reference.__getattr__(name) def __setattr__(self, name, value): reference = ThreadLocalProxy.get_reference(self) reference.__setattr__(name, value) def __delattr__(self, name): reference = ThreadLocalProxy.get_reference(self) reference.__delattr__(name) def __getattribute__(self, name): reference = ThreadLocalProxy.get_reference(self) return reference.__getattribute__(name) def __call__(self, *args, **kwargs): reference = ThreadLocalProxy.get_reference(self) return reference(*args, **kwargs) def __len__(self): reference = ThreadLocalProxy.get_reference(self) return len(reference) def __getitem__(self, key): reference = ThreadLocalProxy.get_reference(self) return reference[key] def __setitem__(self, key, value): reference = ThreadLocalProxy.get_reference(self) reference[key] = value def __delitem__(self, key): reference = ThreadLocalProxy.get_reference(self) del reference[key] def __iter__(self): reference = ThreadLocalProxy.get_reference(self) return reference.__iter__() def __reversed__(self): reference = ThreadLocalProxy.get_reference(self) return reversed(reference) def __contains__(self, item): reference = ThreadLocalProxy.get_reference(self) return item in reference def __add__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference + other def __sub__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference - other def __mul__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference * other def __floordiv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference // other def __mod__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference % 
other def __divmod__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return divmod(reference, other) def __pow__(self, other, modulo=None): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) modulo = ThreadLocalProxy.unproxy(modulo) if modulo is None: return pow(reference, other) else: return pow(reference, other, modulo) def __lshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference << other def __rshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference >> other def __and__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference & other def __xor__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference ^ other def __or__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference | other def __div__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__div__ except AttributeError: return NotImplemented return func(other) def __truediv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__truediv__ except AttributeError: return NotImplemented return func(other) def __radd__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other + reference def __rsub__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other - reference def __rmul__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other * reference def __rdiv__(self, other): 
reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__rdiv__ except AttributeError: return NotImplemented return func(other) def __rtruediv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__rtruediv__ except AttributeError: return NotImplemented return func(other) def __rfloordiv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other // reference def __rmod__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other % reference def __rdivmod__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return divmod(other, reference) def __rpow__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other ** reference def __rlshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other << reference def __rrshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other >> reference def __rand__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other & reference def __rxor__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other ^ reference def __ror__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other | reference def __iadd__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference += other ThreadLocalProxy.set_reference(self, reference) return reference def __isub__(self, other): reference = ThreadLocalProxy.get_reference(self) other = 
ThreadLocalProxy.unproxy(other) reference -= other ThreadLocalProxy.set_reference(self, reference) return reference def __imul__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference *= other ThreadLocalProxy.set_reference(self, reference) return reference def __idiv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__idiv__ except AttributeError: return NotImplemented reference = func(other) ThreadLocalProxy.set_reference(self, reference) return reference def __itruediv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__itruediv__ except AttributeError: return NotImplemented reference = func(other) ThreadLocalProxy.set_reference(self, reference) return reference def __ifloordiv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference //= other ThreadLocalProxy.set_reference(self, reference) return reference def __imod__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference %= other ThreadLocalProxy.set_reference(self, reference) return reference def __ipow__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference **= other ThreadLocalProxy.set_reference(self, reference) return reference def __ilshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference <<= other ThreadLocalProxy.set_reference(self, reference) return reference def __irshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference >>= other ThreadLocalProxy.set_reference(self, reference) return reference def __iand__(self, other): reference = ThreadLocalProxy.get_reference(self) other = 
ThreadLocalProxy.unproxy(other) reference &= other ThreadLocalProxy.set_reference(self, reference) return reference def __ixor__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference ^= other ThreadLocalProxy.set_reference(self, reference) return reference def __ior__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference |= other ThreadLocalProxy.set_reference(self, reference) return reference def __neg__(self): reference = ThreadLocalProxy.get_reference(self) return - reference def __pos__(self): reference = ThreadLocalProxy.get_reference(self) return + reference def __abs__(self): reference = ThreadLocalProxy.get_reference(self) return abs(reference) def __invert__(self): reference = ThreadLocalProxy.get_reference(self) return ~ reference def __complex__(self): reference = ThreadLocalProxy.get_reference(self) return complex(reference) def __int__(self): reference = ThreadLocalProxy.get_reference(self) return int(reference) def __float__(self): reference = ThreadLocalProxy.get_reference(self) return float(reference) def __oct__(self): reference = ThreadLocalProxy.get_reference(self) return oct(reference) def __hex__(self): reference = ThreadLocalProxy.get_reference(self) return hex(reference) def __index__(self): reference = ThreadLocalProxy.get_reference(self) try: func = reference.__index__ except AttributeError: return NotImplemented return func() def __coerce__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return coerce(reference, other) if six.PY2: # pylint: disable=incompatible-py3-code def __unicode__(self): reference = ThreadLocalProxy.get_reference(self) return unicode(reference) def __long__(self): reference = ThreadLocalProxy.get_reference(self) return long(reference)
saltstack/salt
salt/utils/thread_local_proxy.py
ThreadLocalProxy.unproxy
python
def unproxy(possible_proxy): ''' Unwrap and return the object referenced by a proxy. This function is very similar to :func:`get_reference`, but works for both proxies and regular objects. If the specified object is a proxy, its reference is extracted with ``get_reference`` and returned. If it is not a proxy, it is returned as is. If the object references by the proxy is itself a proxy, the unwrapping is repeated until a regular (non-proxy) object is found. possible_proxy: object that might or might not be a proxy. ''' while isinstance(possible_proxy, ThreadLocalProxy): possible_proxy = ThreadLocalProxy.get_reference(possible_proxy) return possible_proxy
Unwrap and return the object referenced by a proxy. This function is very similar to :func:`get_reference`, but works for both proxies and regular objects. If the specified object is a proxy, its reference is extracted with ``get_reference`` and returned. If it is not a proxy, it is returned as is. If the object references by the proxy is itself a proxy, the unwrapping is repeated until a regular (non-proxy) object is found. possible_proxy: object that might or might not be a proxy.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thread_local_proxy.py#L143-L160
null
class ThreadLocalProxy(object): ''' Proxy that delegates all operations to its referenced object. The referenced object is hold through a thread-local variable, so that this proxy may refer to different objects in different threads of execution. For all practical purposes (operators, attributes, `isinstance`), the proxy acts like the referenced object. Thus, code receiving the proxy object instead of the reference object typically does not have to be changed. The only exception is code that explicitly uses the ``type()`` function for checking the proxy's type. While `isinstance(proxy, ...)` will yield the expected results (based on the actual type of the referenced object), using something like ``issubclass(type(proxy), ...)`` will not work, because these tests will be made on the type of the proxy object instead of the type of the referenced object. In order to avoid this, such code must be changed to use ``issubclass(type(ThreadLocalProxy.unproxy(proxy)), ...)``. If an instance of this class is created with the ``fallback_to_shared`` flag set and a thread uses the instance without setting the reference explicitly, the reference for this thread is initialized with the latest reference set by any thread. This class has primarily been designed for use by the Salt loader, but it might also be useful in other places. ''' __slots__ = ['_thread_local', '_last_reference', '_fallback_to_shared'] @staticmethod def get_reference(proxy): ''' Return the object that is referenced by the specified proxy. If the proxy has not been bound to a reference for the current thread, the behavior depends on th the ``fallback_to_shared`` flag that has been specified when creating the proxy. If the flag has been set, the last reference that has been set by any thread is returned (and silently set as the reference for the current thread). If the flag has not been set, an ``AttributeError`` is raised. If the object references by this proxy is itself a proxy, that proxy is returned. 
Use ``unproxy`` for unwrapping the referenced object until it is not a proxy. proxy: proxy object for which the reference shall be returned. If the specified object is not an instance of `ThreadLocalProxy`, the behavior is unspecified. Typically, an ``AttributeError`` is going to be raised. ''' thread_local = object.__getattribute__(proxy, '_thread_local') try: return thread_local.reference except AttributeError: fallback_to_shared = object.__getattribute__( proxy, '_fallback_to_shared') if fallback_to_shared: # If the reference has never been set in the current thread of # execution, we use the reference that has been last set by any # thread. reference = object.__getattribute__(proxy, '_last_reference') # We save the reference in the thread local so that future # calls to get_reference will have consistent results. ThreadLocalProxy.set_reference(proxy, reference) return reference else: # We could simply return None, but this would make it hard to # debug situations where the reference has not been set (the # problem might go unnoticed until some code tries to do # something with the returned object and it might not be easy to # find out from where the None value originates). # For this reason, we raise an AttributeError with an error # message explaining the problem. raise AttributeError( 'The proxy object has not been bound to a reference in this thread of execution.') @staticmethod def set_reference(proxy, new_reference): ''' Set the reference to be used the current thread of execution. After calling this function, the specified proxy will act like it was the referenced object. proxy: proxy object for which the reference shall be set. If the specified object is not an instance of `ThreadLocalProxy`, the behavior is unspecified. Typically, an ``AttributeError`` is going to be raised. new_reference: reference the proxy should point to for the current thread after calling this function. 
''' # If the new reference is itself a proxy, we have to ensure that it does # not refer to this proxy. If it does, we simply return because updating # the reference would result in an inifite loop when trying to use the # proxy. possible_proxy = new_reference while isinstance(possible_proxy, ThreadLocalProxy): if possible_proxy is proxy: return possible_proxy = ThreadLocalProxy.get_reference(possible_proxy) thread_local = object.__getattribute__(proxy, '_thread_local') thread_local.reference = new_reference object.__setattr__(proxy, '_last_reference', new_reference) @staticmethod def unset_reference(proxy): ''' Unset the reference to be used by the current thread of execution. After calling this function, the specified proxy will act like the reference had never been set for the current thread. proxy: proxy object for which the reference shall be unset. If the specified object is not an instance of `ThreadLocalProxy`, the behavior is unspecified. Typically, an ``AttributeError`` is going to be raised. ''' thread_local = object.__getattribute__(proxy, '_thread_local') del thread_local.reference @staticmethod def __init__(self, initial_reference, fallback_to_shared=False): ''' Create a proxy object that references the specified object. initial_reference: object this proxy should initially reference (for the current thread of execution). The :func:`set_reference` function is called for the newly created proxy, passing this object. fallback_to_shared: flag indicating what should happen when the proxy is used in a thread where the reference has not been set explicitly. If ``True``, the thread's reference is silently initialized to use the reference last set by any thread. If ``False`` (the default), an exception is raised when the proxy is used in a thread without first initializing the reference in this thread. 
''' object.__setattr__(self, '_thread_local', threading.local()) object.__setattr__(self, '_fallback_to_shared', fallback_to_shared) ThreadLocalProxy.set_reference(self, initial_reference) def __repr__(self): reference = ThreadLocalProxy.get_reference(self) return repr(reference) def __str__(self): reference = ThreadLocalProxy.get_reference(self) return str(reference) def __lt__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference < other def __le__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference <= other def __eq__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference == other def __ne__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference != other def __gt__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference > other def __ge__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference >= other def __hash__(self): reference = ThreadLocalProxy.get_reference(self) return hash(reference) def __nonzero__(self): reference = ThreadLocalProxy.get_reference(self) return bool(reference) def __getattr__(self, name): reference = ThreadLocalProxy.get_reference(self) # Old-style classes might not have a __getattr__ method, but using # getattr(...) will still work. 
try: original_method = reference.__getattr__ except AttributeError: return getattr(reference, name) return reference.__getattr__(name) def __setattr__(self, name, value): reference = ThreadLocalProxy.get_reference(self) reference.__setattr__(name, value) def __delattr__(self, name): reference = ThreadLocalProxy.get_reference(self) reference.__delattr__(name) def __getattribute__(self, name): reference = ThreadLocalProxy.get_reference(self) return reference.__getattribute__(name) def __call__(self, *args, **kwargs): reference = ThreadLocalProxy.get_reference(self) return reference(*args, **kwargs) def __len__(self): reference = ThreadLocalProxy.get_reference(self) return len(reference) def __getitem__(self, key): reference = ThreadLocalProxy.get_reference(self) return reference[key] def __setitem__(self, key, value): reference = ThreadLocalProxy.get_reference(self) reference[key] = value def __delitem__(self, key): reference = ThreadLocalProxy.get_reference(self) del reference[key] def __iter__(self): reference = ThreadLocalProxy.get_reference(self) return reference.__iter__() def __reversed__(self): reference = ThreadLocalProxy.get_reference(self) return reversed(reference) def __contains__(self, item): reference = ThreadLocalProxy.get_reference(self) return item in reference def __add__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference + other def __sub__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference - other def __mul__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference * other def __floordiv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference // other def __mod__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference % 
other def __divmod__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return divmod(reference, other) def __pow__(self, other, modulo=None): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) modulo = ThreadLocalProxy.unproxy(modulo) if modulo is None: return pow(reference, other) else: return pow(reference, other, modulo) def __lshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference << other def __rshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference >> other def __and__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference & other def __xor__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference ^ other def __or__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return reference | other def __div__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__div__ except AttributeError: return NotImplemented return func(other) def __truediv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__truediv__ except AttributeError: return NotImplemented return func(other) def __radd__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other + reference def __rsub__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other - reference def __rmul__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other * reference def __rdiv__(self, other): 
reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__rdiv__ except AttributeError: return NotImplemented return func(other) def __rtruediv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__rtruediv__ except AttributeError: return NotImplemented return func(other) def __rfloordiv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other // reference def __rmod__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other % reference def __rdivmod__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return divmod(other, reference) def __rpow__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other ** reference def __rlshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other << reference def __rrshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other >> reference def __rand__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other & reference def __rxor__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other ^ reference def __ror__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return other | reference def __iadd__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference += other ThreadLocalProxy.set_reference(self, reference) return reference def __isub__(self, other): reference = ThreadLocalProxy.get_reference(self) other = 
ThreadLocalProxy.unproxy(other) reference -= other ThreadLocalProxy.set_reference(self, reference) return reference def __imul__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference *= other ThreadLocalProxy.set_reference(self, reference) return reference def __idiv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__idiv__ except AttributeError: return NotImplemented reference = func(other) ThreadLocalProxy.set_reference(self, reference) return reference def __itruediv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) try: func = reference.__itruediv__ except AttributeError: return NotImplemented reference = func(other) ThreadLocalProxy.set_reference(self, reference) return reference def __ifloordiv__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference //= other ThreadLocalProxy.set_reference(self, reference) return reference def __imod__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference %= other ThreadLocalProxy.set_reference(self, reference) return reference def __ipow__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference **= other ThreadLocalProxy.set_reference(self, reference) return reference def __ilshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference <<= other ThreadLocalProxy.set_reference(self, reference) return reference def __irshift__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference >>= other ThreadLocalProxy.set_reference(self, reference) return reference def __iand__(self, other): reference = ThreadLocalProxy.get_reference(self) other = 
ThreadLocalProxy.unproxy(other) reference &= other ThreadLocalProxy.set_reference(self, reference) return reference def __ixor__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference ^= other ThreadLocalProxy.set_reference(self, reference) return reference def __ior__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) reference |= other ThreadLocalProxy.set_reference(self, reference) return reference def __neg__(self): reference = ThreadLocalProxy.get_reference(self) return - reference def __pos__(self): reference = ThreadLocalProxy.get_reference(self) return + reference def __abs__(self): reference = ThreadLocalProxy.get_reference(self) return abs(reference) def __invert__(self): reference = ThreadLocalProxy.get_reference(self) return ~ reference def __complex__(self): reference = ThreadLocalProxy.get_reference(self) return complex(reference) def __int__(self): reference = ThreadLocalProxy.get_reference(self) return int(reference) def __float__(self): reference = ThreadLocalProxy.get_reference(self) return float(reference) def __oct__(self): reference = ThreadLocalProxy.get_reference(self) return oct(reference) def __hex__(self): reference = ThreadLocalProxy.get_reference(self) return hex(reference) def __index__(self): reference = ThreadLocalProxy.get_reference(self) try: func = reference.__index__ except AttributeError: return NotImplemented return func() def __coerce__(self, other): reference = ThreadLocalProxy.get_reference(self) other = ThreadLocalProxy.unproxy(other) return coerce(reference, other) if six.PY2: # pylint: disable=incompatible-py3-code def __unicode__(self): reference = ThreadLocalProxy.get_reference(self) return unicode(reference) def __long__(self): reference = ThreadLocalProxy.get_reference(self) return long(reference)
saltstack/salt
salt/modules/kmod.py
_new_mods
python
def _new_mods(pre_mods, post_mods): ''' Return a list of the new modules, pass an lsmod dict before running modprobe and one after modprobe has run ''' pre = set() post = set() for mod in pre_mods: pre.add(mod['module']) for mod in post_mods: post.add(mod['module']) return post - pre
Return a list of the new modules, pass an lsmod dict before running modprobe and one after modprobe has run
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kmod.py#L26-L37
null
# -*- coding: utf-8 -*- ''' Module to manage Linux kernel modules ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import os import re import logging # Import salt libs import salt.utils.files import salt.utils.path log = logging.getLogger(__name__) def __virtual__(): ''' Only runs on Linux systems ''' return __grains__['kernel'] == 'Linux' def _rm_mods(pre_mods, post_mods): ''' Return a list of the new modules, pass an lsmod dict before running modprobe and one after modprobe has run ''' pre = set() post = set() for mod in pre_mods: pre.add(mod['module']) for mod in post_mods: post.add(mod['module']) return pre - post def _get_modules_conf(): ''' Return location of modules config file. Default: /etc/modules ''' if 'systemd' in __grains__: return '/etc/modules-load.d/salt_managed.conf' return '/etc/modules' def _strip_module_name(mod): ''' Return module name and strip configuration. It is possible insert modules in this format: bonding mode=4 miimon=1000 This method return only 'bonding' ''' if mod.strip() == '': return False return mod.split()[0] def _set_persistent_module(mod): ''' Add module to configuration file to make it persistent. If module is commented uncomment it. ''' conf = _get_modules_conf() if not os.path.exists(conf): __salt__['file.touch'](conf) mod_name = _strip_module_name(mod) if not mod_name or mod_name in mod_list(True) or mod_name \ not in available(): return set() escape_mod = re.escape(mod) # If module is commented only uncomment it if __salt__['file.search'](conf, '^#[\t ]*{0}[\t ]*$'.format(escape_mod), multiline=True): __salt__['file.uncomment'](conf, escape_mod) else: __salt__['file.append'](conf, mod) return set([mod_name]) def _remove_persistent_module(mod, comment): ''' Remove module from configuration file. If comment is true only comment line where module is. 
''' conf = _get_modules_conf() mod_name = _strip_module_name(mod) if not mod_name or mod_name not in mod_list(True): return set() escape_mod = re.escape(mod) if comment: __salt__['file.comment'](conf, '^[\t ]*{0}[\t ]?'.format(escape_mod)) else: __salt__['file.sed'](conf, '^[\t ]*{0}[\t ]?'.format(escape_mod), '') return set([mod_name]) def available(): ''' Return a list of all available kernel modules CLI Example: .. code-block:: bash salt '*' kmod.available ''' ret = [] mod_dir = os.path.join('/lib/modules/', os.uname()[2]) built_in_file = os.path.join(mod_dir, 'modules.builtin') if os.path.exists(built_in_file): with salt.utils.files.fopen(built_in_file, 'r') as f: for line in f: # Strip .ko from the basename ret.append(os.path.basename(line)[:-4]) for root, dirs, files in salt.utils.path.os_walk(mod_dir): for fn_ in files: if '.ko' in fn_: ret.append(fn_[:fn_.index('.ko')].replace('-', '_')) if 'Arch' in __grains__['os_family']: # Sadly this path is relative to kernel major version but ignores minor version mod_dir_arch = '/lib/modules/extramodules-' + os.uname()[2][0:3] + '-ARCH' for root, dirs, files in salt.utils.path.os_walk(mod_dir_arch): for fn_ in files: if '.ko' in fn_: ret.append(fn_[:fn_.index('.ko')].replace('-', '_')) return sorted(list(ret)) def check_available(mod): ''' Check to see if the specified kernel module is available CLI Example: .. code-block:: bash salt '*' kmod.check_available kvm ''' return mod in available() def lsmod(): ''' Return a dict containing information about currently loaded modules CLI Example: .. 
code-block:: bash salt '*' kmod.lsmod ''' ret = [] for line in __salt__['cmd.run']('lsmod').splitlines(): comps = line.split() if not len(comps) > 2: continue if comps[0] == 'Module': continue mdat = { 'size': comps[1], 'module': comps[0], 'depcount': comps[2], } if len(comps) > 3: mdat['deps'] = comps[3].split(',') else: mdat['deps'] = [] ret.append(mdat) return ret def mod_list(only_persist=False): ''' Return a list of the loaded module names only_persist Only return the list of loaded persistent modules CLI Example: .. code-block:: bash salt '*' kmod.mod_list ''' mods = set() if only_persist: conf = _get_modules_conf() if os.path.exists(conf): try: with salt.utils.files.fopen(conf, 'r') as modules_file: for line in modules_file: line = line.strip() mod_name = _strip_module_name(line) if not line.startswith('#') and mod_name: mods.add(mod_name) except IOError: log.error('kmod module could not open modules file at %s', conf) else: for mod in lsmod(): mods.add(mod['module']) return sorted(list(mods)) def load(mod, persist=False): ''' Load the specified kernel module mod Name of module to add persist Write module to /etc/modules to make it load on system reboot CLI Example: .. code-block:: bash salt '*' kmod.load kvm ''' pre_mods = lsmod() res = __salt__['cmd.run_all']('modprobe {0}'.format(mod), python_shell=False) if res['retcode'] == 0: post_mods = lsmod() mods = _new_mods(pre_mods, post_mods) persist_mods = set() if persist: persist_mods = _set_persistent_module(mod) return sorted(list(mods | persist_mods)) else: return 'Error loading module {0}: {1}'.format(mod, res['stderr']) def is_loaded(mod): ''' Check to see if the specified kernel module is loaded CLI Example: .. 
code-block:: bash salt '*' kmod.is_loaded kvm ''' return mod in mod_list() def remove(mod, persist=False, comment=True): ''' Remove the specified kernel module mod Name of module to remove persist Also remove module from /etc/modules comment If persist is set don't remove line from /etc/modules but only comment it CLI Example: .. code-block:: bash salt '*' kmod.remove kvm ''' pre_mods = lsmod() res = __salt__['cmd.run_all']('rmmod {0}'.format(mod), python_shell=False) if res['retcode'] == 0: post_mods = lsmod() mods = _rm_mods(pre_mods, post_mods) persist_mods = set() if persist: persist_mods = _remove_persistent_module(mod, comment) return sorted(list(mods | persist_mods)) else: return 'Error removing module {0}: {1}'.format(mod, res['stderr'])
saltstack/salt
salt/modules/kmod.py
_rm_mods
python
def _rm_mods(pre_mods, post_mods): ''' Return a list of the new modules, pass an lsmod dict before running modprobe and one after modprobe has run ''' pre = set() post = set() for mod in pre_mods: pre.add(mod['module']) for mod in post_mods: post.add(mod['module']) return pre - post
Return a list of the removed modules, pass an lsmod dict before running rmmod and one after rmmod has run
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kmod.py#L40-L51
null
# -*- coding: utf-8 -*- ''' Module to manage Linux kernel modules ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import os import re import logging # Import salt libs import salt.utils.files import salt.utils.path log = logging.getLogger(__name__) def __virtual__(): ''' Only runs on Linux systems ''' return __grains__['kernel'] == 'Linux' def _new_mods(pre_mods, post_mods): ''' Return a list of the new modules, pass an lsmod dict before running modprobe and one after modprobe has run ''' pre = set() post = set() for mod in pre_mods: pre.add(mod['module']) for mod in post_mods: post.add(mod['module']) return post - pre def _get_modules_conf(): ''' Return location of modules config file. Default: /etc/modules ''' if 'systemd' in __grains__: return '/etc/modules-load.d/salt_managed.conf' return '/etc/modules' def _strip_module_name(mod): ''' Return module name and strip configuration. It is possible insert modules in this format: bonding mode=4 miimon=1000 This method return only 'bonding' ''' if mod.strip() == '': return False return mod.split()[0] def _set_persistent_module(mod): ''' Add module to configuration file to make it persistent. If module is commented uncomment it. ''' conf = _get_modules_conf() if not os.path.exists(conf): __salt__['file.touch'](conf) mod_name = _strip_module_name(mod) if not mod_name or mod_name in mod_list(True) or mod_name \ not in available(): return set() escape_mod = re.escape(mod) # If module is commented only uncomment it if __salt__['file.search'](conf, '^#[\t ]*{0}[\t ]*$'.format(escape_mod), multiline=True): __salt__['file.uncomment'](conf, escape_mod) else: __salt__['file.append'](conf, mod) return set([mod_name]) def _remove_persistent_module(mod, comment): ''' Remove module from configuration file. If comment is true only comment line where module is. 
''' conf = _get_modules_conf() mod_name = _strip_module_name(mod) if not mod_name or mod_name not in mod_list(True): return set() escape_mod = re.escape(mod) if comment: __salt__['file.comment'](conf, '^[\t ]*{0}[\t ]?'.format(escape_mod)) else: __salt__['file.sed'](conf, '^[\t ]*{0}[\t ]?'.format(escape_mod), '') return set([mod_name]) def available(): ''' Return a list of all available kernel modules CLI Example: .. code-block:: bash salt '*' kmod.available ''' ret = [] mod_dir = os.path.join('/lib/modules/', os.uname()[2]) built_in_file = os.path.join(mod_dir, 'modules.builtin') if os.path.exists(built_in_file): with salt.utils.files.fopen(built_in_file, 'r') as f: for line in f: # Strip .ko from the basename ret.append(os.path.basename(line)[:-4]) for root, dirs, files in salt.utils.path.os_walk(mod_dir): for fn_ in files: if '.ko' in fn_: ret.append(fn_[:fn_.index('.ko')].replace('-', '_')) if 'Arch' in __grains__['os_family']: # Sadly this path is relative to kernel major version but ignores minor version mod_dir_arch = '/lib/modules/extramodules-' + os.uname()[2][0:3] + '-ARCH' for root, dirs, files in salt.utils.path.os_walk(mod_dir_arch): for fn_ in files: if '.ko' in fn_: ret.append(fn_[:fn_.index('.ko')].replace('-', '_')) return sorted(list(ret)) def check_available(mod): ''' Check to see if the specified kernel module is available CLI Example: .. code-block:: bash salt '*' kmod.check_available kvm ''' return mod in available() def lsmod(): ''' Return a dict containing information about currently loaded modules CLI Example: .. 
code-block:: bash salt '*' kmod.lsmod ''' ret = [] for line in __salt__['cmd.run']('lsmod').splitlines(): comps = line.split() if not len(comps) > 2: continue if comps[0] == 'Module': continue mdat = { 'size': comps[1], 'module': comps[0], 'depcount': comps[2], } if len(comps) > 3: mdat['deps'] = comps[3].split(',') else: mdat['deps'] = [] ret.append(mdat) return ret def mod_list(only_persist=False): ''' Return a list of the loaded module names only_persist Only return the list of loaded persistent modules CLI Example: .. code-block:: bash salt '*' kmod.mod_list ''' mods = set() if only_persist: conf = _get_modules_conf() if os.path.exists(conf): try: with salt.utils.files.fopen(conf, 'r') as modules_file: for line in modules_file: line = line.strip() mod_name = _strip_module_name(line) if not line.startswith('#') and mod_name: mods.add(mod_name) except IOError: log.error('kmod module could not open modules file at %s', conf) else: for mod in lsmod(): mods.add(mod['module']) return sorted(list(mods)) def load(mod, persist=False): ''' Load the specified kernel module mod Name of module to add persist Write module to /etc/modules to make it load on system reboot CLI Example: .. code-block:: bash salt '*' kmod.load kvm ''' pre_mods = lsmod() res = __salt__['cmd.run_all']('modprobe {0}'.format(mod), python_shell=False) if res['retcode'] == 0: post_mods = lsmod() mods = _new_mods(pre_mods, post_mods) persist_mods = set() if persist: persist_mods = _set_persistent_module(mod) return sorted(list(mods | persist_mods)) else: return 'Error loading module {0}: {1}'.format(mod, res['stderr']) def is_loaded(mod): ''' Check to see if the specified kernel module is loaded CLI Example: .. 
code-block:: bash salt '*' kmod.is_loaded kvm ''' return mod in mod_list() def remove(mod, persist=False, comment=True): ''' Remove the specified kernel module mod Name of module to remove persist Also remove module from /etc/modules comment If persist is set don't remove line from /etc/modules but only comment it CLI Example: .. code-block:: bash salt '*' kmod.remove kvm ''' pre_mods = lsmod() res = __salt__['cmd.run_all']('rmmod {0}'.format(mod), python_shell=False) if res['retcode'] == 0: post_mods = lsmod() mods = _rm_mods(pre_mods, post_mods) persist_mods = set() if persist: persist_mods = _remove_persistent_module(mod, comment) return sorted(list(mods | persist_mods)) else: return 'Error removing module {0}: {1}'.format(mod, res['stderr'])
saltstack/salt
salt/modules/kmod.py
_set_persistent_module
python
def _set_persistent_module(mod):
    '''
    Add module to configuration file to make it persistent. If module
    is commented uncomment it.
    '''
    conf = _get_modules_conf()
    if not os.path.exists(conf):
        __salt__['file.touch'](conf)
    mod_name = _strip_module_name(mod)
    # Guard clauses: nothing to do for empty input, for modules already
    # persisted, or for modules the kernel does not provide at all.
    if not mod_name:
        return set()
    if mod_name in mod_list(True):
        return set()
    if mod_name not in available():
        return set()
    escaped = re.escape(mod)
    already_commented = __salt__['file.search'](conf,
                                                '^#[\t ]*{0}[\t ]*$'.format(escaped),
                                                multiline=True)
    if already_commented:
        # The entry exists but is commented out - just re-enable it
        __salt__['file.uncomment'](conf, escaped)
    else:
        __salt__['file.append'](conf, mod)
    return set([mod_name])
Add module to configuration file to make it persistent. If module is commented uncomment it.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kmod.py#L76-L96
null
# -*- coding: utf-8 -*- ''' Module to manage Linux kernel modules ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import os import re import logging # Import salt libs import salt.utils.files import salt.utils.path log = logging.getLogger(__name__) def __virtual__(): ''' Only runs on Linux systems ''' return __grains__['kernel'] == 'Linux' def _new_mods(pre_mods, post_mods): ''' Return a list of the new modules, pass an lsmod dict before running modprobe and one after modprobe has run ''' pre = set() post = set() for mod in pre_mods: pre.add(mod['module']) for mod in post_mods: post.add(mod['module']) return post - pre def _rm_mods(pre_mods, post_mods): ''' Return a list of the new modules, pass an lsmod dict before running modprobe and one after modprobe has run ''' pre = set() post = set() for mod in pre_mods: pre.add(mod['module']) for mod in post_mods: post.add(mod['module']) return pre - post def _get_modules_conf(): ''' Return location of modules config file. Default: /etc/modules ''' if 'systemd' in __grains__: return '/etc/modules-load.d/salt_managed.conf' return '/etc/modules' def _strip_module_name(mod): ''' Return module name and strip configuration. It is possible insert modules in this format: bonding mode=4 miimon=1000 This method return only 'bonding' ''' if mod.strip() == '': return False return mod.split()[0] def _remove_persistent_module(mod, comment): ''' Remove module from configuration file. If comment is true only comment line where module is. ''' conf = _get_modules_conf() mod_name = _strip_module_name(mod) if not mod_name or mod_name not in mod_list(True): return set() escape_mod = re.escape(mod) if comment: __salt__['file.comment'](conf, '^[\t ]*{0}[\t ]?'.format(escape_mod)) else: __salt__['file.sed'](conf, '^[\t ]*{0}[\t ]?'.format(escape_mod), '') return set([mod_name]) def available(): ''' Return a list of all available kernel modules CLI Example: .. 
code-block:: bash salt '*' kmod.available ''' ret = [] mod_dir = os.path.join('/lib/modules/', os.uname()[2]) built_in_file = os.path.join(mod_dir, 'modules.builtin') if os.path.exists(built_in_file): with salt.utils.files.fopen(built_in_file, 'r') as f: for line in f: # Strip .ko from the basename ret.append(os.path.basename(line)[:-4]) for root, dirs, files in salt.utils.path.os_walk(mod_dir): for fn_ in files: if '.ko' in fn_: ret.append(fn_[:fn_.index('.ko')].replace('-', '_')) if 'Arch' in __grains__['os_family']: # Sadly this path is relative to kernel major version but ignores minor version mod_dir_arch = '/lib/modules/extramodules-' + os.uname()[2][0:3] + '-ARCH' for root, dirs, files in salt.utils.path.os_walk(mod_dir_arch): for fn_ in files: if '.ko' in fn_: ret.append(fn_[:fn_.index('.ko')].replace('-', '_')) return sorted(list(ret)) def check_available(mod): ''' Check to see if the specified kernel module is available CLI Example: .. code-block:: bash salt '*' kmod.check_available kvm ''' return mod in available() def lsmod(): ''' Return a dict containing information about currently loaded modules CLI Example: .. code-block:: bash salt '*' kmod.lsmod ''' ret = [] for line in __salt__['cmd.run']('lsmod').splitlines(): comps = line.split() if not len(comps) > 2: continue if comps[0] == 'Module': continue mdat = { 'size': comps[1], 'module': comps[0], 'depcount': comps[2], } if len(comps) > 3: mdat['deps'] = comps[3].split(',') else: mdat['deps'] = [] ret.append(mdat) return ret def mod_list(only_persist=False): ''' Return a list of the loaded module names only_persist Only return the list of loaded persistent modules CLI Example: .. 
code-block:: bash salt '*' kmod.mod_list ''' mods = set() if only_persist: conf = _get_modules_conf() if os.path.exists(conf): try: with salt.utils.files.fopen(conf, 'r') as modules_file: for line in modules_file: line = line.strip() mod_name = _strip_module_name(line) if not line.startswith('#') and mod_name: mods.add(mod_name) except IOError: log.error('kmod module could not open modules file at %s', conf) else: for mod in lsmod(): mods.add(mod['module']) return sorted(list(mods)) def load(mod, persist=False): ''' Load the specified kernel module mod Name of module to add persist Write module to /etc/modules to make it load on system reboot CLI Example: .. code-block:: bash salt '*' kmod.load kvm ''' pre_mods = lsmod() res = __salt__['cmd.run_all']('modprobe {0}'.format(mod), python_shell=False) if res['retcode'] == 0: post_mods = lsmod() mods = _new_mods(pre_mods, post_mods) persist_mods = set() if persist: persist_mods = _set_persistent_module(mod) return sorted(list(mods | persist_mods)) else: return 'Error loading module {0}: {1}'.format(mod, res['stderr']) def is_loaded(mod): ''' Check to see if the specified kernel module is loaded CLI Example: .. code-block:: bash salt '*' kmod.is_loaded kvm ''' return mod in mod_list() def remove(mod, persist=False, comment=True): ''' Remove the specified kernel module mod Name of module to remove persist Also remove module from /etc/modules comment If persist is set don't remove line from /etc/modules but only comment it CLI Example: .. code-block:: bash salt '*' kmod.remove kvm ''' pre_mods = lsmod() res = __salt__['cmd.run_all']('rmmod {0}'.format(mod), python_shell=False) if res['retcode'] == 0: post_mods = lsmod() mods = _rm_mods(pre_mods, post_mods) persist_mods = set() if persist: persist_mods = _remove_persistent_module(mod, comment) return sorted(list(mods | persist_mods)) else: return 'Error removing module {0}: {1}'.format(mod, res['stderr'])
saltstack/salt
salt/modules/kmod.py
_remove_persistent_module
python
def _remove_persistent_module(mod, comment):
    '''
    Remove module from configuration file. If comment is true only
    comment line where module is.
    '''
    conf = _get_modules_conf()
    mod_name = _strip_module_name(mod)
    # Nothing to remove if the name is empty or not currently persisted
    if not mod_name or mod_name not in mod_list(True):
        return set()
    line_pattern = '^[\t ]*{0}[\t ]?'.format(re.escape(mod))
    if comment:
        # Keep the line in the file, just disable it
        __salt__['file.comment'](conf, line_pattern)
    else:
        # Delete the line outright by replacing it with nothing
        __salt__['file.sed'](conf, line_pattern, '')
    return set([mod_name])
Remove module from configuration file. If comment is true only comment line where module is.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kmod.py#L99-L113
null
# -*- coding: utf-8 -*- ''' Module to manage Linux kernel modules ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import os import re import logging # Import salt libs import salt.utils.files import salt.utils.path log = logging.getLogger(__name__) def __virtual__(): ''' Only runs on Linux systems ''' return __grains__['kernel'] == 'Linux' def _new_mods(pre_mods, post_mods): ''' Return a list of the new modules, pass an lsmod dict before running modprobe and one after modprobe has run ''' pre = set() post = set() for mod in pre_mods: pre.add(mod['module']) for mod in post_mods: post.add(mod['module']) return post - pre def _rm_mods(pre_mods, post_mods): ''' Return a list of the new modules, pass an lsmod dict before running modprobe and one after modprobe has run ''' pre = set() post = set() for mod in pre_mods: pre.add(mod['module']) for mod in post_mods: post.add(mod['module']) return pre - post def _get_modules_conf(): ''' Return location of modules config file. Default: /etc/modules ''' if 'systemd' in __grains__: return '/etc/modules-load.d/salt_managed.conf' return '/etc/modules' def _strip_module_name(mod): ''' Return module name and strip configuration. It is possible insert modules in this format: bonding mode=4 miimon=1000 This method return only 'bonding' ''' if mod.strip() == '': return False return mod.split()[0] def _set_persistent_module(mod): ''' Add module to configuration file to make it persistent. If module is commented uncomment it. 
''' conf = _get_modules_conf() if not os.path.exists(conf): __salt__['file.touch'](conf) mod_name = _strip_module_name(mod) if not mod_name or mod_name in mod_list(True) or mod_name \ not in available(): return set() escape_mod = re.escape(mod) # If module is commented only uncomment it if __salt__['file.search'](conf, '^#[\t ]*{0}[\t ]*$'.format(escape_mod), multiline=True): __salt__['file.uncomment'](conf, escape_mod) else: __salt__['file.append'](conf, mod) return set([mod_name]) def available(): ''' Return a list of all available kernel modules CLI Example: .. code-block:: bash salt '*' kmod.available ''' ret = [] mod_dir = os.path.join('/lib/modules/', os.uname()[2]) built_in_file = os.path.join(mod_dir, 'modules.builtin') if os.path.exists(built_in_file): with salt.utils.files.fopen(built_in_file, 'r') as f: for line in f: # Strip .ko from the basename ret.append(os.path.basename(line)[:-4]) for root, dirs, files in salt.utils.path.os_walk(mod_dir): for fn_ in files: if '.ko' in fn_: ret.append(fn_[:fn_.index('.ko')].replace('-', '_')) if 'Arch' in __grains__['os_family']: # Sadly this path is relative to kernel major version but ignores minor version mod_dir_arch = '/lib/modules/extramodules-' + os.uname()[2][0:3] + '-ARCH' for root, dirs, files in salt.utils.path.os_walk(mod_dir_arch): for fn_ in files: if '.ko' in fn_: ret.append(fn_[:fn_.index('.ko')].replace('-', '_')) return sorted(list(ret)) def check_available(mod): ''' Check to see if the specified kernel module is available CLI Example: .. code-block:: bash salt '*' kmod.check_available kvm ''' return mod in available() def lsmod(): ''' Return a dict containing information about currently loaded modules CLI Example: .. 
code-block:: bash salt '*' kmod.lsmod ''' ret = [] for line in __salt__['cmd.run']('lsmod').splitlines(): comps = line.split() if not len(comps) > 2: continue if comps[0] == 'Module': continue mdat = { 'size': comps[1], 'module': comps[0], 'depcount': comps[2], } if len(comps) > 3: mdat['deps'] = comps[3].split(',') else: mdat['deps'] = [] ret.append(mdat) return ret def mod_list(only_persist=False): ''' Return a list of the loaded module names only_persist Only return the list of loaded persistent modules CLI Example: .. code-block:: bash salt '*' kmod.mod_list ''' mods = set() if only_persist: conf = _get_modules_conf() if os.path.exists(conf): try: with salt.utils.files.fopen(conf, 'r') as modules_file: for line in modules_file: line = line.strip() mod_name = _strip_module_name(line) if not line.startswith('#') and mod_name: mods.add(mod_name) except IOError: log.error('kmod module could not open modules file at %s', conf) else: for mod in lsmod(): mods.add(mod['module']) return sorted(list(mods)) def load(mod, persist=False): ''' Load the specified kernel module mod Name of module to add persist Write module to /etc/modules to make it load on system reboot CLI Example: .. code-block:: bash salt '*' kmod.load kvm ''' pre_mods = lsmod() res = __salt__['cmd.run_all']('modprobe {0}'.format(mod), python_shell=False) if res['retcode'] == 0: post_mods = lsmod() mods = _new_mods(pre_mods, post_mods) persist_mods = set() if persist: persist_mods = _set_persistent_module(mod) return sorted(list(mods | persist_mods)) else: return 'Error loading module {0}: {1}'.format(mod, res['stderr']) def is_loaded(mod): ''' Check to see if the specified kernel module is loaded CLI Example: .. 
code-block:: bash salt '*' kmod.is_loaded kvm ''' return mod in mod_list() def remove(mod, persist=False, comment=True): ''' Remove the specified kernel module mod Name of module to remove persist Also remove module from /etc/modules comment If persist is set don't remove line from /etc/modules but only comment it CLI Example: .. code-block:: bash salt '*' kmod.remove kvm ''' pre_mods = lsmod() res = __salt__['cmd.run_all']('rmmod {0}'.format(mod), python_shell=False) if res['retcode'] == 0: post_mods = lsmod() mods = _rm_mods(pre_mods, post_mods) persist_mods = set() if persist: persist_mods = _remove_persistent_module(mod, comment) return sorted(list(mods | persist_mods)) else: return 'Error removing module {0}: {1}'.format(mod, res['stderr'])
saltstack/salt
salt/modules/kmod.py
available
python
def available():
    '''
    Return a list of all available kernel modules

    CLI Example:

    .. code-block:: bash

        salt '*' kmod.available
    '''
    ret = []
    # Modules live under /lib/modules/<kernel release>
    mod_dir = os.path.join('/lib/modules/', os.uname()[2])

    # Built-in modules are listed one path per line in modules.builtin
    built_in_file = os.path.join(mod_dir, 'modules.builtin')
    if os.path.exists(built_in_file):
        with salt.utils.files.fopen(built_in_file, 'r') as f:
            for line in f:
                # Strip .ko from the basename
                # NOTE(review): [:-4] drops '.ko' plus the trailing newline;
                # unlike the .ko scan below, builtin names are NOT
                # '-' -> '_' normalized — confirm whether that is intended
                ret.append(os.path.basename(line)[:-4])

    # Walk the module tree for loadable .ko files; truncate at the first
    # '.ko' occurrence (also handles compressed names like .ko.xz) and
    # normalize dashes to underscores as modprobe does
    for root, dirs, files in salt.utils.path.os_walk(mod_dir):
        for fn_ in files:
            if '.ko' in fn_:
                ret.append(fn_[:fn_.index('.ko')].replace('-', '_'))

    if 'Arch' in __grains__['os_family']:
        # Sadly this path is relative to kernel major version but ignores minor version
        mod_dir_arch = '/lib/modules/extramodules-' + os.uname()[2][0:3] + '-ARCH'
        for root, dirs, files in salt.utils.path.os_walk(mod_dir_arch):
            for fn_ in files:
                if '.ko' in fn_:
                    ret.append(fn_[:fn_.index('.ko')].replace('-', '_'))

    return sorted(list(ret))
Return a list of all available kernel modules CLI Example: .. code-block:: bash salt '*' kmod.available
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kmod.py#L116-L150
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def os_walk(top, *args, **kwargs):\n '''\n This is a helper than ensures that all paths returned from os.walk are\n unicode.\n '''\n if six.PY2 and salt.utils.platform.is_windows():\n top_query = top\n else:\n top_query = salt.utils.stringutils.to_str(top)\n for item in os.walk(top_query, *args, **kwargs):\n yield salt.utils.data.decode(item, preserve_tuples=True)\n" ]
# -*- coding: utf-8 -*- ''' Module to manage Linux kernel modules ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import os import re import logging # Import salt libs import salt.utils.files import salt.utils.path log = logging.getLogger(__name__) def __virtual__(): ''' Only runs on Linux systems ''' return __grains__['kernel'] == 'Linux' def _new_mods(pre_mods, post_mods): ''' Return a list of the new modules, pass an lsmod dict before running modprobe and one after modprobe has run ''' pre = set() post = set() for mod in pre_mods: pre.add(mod['module']) for mod in post_mods: post.add(mod['module']) return post - pre def _rm_mods(pre_mods, post_mods): ''' Return a list of the new modules, pass an lsmod dict before running modprobe and one after modprobe has run ''' pre = set() post = set() for mod in pre_mods: pre.add(mod['module']) for mod in post_mods: post.add(mod['module']) return pre - post def _get_modules_conf(): ''' Return location of modules config file. Default: /etc/modules ''' if 'systemd' in __grains__: return '/etc/modules-load.d/salt_managed.conf' return '/etc/modules' def _strip_module_name(mod): ''' Return module name and strip configuration. It is possible insert modules in this format: bonding mode=4 miimon=1000 This method return only 'bonding' ''' if mod.strip() == '': return False return mod.split()[0] def _set_persistent_module(mod): ''' Add module to configuration file to make it persistent. If module is commented uncomment it. 
''' conf = _get_modules_conf() if not os.path.exists(conf): __salt__['file.touch'](conf) mod_name = _strip_module_name(mod) if not mod_name or mod_name in mod_list(True) or mod_name \ not in available(): return set() escape_mod = re.escape(mod) # If module is commented only uncomment it if __salt__['file.search'](conf, '^#[\t ]*{0}[\t ]*$'.format(escape_mod), multiline=True): __salt__['file.uncomment'](conf, escape_mod) else: __salt__['file.append'](conf, mod) return set([mod_name]) def _remove_persistent_module(mod, comment): ''' Remove module from configuration file. If comment is true only comment line where module is. ''' conf = _get_modules_conf() mod_name = _strip_module_name(mod) if not mod_name or mod_name not in mod_list(True): return set() escape_mod = re.escape(mod) if comment: __salt__['file.comment'](conf, '^[\t ]*{0}[\t ]?'.format(escape_mod)) else: __salt__['file.sed'](conf, '^[\t ]*{0}[\t ]?'.format(escape_mod), '') return set([mod_name]) def check_available(mod): ''' Check to see if the specified kernel module is available CLI Example: .. code-block:: bash salt '*' kmod.check_available kvm ''' return mod in available() def lsmod(): ''' Return a dict containing information about currently loaded modules CLI Example: .. code-block:: bash salt '*' kmod.lsmod ''' ret = [] for line in __salt__['cmd.run']('lsmod').splitlines(): comps = line.split() if not len(comps) > 2: continue if comps[0] == 'Module': continue mdat = { 'size': comps[1], 'module': comps[0], 'depcount': comps[2], } if len(comps) > 3: mdat['deps'] = comps[3].split(',') else: mdat['deps'] = [] ret.append(mdat) return ret def mod_list(only_persist=False): ''' Return a list of the loaded module names only_persist Only return the list of loaded persistent modules CLI Example: .. 
code-block:: bash salt '*' kmod.mod_list ''' mods = set() if only_persist: conf = _get_modules_conf() if os.path.exists(conf): try: with salt.utils.files.fopen(conf, 'r') as modules_file: for line in modules_file: line = line.strip() mod_name = _strip_module_name(line) if not line.startswith('#') and mod_name: mods.add(mod_name) except IOError: log.error('kmod module could not open modules file at %s', conf) else: for mod in lsmod(): mods.add(mod['module']) return sorted(list(mods)) def load(mod, persist=False): ''' Load the specified kernel module mod Name of module to add persist Write module to /etc/modules to make it load on system reboot CLI Example: .. code-block:: bash salt '*' kmod.load kvm ''' pre_mods = lsmod() res = __salt__['cmd.run_all']('modprobe {0}'.format(mod), python_shell=False) if res['retcode'] == 0: post_mods = lsmod() mods = _new_mods(pre_mods, post_mods) persist_mods = set() if persist: persist_mods = _set_persistent_module(mod) return sorted(list(mods | persist_mods)) else: return 'Error loading module {0}: {1}'.format(mod, res['stderr']) def is_loaded(mod): ''' Check to see if the specified kernel module is loaded CLI Example: .. code-block:: bash salt '*' kmod.is_loaded kvm ''' return mod in mod_list() def remove(mod, persist=False, comment=True): ''' Remove the specified kernel module mod Name of module to remove persist Also remove module from /etc/modules comment If persist is set don't remove line from /etc/modules but only comment it CLI Example: .. code-block:: bash salt '*' kmod.remove kvm ''' pre_mods = lsmod() res = __salt__['cmd.run_all']('rmmod {0}'.format(mod), python_shell=False) if res['retcode'] == 0: post_mods = lsmod() mods = _rm_mods(pre_mods, post_mods) persist_mods = set() if persist: persist_mods = _remove_persistent_module(mod, comment) return sorted(list(mods | persist_mods)) else: return 'Error removing module {0}: {1}'.format(mod, res['stderr'])