repo
stringlengths
7
55
path
stringlengths
4
223
func_name
stringlengths
1
134
original_string
stringlengths
75
104k
language
stringclasses
1 value
code
stringlengths
75
104k
code_tokens
listlengths
19
28.4k
docstring
stringlengths
1
46.9k
docstring_tokens
listlengths
1
1.97k
sha
stringlengths
40
40
url
stringlengths
87
315
partition
stringclasses
1 value
saltstack/salt
salt/states/docker_container.py
_resolve_image
def _resolve_image(ret, image, client_timeout): ''' Resolve the image ID and pull the image if necessary ''' image_id = __salt__['docker.resolve_image_id'](image) if image_id is False: if not __opts__['test']: # Image not pulled locally, so try pulling it try: pull_result = __salt__['docker.pull']( image, client_timeout=client_timeout, ) except Exception as exc: raise CommandExecutionError( 'Failed to pull {0}: {1}'.format(image, exc) ) else: ret['changes']['image'] = pull_result # Try resolving again now that we've pulled image_id = __salt__['docker.resolve_image_id'](image) if image_id is False: # Shouldn't happen unless the pull failed raise CommandExecutionError( 'Image \'{0}\' not present despite a docker pull ' 'raising no errors'.format(image) ) return image_id
python
def _resolve_image(ret, image, client_timeout): ''' Resolve the image ID and pull the image if necessary ''' image_id = __salt__['docker.resolve_image_id'](image) if image_id is False: if not __opts__['test']: # Image not pulled locally, so try pulling it try: pull_result = __salt__['docker.pull']( image, client_timeout=client_timeout, ) except Exception as exc: raise CommandExecutionError( 'Failed to pull {0}: {1}'.format(image, exc) ) else: ret['changes']['image'] = pull_result # Try resolving again now that we've pulled image_id = __salt__['docker.resolve_image_id'](image) if image_id is False: # Shouldn't happen unless the pull failed raise CommandExecutionError( 'Image \'{0}\' not present despite a docker pull ' 'raising no errors'.format(image) ) return image_id
[ "def", "_resolve_image", "(", "ret", ",", "image", ",", "client_timeout", ")", ":", "image_id", "=", "__salt__", "[", "'docker.resolve_image_id'", "]", "(", "image", ")", "if", "image_id", "is", "False", ":", "if", "not", "__opts__", "[", "'test'", "]", ":...
Resolve the image ID and pull the image if necessary
[ "Resolve", "the", "image", "ID", "and", "pull", "the", "image", "if", "necessary" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/docker_container.py#L189-L217
train
saltstack/salt
salt/states/docker_container.py
running
def running(name, image=None, skip_translate=None, ignore_collisions=False, validate_ip_addrs=True, force=False, watch_action='force', start=True, shutdown_timeout=None, client_timeout=salt.utils.docker.CLIENT_TIMEOUT, networks=None, **kwargs): ''' Ensure that a container with a specific configuration is present and running name Name of the container image Image to use for the container .. note:: This state will pull the image if it is not present. However, if the image needs to be built from a Dockerfile or loaded from a saved image, or if you would like to use requisites to trigger a replacement of the container when the image is updated, then the :py:func:`docker_image.present <salt.states.dockermod.image_present>` state should be used to manage the image. .. versionchanged:: 2018.3.0 If no tag is specified in the image name, and nothing matching the specified image is pulled on the minion, the ``docker pull`` that retrieves the image will pull *all tags* for the image. A tag of ``latest`` is no longer implicit for the pull. For this reason, it is recommended to specify the image in ``repo:tag`` notation. .. _docker-container-running-skip-translate: skip_translate This function translates Salt CLI or SLS input into the format which docker-py_ expects. However, in the event that Salt's translation logic fails (due to potential changes in the Docker Remote API, or to bugs in the translation code), this argument can be used to exert granular control over which arguments are translated and which are not. Pass this argument as a comma-separated list (or Python list) of arguments, and translation for each passed argument name will be skipped. Alternatively, pass ``True`` and *all* translation will be skipped. Skipping tranlsation allows for arguments to be formatted directly in the format which docker-py_ expects. This allows for API changes and other issues to be more easily worked around. 
An example of using this option to skip translation would be: For example, imagine that there is an issue with processing the ``port_bindings`` argument, and the following configuration no longer works as expected: .. code-block:: yaml mycontainer: docker_container.running: - image: 7.3.1611 - port_bindings: - 10.2.9.10:8080:80 By using ``skip_translate``, you can forego the input translation and configure the port binding in the format docker-py_ needs: .. code-block:: yaml mycontainer: docker_container.running: - image: 7.3.1611 - skip_translate: port_bindings - port_bindings: {8080: [('10.2.9.10', 80)], '4193/udp': 9314} See the following links for more information: - `docker-py Low-level API`_ - `Docker Engine API`_ .. _docker-py: https://pypi.python.org/pypi/docker-py .. _`docker-py Low-level API`: http://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.create_container .. _`Docker Engine API`: https://docs.docker.com/engine/api/v1.33/#operation/ContainerCreate ignore_collisions : False Since many of docker-py_'s arguments differ in name from their CLI counterparts (with which most Docker users are more familiar), Salt detects usage of these and aliases them to the docker-py_ version of that argument so that both CLI and API versions of a given argument are supported. However, if both the alias and the docker-py_ version of the same argument (e.g. ``env`` and ``environment``) are used, an error will be raised. Set this argument to ``True`` to suppress these errors and keep the docker-py_ version of the argument. validate_ip_addrs : True For parameters which accept IP addresses as input, IP address validation will be performed. To disable, set this to ``False`` force : False Set this parameter to ``True`` to force Salt to re-create the container irrespective of whether or not it is configured as desired. 
watch_action : force Control what type of action is taken when this state :ref:`watches <requisites-watch>` another state that has changes. The default action is ``force``, which runs the state with ``force`` set to ``True``, triggering a rebuild of the container. If any other value is passed, it will be assumed to be a kill signal. If the container matches the specified configuration, and is running, then the action will be to send that signal to the container. Kill signals can be either strings or numbers, and are defined in the **Standard Signals** section of the ``signal(7)`` manpage. Run ``man 7 signal`` on a Linux host to browse this manpage. For example: .. code-block:: yaml mycontainer: docker_container.running: - image: busybox - watch_action: SIGHUP - watch: - file: some_file .. note:: If the container differs from the specified configuration, or is not running, then instead of sending a signal to the container, the container will be re-created/started and no signal will be sent. start : True Set to ``False`` to suppress starting of the container if it exists, matches the desired configuration, but is not running. This is useful for data-only containers, or for non-daemonized container processes, such as the Django ``migrate`` and ``collectstatic`` commands. In instances such as this, the container only needs to be started the first time. shutdown_timeout If the container needs to be replaced, the container will be stopped using :py:func:`docker.stop <salt.modules.dockermod.stop>`. If a ``shutdown_timout`` is not set, and the container was created using ``stop_timeout``, that timeout will be used. If neither of these values were set, then a timeout of 10 seconds will be used. .. versionchanged:: 2017.7.0 This option was renamed from ``stop_timeout`` to ``shutdown_timeout`` to accommodate the ``stop_timeout`` container configuration setting. client_timeout : 60 Timeout in seconds for the Docker client. 
This is not a timeout for this function, but for receiving a response from the API. .. note:: This is only used if Salt needs to pull the requested image. .. _salt-states-docker-container-network-management: **NETWORK MANAGEMENT** .. versionadded:: 2018.3.0 .. versionchanged:: 2019.2.0 If the ``networks`` option is used, any networks (including the default ``bridge`` network) which are not specified will be disconnected. The ``networks`` argument can be used to ensure that a container is attached to one or more networks. Optionally, arguments can be passed to the networks. In the example below, ``net1`` is being configured with arguments, while ``net2`` and ``bridge`` are being configured *without* arguments: .. code-block:: yaml foo: docker_container.running: - image: myuser/myimage:foo - networks: - net1: - aliases: - bar - baz - ipv4_address: 10.0.20.50 - net2 - bridge - require: - docker_network: net1 - docker_network: net2 The supported arguments are the ones from the docker-py's `connect_container_to_network`_ function (other than ``container`` and ``net_id``). .. important:: Unlike with the arguments described in the **CONTAINER CONFIGURATION PARAMETERS** section below, these network configuration parameters are not translated at all. Consult the `connect_container_to_network`_ documentation for the correct type/format of data to pass. .. _`connect_container_to_network`: https://docker-py.readthedocs.io/en/stable/api.html#docker.api.network.NetworkApiMixin.connect_container_to_network To start a container with no network connectivity (only possible in 2019.2.0 and later) pass this option as an empty list. For example: .. code-block:: yaml foo: docker_container.running: - image: myuser/myimage:foo - networks: [] **CONTAINER CONFIGURATION PARAMETERS** auto_remove (or *rm*) : False Enable auto-removal of the container on daemon side when the container’s process exits (analogous to running a docker container with ``--rm`` on the CLI). .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - auto_remove: True binds Files/directories to bind mount. Each bind mount should be passed in one of the following formats: - ``<host_path>:<container_path>`` - ``host_path`` is mounted within the container as ``container_path`` with read-write access. - ``<host_path>:<container_path>:<selinux_context>`` - ``host_path`` is mounted within the container as ``container_path`` with read-write access. Additionally, the specified selinux context will be set within the container. - ``<host_path>:<container_path>:<read_only>`` - ``host_path`` is mounted within the container as ``container_path``, with the read-only or read-write setting explicitly defined. - ``<host_path>:<container_path>:<read_only>,<selinux_context>`` - ``host_path`` is mounted within the container as ``container_path``, with the read-only or read-write setting explicitly defined. Additionally, the specified selinux context will be set within the container. ``<read_only>`` can be either ``rw`` for read-write access, or ``ro`` for read-only access. When omitted, it is assumed to be read-write. ``<selinux_context>`` can be ``z`` if the volume is shared between multiple containers, or ``Z`` if the volume should be private. .. note:: When both ``<read_only>`` and ``<selinux_context>`` are specified, there must be a comma before ``<selinux_context>``. Binds can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - binds: /srv/www:/var/www:ro,/etc/foo.conf:/usr/local/etc/foo.conf:rw .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - binds: - /srv/www:/var/www:ro - /home/myuser/conf/foo.conf:/etc/foo.conf:rw However, in cases where both ro/rw and an selinux context are combined, the only option is to use a YAML list, like so: .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - binds: - /srv/www:/var/www:ro,Z - /home/myuser/conf/foo.conf:/etc/foo.conf:rw,Z Since the second bind in the previous example is mounted read-write, the ``rw`` and comma can be dropped. For example: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - binds: - /srv/www:/var/www:ro,Z - /home/myuser/conf/foo.conf:/etc/foo.conf:Z blkio_weight Block IO weight (relative weight), accepts a weight value between 10 and 1000. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - blkio_weight: 100 blkio_weight_device Block IO weight (relative device weight), specified as a list of expressions in the format ``PATH:RATE`` .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - blkio_weight_device: /dev/sda:100 cap_add List of capabilities to add within the container. Can be expressed as a comma-separated list or a Python list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cap_add: SYS_ADMIN,MKNOD .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cap_add: - SYS_ADMIN - MKNOD .. note:: This option requires Docker 1.2.0 or newer. cap_drop List of capabilities to drop within the container. Can be expressed as a comma-separated list or a Python list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cap_drop: SYS_ADMIN,MKNOD .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cap_drop: - SYS_ADMIN - MKNOD .. note:: This option requires Docker 1.2.0 or newer. command (or *cmd*) Command to run in the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - command: bash cpuset_cpus (or *cpuset*) CPUs on which which to allow execution, specified as a string containing a range (e.g. 
``0-3``) or a comma-separated list of CPUs (e.g. ``0,1``). .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpuset_cpus: "0,1" cpuset_mems Memory nodes on which which to allow execution, specified as a string containing a range (e.g. ``0-3``) or a comma-separated list of MEMs (e.g. ``0,1``). Only effective on NUMA systems. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpuset_mems: "0,1" cpu_group The length of a CPU period in microseconds .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpu_group: 100000 cpu_period Microseconds of CPU time that the container can get in a CPU period .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpu_period: 50000 cpu_shares CPU shares (relative weight), specified as an integer between 2 and 1024. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpu_shares: 512 detach : False If ``True``, run the container's command in the background (daemon mode) .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - detach: True devices List of host devices to expose within the container. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices: /dev/net/tun,/dev/xvda1:/dev/xvda1,/dev/xvdb1:/dev/xvdb1:r .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices: - /dev/net/tun - /dev/xvda1:/dev/xvda1 - /dev/xvdb1:/dev/xvdb1:r device_read_bps Limit read rate (bytes per second) from a device, specified as a list of expressions in the format ``PATH:RATE``, where ``RATE`` is either an integer number of bytes, or a string ending in ``kb``, ``mb``, or ``gb``. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_bps: /dev/sda:1mb,/dev/sdb:5mb .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_bps: - /dev/sda:1mb - /dev/sdb:5mb device_read_iops Limit read rate (I/O per second) from a device, specified as a list of expressions in the format ``PATH:RATE``, where ``RATE`` is a number of I/O operations. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_iops: /dev/sda:1000,/dev/sdb:500 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_iops: - /dev/sda:1000 - /dev/sdb:500 device_write_bps Limit write rate (bytes per second) from a device, specified as a list of expressions in the format ``PATH:RATE``, where ``RATE`` is either an integer number of bytes, or a string ending in ``kb``, ``mb``, or ``gb``. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_write_bps: /dev/sda:1mb,/dev/sdb:5mb .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_write_bps: - /dev/sda:1mb - /dev/sdb:5mb device_read_iops Limit write rate (I/O per second) from a device, specified as a list of expressions in the format ``PATH:RATE``, where ``RATE`` is a number of I/O operations. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_iops: /dev/sda:1000,/dev/sdb:500 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_iops: - /dev/sda:1000 - /dev/sdb:500 dns List of DNS nameservers. Can be expressed as a comma-separated list or a YAML list. 
The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns: 8.8.8.8,8.8.4.4 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns: - 8.8.8.8 - 8.8.4.4 .. note:: To skip IP address validation, use ``validate_ip_addrs=False`` dns_opt Additional options to be added to the container’s ``resolv.conf`` file. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns_opt: ndots:9 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns_opt: - ndots:9 dns_search List of DNS search domains. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns_search: foo1.domain.tld,foo2.domain.tld .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns_search: - foo1.domain.tld - foo2.domain.tld domainname The domain name to use for the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dommainname: domain.tld entrypoint Entrypoint for the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - entrypoint: "mycmd --arg1 --arg2" This argument can also be specified as a list: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - entrypoint: - mycmd - --arg1 - --arg2 environment Either a list of variable/value mappings, or a list of strings in the format ``VARNAME=value``. The below three examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - environment: - VAR1: value - VAR2: value .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - environment: 'VAR1=value,VAR2=value' .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - environment: - VAR1=value - VAR2=value extra_hosts Additional hosts to add to the container's /etc/hosts file. Can be expressed as a comma-separated list or a Python list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - extra_hosts: web1:10.9.8.7,web2:10.9.8.8 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - extra_hosts: - web1:10.9.8.7 - web2:10.9.8.8 .. note:: To skip IP address validation, use ``validate_ip_addrs=False`` .. note:: This option requires Docker 1.3.0 or newer. group_add List of additional group names and/or IDs that the container process will run as. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - group_add: web,network .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - group_add: - web - network hostname Hostname of the container. If not provided, the value passed as the container's``name`` will be used for the hostname. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - hostname: web1 .. warning:: ``hostname`` cannot be set if ``network_mode`` is set to ``host``. The below example will result in an error: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - hostname: web1 - network_mode: host interactive (or *stdin_open*) : False Leave stdin open, even if not attached .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - interactive: True ipc_mode (or *ipc*) Set the IPC mode for the container. 
The default behavior is to create a private IPC namespace for the container, but this option can be used to change that behavior: - ``container:<container_name_or_id>`` reuses another container shared memory, semaphores and message queues - ``host``: use the host's shared memory, semaphores and message queues .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ipc_mode: container:foo .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ipc_mode: host .. warning:: Using ``host`` gives the container full access to local shared memory and is therefore considered insecure. isolation Specifies the type of isolation technology used by containers .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - isolation: hyperv .. note:: The default value on Windows server is ``process``, while the default value on Windows client is ``hyperv``. On Linux, only ``default`` is supported. labels Add metadata to the container. Labels can be set both with and without values, and labels with values can be passed either as ``key=value`` or ``key: value`` pairs. For example, while the below would be very confusing to read, it is technically valid, and demonstrates the different ways in which labels can be passed: .. code-block:: yaml mynet: docker_network.present: - labels: - foo - bar=baz - hello: world The labels can also simply be passed as a YAML dictionary, though this can be error-prone due to some :ref:`idiosyncrasies <yaml-idiosyncrasies>` with how PyYAML loads nested data structures: .. code-block:: yaml foo: docker_network.present: - labels: foo: '' bar: baz hello: world .. versionchanged:: 2018.3.0 Methods for specifying labels can now be mixed. Earlier releases required either labels with or without values. links Link this container to another. Links can be specified as a list of mappings or a comma-separated or Python list of expressions in the format ``<container_name_or_id>:<link_alias>``. 
The below three examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - links: - web1: link1 - web2: link2 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - links: web1:link1,web2:link2 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - links: - web1:link1 - web2:link2 log_driver and log_opt Set container's logging driver and options to configure that driver. Requires Docker 1.6 or newer. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - log_driver: syslog - log_opt: - syslog-address: tcp://192.168.0.42 - syslog-facility: daemon The ``log_opt`` can also be expressed as a comma-separated or YAML list of ``key=value`` pairs. The below two examples are equivalent to the above one: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - log_driver: syslog - log_opt: "syslog-address=tcp://192.168.0.42,syslog-facility=daemon" .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - log_driver: syslog - log_opt: - syslog-address=tcp://192.168.0.42 - syslog-facility=daemon .. note:: The logging driver feature was improved in Docker 1.13 introducing option name changes. Please see Docker's `Configure logging drivers`_ documentation for more information. .. _`Configure logging drivers`: https://docs.docker.com/engine/admin/logging/overview/ lxc_conf Additional LXC configuration parameters to set before starting the container. Either a list of variable/value mappings, or a list of strings in the format ``VARNAME=value``. The below three examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - lxc_conf: - lxc.utsname: docker - lxc.arch: x86_64 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - lxc_conf: lxc.utsname=docker,lxc.arch=x86_64 .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - lxc_conf: - lxc.utsname=docker - lxc.arch=x86_64 .. note:: These LXC configuration parameters will only have the desired effect if the container is using the LXC execution driver, which has been deprecated for some time. mac_address MAC address to use for the container. If not specified, a random MAC address will be used. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - mac_address: 01:23:45:67:89:0a mem_limit (or *memory*) : 0 Memory limit. Can be specified in bytes or using single-letter units (i.e. ``512M``, ``2G``, etc.). A value of ``0`` (the default) means no memory limit. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - mem_limit: 512M mem_swappiness Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - mem_swappiness: 60 memswap_limit (or *memory_swap*) : -1 Total memory limit (memory plus swap). Set to ``-1`` to disable swap. A value of ``0`` means no swap limit. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - memswap_limit: 1G network_disabled : False If ``True``, networking will be disabled within the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - network_disabled: True network_mode : bridge One of the following: - ``bridge`` - Creates a new network stack for the container on the docker bridge - ``none`` - No networking (equivalent of the Docker CLI argument ``--net=none``). Not to be confused with Python's ``None``. - ``container:<name_or_id>`` - Reuses another container's network stack - ``host`` - Use the host's network stack inside the container - Any name that identifies an existing network that might be created with ``docker.network_present``. .. 
warning:: Using ``host`` mode gives the container full access to the hosts system's services (such as D-bus), and is therefore considered insecure. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - network_mode: "none" .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - network_mode: container:web1 oom_kill_disable Whether to disable OOM killer .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - oom_kill_disable: False oom_score_adj An integer value containing the score given to the container in order to tune OOM killer preferences .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - oom_score_adj: 500 pid_mode Set to ``host`` to use the host container's PID namespace within the container. Requires Docker 1.5.0 or newer. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - pid_mode: host .. note:: This option requires Docker 1.5.0 or newer. pids_limit Set the container's PID limit. Set to ``-1`` for unlimited. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - pids_limit: 2000 port_bindings (or *publish*) Bind exposed ports. Port bindings should be passed in the same way as the ``--publish`` argument to the ``docker run`` CLI command: - ``ip:hostPort:containerPort`` - Bind a specific IP and port on the host to a specific port within the container. - ``ip::containerPort`` - Bind a specific IP and an ephemeral port to a specific port within the container. - ``hostPort:containerPort`` - Bind a specific port on all of the host's interfaces to a specific port within the container. - ``containerPort`` - Bind an ephemeral port on all of the host's interfaces to a specific port within the container. Multiple bindings can be separated by commas, or expressed as a YAML list, and port ranges can be defined using dashes. The below two examples are equivalent: .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - port_bindings: "4505-4506:14505-14506,2123:2123/udp,8080" .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - port_bindings: - 4505-4506:14505-14506 - 2123:2123/udp - 8080 .. note:: When specifying a protocol, it must be passed in the ``containerPort`` value, as seen in the examples above. ports A list of ports to expose on the container. Can either be a comma-separated list or a YAML list. If the protocol is omitted, the port will be assumed to be a TCP port. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ports: 1111,2222/udp .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ports: - 1111 - 2222/udp privileged : False If ``True``, runs the exec process with extended privileges .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - privileged: True publish_all_ports (or *publish_all*) : False Publish all ports to the host .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ports: 8080 - publish_all_ports: True read_only : False If ``True``, mount the container’s root filesystem as read only .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - read_only: True restart_policy (or *restart*) Set a restart policy for the container. Must be passed as a string in the format ``policy[:retry_count]`` where ``policy`` is one of ``always``, ``unless-stopped``, or ``on-failure``, and ``retry_count`` is an optional limit to the number of retries. The retry count is ignored when using the ``always`` or ``unless-stopped`` restart policy. .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - restart_policy: on-failure:5 bar: docker_container.running: - image: bar/baz:latest - restart_policy: always security_opt (or *security_opts*): Security configuration for MLS systems such as SELinux and AppArmor. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - security_opt: apparmor:unconfined .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - security_opt: - apparmor:unconfined .. important:: Some security options can contain commas. In these cases, this argument *must* be passed as a Python list, as splitting by comma will result in an invalid configuration. .. note:: See the documentation for security_opt at https://docs.docker.com/engine/reference/run/#security-configuration shm_size Size of /dev/shm .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - shm_size: 128M stop_signal Specify the signal docker will send to the container when stopping. Useful when running systemd as PID 1 inside the container. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - stop_signal: SIGRTMIN+3 .. note:: This option requires Docker 1.9.0 or newer and docker-py 1.7.0 or newer. .. versionadded:: 2016.11.0 stop_timeout Timeout to stop the container, in seconds .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - stop_timeout: 5 .. note:: In releases prior to 2017.7.0, this option was not set in the container configuration, but rather this timeout was enforced only when shutting down an existing container to replace it. To remove the ambiguity, and to allow for the container to have a stop timeout set for it, the old ``stop_timeout`` argument has been renamed to ``shutdown_timeout``, while ``stop_timeout`` now refer's to the container's configured stop timeout. 
storage_opt Storage driver options for the container. Can be either a list of strings in the format ``option=value``, or a list of mappings between option and value. The below three examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - storage_opt: - dm.basesize: 40G .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - storage_opt: dm.basesize=40G .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - storage_opt: - dm.basesize=40G sysctls (or *sysctl*) Set sysctl options for the container. Can be either a list of strings in the format ``option=value``, or a list of mappings between option and value. The below three examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - sysctls: - fs.nr_open: 1048576 - kernel.pid_max: 32768 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - sysctls: fs.nr_open=1048576,kernel.pid_max=32768 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - sysctls: - fs.nr_open=1048576 - kernel.pid_max=32768 tmpfs A map of container directories which should be replaced by tmpfs mounts and their corresponding mount options. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - tmpfs: - /run: rw,noexec,nosuid,size=65536k tty : False Attach TTYs .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - tty: True ulimits List of ulimits. These limits should be passed in the format ``<ulimit_name>:<soft_limit>:<hard_limit>``, with the hard limit being optional. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ulimits: nofile=1024:1024,nproc=60 .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ulimits: - nofile=1024:1024 - nproc=60 user User under which to run exec process .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - user: foo userns_mode (or *user_ns_mode*) Sets the user namespace mode, when the user namespace remapping option is enabled .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - userns_mode: host volumes (or *volume*) List of directories to expose as volumes. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - volumes: /mnt/vol1,/mnt/vol2 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - volumes: - /mnt/vol1 - /mnt/vol2 volumes_from Container names or IDs from which the container will get volumes. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - volumes_from: foo .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - volumes_from: - foo volume_driver sets the container's volume driver .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - volume_driver: foobar working_dir (or *workdir*) Working directory inside the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - working_dir: /var/log/nginx ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if image is None: ret['result'] = False ret['comment'] = 'The \'image\' argument is required' return ret elif not isinstance(image, six.string_types): image = six.text_type(image) try: # Since we're rewriting the "networks" value below, save the original # value here. 
configured_networks = networks networks = _parse_networks(networks) if networks: kwargs['networks'] = networks image_id = _resolve_image(ret, image, client_timeout) except CommandExecutionError as exc: ret['result'] = False if exc.info is not None: return _format_comments(ret, exc.info) else: ret['comment'] = exc.__str__() return ret comments = [] # Pop off the send_signal argument passed by the watch requisite send_signal = kwargs.pop('send_signal', False) try: current_image_id = __salt__['docker.inspect_container'](name)['Image'] except CommandExecutionError: current_image_id = None except KeyError: ret['result'] = False comments.append( 'Unable to detect current image for container \'{0}\'. ' 'This might be due to a change in the Docker API.'.format(name) ) return _format_comments(ret, comments) # Shorthand to make the below code more understandable exists = current_image_id is not None pre_state = __salt__['docker.state'](name) if exists else None # If skip_comparison is True, we're definitely going to be using the temp # container as the new container (because we're forcing the change, or # because the image IDs differ). If False, we'll need to perform a # comparison between it and the new container. 
skip_comparison = force or not exists or current_image_id != image_id if skip_comparison and __opts__['test']: ret['result'] = None if force: ret['changes']['forced_update'] = True elif current_image_id != image_id: ret['changes']['image'] = {'old': current_image_id, 'new': image_id} comments.append( 'Container \'{0}\' would be {1}'.format( name, 'created' if not exists else 'replaced' ) ) return _format_comments(ret, comments) # Create temp container (or just create the named container if the # container does not already exist) try: temp_container = __salt__['docker.create']( image, name=name if not exists else None, skip_translate=skip_translate, ignore_collisions=ignore_collisions, validate_ip_addrs=validate_ip_addrs, client_timeout=client_timeout, **kwargs) temp_container_name = temp_container['Name'] except KeyError as exc: ret['result'] = False comments.append( 'Key \'{0}\' missing from API response, this may be due to a ' 'change in the Docker Remote API. Please report this on the ' 'SaltStack issue tracker if it has not already been reported.' .format(exc) ) return _format_comments(ret, comments) except Exception as exc: ret['result'] = False msg = exc.__str__() if isinstance(exc, CommandExecutionError) \ and isinstance(exc.info, dict) and 'invalid' in exc.info: msg += ( '\n\nIf you feel this information is incorrect, the ' 'skip_translate argument can be used to skip input ' 'translation for the argument(s) identified as invalid. See ' 'the documentation for details.' 
) comments.append(msg) return _format_comments(ret, comments) def _replace(orig, new): rm_kwargs = {'stop': True} if shutdown_timeout is not None: rm_kwargs['timeout'] = shutdown_timeout ret['changes'].setdefault('container_id', {})['removed'] = \ __salt__['docker.rm'](name, **rm_kwargs) try: result = __salt__['docker.rename'](new, orig) except CommandExecutionError as exc: result = False comments.append('Failed to rename temp container: {0}'.format(exc)) if result: comments.append('Replaced container \'{0}\''.format(orig)) else: comments.append('Failed to replace container \'{0}\'') return result def _delete_temp_container(): log.debug('Removing temp container \'%s\'', temp_container_name) __salt__['docker.rm'](temp_container_name) # If we're not skipping the comparison, then the assumption is that # temp_container will be discarded, unless the comparison reveals # differences, in which case we'll set cleanup_temp = False to prevent it # from being cleaned. cleanup_temp = not skip_comparison try: pre_net_connect = __salt__['docker.inspect_container']( name if exists else temp_container_name) for net_name, net_conf in six.iteritems(networks): try: __salt__['docker.connect_container_to_network']( temp_container_name, net_name, **net_conf) except CommandExecutionError as exc: # Shouldn't happen, stopped docker containers can be # attached to networks even if the static IP lies outside # of the network's subnet. An exception will be raised once # you try to start the container, however. ret['result'] = False comments.append(exc.__str__()) return _format_comments(ret, comments) post_net_connect = __salt__['docker.inspect_container']( temp_container_name) if configured_networks is not None: # Use set arithmetic to determine the networks which are connected # but not explicitly defined. They will be disconnected below. Note # that we check configured_networks because it represents the # original (unparsed) network configuration. 
When no networks # argument is used, the parsed networks will be an empty list, so # it's not sufficient to do a boolean check on the "networks" # variable. extra_nets = set( post_net_connect.get('NetworkSettings', {}).get('Networks', {}) ) - set(networks) if extra_nets: for extra_net in extra_nets: __salt__['docker.disconnect_container_from_network']( temp_container_name, extra_net) # We've made changes, so we need to inspect the container again post_net_connect = __salt__['docker.inspect_container']( temp_container_name) net_changes = __salt__['docker.compare_container_networks']( pre_net_connect, post_net_connect) if not skip_comparison: container_changes = __salt__['docker.compare_containers']( name, temp_container_name, ignore='Hostname', ) if container_changes: if _check_diff(container_changes): ret.setdefault('warnings', []).append( 'The detected changes may be due to incorrect ' 'handling of arguments in earlier Salt releases. If ' 'this warning persists after running the state ' 'again{0}, and no changes were made to the SLS file, ' 'then please report this.'.format( ' without test=True' if __opts__['test'] else '' ) ) changes_ptr = ret['changes'].setdefault('container', {}) changes_ptr.update(container_changes) if __opts__['test']: ret['result'] = None comments.append( 'Container \'{0}\' would be {1}'.format( name, 'created' if not exists else 'replaced' ) ) else: # We don't want to clean the temp container, we'll be # replacing the existing one with it. cleanup_temp = False # Replace the container if not _replace(name, temp_container_name): ret['result'] = False return _format_comments(ret, comments) ret['changes'].setdefault('container_id', {})['added'] = \ temp_container['Id'] else: # No changes between existing container and temp container. # First check if a requisite is asking to send a signal to the # existing container. 
if send_signal: if __opts__['test']: comments.append( 'Signal {0} would be sent to container'.format( watch_action ) ) else: try: __salt__['docker.signal'](name, signal=watch_action) except CommandExecutionError as exc: ret['result'] = False comments.append( 'Failed to signal container: {0}'.format(exc) ) return _format_comments(ret, comments) else: ret['changes']['signal'] = watch_action comments.append( 'Sent signal {0} to container'.format(watch_action) ) elif container_changes: if not comments: log.warning( 'docker_container.running: detected changes without ' 'a specific comment for container \'%s\'', name ) comments.append( 'Container \'{0}\'{1} updated.'.format( name, ' would be' if __opts__['test'] else '' ) ) else: # Container was not replaced, no differences between the # existing container and the temp container were detected, # and no signal was sent to the container. comments.append( 'Container \'{0}\' is already configured as specified' .format(name) ) if net_changes: ret['changes'].setdefault('container', {})['Networks'] = net_changes if __opts__['test']: ret['result'] = None comments.append('Network configuration would be updated') elif cleanup_temp: # We only need to make network changes if the container # isn't being replaced, since we would already have # attached all the networks for purposes of comparison. network_failure = False for net_name in sorted(net_changes): errors = [] disconnected = connected = False try: if name in __salt__['docker.connected'](net_name): __salt__['docker.disconnect_container_from_network']( name, net_name) disconnected = True except CommandExecutionError as exc: errors.append(exc.__str__()) if net_name in networks: try: __salt__['docker.connect_container_to_network']( name, net_name, **networks[net_name]) connected = True except CommandExecutionError as exc: errors.append(exc.__str__()) if disconnected: # We succeeded in disconnecting but failed # to reconnect. 
This can happen if the # network's subnet has changed and we try # to reconnect with the same IP address # from the old subnet. for item in list(net_changes[net_name]): if net_changes[net_name][item]['old'] is None: # Since they'd both be None, just # delete this key from the changes del net_changes[net_name][item] else: net_changes[net_name][item]['new'] = None if errors: comments.extend(errors) network_failure = True ret['changes'].setdefault( 'container', {}).setdefault( 'Networks', {})[net_name] = net_changes[net_name] if disconnected and connected: comments.append( 'Reconnected to network \'{0}\' with updated ' 'configuration'.format(net_name) ) elif disconnected: comments.append( 'Disconnected from network \'{0}\''.format( net_name ) ) elif connected: comments.append( 'Connected to network \'{0}\''.format(net_name) ) if network_failure: ret['result'] = False return _format_comments(ret, comments) finally: if cleanup_temp: _delete_temp_container() if skip_comparison: if not exists: comments.append('Created container \'{0}\''.format(name)) else: if not _replace(name, temp_container): ret['result'] = False return _format_comments(ret, comments) ret['changes'].setdefault('container_id', {})['added'] = \ temp_container['Id'] # "exists" means that a container by the specified name existed prior to # this state being run # "not cleanup_temp" means that the temp container became permanent, either # because the named container did not exist or changes were detected # "cleanup_temp" means that the container already existed and no changes # were detected, so the the temp container was discarded if not cleanup_temp and (not exists or (exists and start)) \ or (start and cleanup_temp and pre_state != 'running'): if __opts__['test']: ret['result'] = None comments.append('Container would be started') return _format_comments(ret, comments) else: try: post_state = __salt__['docker.start'](name)['state']['new'] except Exception as exc: ret['result'] = False comments.append( 
'Failed to start container \'{0}\': \'{1}\''.format(name, exc) ) return _format_comments(ret, comments) else: post_state = __salt__['docker.state'](name) if not __opts__['test'] and post_state == 'running': # Now that we're certain the container is running, check each modified # network to see if the network went from static (or disconnected) to # automatic IP configuration. If so, grab the automatically-assigned # IPs and munge the changes dict to include them. Note that this can # only be done after the container is started because automatic IPs are # assigned at runtime. contextkey = '.'.join((name, 'docker_container.running')) def _get_nets(): if contextkey not in __context__: new_container_info = \ __salt__['docker.inspect_container'](name) __context__[contextkey] = new_container_info.get( 'NetworkSettings', {}).get('Networks', {}) return __context__[contextkey] autoip_keys = __opts__['docker.compare_container_networks'].get('automatic', []) for net_name, net_changes in six.iteritems( ret['changes'].get('container', {}).get('Networks', {})): if 'IPConfiguration' in net_changes \ and net_changes['IPConfiguration']['new'] == 'automatic': for key in autoip_keys: val = _get_nets().get(net_name, {}).get(key) if val: net_changes[key] = {'old': None, 'new': val} try: net_changes.pop('IPConfiguration') except KeyError: pass __context__.pop(contextkey, None) if pre_state != post_state: ret['changes']['state'] = {'old': pre_state, 'new': post_state} if pre_state is not None: comments.append( 'State changed from \'{0}\' to \'{1}\''.format( pre_state, post_state ) ) if exists and current_image_id != image_id: comments.append('Container has a new image') ret['changes']['image'] = {'old': current_image_id, 'new': image_id} if post_state != 'running' and start: ret['result'] = False comments.append('Container is not running') return _format_comments(ret, comments)
python
def running(name, image=None, skip_translate=None, ignore_collisions=False, validate_ip_addrs=True, force=False, watch_action='force', start=True, shutdown_timeout=None, client_timeout=salt.utils.docker.CLIENT_TIMEOUT, networks=None, **kwargs): ''' Ensure that a container with a specific configuration is present and running name Name of the container image Image to use for the container .. note:: This state will pull the image if it is not present. However, if the image needs to be built from a Dockerfile or loaded from a saved image, or if you would like to use requisites to trigger a replacement of the container when the image is updated, then the :py:func:`docker_image.present <salt.states.dockermod.image_present>` state should be used to manage the image. .. versionchanged:: 2018.3.0 If no tag is specified in the image name, and nothing matching the specified image is pulled on the minion, the ``docker pull`` that retrieves the image will pull *all tags* for the image. A tag of ``latest`` is no longer implicit for the pull. For this reason, it is recommended to specify the image in ``repo:tag`` notation. .. _docker-container-running-skip-translate: skip_translate This function translates Salt CLI or SLS input into the format which docker-py_ expects. However, in the event that Salt's translation logic fails (due to potential changes in the Docker Remote API, or to bugs in the translation code), this argument can be used to exert granular control over which arguments are translated and which are not. Pass this argument as a comma-separated list (or Python list) of arguments, and translation for each passed argument name will be skipped. Alternatively, pass ``True`` and *all* translation will be skipped. Skipping tranlsation allows for arguments to be formatted directly in the format which docker-py_ expects. This allows for API changes and other issues to be more easily worked around. 
An example of using this option to skip translation would be: For example, imagine that there is an issue with processing the ``port_bindings`` argument, and the following configuration no longer works as expected: .. code-block:: yaml mycontainer: docker_container.running: - image: 7.3.1611 - port_bindings: - 10.2.9.10:8080:80 By using ``skip_translate``, you can forego the input translation and configure the port binding in the format docker-py_ needs: .. code-block:: yaml mycontainer: docker_container.running: - image: 7.3.1611 - skip_translate: port_bindings - port_bindings: {8080: [('10.2.9.10', 80)], '4193/udp': 9314} See the following links for more information: - `docker-py Low-level API`_ - `Docker Engine API`_ .. _docker-py: https://pypi.python.org/pypi/docker-py .. _`docker-py Low-level API`: http://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.create_container .. _`Docker Engine API`: https://docs.docker.com/engine/api/v1.33/#operation/ContainerCreate ignore_collisions : False Since many of docker-py_'s arguments differ in name from their CLI counterparts (with which most Docker users are more familiar), Salt detects usage of these and aliases them to the docker-py_ version of that argument so that both CLI and API versions of a given argument are supported. However, if both the alias and the docker-py_ version of the same argument (e.g. ``env`` and ``environment``) are used, an error will be raised. Set this argument to ``True`` to suppress these errors and keep the docker-py_ version of the argument. validate_ip_addrs : True For parameters which accept IP addresses as input, IP address validation will be performed. To disable, set this to ``False`` force : False Set this parameter to ``True`` to force Salt to re-create the container irrespective of whether or not it is configured as desired. 
watch_action : force Control what type of action is taken when this state :ref:`watches <requisites-watch>` another state that has changes. The default action is ``force``, which runs the state with ``force`` set to ``True``, triggering a rebuild of the container. If any other value is passed, it will be assumed to be a kill signal. If the container matches the specified configuration, and is running, then the action will be to send that signal to the container. Kill signals can be either strings or numbers, and are defined in the **Standard Signals** section of the ``signal(7)`` manpage. Run ``man 7 signal`` on a Linux host to browse this manpage. For example: .. code-block:: yaml mycontainer: docker_container.running: - image: busybox - watch_action: SIGHUP - watch: - file: some_file .. note:: If the container differs from the specified configuration, or is not running, then instead of sending a signal to the container, the container will be re-created/started and no signal will be sent. start : True Set to ``False`` to suppress starting of the container if it exists, matches the desired configuration, but is not running. This is useful for data-only containers, or for non-daemonized container processes, such as the Django ``migrate`` and ``collectstatic`` commands. In instances such as this, the container only needs to be started the first time. shutdown_timeout If the container needs to be replaced, the container will be stopped using :py:func:`docker.stop <salt.modules.dockermod.stop>`. If a ``shutdown_timout`` is not set, and the container was created using ``stop_timeout``, that timeout will be used. If neither of these values were set, then a timeout of 10 seconds will be used. .. versionchanged:: 2017.7.0 This option was renamed from ``stop_timeout`` to ``shutdown_timeout`` to accommodate the ``stop_timeout`` container configuration setting. client_timeout : 60 Timeout in seconds for the Docker client. 
This is not a timeout for this function, but for receiving a response from the API. .. note:: This is only used if Salt needs to pull the requested image. .. _salt-states-docker-container-network-management: **NETWORK MANAGEMENT** .. versionadded:: 2018.3.0 .. versionchanged:: 2019.2.0 If the ``networks`` option is used, any networks (including the default ``bridge`` network) which are not specified will be disconnected. The ``networks`` argument can be used to ensure that a container is attached to one or more networks. Optionally, arguments can be passed to the networks. In the example below, ``net1`` is being configured with arguments, while ``net2`` and ``bridge`` are being configured *without* arguments: .. code-block:: yaml foo: docker_container.running: - image: myuser/myimage:foo - networks: - net1: - aliases: - bar - baz - ipv4_address: 10.0.20.50 - net2 - bridge - require: - docker_network: net1 - docker_network: net2 The supported arguments are the ones from the docker-py's `connect_container_to_network`_ function (other than ``container`` and ``net_id``). .. important:: Unlike with the arguments described in the **CONTAINER CONFIGURATION PARAMETERS** section below, these network configuration parameters are not translated at all. Consult the `connect_container_to_network`_ documentation for the correct type/format of data to pass. .. _`connect_container_to_network`: https://docker-py.readthedocs.io/en/stable/api.html#docker.api.network.NetworkApiMixin.connect_container_to_network To start a container with no network connectivity (only possible in 2019.2.0 and later) pass this option as an empty list. For example: .. code-block:: yaml foo: docker_container.running: - image: myuser/myimage:foo - networks: [] **CONTAINER CONFIGURATION PARAMETERS** auto_remove (or *rm*) : False Enable auto-removal of the container on daemon side when the container’s process exits (analogous to running a docker container with ``--rm`` on the CLI). .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - auto_remove: True binds Files/directories to bind mount. Each bind mount should be passed in one of the following formats: - ``<host_path>:<container_path>`` - ``host_path`` is mounted within the container as ``container_path`` with read-write access. - ``<host_path>:<container_path>:<selinux_context>`` - ``host_path`` is mounted within the container as ``container_path`` with read-write access. Additionally, the specified selinux context will be set within the container. - ``<host_path>:<container_path>:<read_only>`` - ``host_path`` is mounted within the container as ``container_path``, with the read-only or read-write setting explicitly defined. - ``<host_path>:<container_path>:<read_only>,<selinux_context>`` - ``host_path`` is mounted within the container as ``container_path``, with the read-only or read-write setting explicitly defined. Additionally, the specified selinux context will be set within the container. ``<read_only>`` can be either ``rw`` for read-write access, or ``ro`` for read-only access. When omitted, it is assumed to be read-write. ``<selinux_context>`` can be ``z`` if the volume is shared between multiple containers, or ``Z`` if the volume should be private. .. note:: When both ``<read_only>`` and ``<selinux_context>`` are specified, there must be a comma before ``<selinux_context>``. Binds can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - binds: /srv/www:/var/www:ro,/etc/foo.conf:/usr/local/etc/foo.conf:rw .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - binds: - /srv/www:/var/www:ro - /home/myuser/conf/foo.conf:/etc/foo.conf:rw However, in cases where both ro/rw and an selinux context are combined, the only option is to use a YAML list, like so: .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - binds: - /srv/www:/var/www:ro,Z - /home/myuser/conf/foo.conf:/etc/foo.conf:rw,Z Since the second bind in the previous example is mounted read-write, the ``rw`` and comma can be dropped. For example: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - binds: - /srv/www:/var/www:ro,Z - /home/myuser/conf/foo.conf:/etc/foo.conf:Z blkio_weight Block IO weight (relative weight), accepts a weight value between 10 and 1000. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - blkio_weight: 100 blkio_weight_device Block IO weight (relative device weight), specified as a list of expressions in the format ``PATH:RATE`` .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - blkio_weight_device: /dev/sda:100 cap_add List of capabilities to add within the container. Can be expressed as a comma-separated list or a Python list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cap_add: SYS_ADMIN,MKNOD .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cap_add: - SYS_ADMIN - MKNOD .. note:: This option requires Docker 1.2.0 or newer. cap_drop List of capabilities to drop within the container. Can be expressed as a comma-separated list or a Python list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cap_drop: SYS_ADMIN,MKNOD .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cap_drop: - SYS_ADMIN - MKNOD .. note:: This option requires Docker 1.2.0 or newer. command (or *cmd*) Command to run in the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - command: bash cpuset_cpus (or *cpuset*) CPUs on which which to allow execution, specified as a string containing a range (e.g. 
``0-3``) or a comma-separated list of CPUs (e.g. ``0,1``). .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpuset_cpus: "0,1" cpuset_mems Memory nodes on which which to allow execution, specified as a string containing a range (e.g. ``0-3``) or a comma-separated list of MEMs (e.g. ``0,1``). Only effective on NUMA systems. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpuset_mems: "0,1" cpu_group The length of a CPU period in microseconds .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpu_group: 100000 cpu_period Microseconds of CPU time that the container can get in a CPU period .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpu_period: 50000 cpu_shares CPU shares (relative weight), specified as an integer between 2 and 1024. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpu_shares: 512 detach : False If ``True``, run the container's command in the background (daemon mode) .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - detach: True devices List of host devices to expose within the container. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices: /dev/net/tun,/dev/xvda1:/dev/xvda1,/dev/xvdb1:/dev/xvdb1:r .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices: - /dev/net/tun - /dev/xvda1:/dev/xvda1 - /dev/xvdb1:/dev/xvdb1:r device_read_bps Limit read rate (bytes per second) from a device, specified as a list of expressions in the format ``PATH:RATE``, where ``RATE`` is either an integer number of bytes, or a string ending in ``kb``, ``mb``, or ``gb``. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_bps: /dev/sda:1mb,/dev/sdb:5mb .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_bps: - /dev/sda:1mb - /dev/sdb:5mb device_read_iops Limit read rate (I/O per second) from a device, specified as a list of expressions in the format ``PATH:RATE``, where ``RATE`` is a number of I/O operations. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_iops: /dev/sda:1000,/dev/sdb:500 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_iops: - /dev/sda:1000 - /dev/sdb:500 device_write_bps Limit write rate (bytes per second) from a device, specified as a list of expressions in the format ``PATH:RATE``, where ``RATE`` is either an integer number of bytes, or a string ending in ``kb``, ``mb``, or ``gb``. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_write_bps: /dev/sda:1mb,/dev/sdb:5mb .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_write_bps: - /dev/sda:1mb - /dev/sdb:5mb device_read_iops Limit write rate (I/O per second) from a device, specified as a list of expressions in the format ``PATH:RATE``, where ``RATE`` is a number of I/O operations. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_iops: /dev/sda:1000,/dev/sdb:500 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_iops: - /dev/sda:1000 - /dev/sdb:500 dns List of DNS nameservers. Can be expressed as a comma-separated list or a YAML list. 
The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns: 8.8.8.8,8.8.4.4 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns: - 8.8.8.8 - 8.8.4.4 .. note:: To skip IP address validation, use ``validate_ip_addrs=False`` dns_opt Additional options to be added to the container’s ``resolv.conf`` file. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns_opt: ndots:9 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns_opt: - ndots:9 dns_search List of DNS search domains. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns_search: foo1.domain.tld,foo2.domain.tld .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns_search: - foo1.domain.tld - foo2.domain.tld domainname The domain name to use for the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dommainname: domain.tld entrypoint Entrypoint for the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - entrypoint: "mycmd --arg1 --arg2" This argument can also be specified as a list: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - entrypoint: - mycmd - --arg1 - --arg2 environment Either a list of variable/value mappings, or a list of strings in the format ``VARNAME=value``. The below three examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - environment: - VAR1: value - VAR2: value .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - environment: 'VAR1=value,VAR2=value' .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - environment: - VAR1=value - VAR2=value extra_hosts Additional hosts to add to the container's /etc/hosts file. Can be expressed as a comma-separated list or a Python list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - extra_hosts: web1:10.9.8.7,web2:10.9.8.8 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - extra_hosts: - web1:10.9.8.7 - web2:10.9.8.8 .. note:: To skip IP address validation, use ``validate_ip_addrs=False`` .. note:: This option requires Docker 1.3.0 or newer. group_add List of additional group names and/or IDs that the container process will run as. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - group_add: web,network .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - group_add: - web - network hostname Hostname of the container. If not provided, the value passed as the container's``name`` will be used for the hostname. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - hostname: web1 .. warning:: ``hostname`` cannot be set if ``network_mode`` is set to ``host``. The below example will result in an error: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - hostname: web1 - network_mode: host interactive (or *stdin_open*) : False Leave stdin open, even if not attached .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - interactive: True ipc_mode (or *ipc*) Set the IPC mode for the container. 
The default behavior is to create a private IPC namespace for the container, but this option can be used to change that behavior: - ``container:<container_name_or_id>`` reuses another container shared memory, semaphores and message queues - ``host``: use the host's shared memory, semaphores and message queues .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ipc_mode: container:foo .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ipc_mode: host .. warning:: Using ``host`` gives the container full access to local shared memory and is therefore considered insecure. isolation Specifies the type of isolation technology used by containers .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - isolation: hyperv .. note:: The default value on Windows server is ``process``, while the default value on Windows client is ``hyperv``. On Linux, only ``default`` is supported. labels Add metadata to the container. Labels can be set both with and without values, and labels with values can be passed either as ``key=value`` or ``key: value`` pairs. For example, while the below would be very confusing to read, it is technically valid, and demonstrates the different ways in which labels can be passed: .. code-block:: yaml mynet: docker_network.present: - labels: - foo - bar=baz - hello: world The labels can also simply be passed as a YAML dictionary, though this can be error-prone due to some :ref:`idiosyncrasies <yaml-idiosyncrasies>` with how PyYAML loads nested data structures: .. code-block:: yaml foo: docker_network.present: - labels: foo: '' bar: baz hello: world .. versionchanged:: 2018.3.0 Methods for specifying labels can now be mixed. Earlier releases required either labels with or without values. links Link this container to another. Links can be specified as a list of mappings or a comma-separated or Python list of expressions in the format ``<container_name_or_id>:<link_alias>``. 
The below three examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - links: - web1: link1 - web2: link2 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - links: web1:link1,web2:link2 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - links: - web1:link1 - web2:link2 log_driver and log_opt Set container's logging driver and options to configure that driver. Requires Docker 1.6 or newer. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - log_driver: syslog - log_opt: - syslog-address: tcp://192.168.0.42 - syslog-facility: daemon The ``log_opt`` can also be expressed as a comma-separated or YAML list of ``key=value`` pairs. The below two examples are equivalent to the above one: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - log_driver: syslog - log_opt: "syslog-address=tcp://192.168.0.42,syslog-facility=daemon" .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - log_driver: syslog - log_opt: - syslog-address=tcp://192.168.0.42 - syslog-facility=daemon .. note:: The logging driver feature was improved in Docker 1.13 introducing option name changes. Please see Docker's `Configure logging drivers`_ documentation for more information. .. _`Configure logging drivers`: https://docs.docker.com/engine/admin/logging/overview/ lxc_conf Additional LXC configuration parameters to set before starting the container. Either a list of variable/value mappings, or a list of strings in the format ``VARNAME=value``. The below three examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - lxc_conf: - lxc.utsname: docker - lxc.arch: x86_64 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - lxc_conf: lxc.utsname=docker,lxc.arch=x86_64 .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - lxc_conf: - lxc.utsname=docker - lxc.arch=x86_64 .. note:: These LXC configuration parameters will only have the desired effect if the container is using the LXC execution driver, which has been deprecated for some time. mac_address MAC address to use for the container. If not specified, a random MAC address will be used. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - mac_address: 01:23:45:67:89:0a mem_limit (or *memory*) : 0 Memory limit. Can be specified in bytes or using single-letter units (i.e. ``512M``, ``2G``, etc.). A value of ``0`` (the default) means no memory limit. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - mem_limit: 512M mem_swappiness Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - mem_swappiness: 60 memswap_limit (or *memory_swap*) : -1 Total memory limit (memory plus swap). Set to ``-1`` to disable swap. A value of ``0`` means no swap limit. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - memswap_limit: 1G network_disabled : False If ``True``, networking will be disabled within the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - network_disabled: True network_mode : bridge One of the following: - ``bridge`` - Creates a new network stack for the container on the docker bridge - ``none`` - No networking (equivalent of the Docker CLI argument ``--net=none``). Not to be confused with Python's ``None``. - ``container:<name_or_id>`` - Reuses another container's network stack - ``host`` - Use the host's network stack inside the container - Any name that identifies an existing network that might be created with ``docker.network_present``. .. 
warning:: Using ``host`` mode gives the container full access to the hosts system's services (such as D-bus), and is therefore considered insecure. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - network_mode: "none" .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - network_mode: container:web1 oom_kill_disable Whether to disable OOM killer .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - oom_kill_disable: False oom_score_adj An integer value containing the score given to the container in order to tune OOM killer preferences .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - oom_score_adj: 500 pid_mode Set to ``host`` to use the host container's PID namespace within the container. Requires Docker 1.5.0 or newer. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - pid_mode: host .. note:: This option requires Docker 1.5.0 or newer. pids_limit Set the container's PID limit. Set to ``-1`` for unlimited. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - pids_limit: 2000 port_bindings (or *publish*) Bind exposed ports. Port bindings should be passed in the same way as the ``--publish`` argument to the ``docker run`` CLI command: - ``ip:hostPort:containerPort`` - Bind a specific IP and port on the host to a specific port within the container. - ``ip::containerPort`` - Bind a specific IP and an ephemeral port to a specific port within the container. - ``hostPort:containerPort`` - Bind a specific port on all of the host's interfaces to a specific port within the container. - ``containerPort`` - Bind an ephemeral port on all of the host's interfaces to a specific port within the container. Multiple bindings can be separated by commas, or expressed as a YAML list, and port ranges can be defined using dashes. The below two examples are equivalent: .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - port_bindings: "4505-4506:14505-14506,2123:2123/udp,8080" .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - port_bindings: - 4505-4506:14505-14506 - 2123:2123/udp - 8080 .. note:: When specifying a protocol, it must be passed in the ``containerPort`` value, as seen in the examples above. ports A list of ports to expose on the container. Can either be a comma-separated list or a YAML list. If the protocol is omitted, the port will be assumed to be a TCP port. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ports: 1111,2222/udp .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ports: - 1111 - 2222/udp privileged : False If ``True``, runs the exec process with extended privileges .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - privileged: True publish_all_ports (or *publish_all*) : False Publish all ports to the host .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ports: 8080 - publish_all_ports: True read_only : False If ``True``, mount the container’s root filesystem as read only .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - read_only: True restart_policy (or *restart*) Set a restart policy for the container. Must be passed as a string in the format ``policy[:retry_count]`` where ``policy`` is one of ``always``, ``unless-stopped``, or ``on-failure``, and ``retry_count`` is an optional limit to the number of retries. The retry count is ignored when using the ``always`` or ``unless-stopped`` restart policy. .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - restart_policy: on-failure:5 bar: docker_container.running: - image: bar/baz:latest - restart_policy: always security_opt (or *security_opts*): Security configuration for MLS systems such as SELinux and AppArmor. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - security_opt: apparmor:unconfined .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - security_opt: - apparmor:unconfined .. important:: Some security options can contain commas. In these cases, this argument *must* be passed as a Python list, as splitting by comma will result in an invalid configuration. .. note:: See the documentation for security_opt at https://docs.docker.com/engine/reference/run/#security-configuration shm_size Size of /dev/shm .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - shm_size: 128M stop_signal Specify the signal docker will send to the container when stopping. Useful when running systemd as PID 1 inside the container. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - stop_signal: SIGRTMIN+3 .. note:: This option requires Docker 1.9.0 or newer and docker-py 1.7.0 or newer. .. versionadded:: 2016.11.0 stop_timeout Timeout to stop the container, in seconds .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - stop_timeout: 5 .. note:: In releases prior to 2017.7.0, this option was not set in the container configuration, but rather this timeout was enforced only when shutting down an existing container to replace it. To remove the ambiguity, and to allow for the container to have a stop timeout set for it, the old ``stop_timeout`` argument has been renamed to ``shutdown_timeout``, while ``stop_timeout`` now refer's to the container's configured stop timeout. 
storage_opt Storage driver options for the container. Can be either a list of strings in the format ``option=value``, or a list of mappings between option and value. The below three examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - storage_opt: - dm.basesize: 40G .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - storage_opt: dm.basesize=40G .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - storage_opt: - dm.basesize=40G sysctls (or *sysctl*) Set sysctl options for the container. Can be either a list of strings in the format ``option=value``, or a list of mappings between option and value. The below three examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - sysctls: - fs.nr_open: 1048576 - kernel.pid_max: 32768 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - sysctls: fs.nr_open=1048576,kernel.pid_max=32768 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - sysctls: - fs.nr_open=1048576 - kernel.pid_max=32768 tmpfs A map of container directories which should be replaced by tmpfs mounts and their corresponding mount options. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - tmpfs: - /run: rw,noexec,nosuid,size=65536k tty : False Attach TTYs .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - tty: True ulimits List of ulimits. These limits should be passed in the format ``<ulimit_name>:<soft_limit>:<hard_limit>``, with the hard limit being optional. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ulimits: nofile=1024:1024,nproc=60 .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ulimits: - nofile=1024:1024 - nproc=60 user User under which to run exec process .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - user: foo userns_mode (or *user_ns_mode*) Sets the user namsepace mode, when the user namespace remapping option is enabled .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - userns_mode: host volumes (or *volume*) List of directories to expose as volumes. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - volumes: /mnt/vol1,/mnt/vol2 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - volumes: - /mnt/vol1 - /mnt/vol2 volumes_from Container names or IDs from which the container will get volumes. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - volumes_from: foo .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - volumes_from: - foo volume_driver sets the container's volume driver .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - volume_driver: foobar working_dir (or *workdir*) Working directory inside the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - working_dir: /var/log/nginx ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if image is None: ret['result'] = False ret['comment'] = 'The \'image\' argument is required' return ret elif not isinstance(image, six.string_types): image = six.text_type(image) try: # Since we're rewriting the "networks" value below, save the original # value here. 
configured_networks = networks networks = _parse_networks(networks) if networks: kwargs['networks'] = networks image_id = _resolve_image(ret, image, client_timeout) except CommandExecutionError as exc: ret['result'] = False if exc.info is not None: return _format_comments(ret, exc.info) else: ret['comment'] = exc.__str__() return ret comments = [] # Pop off the send_signal argument passed by the watch requisite send_signal = kwargs.pop('send_signal', False) try: current_image_id = __salt__['docker.inspect_container'](name)['Image'] except CommandExecutionError: current_image_id = None except KeyError: ret['result'] = False comments.append( 'Unable to detect current image for container \'{0}\'. ' 'This might be due to a change in the Docker API.'.format(name) ) return _format_comments(ret, comments) # Shorthand to make the below code more understandable exists = current_image_id is not None pre_state = __salt__['docker.state'](name) if exists else None # If skip_comparison is True, we're definitely going to be using the temp # container as the new container (because we're forcing the change, or # because the image IDs differ). If False, we'll need to perform a # comparison between it and the new container. 
skip_comparison = force or not exists or current_image_id != image_id if skip_comparison and __opts__['test']: ret['result'] = None if force: ret['changes']['forced_update'] = True elif current_image_id != image_id: ret['changes']['image'] = {'old': current_image_id, 'new': image_id} comments.append( 'Container \'{0}\' would be {1}'.format( name, 'created' if not exists else 'replaced' ) ) return _format_comments(ret, comments) # Create temp container (or just create the named container if the # container does not already exist) try: temp_container = __salt__['docker.create']( image, name=name if not exists else None, skip_translate=skip_translate, ignore_collisions=ignore_collisions, validate_ip_addrs=validate_ip_addrs, client_timeout=client_timeout, **kwargs) temp_container_name = temp_container['Name'] except KeyError as exc: ret['result'] = False comments.append( 'Key \'{0}\' missing from API response, this may be due to a ' 'change in the Docker Remote API. Please report this on the ' 'SaltStack issue tracker if it has not already been reported.' .format(exc) ) return _format_comments(ret, comments) except Exception as exc: ret['result'] = False msg = exc.__str__() if isinstance(exc, CommandExecutionError) \ and isinstance(exc.info, dict) and 'invalid' in exc.info: msg += ( '\n\nIf you feel this information is incorrect, the ' 'skip_translate argument can be used to skip input ' 'translation for the argument(s) identified as invalid. See ' 'the documentation for details.' 
) comments.append(msg) return _format_comments(ret, comments) def _replace(orig, new): rm_kwargs = {'stop': True} if shutdown_timeout is not None: rm_kwargs['timeout'] = shutdown_timeout ret['changes'].setdefault('container_id', {})['removed'] = \ __salt__['docker.rm'](name, **rm_kwargs) try: result = __salt__['docker.rename'](new, orig) except CommandExecutionError as exc: result = False comments.append('Failed to rename temp container: {0}'.format(exc)) if result: comments.append('Replaced container \'{0}\''.format(orig)) else: comments.append('Failed to replace container \'{0}\'') return result def _delete_temp_container(): log.debug('Removing temp container \'%s\'', temp_container_name) __salt__['docker.rm'](temp_container_name) # If we're not skipping the comparison, then the assumption is that # temp_container will be discarded, unless the comparison reveals # differences, in which case we'll set cleanup_temp = False to prevent it # from being cleaned. cleanup_temp = not skip_comparison try: pre_net_connect = __salt__['docker.inspect_container']( name if exists else temp_container_name) for net_name, net_conf in six.iteritems(networks): try: __salt__['docker.connect_container_to_network']( temp_container_name, net_name, **net_conf) except CommandExecutionError as exc: # Shouldn't happen, stopped docker containers can be # attached to networks even if the static IP lies outside # of the network's subnet. An exception will be raised once # you try to start the container, however. ret['result'] = False comments.append(exc.__str__()) return _format_comments(ret, comments) post_net_connect = __salt__['docker.inspect_container']( temp_container_name) if configured_networks is not None: # Use set arithmetic to determine the networks which are connected # but not explicitly defined. They will be disconnected below. Note # that we check configured_networks because it represents the # original (unparsed) network configuration. 
When no networks # argument is used, the parsed networks will be an empty list, so # it's not sufficient to do a boolean check on the "networks" # variable. extra_nets = set( post_net_connect.get('NetworkSettings', {}).get('Networks', {}) ) - set(networks) if extra_nets: for extra_net in extra_nets: __salt__['docker.disconnect_container_from_network']( temp_container_name, extra_net) # We've made changes, so we need to inspect the container again post_net_connect = __salt__['docker.inspect_container']( temp_container_name) net_changes = __salt__['docker.compare_container_networks']( pre_net_connect, post_net_connect) if not skip_comparison: container_changes = __salt__['docker.compare_containers']( name, temp_container_name, ignore='Hostname', ) if container_changes: if _check_diff(container_changes): ret.setdefault('warnings', []).append( 'The detected changes may be due to incorrect ' 'handling of arguments in earlier Salt releases. If ' 'this warning persists after running the state ' 'again{0}, and no changes were made to the SLS file, ' 'then please report this.'.format( ' without test=True' if __opts__['test'] else '' ) ) changes_ptr = ret['changes'].setdefault('container', {}) changes_ptr.update(container_changes) if __opts__['test']: ret['result'] = None comments.append( 'Container \'{0}\' would be {1}'.format( name, 'created' if not exists else 'replaced' ) ) else: # We don't want to clean the temp container, we'll be # replacing the existing one with it. cleanup_temp = False # Replace the container if not _replace(name, temp_container_name): ret['result'] = False return _format_comments(ret, comments) ret['changes'].setdefault('container_id', {})['added'] = \ temp_container['Id'] else: # No changes between existing container and temp container. # First check if a requisite is asking to send a signal to the # existing container. 
if send_signal: if __opts__['test']: comments.append( 'Signal {0} would be sent to container'.format( watch_action ) ) else: try: __salt__['docker.signal'](name, signal=watch_action) except CommandExecutionError as exc: ret['result'] = False comments.append( 'Failed to signal container: {0}'.format(exc) ) return _format_comments(ret, comments) else: ret['changes']['signal'] = watch_action comments.append( 'Sent signal {0} to container'.format(watch_action) ) elif container_changes: if not comments: log.warning( 'docker_container.running: detected changes without ' 'a specific comment for container \'%s\'', name ) comments.append( 'Container \'{0}\'{1} updated.'.format( name, ' would be' if __opts__['test'] else '' ) ) else: # Container was not replaced, no differences between the # existing container and the temp container were detected, # and no signal was sent to the container. comments.append( 'Container \'{0}\' is already configured as specified' .format(name) ) if net_changes: ret['changes'].setdefault('container', {})['Networks'] = net_changes if __opts__['test']: ret['result'] = None comments.append('Network configuration would be updated') elif cleanup_temp: # We only need to make network changes if the container # isn't being replaced, since we would already have # attached all the networks for purposes of comparison. network_failure = False for net_name in sorted(net_changes): errors = [] disconnected = connected = False try: if name in __salt__['docker.connected'](net_name): __salt__['docker.disconnect_container_from_network']( name, net_name) disconnected = True except CommandExecutionError as exc: errors.append(exc.__str__()) if net_name in networks: try: __salt__['docker.connect_container_to_network']( name, net_name, **networks[net_name]) connected = True except CommandExecutionError as exc: errors.append(exc.__str__()) if disconnected: # We succeeded in disconnecting but failed # to reconnect. 
This can happen if the # network's subnet has changed and we try # to reconnect with the same IP address # from the old subnet. for item in list(net_changes[net_name]): if net_changes[net_name][item]['old'] is None: # Since they'd both be None, just # delete this key from the changes del net_changes[net_name][item] else: net_changes[net_name][item]['new'] = None if errors: comments.extend(errors) network_failure = True ret['changes'].setdefault( 'container', {}).setdefault( 'Networks', {})[net_name] = net_changes[net_name] if disconnected and connected: comments.append( 'Reconnected to network \'{0}\' with updated ' 'configuration'.format(net_name) ) elif disconnected: comments.append( 'Disconnected from network \'{0}\''.format( net_name ) ) elif connected: comments.append( 'Connected to network \'{0}\''.format(net_name) ) if network_failure: ret['result'] = False return _format_comments(ret, comments) finally: if cleanup_temp: _delete_temp_container() if skip_comparison: if not exists: comments.append('Created container \'{0}\''.format(name)) else: if not _replace(name, temp_container): ret['result'] = False return _format_comments(ret, comments) ret['changes'].setdefault('container_id', {})['added'] = \ temp_container['Id'] # "exists" means that a container by the specified name existed prior to # this state being run # "not cleanup_temp" means that the temp container became permanent, either # because the named container did not exist or changes were detected # "cleanup_temp" means that the container already existed and no changes # were detected, so the the temp container was discarded if not cleanup_temp and (not exists or (exists and start)) \ or (start and cleanup_temp and pre_state != 'running'): if __opts__['test']: ret['result'] = None comments.append('Container would be started') return _format_comments(ret, comments) else: try: post_state = __salt__['docker.start'](name)['state']['new'] except Exception as exc: ret['result'] = False comments.append( 
'Failed to start container \'{0}\': \'{1}\''.format(name, exc) ) return _format_comments(ret, comments) else: post_state = __salt__['docker.state'](name) if not __opts__['test'] and post_state == 'running': # Now that we're certain the container is running, check each modified # network to see if the network went from static (or disconnected) to # automatic IP configuration. If so, grab the automatically-assigned # IPs and munge the changes dict to include them. Note that this can # only be done after the container is started bceause automatic IPs are # assigned at runtime. contextkey = '.'.join((name, 'docker_container.running')) def _get_nets(): if contextkey not in __context__: new_container_info = \ __salt__['docker.inspect_container'](name) __context__[contextkey] = new_container_info.get( 'NetworkSettings', {}).get('Networks', {}) return __context__[contextkey] autoip_keys = __opts__['docker.compare_container_networks'].get('automatic', []) for net_name, net_changes in six.iteritems( ret['changes'].get('container', {}).get('Networks', {})): if 'IPConfiguration' in net_changes \ and net_changes['IPConfiguration']['new'] == 'automatic': for key in autoip_keys: val = _get_nets().get(net_name, {}).get(key) if val: net_changes[key] = {'old': None, 'new': val} try: net_changes.pop('IPConfiguration') except KeyError: pass __context__.pop(contextkey, None) if pre_state != post_state: ret['changes']['state'] = {'old': pre_state, 'new': post_state} if pre_state is not None: comments.append( 'State changed from \'{0}\' to \'{1}\''.format( pre_state, post_state ) ) if exists and current_image_id != image_id: comments.append('Container has a new image') ret['changes']['image'] = {'old': current_image_id, 'new': image_id} if post_state != 'running' and start: ret['result'] = False comments.append('Container is not running') return _format_comments(ret, comments)
[ "def", "running", "(", "name", ",", "image", "=", "None", ",", "skip_translate", "=", "None", ",", "ignore_collisions", "=", "False", ",", "validate_ip_addrs", "=", "True", ",", "force", "=", "False", ",", "watch_action", "=", "'force'", ",", "start", "=",...
Ensure that a container with a specific configuration is present and running name Name of the container image Image to use for the container .. note:: This state will pull the image if it is not present. However, if the image needs to be built from a Dockerfile or loaded from a saved image, or if you would like to use requisites to trigger a replacement of the container when the image is updated, then the :py:func:`docker_image.present <salt.states.dockermod.image_present>` state should be used to manage the image. .. versionchanged:: 2018.3.0 If no tag is specified in the image name, and nothing matching the specified image is pulled on the minion, the ``docker pull`` that retrieves the image will pull *all tags* for the image. A tag of ``latest`` is no longer implicit for the pull. For this reason, it is recommended to specify the image in ``repo:tag`` notation. .. _docker-container-running-skip-translate: skip_translate This function translates Salt CLI or SLS input into the format which docker-py_ expects. However, in the event that Salt's translation logic fails (due to potential changes in the Docker Remote API, or to bugs in the translation code), this argument can be used to exert granular control over which arguments are translated and which are not. Pass this argument as a comma-separated list (or Python list) of arguments, and translation for each passed argument name will be skipped. Alternatively, pass ``True`` and *all* translation will be skipped. Skipping tranlsation allows for arguments to be formatted directly in the format which docker-py_ expects. This allows for API changes and other issues to be more easily worked around. An example of using this option to skip translation would be: For example, imagine that there is an issue with processing the ``port_bindings`` argument, and the following configuration no longer works as expected: .. 
code-block:: yaml mycontainer: docker_container.running: - image: 7.3.1611 - port_bindings: - 10.2.9.10:8080:80 By using ``skip_translate``, you can forego the input translation and configure the port binding in the format docker-py_ needs: .. code-block:: yaml mycontainer: docker_container.running: - image: 7.3.1611 - skip_translate: port_bindings - port_bindings: {8080: [('10.2.9.10', 80)], '4193/udp': 9314} See the following links for more information: - `docker-py Low-level API`_ - `Docker Engine API`_ .. _docker-py: https://pypi.python.org/pypi/docker-py .. _`docker-py Low-level API`: http://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.create_container .. _`Docker Engine API`: https://docs.docker.com/engine/api/v1.33/#operation/ContainerCreate ignore_collisions : False Since many of docker-py_'s arguments differ in name from their CLI counterparts (with which most Docker users are more familiar), Salt detects usage of these and aliases them to the docker-py_ version of that argument so that both CLI and API versions of a given argument are supported. However, if both the alias and the docker-py_ version of the same argument (e.g. ``env`` and ``environment``) are used, an error will be raised. Set this argument to ``True`` to suppress these errors and keep the docker-py_ version of the argument. validate_ip_addrs : True For parameters which accept IP addresses as input, IP address validation will be performed. To disable, set this to ``False`` force : False Set this parameter to ``True`` to force Salt to re-create the container irrespective of whether or not it is configured as desired. watch_action : force Control what type of action is taken when this state :ref:`watches <requisites-watch>` another state that has changes. The default action is ``force``, which runs the state with ``force`` set to ``True``, triggering a rebuild of the container. If any other value is passed, it will be assumed to be a kill signal. 
If the container matches the specified configuration, and is running, then the action will be to send that signal to the container. Kill signals can be either strings or numbers, and are defined in the **Standard Signals** section of the ``signal(7)`` manpage. Run ``man 7 signal`` on a Linux host to browse this manpage. For example: .. code-block:: yaml mycontainer: docker_container.running: - image: busybox - watch_action: SIGHUP - watch: - file: some_file .. note:: If the container differs from the specified configuration, or is not running, then instead of sending a signal to the container, the container will be re-created/started and no signal will be sent. start : True Set to ``False`` to suppress starting of the container if it exists, matches the desired configuration, but is not running. This is useful for data-only containers, or for non-daemonized container processes, such as the Django ``migrate`` and ``collectstatic`` commands. In instances such as this, the container only needs to be started the first time. shutdown_timeout If the container needs to be replaced, the container will be stopped using :py:func:`docker.stop <salt.modules.dockermod.stop>`. If a ``shutdown_timout`` is not set, and the container was created using ``stop_timeout``, that timeout will be used. If neither of these values were set, then a timeout of 10 seconds will be used. .. versionchanged:: 2017.7.0 This option was renamed from ``stop_timeout`` to ``shutdown_timeout`` to accommodate the ``stop_timeout`` container configuration setting. client_timeout : 60 Timeout in seconds for the Docker client. This is not a timeout for this function, but for receiving a response from the API. .. note:: This is only used if Salt needs to pull the requested image. .. _salt-states-docker-container-network-management: **NETWORK MANAGEMENT** .. versionadded:: 2018.3.0 .. 
versionchanged:: 2019.2.0 If the ``networks`` option is used, any networks (including the default ``bridge`` network) which are not specified will be disconnected. The ``networks`` argument can be used to ensure that a container is attached to one or more networks. Optionally, arguments can be passed to the networks. In the example below, ``net1`` is being configured with arguments, while ``net2`` and ``bridge`` are being configured *without* arguments: .. code-block:: yaml foo: docker_container.running: - image: myuser/myimage:foo - networks: - net1: - aliases: - bar - baz - ipv4_address: 10.0.20.50 - net2 - bridge - require: - docker_network: net1 - docker_network: net2 The supported arguments are the ones from the docker-py's `connect_container_to_network`_ function (other than ``container`` and ``net_id``). .. important:: Unlike with the arguments described in the **CONTAINER CONFIGURATION PARAMETERS** section below, these network configuration parameters are not translated at all. Consult the `connect_container_to_network`_ documentation for the correct type/format of data to pass. .. _`connect_container_to_network`: https://docker-py.readthedocs.io/en/stable/api.html#docker.api.network.NetworkApiMixin.connect_container_to_network To start a container with no network connectivity (only possible in 2019.2.0 and later) pass this option as an empty list. For example: .. code-block:: yaml foo: docker_container.running: - image: myuser/myimage:foo - networks: [] **CONTAINER CONFIGURATION PARAMETERS** auto_remove (or *rm*) : False Enable auto-removal of the container on daemon side when the container’s process exits (analogous to running a docker container with ``--rm`` on the CLI). .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - auto_remove: True binds Files/directories to bind mount. 
Each bind mount should be passed in one of the following formats: - ``<host_path>:<container_path>`` - ``host_path`` is mounted within the container as ``container_path`` with read-write access. - ``<host_path>:<container_path>:<selinux_context>`` - ``host_path`` is mounted within the container as ``container_path`` with read-write access. Additionally, the specified selinux context will be set within the container. - ``<host_path>:<container_path>:<read_only>`` - ``host_path`` is mounted within the container as ``container_path``, with the read-only or read-write setting explicitly defined. - ``<host_path>:<container_path>:<read_only>,<selinux_context>`` - ``host_path`` is mounted within the container as ``container_path``, with the read-only or read-write setting explicitly defined. Additionally, the specified selinux context will be set within the container. ``<read_only>`` can be either ``rw`` for read-write access, or ``ro`` for read-only access. When omitted, it is assumed to be read-write. ``<selinux_context>`` can be ``z`` if the volume is shared between multiple containers, or ``Z`` if the volume should be private. .. note:: When both ``<read_only>`` and ``<selinux_context>`` are specified, there must be a comma before ``<selinux_context>``. Binds can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - binds: /srv/www:/var/www:ro,/etc/foo.conf:/usr/local/etc/foo.conf:rw .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - binds: - /srv/www:/var/www:ro - /home/myuser/conf/foo.conf:/etc/foo.conf:rw However, in cases where both ro/rw and an selinux context are combined, the only option is to use a YAML list, like so: .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - binds: - /srv/www:/var/www:ro,Z - /home/myuser/conf/foo.conf:/etc/foo.conf:rw,Z Since the second bind in the previous example is mounted read-write, the ``rw`` and comma can be dropped. For example: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - binds: - /srv/www:/var/www:ro,Z - /home/myuser/conf/foo.conf:/etc/foo.conf:Z blkio_weight Block IO weight (relative weight), accepts a weight value between 10 and 1000. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - blkio_weight: 100 blkio_weight_device Block IO weight (relative device weight), specified as a list of expressions in the format ``PATH:RATE`` .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - blkio_weight_device: /dev/sda:100 cap_add List of capabilities to add within the container. Can be expressed as a comma-separated list or a Python list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cap_add: SYS_ADMIN,MKNOD .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cap_add: - SYS_ADMIN - MKNOD .. note:: This option requires Docker 1.2.0 or newer. cap_drop List of capabilities to drop within the container. Can be expressed as a comma-separated list or a Python list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cap_drop: SYS_ADMIN,MKNOD .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cap_drop: - SYS_ADMIN - MKNOD .. note:: This option requires Docker 1.2.0 or newer. command (or *cmd*) Command to run in the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - command: bash cpuset_cpus (or *cpuset*) CPUs on which which to allow execution, specified as a string containing a range (e.g. 
``0-3``) or a comma-separated list of CPUs (e.g. ``0,1``). .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpuset_cpus: "0,1" cpuset_mems Memory nodes on which which to allow execution, specified as a string containing a range (e.g. ``0-3``) or a comma-separated list of MEMs (e.g. ``0,1``). Only effective on NUMA systems. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpuset_mems: "0,1" cpu_group The length of a CPU period in microseconds .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpu_group: 100000 cpu_period Microseconds of CPU time that the container can get in a CPU period .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpu_period: 50000 cpu_shares CPU shares (relative weight), specified as an integer between 2 and 1024. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpu_shares: 512 detach : False If ``True``, run the container's command in the background (daemon mode) .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - detach: True devices List of host devices to expose within the container. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices: /dev/net/tun,/dev/xvda1:/dev/xvda1,/dev/xvdb1:/dev/xvdb1:r .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices: - /dev/net/tun - /dev/xvda1:/dev/xvda1 - /dev/xvdb1:/dev/xvdb1:r device_read_bps Limit read rate (bytes per second) from a device, specified as a list of expressions in the format ``PATH:RATE``, where ``RATE`` is either an integer number of bytes, or a string ending in ``kb``, ``mb``, or ``gb``. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_bps: /dev/sda:1mb,/dev/sdb:5mb .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_bps: - /dev/sda:1mb - /dev/sdb:5mb device_read_iops Limit read rate (I/O per second) from a device, specified as a list of expressions in the format ``PATH:RATE``, where ``RATE`` is a number of I/O operations. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_iops: /dev/sda:1000,/dev/sdb:500 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_iops: - /dev/sda:1000 - /dev/sdb:500 device_write_bps Limit write rate (bytes per second) from a device, specified as a list of expressions in the format ``PATH:RATE``, where ``RATE`` is either an integer number of bytes, or a string ending in ``kb``, ``mb``, or ``gb``. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_write_bps: /dev/sda:1mb,/dev/sdb:5mb .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_write_bps: - /dev/sda:1mb - /dev/sdb:5mb device_read_iops Limit write rate (I/O per second) from a device, specified as a list of expressions in the format ``PATH:RATE``, where ``RATE`` is a number of I/O operations. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_iops: /dev/sda:1000,/dev/sdb:500 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_iops: - /dev/sda:1000 - /dev/sdb:500 dns List of DNS nameservers. Can be expressed as a comma-separated list or a YAML list. 
The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns: 8.8.8.8,8.8.4.4 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns: - 8.8.8.8 - 8.8.4.4 .. note:: To skip IP address validation, use ``validate_ip_addrs=False`` dns_opt Additional options to be added to the container’s ``resolv.conf`` file. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns_opt: ndots:9 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns_opt: - ndots:9 dns_search List of DNS search domains. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns_search: foo1.domain.tld,foo2.domain.tld .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns_search: - foo1.domain.tld - foo2.domain.tld domainname The domain name to use for the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dommainname: domain.tld entrypoint Entrypoint for the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - entrypoint: "mycmd --arg1 --arg2" This argument can also be specified as a list: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - entrypoint: - mycmd - --arg1 - --arg2 environment Either a list of variable/value mappings, or a list of strings in the format ``VARNAME=value``. The below three examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - environment: - VAR1: value - VAR2: value .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - environment: 'VAR1=value,VAR2=value' .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - environment: - VAR1=value - VAR2=value extra_hosts Additional hosts to add to the container's /etc/hosts file. Can be expressed as a comma-separated list or a Python list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - extra_hosts: web1:10.9.8.7,web2:10.9.8.8 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - extra_hosts: - web1:10.9.8.7 - web2:10.9.8.8 .. note:: To skip IP address validation, use ``validate_ip_addrs=False`` .. note:: This option requires Docker 1.3.0 or newer. group_add List of additional group names and/or IDs that the container process will run as. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - group_add: web,network .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - group_add: - web - network hostname Hostname of the container. If not provided, the value passed as the container's``name`` will be used for the hostname. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - hostname: web1 .. warning:: ``hostname`` cannot be set if ``network_mode`` is set to ``host``. The below example will result in an error: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - hostname: web1 - network_mode: host interactive (or *stdin_open*) : False Leave stdin open, even if not attached .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - interactive: True ipc_mode (or *ipc*) Set the IPC mode for the container. 
The default behavior is to create a private IPC namespace for the container, but this option can be used to change that behavior: - ``container:<container_name_or_id>`` reuses another container shared memory, semaphores and message queues - ``host``: use the host's shared memory, semaphores and message queues .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ipc_mode: container:foo .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ipc_mode: host .. warning:: Using ``host`` gives the container full access to local shared memory and is therefore considered insecure. isolation Specifies the type of isolation technology used by containers .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - isolation: hyperv .. note:: The default value on Windows server is ``process``, while the default value on Windows client is ``hyperv``. On Linux, only ``default`` is supported. labels Add metadata to the container. Labels can be set both with and without values, and labels with values can be passed either as ``key=value`` or ``key: value`` pairs. For example, while the below would be very confusing to read, it is technically valid, and demonstrates the different ways in which labels can be passed: .. code-block:: yaml mynet: docker_network.present: - labels: - foo - bar=baz - hello: world The labels can also simply be passed as a YAML dictionary, though this can be error-prone due to some :ref:`idiosyncrasies <yaml-idiosyncrasies>` with how PyYAML loads nested data structures: .. code-block:: yaml foo: docker_network.present: - labels: foo: '' bar: baz hello: world .. versionchanged:: 2018.3.0 Methods for specifying labels can now be mixed. Earlier releases required either labels with or without values. links Link this container to another. Links can be specified as a list of mappings or a comma-separated or Python list of expressions in the format ``<container_name_or_id>:<link_alias>``. 
The below three examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - links: - web1: link1 - web2: link2 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - links: web1:link1,web2:link2 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - links: - web1:link1 - web2:link2 log_driver and log_opt Set container's logging driver and options to configure that driver. Requires Docker 1.6 or newer. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - log_driver: syslog - log_opt: - syslog-address: tcp://192.168.0.42 - syslog-facility: daemon The ``log_opt`` can also be expressed as a comma-separated or YAML list of ``key=value`` pairs. The below two examples are equivalent to the above one: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - log_driver: syslog - log_opt: "syslog-address=tcp://192.168.0.42,syslog-facility=daemon" .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - log_driver: syslog - log_opt: - syslog-address=tcp://192.168.0.42 - syslog-facility=daemon .. note:: The logging driver feature was improved in Docker 1.13 introducing option name changes. Please see Docker's `Configure logging drivers`_ documentation for more information. .. _`Configure logging drivers`: https://docs.docker.com/engine/admin/logging/overview/ lxc_conf Additional LXC configuration parameters to set before starting the container. Either a list of variable/value mappings, or a list of strings in the format ``VARNAME=value``. The below three examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - lxc_conf: - lxc.utsname: docker - lxc.arch: x86_64 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - lxc_conf: lxc.utsname=docker,lxc.arch=x86_64 .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - lxc_conf: - lxc.utsname=docker - lxc.arch=x86_64 .. note:: These LXC configuration parameters will only have the desired effect if the container is using the LXC execution driver, which has been deprecated for some time. mac_address MAC address to use for the container. If not specified, a random MAC address will be used. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - mac_address: 01:23:45:67:89:0a mem_limit (or *memory*) : 0 Memory limit. Can be specified in bytes or using single-letter units (i.e. ``512M``, ``2G``, etc.). A value of ``0`` (the default) means no memory limit. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - mem_limit: 512M mem_swappiness Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - mem_swappiness: 60 memswap_limit (or *memory_swap*) : -1 Total memory limit (memory plus swap). Set to ``-1`` to disable swap. A value of ``0`` means no swap limit. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - memswap_limit: 1G network_disabled : False If ``True``, networking will be disabled within the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - network_disabled: True network_mode : bridge One of the following: - ``bridge`` - Creates a new network stack for the container on the docker bridge - ``none`` - No networking (equivalent of the Docker CLI argument ``--net=none``). Not to be confused with Python's ``None``. - ``container:<name_or_id>`` - Reuses another container's network stack - ``host`` - Use the host's network stack inside the container - Any name that identifies an existing network that might be created with ``docker.network_present``. .. 
warning:: Using ``host`` mode gives the container full access to the hosts system's services (such as D-bus), and is therefore considered insecure. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - network_mode: "none" .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - network_mode: container:web1 oom_kill_disable Whether to disable OOM killer .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - oom_kill_disable: False oom_score_adj An integer value containing the score given to the container in order to tune OOM killer preferences .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - oom_score_adj: 500 pid_mode Set to ``host`` to use the host container's PID namespace within the container. Requires Docker 1.5.0 or newer. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - pid_mode: host .. note:: This option requires Docker 1.5.0 or newer. pids_limit Set the container's PID limit. Set to ``-1`` for unlimited. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - pids_limit: 2000 port_bindings (or *publish*) Bind exposed ports. Port bindings should be passed in the same way as the ``--publish`` argument to the ``docker run`` CLI command: - ``ip:hostPort:containerPort`` - Bind a specific IP and port on the host to a specific port within the container. - ``ip::containerPort`` - Bind a specific IP and an ephemeral port to a specific port within the container. - ``hostPort:containerPort`` - Bind a specific port on all of the host's interfaces to a specific port within the container. - ``containerPort`` - Bind an ephemeral port on all of the host's interfaces to a specific port within the container. Multiple bindings can be separated by commas, or expressed as a YAML list, and port ranges can be defined using dashes. The below two examples are equivalent: .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - port_bindings: "4505-4506:14505-14506,2123:2123/udp,8080" .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - port_bindings: - 4505-4506:14505-14506 - 2123:2123/udp - 8080 .. note:: When specifying a protocol, it must be passed in the ``containerPort`` value, as seen in the examples above. ports A list of ports to expose on the container. Can either be a comma-separated list or a YAML list. If the protocol is omitted, the port will be assumed to be a TCP port. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ports: 1111,2222/udp .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ports: - 1111 - 2222/udp privileged : False If ``True``, runs the exec process with extended privileges .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - privileged: True publish_all_ports (or *publish_all*) : False Publish all ports to the host .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ports: 8080 - publish_all_ports: True read_only : False If ``True``, mount the container’s root filesystem as read only .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - read_only: True restart_policy (or *restart*) Set a restart policy for the container. Must be passed as a string in the format ``policy[:retry_count]`` where ``policy`` is one of ``always``, ``unless-stopped``, or ``on-failure``, and ``retry_count`` is an optional limit to the number of retries. The retry count is ignored when using the ``always`` or ``unless-stopped`` restart policy. .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - restart_policy: on-failure:5 bar: docker_container.running: - image: bar/baz:latest - restart_policy: always security_opt (or *security_opts*): Security configuration for MLS systems such as SELinux and AppArmor. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - security_opt: apparmor:unconfined .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - security_opt: - apparmor:unconfined .. important:: Some security options can contain commas. In these cases, this argument *must* be passed as a Python list, as splitting by comma will result in an invalid configuration. .. note:: See the documentation for security_opt at https://docs.docker.com/engine/reference/run/#security-configuration shm_size Size of /dev/shm .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - shm_size: 128M stop_signal Specify the signal docker will send to the container when stopping. Useful when running systemd as PID 1 inside the container. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - stop_signal: SIGRTMIN+3 .. note:: This option requires Docker 1.9.0 or newer and docker-py 1.7.0 or newer. .. versionadded:: 2016.11.0 stop_timeout Timeout to stop the container, in seconds .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - stop_timeout: 5 .. note:: In releases prior to 2017.7.0, this option was not set in the container configuration, but rather this timeout was enforced only when shutting down an existing container to replace it. To remove the ambiguity, and to allow for the container to have a stop timeout set for it, the old ``stop_timeout`` argument has been renamed to ``shutdown_timeout``, while ``stop_timeout`` now refer's to the container's configured stop timeout. 
storage_opt Storage driver options for the container. Can be either a list of strings in the format ``option=value``, or a list of mappings between option and value. The below three examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - storage_opt: - dm.basesize: 40G .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - storage_opt: dm.basesize=40G .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - storage_opt: - dm.basesize=40G sysctls (or *sysctl*) Set sysctl options for the container. Can be either a list of strings in the format ``option=value``, or a list of mappings between option and value. The below three examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - sysctls: - fs.nr_open: 1048576 - kernel.pid_max: 32768 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - sysctls: fs.nr_open=1048576,kernel.pid_max=32768 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - sysctls: - fs.nr_open=1048576 - kernel.pid_max=32768 tmpfs A map of container directories which should be replaced by tmpfs mounts and their corresponding mount options. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - tmpfs: - /run: rw,noexec,nosuid,size=65536k tty : False Attach TTYs .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - tty: True ulimits List of ulimits. These limits should be passed in the format ``<ulimit_name>:<soft_limit>:<hard_limit>``, with the hard limit being optional. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ulimits: nofile=1024:1024,nproc=60 .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ulimits: - nofile=1024:1024 - nproc=60 user User under which to run exec process .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - user: foo userns_mode (or *user_ns_mode*) Sets the user namsepace mode, when the user namespace remapping option is enabled .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - userns_mode: host volumes (or *volume*) List of directories to expose as volumes. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - volumes: /mnt/vol1,/mnt/vol2 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - volumes: - /mnt/vol1 - /mnt/vol2 volumes_from Container names or IDs from which the container will get volumes. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - volumes_from: foo .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - volumes_from: - foo volume_driver sets the container's volume driver .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - volume_driver: foobar working_dir (or *workdir*) Working directory inside the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - working_dir: /var/log/nginx
[ "Ensure", "that", "a", "container", "with", "a", "specific", "configuration", "is", "present", "and", "running" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/docker_container.py#L220-L2097
train
saltstack/salt
salt/states/docker_container.py
run
def run(name, image=None, onlyif=None, unless=None, creates=None, bg=False, failhard=True, replace=False, force=False, skip_translate=None, ignore_collisions=False, validate_ip_addrs=True, client_timeout=salt.utils.docker.CLIENT_TIMEOUT, **kwargs): ''' .. versionadded:: 2018.3.0 .. note:: If no tag is specified in the image name, and nothing matching the specified image is pulled on the minion, the ``docker pull`` that retrieves the image will pull *all tags* for the image. A tag of ``latest`` is not implicit for the pull. For this reason, it is recommended to specify the image in ``repo:tag`` notation. Like the :py:func:`cmd.run <salt.states.cmd.run>` state, only for Docker. Does the equivalent of a ``docker run`` and returns information about the container that was created, as well as its output. This state accepts the same arguments as :py:func:`docker_container.running <salt.states.docker_container.running>`, with the exception of ``watch_action``, ``start``, and ``shutdown_timeout`` (though the ``force`` argument has a different meaning in this state). In addition, this state accepts the arguments from :py:func:`docker.logs <salt.modules.dockermod.logs>`, with the exception of ``follow``, to control how logs are returned. Additionally, the following arguments are supported: onlyif A command or list of commands to run as a check. The container will only run if any of the specified commands returns a zero exit status. unless A command or list of commands to run as a check. The container will only run if any of the specified commands returns a non-zero exit status. creates A path or list of paths. Only run if one or more of the specified paths do not exist on the minion. bg : False If ``True``, run container in background and do not await or deliver its results. .. note:: This may not be useful in cases where other states depend on the results of this state. 
Also, the logs will be inaccessible once the container exits if ``auto_remove`` is set to ``True``, so keep this in mind. failhard : True If ``True``, the state will return a ``False`` result if the exit code of the container is non-zero. When this argument is set to ``False``, the state will return a ``True`` result regardless of the container's exit code. .. note:: This has no effect if ``bg`` is set to ``True``. replace : False If ``True``, and if the named container already exists, this will remove the existing container. The default behavior is to return a ``False`` result when the container already exists. force : False If ``True``, and the named container already exists, *and* ``replace`` is also set to ``True``, then the container will be forcibly removed. Otherwise, the state will not proceed and will return a ``False`` result. CLI Examples: .. code-block:: bash salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh **USAGE EXAMPLE** .. code-block:: jinja {% set pkg_version = salt.pillar.get('pkg_version', '1.0-1') %} build_package: docker_container.run: - image: myuser/builder:latest - binds: /home/myuser/builds:/build_dir - command: /scripts/build.sh {{ pkg_version }} - creates: /home/myuser/builds/myapp-{{ pkg_version }}.noarch.rpm - replace: True - networks: - mynet - require: - docker_network: mynet ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} kwargs = salt.utils.args.clean_kwargs(**kwargs) for unsupported in ('watch_action', 'start', 'shutdown_timeout', 'follow'): if unsupported in kwargs: ret['result'] = False ret['comment'] = 'The \'{0}\' argument is not supported'.format( unsupported ) return ret if image is None: ret['result'] = False ret['comment'] = 'The \'image\' argument is required' return ret elif not isinstance(image, six.string_types): image = six.text_type(image) cret = mod_run_check(onlyif, unless, creates) if isinstance(cret, dict): ret.update(cret) return ret try: if 'networks' 
in kwargs and kwargs['networks'] is not None: kwargs['networks'] = _parse_networks(kwargs['networks']) _resolve_image(ret, image, client_timeout) except CommandExecutionError as exc: ret['result'] = False if exc.info is not None: return _format_comments(ret, exc.info) else: ret['comment'] = exc.__str__() return ret cret = mod_run_check(onlyif, unless, creates) if isinstance(cret, dict): ret.update(cret) return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Container would be run{0}'.format( ' in the background' if bg else '' ) return ret if bg: remove = False else: # We're doing a bit of a hack here, so that we can get the exit code after # the container exits. Since the input translation and compilation of the # host_config take place within a private function of the execution module, # we manually do the handling for auto_remove here and extract if (if # present) from the kwargs. This allows us to explicitly pass auto_remove # as False when we run the container, so it is still present upon exit (and # the exit code can be retrieved). We can then remove the container # manually if auto_remove is True. 
remove = None for item in ('auto_remove', 'rm'): try: val = kwargs.pop(item) except KeyError: continue if remove is not None: if not ignore_collisions: ret['result'] = False ret['comment'] = ( '\'rm\' is an alias for \'auto_remove\', they cannot ' 'both be used' ) return ret else: remove = bool(val) if remove is not None: # We popped off the value, so replace it with False kwargs['auto_remove'] = False else: remove = False try: ret['changes'] = __salt__['docker.run_container']( image, name=name, skip_translate=skip_translate, ignore_collisions=ignore_collisions, validate_ip_addrs=validate_ip_addrs, client_timeout=client_timeout, bg=bg, replace=replace, force=force, **kwargs) except Exception as exc: log.exception('Encountered error running container') ret['result'] = False ret['comment'] = 'Encountered error running container: {0}'.format(exc) else: if bg: ret['comment'] = 'Container was run in the background' else: try: retcode = ret['changes']['ExitCode'] except KeyError: pass else: ret['result'] = False if failhard and retcode != 0 else True ret['comment'] = ( 'Container ran and exited with a return code of ' '{0}'.format(retcode) ) if remove: id_ = ret.get('changes', {}).get('Id') if id_: try: __salt__['docker.rm'](ret['changes']['Id']) except CommandExecutionError as exc: ret.setdefault('warnings', []).append( 'Failed to auto_remove container: {0}'.format(exc) ) return ret
python
def run(name, image=None, onlyif=None, unless=None, creates=None, bg=False, failhard=True, replace=False, force=False, skip_translate=None, ignore_collisions=False, validate_ip_addrs=True, client_timeout=salt.utils.docker.CLIENT_TIMEOUT, **kwargs): ''' .. versionadded:: 2018.3.0 .. note:: If no tag is specified in the image name, and nothing matching the specified image is pulled on the minion, the ``docker pull`` that retrieves the image will pull *all tags* for the image. A tag of ``latest`` is not implicit for the pull. For this reason, it is recommended to specify the image in ``repo:tag`` notation. Like the :py:func:`cmd.run <salt.states.cmd.run>` state, only for Docker. Does the equivalent of a ``docker run`` and returns information about the container that was created, as well as its output. This state accepts the same arguments as :py:func:`docker_container.running <salt.states.docker_container.running>`, with the exception of ``watch_action``, ``start``, and ``shutdown_timeout`` (though the ``force`` argument has a different meaning in this state). In addition, this state accepts the arguments from :py:func:`docker.logs <salt.modules.dockermod.logs>`, with the exception of ``follow``, to control how logs are returned. Additionally, the following arguments are supported: onlyif A command or list of commands to run as a check. The container will only run if any of the specified commands returns a zero exit status. unless A command or list of commands to run as a check. The container will only run if any of the specified commands returns a non-zero exit status. creates A path or list of paths. Only run if one or more of the specified paths do not exist on the minion. bg : False If ``True``, run container in background and do not await or deliver its results. .. note:: This may not be useful in cases where other states depend on the results of this state. 
Also, the logs will be inaccessible once the container exits if ``auto_remove`` is set to ``True``, so keep this in mind. failhard : True If ``True``, the state will return a ``False`` result if the exit code of the container is non-zero. When this argument is set to ``False``, the state will return a ``True`` result regardless of the container's exit code. .. note:: This has no effect if ``bg`` is set to ``True``. replace : False If ``True``, and if the named container already exists, this will remove the existing container. The default behavior is to return a ``False`` result when the container already exists. force : False If ``True``, and the named container already exists, *and* ``replace`` is also set to ``True``, then the container will be forcibly removed. Otherwise, the state will not proceed and will return a ``False`` result. CLI Examples: .. code-block:: bash salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh **USAGE EXAMPLE** .. code-block:: jinja {% set pkg_version = salt.pillar.get('pkg_version', '1.0-1') %} build_package: docker_container.run: - image: myuser/builder:latest - binds: /home/myuser/builds:/build_dir - command: /scripts/build.sh {{ pkg_version }} - creates: /home/myuser/builds/myapp-{{ pkg_version }}.noarch.rpm - replace: True - networks: - mynet - require: - docker_network: mynet ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} kwargs = salt.utils.args.clean_kwargs(**kwargs) for unsupported in ('watch_action', 'start', 'shutdown_timeout', 'follow'): if unsupported in kwargs: ret['result'] = False ret['comment'] = 'The \'{0}\' argument is not supported'.format( unsupported ) return ret if image is None: ret['result'] = False ret['comment'] = 'The \'image\' argument is required' return ret elif not isinstance(image, six.string_types): image = six.text_type(image) cret = mod_run_check(onlyif, unless, creates) if isinstance(cret, dict): ret.update(cret) return ret try: if 'networks' 
in kwargs and kwargs['networks'] is not None: kwargs['networks'] = _parse_networks(kwargs['networks']) _resolve_image(ret, image, client_timeout) except CommandExecutionError as exc: ret['result'] = False if exc.info is not None: return _format_comments(ret, exc.info) else: ret['comment'] = exc.__str__() return ret cret = mod_run_check(onlyif, unless, creates) if isinstance(cret, dict): ret.update(cret) return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Container would be run{0}'.format( ' in the background' if bg else '' ) return ret if bg: remove = False else: # We're doing a bit of a hack here, so that we can get the exit code after # the container exits. Since the input translation and compilation of the # host_config take place within a private function of the execution module, # we manually do the handling for auto_remove here and extract if (if # present) from the kwargs. This allows us to explicitly pass auto_remove # as False when we run the container, so it is still present upon exit (and # the exit code can be retrieved). We can then remove the container # manually if auto_remove is True. 
remove = None for item in ('auto_remove', 'rm'): try: val = kwargs.pop(item) except KeyError: continue if remove is not None: if not ignore_collisions: ret['result'] = False ret['comment'] = ( '\'rm\' is an alias for \'auto_remove\', they cannot ' 'both be used' ) return ret else: remove = bool(val) if remove is not None: # We popped off the value, so replace it with False kwargs['auto_remove'] = False else: remove = False try: ret['changes'] = __salt__['docker.run_container']( image, name=name, skip_translate=skip_translate, ignore_collisions=ignore_collisions, validate_ip_addrs=validate_ip_addrs, client_timeout=client_timeout, bg=bg, replace=replace, force=force, **kwargs) except Exception as exc: log.exception('Encountered error running container') ret['result'] = False ret['comment'] = 'Encountered error running container: {0}'.format(exc) else: if bg: ret['comment'] = 'Container was run in the background' else: try: retcode = ret['changes']['ExitCode'] except KeyError: pass else: ret['result'] = False if failhard and retcode != 0 else True ret['comment'] = ( 'Container ran and exited with a return code of ' '{0}'.format(retcode) ) if remove: id_ = ret.get('changes', {}).get('Id') if id_: try: __salt__['docker.rm'](ret['changes']['Id']) except CommandExecutionError as exc: ret.setdefault('warnings', []).append( 'Failed to auto_remove container: {0}'.format(exc) ) return ret
[ "def", "run", "(", "name", ",", "image", "=", "None", ",", "onlyif", "=", "None", ",", "unless", "=", "None", ",", "creates", "=", "None", ",", "bg", "=", "False", ",", "failhard", "=", "True", ",", "replace", "=", "False", ",", "force", "=", "Fa...
.. versionadded:: 2018.3.0 .. note:: If no tag is specified in the image name, and nothing matching the specified image is pulled on the minion, the ``docker pull`` that retrieves the image will pull *all tags* for the image. A tag of ``latest`` is not implicit for the pull. For this reason, it is recommended to specify the image in ``repo:tag`` notation. Like the :py:func:`cmd.run <salt.states.cmd.run>` state, only for Docker. Does the equivalent of a ``docker run`` and returns information about the container that was created, as well as its output. This state accepts the same arguments as :py:func:`docker_container.running <salt.states.docker_container.running>`, with the exception of ``watch_action``, ``start``, and ``shutdown_timeout`` (though the ``force`` argument has a different meaning in this state). In addition, this state accepts the arguments from :py:func:`docker.logs <salt.modules.dockermod.logs>`, with the exception of ``follow``, to control how logs are returned. Additionally, the following arguments are supported: onlyif A command or list of commands to run as a check. The container will only run if any of the specified commands returns a zero exit status. unless A command or list of commands to run as a check. The container will only run if any of the specified commands returns a non-zero exit status. creates A path or list of paths. Only run if one or more of the specified paths do not exist on the minion. bg : False If ``True``, run container in background and do not await or deliver its results. .. note:: This may not be useful in cases where other states depend on the results of this state. Also, the logs will be inaccessible once the container exits if ``auto_remove`` is set to ``True``, so keep this in mind. failhard : True If ``True``, the state will return a ``False`` result if the exit code of the container is non-zero. When this argument is set to ``False``, the state will return a ``True`` result regardless of the container's exit code. 
.. note:: This has no effect if ``bg`` is set to ``True``. replace : False If ``True``, and if the named container already exists, this will remove the existing container. The default behavior is to return a ``False`` result when the container already exists. force : False If ``True``, and the named container already exists, *and* ``replace`` is also set to ``True``, then the container will be forcibly removed. Otherwise, the state will not proceed and will return a ``False`` result. CLI Examples: .. code-block:: bash salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh **USAGE EXAMPLE** .. code-block:: jinja {% set pkg_version = salt.pillar.get('pkg_version', '1.0-1') %} build_package: docker_container.run: - image: myuser/builder:latest - binds: /home/myuser/builds:/build_dir - command: /scripts/build.sh {{ pkg_version }} - creates: /home/myuser/builds/myapp-{{ pkg_version }}.noarch.rpm - replace: True - networks: - mynet - require: - docker_network: mynet
[ "..", "versionadded", "::", "2018", ".", "3", ".", "0" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/docker_container.py#L2100-L2330
train
saltstack/salt
salt/states/docker_container.py
stopped
def stopped(name=None, containers=None, shutdown_timeout=None, unpause=False, error_on_absent=True, **kwargs): ''' Ensure that a container (or containers) is stopped name Name or ID of the container containers Run this state on more than one container at a time. The following two examples accomplish the same thing: .. code-block:: yaml stopped_containers: docker_container.stopped: - names: - foo - bar - baz .. code-block:: yaml stopped_containers: docker_container.stopped: - containers: - foo - bar - baz However, the second example will be a bit quicker since Salt will stop all specified containers in a single run, rather than executing the state separately on each image (as it would in the first example). shutdown_timeout Timeout for graceful shutdown of the container. If this timeout is exceeded, the container will be killed. If this value is not passed, then the container's configured ``stop_timeout`` will be observed. If ``stop_timeout`` was also unset on the container, then a timeout of 10 seconds will be used. unpause : False Set to ``True`` to unpause any paused containers before stopping. If unset, then an error will be raised for any container that was paused. error_on_absent : True By default, this state will return an error if any of the specified containers are absent. Set this to ``False`` to suppress that error. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if not name and not containers: ret['comment'] = 'One of \'name\' and \'containers\' must be provided' return ret if containers is not None: if not isinstance(containers, list): ret['comment'] = 'containers must be a list' return ret targets = [] for target in containers: if not isinstance(target, six.string_types): target = six.text_type(target) targets.append(target) elif name: if not isinstance(name, six.string_types): targets = [six.text_type(name)] else: targets = [name] containers = {} for target in targets: try: c_state = __salt__['docker.state'](target) except CommandExecutionError: containers.setdefault('absent', []).append(target) else: containers.setdefault(c_state, []).append(target) errors = [] if error_on_absent and 'absent' in containers: errors.append( 'The following container(s) are absent: {0}'.format( ', '.join(containers['absent']) ) ) if not unpause and 'paused' in containers: ret['result'] = False errors.append( 'The following container(s) are paused: {0}'.format( ', '.join(containers['paused']) ) ) if errors: ret['result'] = False ret['comment'] = '. 
'.join(errors) return ret to_stop = containers.get('running', []) + containers.get('paused', []) if not to_stop: ret['result'] = True if len(targets) == 1: ret['comment'] = 'Container \'{0}\' is '.format(targets[0]) else: ret['comment'] = 'All specified containers are ' if 'absent' in containers: ret['comment'] += 'absent or ' ret['comment'] += 'not running' return ret if __opts__['test']: ret['result'] = None ret['comment'] = ( 'The following container(s) will be stopped: {0}' .format(', '.join(to_stop)) ) return ret stop_errors = [] for target in to_stop: stop_kwargs = {'unpause': unpause} if shutdown_timeout: stop_kwargs['timeout'] = shutdown_timeout changes = __salt__['docker.stop'](target, **stop_kwargs) if changes['result'] is True: ret['changes'][target] = changes else: if 'comment' in changes: stop_errors.append(changes['comment']) else: stop_errors.append( 'Failed to stop container \'{0}\''.format(target) ) if stop_errors: ret['comment'] = '; '.join(stop_errors) return ret ret['result'] = True ret['comment'] = ( 'The following container(s) were stopped: {0}' .format(', '.join(to_stop)) ) return ret
python
def stopped(name=None, containers=None, shutdown_timeout=None, unpause=False, error_on_absent=True, **kwargs): ''' Ensure that a container (or containers) is stopped name Name or ID of the container containers Run this state on more than one container at a time. The following two examples accomplish the same thing: .. code-block:: yaml stopped_containers: docker_container.stopped: - names: - foo - bar - baz .. code-block:: yaml stopped_containers: docker_container.stopped: - containers: - foo - bar - baz However, the second example will be a bit quicker since Salt will stop all specified containers in a single run, rather than executing the state separately on each image (as it would in the first example). shutdown_timeout Timeout for graceful shutdown of the container. If this timeout is exceeded, the container will be killed. If this value is not passed, then the container's configured ``stop_timeout`` will be observed. If ``stop_timeout`` was also unset on the container, then a timeout of 10 seconds will be used. unpause : False Set to ``True`` to unpause any paused containers before stopping. If unset, then an error will be raised for any container that was paused. error_on_absent : True By default, this state will return an error if any of the specified containers are absent. Set this to ``False`` to suppress that error. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if not name and not containers: ret['comment'] = 'One of \'name\' and \'containers\' must be provided' return ret if containers is not None: if not isinstance(containers, list): ret['comment'] = 'containers must be a list' return ret targets = [] for target in containers: if not isinstance(target, six.string_types): target = six.text_type(target) targets.append(target) elif name: if not isinstance(name, six.string_types): targets = [six.text_type(name)] else: targets = [name] containers = {} for target in targets: try: c_state = __salt__['docker.state'](target) except CommandExecutionError: containers.setdefault('absent', []).append(target) else: containers.setdefault(c_state, []).append(target) errors = [] if error_on_absent and 'absent' in containers: errors.append( 'The following container(s) are absent: {0}'.format( ', '.join(containers['absent']) ) ) if not unpause and 'paused' in containers: ret['result'] = False errors.append( 'The following container(s) are paused: {0}'.format( ', '.join(containers['paused']) ) ) if errors: ret['result'] = False ret['comment'] = '. 
'.join(errors) return ret to_stop = containers.get('running', []) + containers.get('paused', []) if not to_stop: ret['result'] = True if len(targets) == 1: ret['comment'] = 'Container \'{0}\' is '.format(targets[0]) else: ret['comment'] = 'All specified containers are ' if 'absent' in containers: ret['comment'] += 'absent or ' ret['comment'] += 'not running' return ret if __opts__['test']: ret['result'] = None ret['comment'] = ( 'The following container(s) will be stopped: {0}' .format(', '.join(to_stop)) ) return ret stop_errors = [] for target in to_stop: stop_kwargs = {'unpause': unpause} if shutdown_timeout: stop_kwargs['timeout'] = shutdown_timeout changes = __salt__['docker.stop'](target, **stop_kwargs) if changes['result'] is True: ret['changes'][target] = changes else: if 'comment' in changes: stop_errors.append(changes['comment']) else: stop_errors.append( 'Failed to stop container \'{0}\''.format(target) ) if stop_errors: ret['comment'] = '; '.join(stop_errors) return ret ret['result'] = True ret['comment'] = ( 'The following container(s) were stopped: {0}' .format(', '.join(to_stop)) ) return ret
[ "def", "stopped", "(", "name", "=", "None", ",", "containers", "=", "None", ",", "shutdown_timeout", "=", "None", ",", "unpause", "=", "False", ",", "error_on_absent", "=", "True", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "n...
Ensure that a container (or containers) is stopped name Name or ID of the container containers Run this state on more than one container at a time. The following two examples accomplish the same thing: .. code-block:: yaml stopped_containers: docker_container.stopped: - names: - foo - bar - baz .. code-block:: yaml stopped_containers: docker_container.stopped: - containers: - foo - bar - baz However, the second example will be a bit quicker since Salt will stop all specified containers in a single run, rather than executing the state separately on each image (as it would in the first example). shutdown_timeout Timeout for graceful shutdown of the container. If this timeout is exceeded, the container will be killed. If this value is not passed, then the container's configured ``stop_timeout`` will be observed. If ``stop_timeout`` was also unset on the container, then a timeout of 10 seconds will be used. unpause : False Set to ``True`` to unpause any paused containers before stopping. If unset, then an error will be raised for any container that was paused. error_on_absent : True By default, this state will return an error if any of the specified containers are absent. Set this to ``False`` to suppress that error.
[ "Ensure", "that", "a", "container", "(", "or", "containers", ")", "is", "stopped" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/docker_container.py#L2333-L2485
train
saltstack/salt
salt/states/docker_container.py
absent
def absent(name, force=False): ''' Ensure that a container is absent name Name of the container force : False Set to ``True`` to remove the container even if it is running Usage Examples: .. code-block:: yaml mycontainer: docker_container.absent multiple_containers: docker_container.absent: - names: - foo - bar - baz ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if name not in __salt__['docker.list_containers'](all=True): ret['result'] = True ret['comment'] = 'Container \'{0}\' does not exist'.format(name) return ret pre_state = __salt__['docker.state'](name) if pre_state != 'stopped' and not force: ret['comment'] = ('Container is running, set force to True to ' 'forcibly remove it') return ret if __opts__['test']: ret['result'] = None ret['comment'] = ('Container \'{0}\' will be removed'.format(name)) return ret try: ret['changes']['removed'] = __salt__['docker.rm'](name, force=force) except Exception as exc: ret['comment'] = ('Failed to remove container \'{0}\': {1}' .format(name, exc)) return ret if name in __salt__['docker.list_containers'](all=True): ret['comment'] = 'Failed to remove container \'{0}\''.format(name) else: if force and pre_state != 'stopped': method = 'Forcibly' else: method = 'Successfully' ret['comment'] = '{0} removed container \'{1}\''.format(method, name) ret['result'] = True return ret
python
def absent(name, force=False): ''' Ensure that a container is absent name Name of the container force : False Set to ``True`` to remove the container even if it is running Usage Examples: .. code-block:: yaml mycontainer: docker_container.absent multiple_containers: docker_container.absent: - names: - foo - bar - baz ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if name not in __salt__['docker.list_containers'](all=True): ret['result'] = True ret['comment'] = 'Container \'{0}\' does not exist'.format(name) return ret pre_state = __salt__['docker.state'](name) if pre_state != 'stopped' and not force: ret['comment'] = ('Container is running, set force to True to ' 'forcibly remove it') return ret if __opts__['test']: ret['result'] = None ret['comment'] = ('Container \'{0}\' will be removed'.format(name)) return ret try: ret['changes']['removed'] = __salt__['docker.rm'](name, force=force) except Exception as exc: ret['comment'] = ('Failed to remove container \'{0}\': {1}' .format(name, exc)) return ret if name in __salt__['docker.list_containers'](all=True): ret['comment'] = 'Failed to remove container \'{0}\''.format(name) else: if force and pre_state != 'stopped': method = 'Forcibly' else: method = 'Successfully' ret['comment'] = '{0} removed container \'{1}\''.format(method, name) ret['result'] = True return ret
[ "def", "absent", "(", "name", ",", "force", "=", "False", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", "}", "if", "name", "not", "in", "__salt__", "...
Ensure that a container is absent name Name of the container force : False Set to ``True`` to remove the container even if it is running Usage Examples: .. code-block:: yaml mycontainer: docker_container.absent multiple_containers: docker_container.absent: - names: - foo - bar - baz
[ "Ensure", "that", "a", "container", "is", "absent" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/docker_container.py#L2488-L2549
train
saltstack/salt
salt/states/docker_container.py
mod_run_check
def mod_run_check(onlyif, unless, creates): ''' Execute the onlyif/unless/creates logic. Returns a result dict if any of the checks fail, otherwise returns True ''' cmd_kwargs = {'use_vt': False, 'bg': False} if onlyif is not None: if isinstance(onlyif, six.string_types): onlyif = [onlyif] if not isinstance(onlyif, list) \ or not all(isinstance(x, six.string_types) for x in onlyif): return {'comment': 'onlyif is not a string or list of strings', 'skip_watch': True, 'result': True} for entry in onlyif: retcode = __salt__['cmd.retcode']( entry, ignore_retcode=True, python_shell=True) if retcode != 0: return { 'comment': 'onlyif command {0} returned exit code of {1}' .format(entry, retcode), 'skip_watch': True, 'result': True } if unless is not None: if isinstance(unless, six.string_types): unless = [unless] if not isinstance(unless, list) \ or not all(isinstance(x, six.string_types) for x in unless): return {'comment': 'unless is not a string or list of strings', 'skip_watch': True, 'result': True} for entry in unless: retcode = __salt__['cmd.retcode']( entry, ignore_retcode=True, python_shell=True) if retcode == 0: return { 'comment': 'unless command {0} returned exit code of {1}' .format(entry, retcode), 'skip_watch': True, 'result': True } if creates is not None: if isinstance(creates, six.string_types): creates = [creates] if not isinstance(creates, list) \ or not all(isinstance(x, six.string_types) for x in creates): return {'comment': 'creates is not a string or list of strings', 'skip_watch': True, 'result': True} if all(os.path.exists(x) for x in creates): return {'comment': 'All specified paths in \'creates\' ' 'argument exist', 'result': True} # No reason to stop, return True return True
python
def mod_run_check(onlyif, unless, creates): ''' Execute the onlyif/unless/creates logic. Returns a result dict if any of the checks fail, otherwise returns True ''' cmd_kwargs = {'use_vt': False, 'bg': False} if onlyif is not None: if isinstance(onlyif, six.string_types): onlyif = [onlyif] if not isinstance(onlyif, list) \ or not all(isinstance(x, six.string_types) for x in onlyif): return {'comment': 'onlyif is not a string or list of strings', 'skip_watch': True, 'result': True} for entry in onlyif: retcode = __salt__['cmd.retcode']( entry, ignore_retcode=True, python_shell=True) if retcode != 0: return { 'comment': 'onlyif command {0} returned exit code of {1}' .format(entry, retcode), 'skip_watch': True, 'result': True } if unless is not None: if isinstance(unless, six.string_types): unless = [unless] if not isinstance(unless, list) \ or not all(isinstance(x, six.string_types) for x in unless): return {'comment': 'unless is not a string or list of strings', 'skip_watch': True, 'result': True} for entry in unless: retcode = __salt__['cmd.retcode']( entry, ignore_retcode=True, python_shell=True) if retcode == 0: return { 'comment': 'unless command {0} returned exit code of {1}' .format(entry, retcode), 'skip_watch': True, 'result': True } if creates is not None: if isinstance(creates, six.string_types): creates = [creates] if not isinstance(creates, list) \ or not all(isinstance(x, six.string_types) for x in creates): return {'comment': 'creates is not a string or list of strings', 'skip_watch': True, 'result': True} if all(os.path.exists(x) for x in creates): return {'comment': 'All specified paths in \'creates\' ' 'argument exist', 'result': True} # No reason to stop, return True return True
[ "def", "mod_run_check", "(", "onlyif", ",", "unless", ",", "creates", ")", ":", "cmd_kwargs", "=", "{", "'use_vt'", ":", "False", ",", "'bg'", ":", "False", "}", "if", "onlyif", "is", "not", "None", ":", "if", "isinstance", "(", "onlyif", ",", "six", ...
Execute the onlyif/unless/creates logic. Returns a result dict if any of the checks fail, otherwise returns True
[ "Execute", "the", "onlyif", "/", "unless", "/", "creates", "logic", ".", "Returns", "a", "result", "dict", "if", "any", "of", "the", "checks", "fail", "otherwise", "returns", "True" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/docker_container.py#L2552-L2615
train
saltstack/salt
salt/states/docker_container.py
mod_watch
def mod_watch(name, sfun=None, **kwargs): ''' The docker_container watcher, called to invoke the watch command. .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. ''' if sfun == 'running': watch_kwargs = copy.deepcopy(kwargs) if watch_kwargs.get('watch_action', 'force') == 'force': watch_kwargs['force'] = True else: watch_kwargs['send_signal'] = True watch_kwargs['force'] = False return running(name, **watch_kwargs) if sfun == 'stopped': return stopped(name, **salt.utils.args.clean_kwargs(**kwargs)) if sfun == 'run': return run(name, **salt.utils.args.clean_kwargs(**kwargs)) return {'name': name, 'changes': {}, 'result': False, 'comment': ('watch requisite is not' ' implemented for {0}'.format(sfun))}
python
def mod_watch(name, sfun=None, **kwargs): ''' The docker_container watcher, called to invoke the watch command. .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. ''' if sfun == 'running': watch_kwargs = copy.deepcopy(kwargs) if watch_kwargs.get('watch_action', 'force') == 'force': watch_kwargs['force'] = True else: watch_kwargs['send_signal'] = True watch_kwargs['force'] = False return running(name, **watch_kwargs) if sfun == 'stopped': return stopped(name, **salt.utils.args.clean_kwargs(**kwargs)) if sfun == 'run': return run(name, **salt.utils.args.clean_kwargs(**kwargs)) return {'name': name, 'changes': {}, 'result': False, 'comment': ('watch requisite is not' ' implemented for {0}'.format(sfun))}
[ "def", "mod_watch", "(", "name", ",", "sfun", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "sfun", "==", "'running'", ":", "watch_kwargs", "=", "copy", ".", "deepcopy", "(", "kwargs", ")", "if", "watch_kwargs", ".", "get", "(", "'watch_action'...
The docker_container watcher, called to invoke the watch command. .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered.
[ "The", "docker_container", "watcher", "called", "to", "invoke", "the", "watch", "command", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/docker_container.py#L2618-L2647
train
saltstack/salt
salt/cloud/clouds/vultrpy.py
_cache_provider_details
def _cache_provider_details(conn=None): ''' Provide a place to hang onto results of --list-[locations|sizes|images] so we don't have to go out to the API and get them every time. ''' DETAILS['avail_locations'] = {} DETAILS['avail_sizes'] = {} DETAILS['avail_images'] = {} locations = avail_locations(conn) images = avail_images(conn) sizes = avail_sizes(conn) for key, location in six.iteritems(locations): DETAILS['avail_locations'][location['name']] = location DETAILS['avail_locations'][key] = location for key, image in six.iteritems(images): DETAILS['avail_images'][image['name']] = image DETAILS['avail_images'][key] = image for key, vm_size in six.iteritems(sizes): DETAILS['avail_sizes'][vm_size['name']] = vm_size DETAILS['avail_sizes'][key] = vm_size
python
def _cache_provider_details(conn=None): ''' Provide a place to hang onto results of --list-[locations|sizes|images] so we don't have to go out to the API and get them every time. ''' DETAILS['avail_locations'] = {} DETAILS['avail_sizes'] = {} DETAILS['avail_images'] = {} locations = avail_locations(conn) images = avail_images(conn) sizes = avail_sizes(conn) for key, location in six.iteritems(locations): DETAILS['avail_locations'][location['name']] = location DETAILS['avail_locations'][key] = location for key, image in six.iteritems(images): DETAILS['avail_images'][image['name']] = image DETAILS['avail_images'][key] = image for key, vm_size in six.iteritems(sizes): DETAILS['avail_sizes'][vm_size['name']] = vm_size DETAILS['avail_sizes'][key] = vm_size
[ "def", "_cache_provider_details", "(", "conn", "=", "None", ")", ":", "DETAILS", "[", "'avail_locations'", "]", "=", "{", "}", "DETAILS", "[", "'avail_sizes'", "]", "=", "{", "}", "DETAILS", "[", "'avail_images'", "]", "=", "{", "}", "locations", "=", "a...
Provide a place to hang onto results of --list-[locations|sizes|images] so we don't have to go out to the API and get them every time.
[ "Provide", "a", "place", "to", "hang", "onto", "results", "of", "--", "list", "-", "[", "locations|sizes|images", "]", "so", "we", "don", "t", "have", "to", "go", "out", "to", "the", "API", "and", "get", "them", "every", "time", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vultrpy.py#L100-L122
train
saltstack/salt
salt/cloud/clouds/vultrpy.py
list_nodes
def list_nodes(**kwargs): ''' Return basic data on nodes ''' ret = {} nodes = list_nodes_full() for node in nodes: ret[node] = {} for prop in 'id', 'image', 'size', 'state', 'private_ips', 'public_ips': ret[node][prop] = nodes[node][prop] return ret
python
def list_nodes(**kwargs): ''' Return basic data on nodes ''' ret = {} nodes = list_nodes_full() for node in nodes: ret[node] = {} for prop in 'id', 'image', 'size', 'state', 'private_ips', 'public_ips': ret[node][prop] = nodes[node][prop] return ret
[ "def", "list_nodes", "(", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "}", "nodes", "=", "list_nodes_full", "(", ")", "for", "node", "in", "nodes", ":", "ret", "[", "node", "]", "=", "{", "}", "for", "prop", "in", "'id'", ",", "'image'", ",", ...
Return basic data on nodes
[ "Return", "basic", "data", "on", "nodes" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vultrpy.py#L160-L172
train
saltstack/salt
salt/cloud/clouds/vultrpy.py
list_nodes_full
def list_nodes_full(**kwargs): ''' Return all data on nodes ''' nodes = _query('server/list') ret = {} for node in nodes: name = nodes[node]['label'] ret[name] = nodes[node].copy() ret[name]['id'] = node ret[name]['image'] = nodes[node]['os'] ret[name]['size'] = nodes[node]['VPSPLANID'] ret[name]['state'] = nodes[node]['status'] ret[name]['private_ips'] = nodes[node]['internal_ip'] ret[name]['public_ips'] = nodes[node]['main_ip'] return ret
python
def list_nodes_full(**kwargs): ''' Return all data on nodes ''' nodes = _query('server/list') ret = {} for node in nodes: name = nodes[node]['label'] ret[name] = nodes[node].copy() ret[name]['id'] = node ret[name]['image'] = nodes[node]['os'] ret[name]['size'] = nodes[node]['VPSPLANID'] ret[name]['state'] = nodes[node]['status'] ret[name]['private_ips'] = nodes[node]['internal_ip'] ret[name]['public_ips'] = nodes[node]['main_ip'] return ret
[ "def", "list_nodes_full", "(", "*", "*", "kwargs", ")", ":", "nodes", "=", "_query", "(", "'server/list'", ")", "ret", "=", "{", "}", "for", "node", "in", "nodes", ":", "name", "=", "nodes", "[", "node", "]", "[", "'label'", "]", "ret", "[", "name"...
Return all data on nodes
[ "Return", "all", "data", "on", "nodes" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vultrpy.py#L175-L192
train
saltstack/salt
salt/cloud/clouds/vultrpy.py
destroy
def destroy(name): ''' Remove a node from Vultr ''' node = show_instance(name, call='action') params = {'SUBID': node['SUBID']} result = _query('server/destroy', method='POST', decode=False, data=_urlencode(params)) # The return of a destroy call is empty in the case of a success. # Errors are only indicated via HTTP status code. Status code 200 # effetively therefore means "success". if result.get('body') == '' and result.get('text') == '': return True return result
python
def destroy(name): ''' Remove a node from Vultr ''' node = show_instance(name, call='action') params = {'SUBID': node['SUBID']} result = _query('server/destroy', method='POST', decode=False, data=_urlencode(params)) # The return of a destroy call is empty in the case of a success. # Errors are only indicated via HTTP status code. Status code 200 # effetively therefore means "success". if result.get('body') == '' and result.get('text') == '': return True return result
[ "def", "destroy", "(", "name", ")", ":", "node", "=", "show_instance", "(", "name", ",", "call", "=", "'action'", ")", "params", "=", "{", "'SUBID'", ":", "node", "[", "'SUBID'", "]", "}", "result", "=", "_query", "(", "'server/destroy'", ",", "method"...
Remove a node from Vultr
[ "Remove", "a", "node", "from", "Vultr" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vultrpy.py#L204-L217
train
saltstack/salt
salt/cloud/clouds/vultrpy.py
show_instance
def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() # Find under which cloud service the name is listed, if any if name not in nodes: return {} __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name]
python
def show_instance(name, call=None): ''' Show the details from the provider concerning an instance ''' if call != 'action': raise SaltCloudSystemExit( 'The show_instance action must be called with -a or --action.' ) nodes = list_nodes_full() # Find under which cloud service the name is listed, if any if name not in nodes: return {} __utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__) return nodes[name]
[ "def", "show_instance", "(", "name", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'action'", ":", "raise", "SaltCloudSystemExit", "(", "'The show_instance action must be called with -a or --action.'", ")", "nodes", "=", "list_nodes_full", "(", ")", "# Fi...
Show the details from the provider concerning an instance
[ "Show", "the", "details", "from", "the", "provider", "concerning", "an", "instance" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vultrpy.py#L234-L248
train
saltstack/salt
salt/cloud/clouds/vultrpy.py
_lookup_vultrid
def _lookup_vultrid(which_key, availkey, keyname): ''' Helper function to retrieve a Vultr ID ''' if DETAILS == {}: _cache_provider_details() which_key = six.text_type(which_key) try: return DETAILS[availkey][which_key][keyname] except KeyError: return False
python
def _lookup_vultrid(which_key, availkey, keyname): ''' Helper function to retrieve a Vultr ID ''' if DETAILS == {}: _cache_provider_details() which_key = six.text_type(which_key) try: return DETAILS[availkey][which_key][keyname] except KeyError: return False
[ "def", "_lookup_vultrid", "(", "which_key", ",", "availkey", ",", "keyname", ")", ":", "if", "DETAILS", "==", "{", "}", ":", "_cache_provider_details", "(", ")", "which_key", "=", "six", ".", "text_type", "(", "which_key", ")", "try", ":", "return", "DETAI...
Helper function to retrieve a Vultr ID
[ "Helper", "function", "to", "retrieve", "a", "Vultr", "ID" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vultrpy.py#L251-L262
train
saltstack/salt
salt/cloud/clouds/vultrpy.py
create
def create(vm_): ''' Create a single VM from a data dict ''' if 'driver' not in vm_: vm_['driver'] = vm_['provider'] private_networking = config.get_cloud_config_value( 'enable_private_network', vm_, __opts__, search_global=False, default=False, ) startup_script = config.get_cloud_config_value( 'startup_script_id', vm_, __opts__, search_global=False, default=None, ) if startup_script and str(startup_script) not in avail_scripts(): log.error('Your Vultr account does not have a startup script with ID %s', str(startup_script)) return False if private_networking is not None: if not isinstance(private_networking, bool): raise SaltCloudConfigError("'private_networking' should be a boolean value.") if private_networking is True: enable_private_network = 'yes' else: enable_private_network = 'no' __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) osid = _lookup_vultrid(vm_['image'], 'avail_images', 'OSID') if not osid: log.error('Vultr does not have an image with id or name %s', vm_['image']) return False vpsplanid = _lookup_vultrid(vm_['size'], 'avail_sizes', 'VPSPLANID') if not vpsplanid: log.error('Vultr does not have a size with id or name %s', vm_['size']) return False dcid = _lookup_vultrid(vm_['location'], 'avail_locations', 'DCID') if not dcid: log.error('Vultr does not have a location with id or name %s', vm_['location']) return False kwargs = { 'label': vm_['name'], 'OSID': osid, 'VPSPLANID': vpsplanid, 'DCID': dcid, 'hostname': vm_['name'], 'enable_private_network': enable_private_network, } if startup_script: kwargs['SCRIPTID'] = startup_script log.info('Creating Cloud VM %s', vm_['name']) __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': 
__utils__['cloud.filter_event']('requesting', kwargs, list(kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'], ) try: data = _query('server/create', method='POST', data=_urlencode(kwargs)) if int(data.get('status', '200')) >= 300: log.error( 'Error creating %s on Vultr\n\n' 'Vultr API returned %s\n', vm_['name'], data ) log.error('Status 412 may mean that you are requesting an\n' 'invalid location, image, or size.') __utils__['cloud.fire_event']( 'event', 'instance request failed', 'salt/cloud/{0}/requesting/failed'.format(vm_['name']), args={'kwargs': kwargs}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'], ) return False except Exception as exc: log.error( 'Error creating %s on Vultr\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment:\n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) __utils__['cloud.fire_event']( 'event', 'instance request failed', 'salt/cloud/{0}/requesting/failed'.format(vm_['name']), args={'kwargs': kwargs}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'], ) return False def wait_for_hostname(): ''' Wait for the IP address to become available ''' data = show_instance(vm_['name'], call='action') main_ip = six.text_type(data.get('main_ip', '0')) if main_ip.startswith('0'): time.sleep(3) return False return data['main_ip'] def wait_for_default_password(): ''' Wait for the IP address to become available ''' data = show_instance(vm_['name'], call='action') # print("Waiting for default password") # pprint.pprint(data) if six.text_type(data.get('default_password', '')) == '': time.sleep(1) return False return data['default_password'] def wait_for_status(): ''' Wait for the IP address to become available ''' data = show_instance(vm_['name'], call='action') # print("Waiting for status normal") # pprint.pprint(data) if six.text_type(data.get('status', '')) != 'active': time.sleep(1) return 
False return data['default_password'] def wait_for_server_state(): ''' Wait for the IP address to become available ''' data = show_instance(vm_['name'], call='action') # print("Waiting for server state ok") # pprint.pprint(data) if six.text_type(data.get('server_state', '')) != 'ok': time.sleep(1) return False return data['default_password'] vm_['ssh_host'] = __utils__['cloud.wait_for_fun']( wait_for_hostname, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) vm_['password'] = __utils__['cloud.wait_for_fun']( wait_for_default_password, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) __utils__['cloud.wait_for_fun']( wait_for_status, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) __utils__['cloud.wait_for_fun']( wait_for_server_state, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) __opts__['hard_timeout'] = config.get_cloud_config_value( 'hard_timeout', get_configured_provider(), __opts__, search_global=False, default=None, ) # Bootstrap ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(show_instance(vm_['name'], call='action')) log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return ret
python
def create(vm_): ''' Create a single VM from a data dict ''' if 'driver' not in vm_: vm_['driver'] = vm_['provider'] private_networking = config.get_cloud_config_value( 'enable_private_network', vm_, __opts__, search_global=False, default=False, ) startup_script = config.get_cloud_config_value( 'startup_script_id', vm_, __opts__, search_global=False, default=None, ) if startup_script and str(startup_script) not in avail_scripts(): log.error('Your Vultr account does not have a startup script with ID %s', str(startup_script)) return False if private_networking is not None: if not isinstance(private_networking, bool): raise SaltCloudConfigError("'private_networking' should be a boolean value.") if private_networking is True: enable_private_network = 'yes' else: enable_private_network = 'no' __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) osid = _lookup_vultrid(vm_['image'], 'avail_images', 'OSID') if not osid: log.error('Vultr does not have an image with id or name %s', vm_['image']) return False vpsplanid = _lookup_vultrid(vm_['size'], 'avail_sizes', 'VPSPLANID') if not vpsplanid: log.error('Vultr does not have a size with id or name %s', vm_['size']) return False dcid = _lookup_vultrid(vm_['location'], 'avail_locations', 'DCID') if not dcid: log.error('Vultr does not have a location with id or name %s', vm_['location']) return False kwargs = { 'label': vm_['name'], 'OSID': osid, 'VPSPLANID': vpsplanid, 'DCID': dcid, 'hostname': vm_['name'], 'enable_private_network': enable_private_network, } if startup_script: kwargs['SCRIPTID'] = startup_script log.info('Creating Cloud VM %s', vm_['name']) __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': 
__utils__['cloud.filter_event']('requesting', kwargs, list(kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'], ) try: data = _query('server/create', method='POST', data=_urlencode(kwargs)) if int(data.get('status', '200')) >= 300: log.error( 'Error creating %s on Vultr\n\n' 'Vultr API returned %s\n', vm_['name'], data ) log.error('Status 412 may mean that you are requesting an\n' 'invalid location, image, or size.') __utils__['cloud.fire_event']( 'event', 'instance request failed', 'salt/cloud/{0}/requesting/failed'.format(vm_['name']), args={'kwargs': kwargs}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'], ) return False except Exception as exc: log.error( 'Error creating %s on Vultr\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment:\n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) __utils__['cloud.fire_event']( 'event', 'instance request failed', 'salt/cloud/{0}/requesting/failed'.format(vm_['name']), args={'kwargs': kwargs}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'], ) return False def wait_for_hostname(): ''' Wait for the IP address to become available ''' data = show_instance(vm_['name'], call='action') main_ip = six.text_type(data.get('main_ip', '0')) if main_ip.startswith('0'): time.sleep(3) return False return data['main_ip'] def wait_for_default_password(): ''' Wait for the IP address to become available ''' data = show_instance(vm_['name'], call='action') # print("Waiting for default password") # pprint.pprint(data) if six.text_type(data.get('default_password', '')) == '': time.sleep(1) return False return data['default_password'] def wait_for_status(): ''' Wait for the IP address to become available ''' data = show_instance(vm_['name'], call='action') # print("Waiting for status normal") # pprint.pprint(data) if six.text_type(data.get('status', '')) != 'active': time.sleep(1) return 
False return data['default_password'] def wait_for_server_state(): ''' Wait for the IP address to become available ''' data = show_instance(vm_['name'], call='action') # print("Waiting for server state ok") # pprint.pprint(data) if six.text_type(data.get('server_state', '')) != 'ok': time.sleep(1) return False return data['default_password'] vm_['ssh_host'] = __utils__['cloud.wait_for_fun']( wait_for_hostname, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) vm_['password'] = __utils__['cloud.wait_for_fun']( wait_for_default_password, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) __utils__['cloud.wait_for_fun']( wait_for_status, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) __utils__['cloud.wait_for_fun']( wait_for_server_state, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) __opts__['hard_timeout'] = config.get_cloud_config_value( 'hard_timeout', get_configured_provider(), __opts__, search_global=False, default=None, ) # Bootstrap ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(show_instance(vm_['name'], call='action')) log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return ret
[ "def", "create", "(", "vm_", ")", ":", "if", "'driver'", "not", "in", "vm_", ":", "vm_", "[", "'driver'", "]", "=", "vm_", "[", "'provider'", "]", "private_networking", "=", "config", ".", "get_cloud_config_value", "(", "'enable_private_network'", ",", "vm_"...
Create a single VM from a data dict
[ "Create", "a", "single", "VM", "from", "a", "data", "dict" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vultrpy.py#L265-L474
train
saltstack/salt
salt/cloud/clouds/vultrpy.py
_query
def _query(path, method='GET', data=None, params=None, header_dict=None, decode=True): ''' Perform a query directly against the Vultr REST API ''' api_key = config.get_cloud_config_value( 'api_key', get_configured_provider(), __opts__, search_global=False, ) management_host = config.get_cloud_config_value( 'management_host', get_configured_provider(), __opts__, search_global=False, default='api.vultr.com' ) url = 'https://{management_host}/v1/{path}?api_key={api_key}'.format( management_host=management_host, path=path, api_key=api_key, ) if header_dict is None: header_dict = {} result = __utils__['http.query']( url, method=method, params=params, data=data, header_dict=header_dict, port=443, text=True, decode=decode, decode_type='json', hide_fields=['api_key'], opts=__opts__, ) if 'dict' in result: return result['dict'] return result
python
def _query(path, method='GET', data=None, params=None, header_dict=None, decode=True): ''' Perform a query directly against the Vultr REST API ''' api_key = config.get_cloud_config_value( 'api_key', get_configured_provider(), __opts__, search_global=False, ) management_host = config.get_cloud_config_value( 'management_host', get_configured_provider(), __opts__, search_global=False, default='api.vultr.com' ) url = 'https://{management_host}/v1/{path}?api_key={api_key}'.format( management_host=management_host, path=path, api_key=api_key, ) if header_dict is None: header_dict = {} result = __utils__['http.query']( url, method=method, params=params, data=data, header_dict=header_dict, port=443, text=True, decode=decode, decode_type='json', hide_fields=['api_key'], opts=__opts__, ) if 'dict' in result: return result['dict'] return result
[ "def", "_query", "(", "path", ",", "method", "=", "'GET'", ",", "data", "=", "None", ",", "params", "=", "None", ",", "header_dict", "=", "None", ",", "decode", "=", "True", ")", ":", "api_key", "=", "config", ".", "get_cloud_config_value", "(", "'api_...
Perform a query directly against the Vultr REST API
[ "Perform", "a", "query", "directly", "against", "the", "Vultr", "REST", "API" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vultrpy.py#L477-L519
train
saltstack/salt
salt/pillar/redismod.py
ext_pillar
def ext_pillar(minion_id, pillar, function, **kwargs): ''' Grabs external pillar data based on configured function ''' if function.startswith('_') or function not in globals(): return {} # Call specified function to pull redis data return globals()[function](minion_id, pillar, **kwargs)
python
def ext_pillar(minion_id, pillar, function, **kwargs): ''' Grabs external pillar data based on configured function ''' if function.startswith('_') or function not in globals(): return {} # Call specified function to pull redis data return globals()[function](minion_id, pillar, **kwargs)
[ "def", "ext_pillar", "(", "minion_id", ",", "pillar", ",", "function", ",", "*", "*", "kwargs", ")", ":", "if", "function", ".", "startswith", "(", "'_'", ")", "or", "function", "not", "in", "globals", "(", ")", ":", "return", "{", "}", "# Call specifi...
Grabs external pillar data based on configured function
[ "Grabs", "external", "pillar", "data", "based", "on", "configured", "function" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/redismod.py#L51-L58
train
saltstack/salt
salt/pillar/redismod.py
key_value
def key_value(minion_id, pillar, # pylint: disable=W0613 pillar_key='redis_pillar'): ''' Looks for key in redis matching minion_id, returns a structure based on the data type of the redis key. String for string type, dict for hash type and lists for lists, sets and sorted sets. pillar_key Pillar key to return data into ''' # Identify key type and process as needed based on that type key_type = __salt__['redis.key_type'](minion_id) if key_type == 'string': return {pillar_key: __salt__['redis.get_key'](minion_id)} elif key_type == 'hash': return {pillar_key: __salt__['redis.hgetall'](minion_id)} elif key_type == 'list': list_size = __salt__['redis.llen'](minion_id) if not list_size: return {} return {pillar_key: __salt__['redis.lrange'](minion_id, 0, list_size - 1)} elif key_type == 'set': return {pillar_key: __salt__['redis.smembers'](minion_id)} elif key_type == 'zset': set_size = __salt__['redis.zcard'](minion_id) if not set_size: return {} return {pillar_key: __salt__['redis.zrange'](minion_id, 0, set_size - 1)} # Return nothing for unhandled types return {}
python
def key_value(minion_id, pillar, # pylint: disable=W0613 pillar_key='redis_pillar'): ''' Looks for key in redis matching minion_id, returns a structure based on the data type of the redis key. String for string type, dict for hash type and lists for lists, sets and sorted sets. pillar_key Pillar key to return data into ''' # Identify key type and process as needed based on that type key_type = __salt__['redis.key_type'](minion_id) if key_type == 'string': return {pillar_key: __salt__['redis.get_key'](minion_id)} elif key_type == 'hash': return {pillar_key: __salt__['redis.hgetall'](minion_id)} elif key_type == 'list': list_size = __salt__['redis.llen'](minion_id) if not list_size: return {} return {pillar_key: __salt__['redis.lrange'](minion_id, 0, list_size - 1)} elif key_type == 'set': return {pillar_key: __salt__['redis.smembers'](minion_id)} elif key_type == 'zset': set_size = __salt__['redis.zcard'](minion_id) if not set_size: return {} return {pillar_key: __salt__['redis.zrange'](minion_id, 0, set_size - 1)} # Return nothing for unhandled types return {}
[ "def", "key_value", "(", "minion_id", ",", "pillar", ",", "# pylint: disable=W0613", "pillar_key", "=", "'redis_pillar'", ")", ":", "# Identify key type and process as needed based on that type", "key_type", "=", "__salt__", "[", "'redis.key_type'", "]", "(", "minion_id", ...
Looks for key in redis matching minion_id, returns a structure based on the data type of the redis key. String for string type, dict for hash type and lists for lists, sets and sorted sets. pillar_key Pillar key to return data into
[ "Looks", "for", "key", "in", "redis", "matching", "minion_id", "returns", "a", "structure", "based", "on", "the", "data", "type", "of", "the", "redis", "key", ".", "String", "for", "string", "type", "dict", "for", "hash", "type", "and", "lists", "for", "...
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/redismod.py#L61-L93
train
saltstack/salt
salt/pillar/redismod.py
key_json
def key_json(minion_id, pillar, # pylint: disable=W0613 pillar_key=None): ''' Pulls a string from redis and deserializes it from json. Deserialized dictionary data loaded directly into top level if pillar_key is not set. pillar_key Pillar key to return data into ''' key_data = __salt__['redis.get_key'](minion_id) # Return nothing for non-existent keys if not key_data: return {} data = salt.utils.json.loads(key_data) # Return as requested if isinstance(data, dict) and not pillar_key: return data elif not pillar_key: return {'redis_pillar': data} else: return {pillar_key: data}
python
def key_json(minion_id, pillar, # pylint: disable=W0613 pillar_key=None): ''' Pulls a string from redis and deserializes it from json. Deserialized dictionary data loaded directly into top level if pillar_key is not set. pillar_key Pillar key to return data into ''' key_data = __salt__['redis.get_key'](minion_id) # Return nothing for non-existent keys if not key_data: return {} data = salt.utils.json.loads(key_data) # Return as requested if isinstance(data, dict) and not pillar_key: return data elif not pillar_key: return {'redis_pillar': data} else: return {pillar_key: data}
[ "def", "key_json", "(", "minion_id", ",", "pillar", ",", "# pylint: disable=W0613", "pillar_key", "=", "None", ")", ":", "key_data", "=", "__salt__", "[", "'redis.get_key'", "]", "(", "minion_id", ")", "# Return nothing for non-existent keys", "if", "not", "key_data...
Pulls a string from redis and deserializes it from json. Deserialized dictionary data loaded directly into top level if pillar_key is not set. pillar_key Pillar key to return data into
[ "Pulls", "a", "string", "from", "redis", "and", "deserializes", "it", "from", "json", ".", "Deserialized", "dictionary", "data", "loaded", "directly", "into", "top", "level", "if", "pillar_key", "is", "not", "set", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/redismod.py#L96-L118
train
saltstack/salt
salt/modules/eix.py
sync
def sync(): ''' Sync portage/overlay trees and update the eix database CLI Example: .. code-block:: bash salt '*' eix.sync ''' # Funtoo patches eix to use 'ego sync' if __grains__['os'] == 'Funtoo': cmd = 'eix-sync -q' else: cmd = 'eix-sync -q -C "--ask" -C "n"' if 'makeconf.features_contains' in __salt__ and __salt__['makeconf.features_contains']('webrsync-gpg'): # GPG sign verify is supported only for "webrsync" if salt.utils.path.which('emerge-delta-webrsync'): # We prefer 'delta-webrsync' to 'webrsync' cmd += ' -W' else: cmd += ' -w' return __salt__['cmd.retcode'](cmd) == 0 else: if __salt__['cmd.retcode'](cmd) == 0: return True # We fall back to "webrsync" if "rsync" fails for some reason if salt.utils.path.which('emerge-delta-webrsync'): # We prefer 'delta-webrsync' to 'webrsync' cmd += ' -W' else: cmd += ' -w' return __salt__['cmd.retcode'](cmd) == 0
python
def sync(): ''' Sync portage/overlay trees and update the eix database CLI Example: .. code-block:: bash salt '*' eix.sync ''' # Funtoo patches eix to use 'ego sync' if __grains__['os'] == 'Funtoo': cmd = 'eix-sync -q' else: cmd = 'eix-sync -q -C "--ask" -C "n"' if 'makeconf.features_contains' in __salt__ and __salt__['makeconf.features_contains']('webrsync-gpg'): # GPG sign verify is supported only for "webrsync" if salt.utils.path.which('emerge-delta-webrsync'): # We prefer 'delta-webrsync' to 'webrsync' cmd += ' -W' else: cmd += ' -w' return __salt__['cmd.retcode'](cmd) == 0 else: if __salt__['cmd.retcode'](cmd) == 0: return True # We fall back to "webrsync" if "rsync" fails for some reason if salt.utils.path.which('emerge-delta-webrsync'): # We prefer 'delta-webrsync' to 'webrsync' cmd += ' -W' else: cmd += ' -w' return __salt__['cmd.retcode'](cmd) == 0
[ "def", "sync", "(", ")", ":", "# Funtoo patches eix to use 'ego sync'", "if", "__grains__", "[", "'os'", "]", "==", "'Funtoo'", ":", "cmd", "=", "'eix-sync -q'", "else", ":", "cmd", "=", "'eix-sync -q -C \"--ask\" -C \"n\"'", "if", "'makeconf.features_contains'", "in"...
Sync portage/overlay trees and update the eix database CLI Example: .. code-block:: bash salt '*' eix.sync
[ "Sync", "portage", "/", "overlay", "trees", "and", "update", "the", "eix", "database" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/eix.py#L20-L50
train
saltstack/salt
salt/modules/pw_group.py
add
def add(name, gid=None, **kwargs): ''' Add the specified group CLI Example: .. code-block:: bash salt '*' group.add foo 3456 ''' kwargs = salt.utils.args.clean_kwargs(**kwargs) if salt.utils.data.is_true(kwargs.pop('system', False)): log.warning('pw_group module does not support the \'system\' argument') if kwargs: log.warning('Invalid kwargs passed to group.add') cmd = 'pw groupadd ' if gid: cmd += '-g {0} '.format(gid) cmd = '{0} -n {1}'.format(cmd, name) ret = __salt__['cmd.run_all'](cmd, python_shell=False) return not ret['retcode']
python
def add(name, gid=None, **kwargs): ''' Add the specified group CLI Example: .. code-block:: bash salt '*' group.add foo 3456 ''' kwargs = salt.utils.args.clean_kwargs(**kwargs) if salt.utils.data.is_true(kwargs.pop('system', False)): log.warning('pw_group module does not support the \'system\' argument') if kwargs: log.warning('Invalid kwargs passed to group.add') cmd = 'pw groupadd ' if gid: cmd += '-g {0} '.format(gid) cmd = '{0} -n {1}'.format(cmd, name) ret = __salt__['cmd.run_all'](cmd, python_shell=False) return not ret['retcode']
[ "def", "add", "(", "name", ",", "gid", "=", "None", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "salt", ".", "utils", ".", "args", ".", "clean_kwargs", "(", "*", "*", "kwargs", ")", "if", "salt", ".", "utils", ".", "data", ".", "is_true", ...
Add the specified group CLI Example: .. code-block:: bash salt '*' group.add foo 3456
[ "Add", "the", "specified", "group" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pw_group.py#L43-L65
train
saltstack/salt
salt/modules/pw_group.py
adduser
def adduser(name, username): ''' Add a user in the group. CLI Example: .. code-block:: bash salt '*' group.adduser foo bar Verifies if a valid username 'bar' as a member of an existing group 'foo', if not then adds it. ''' # Note: pw exits with code 65 if group is unknown retcode = __salt__['cmd.retcode']('pw groupmod {0} -m {1}'.format( name, username), python_shell=False) return not retcode
python
def adduser(name, username): ''' Add a user in the group. CLI Example: .. code-block:: bash salt '*' group.adduser foo bar Verifies if a valid username 'bar' as a member of an existing group 'foo', if not then adds it. ''' # Note: pw exits with code 65 if group is unknown retcode = __salt__['cmd.retcode']('pw groupmod {0} -m {1}'.format( name, username), python_shell=False) return not retcode
[ "def", "adduser", "(", "name", ",", "username", ")", ":", "# Note: pw exits with code 65 if group is unknown", "retcode", "=", "__salt__", "[", "'cmd.retcode'", "]", "(", "'pw groupmod {0} -m {1}'", ".", "format", "(", "name", ",", "username", ")", ",", "python_shel...
Add a user in the group. CLI Example: .. code-block:: bash salt '*' group.adduser foo bar Verifies if a valid username 'bar' as a member of an existing group 'foo', if not then adds it.
[ "Add", "a", "user", "in", "the", "group", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pw_group.py#L145-L162
train
saltstack/salt
salt/modules/pw_group.py
deluser
def deluser(name, username): ''' Remove a user from the group. CLI Example: .. code-block:: bash salt '*' group.deluser foo bar Removes a member user 'bar' from a group 'foo'. If group is not present then returns True. ''' grp_info = __salt__['group.info'](name) if username not in grp_info['members']: return True # Note: pw exits with code 65 if group is unknown retcode = __salt__['cmd.retcode']('pw groupmod {0} -d {1}'.format( name, username), python_shell=False) return not retcode
python
def deluser(name, username): ''' Remove a user from the group. CLI Example: .. code-block:: bash salt '*' group.deluser foo bar Removes a member user 'bar' from a group 'foo'. If group is not present then returns True. ''' grp_info = __salt__['group.info'](name) if username not in grp_info['members']: return True # Note: pw exits with code 65 if group is unknown retcode = __salt__['cmd.retcode']('pw groupmod {0} -d {1}'.format( name, username), python_shell=False) return not retcode
[ "def", "deluser", "(", "name", ",", "username", ")", ":", "grp_info", "=", "__salt__", "[", "'group.info'", "]", "(", "name", ")", "if", "username", "not", "in", "grp_info", "[", "'members'", "]", ":", "return", "True", "# Note: pw exits with code 65 if group ...
Remove a user from the group. CLI Example: .. code-block:: bash salt '*' group.deluser foo bar Removes a member user 'bar' from a group 'foo'. If group is not present then returns True.
[ "Remove", "a", "user", "from", "the", "group", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pw_group.py#L165-L187
train
saltstack/salt
salt/states/jboss7.py
datasource_exists
def datasource_exists(name, jboss_config, datasource_properties, recreate=False, profile=None): ''' Ensures that a datasource with given properties exist on the jboss instance. If datasource doesn't exist, it is created, otherwise only the properties that are different will be updated. name Datasource property name jboss_config Dict with connection properties (see state description) datasource_properties Dict with datasource properties recreate : False If set to True and datasource exists it will be removed and created again. However, if there are deployments that depend on the datasource, it will not me possible to remove it. profile : None The profile name for this datasource (domain mode only) Example: .. code-block:: yaml sampleDS: jboss7.datasource_exists: - recreate: False - datasource_properties: driver-name: mysql connection-url: 'jdbc:mysql://localhost:3306/sampleDatabase' jndi-name: 'java:jboss/datasources/sampleDS' user-name: sampleuser password: secret min-pool-size: 3 use-java-context: True - jboss_config: {{ pillar['jboss'] }} - profile: full-ha ''' log.debug(" ======================== STATE: jboss7.datasource_exists (name: %s) ", name) ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} has_changed = False ds_current_properties = {} ds_result = __salt__['jboss7.read_datasource'](jboss_config=jboss_config, name=name, profile=profile) if ds_result['success']: ds_current_properties = ds_result['result'] if recreate: remove_result = __salt__['jboss7.remove_datasource'](jboss_config=jboss_config, name=name, profile=profile) if remove_result['success']: ret['changes']['removed'] = name else: ret['result'] = False ret['comment'] = 'Could not remove datasource. 
Stdout: '+remove_result['stdout'] return ret has_changed = True # if we are here, we have already made a change create_result = __salt__['jboss7.create_datasource'](jboss_config=jboss_config, name=name, datasource_properties=datasource_properties, profile=profile) if create_result['success']: ret['changes']['created'] = name else: ret['result'] = False ret['comment'] = 'Could not create datasource. Stdout: '+create_result['stdout'] return ret read_result = __salt__['jboss7.read_datasource'](jboss_config=jboss_config, name=name, profile=profile) if read_result['success']: ds_new_properties = read_result['result'] else: ret['result'] = False ret['comment'] = 'Could not read datasource. Stdout: '+read_result['stdout'] return ret else: update_result = __salt__['jboss7.update_datasource'](jboss_config=jboss_config, name=name, new_properties=datasource_properties, profile=profile) if not update_result['success']: ret['result'] = False ret['comment'] = 'Could not update datasource. '+update_result['comment'] # some changes to the datasource may have already been made, therefore we don't quit here else: ret['comment'] = 'Datasource updated.' read_result = __salt__['jboss7.read_datasource'](jboss_config=jboss_config, name=name, profile=profile) ds_new_properties = read_result['result'] else: if ds_result['err_code'] == 'JBAS014807': # ok, resource not exists: create_result = __salt__['jboss7.create_datasource'](jboss_config=jboss_config, name=name, datasource_properties=datasource_properties, profile=profile) if create_result['success']: read_result = __salt__['jboss7.read_datasource'](jboss_config=jboss_config, name=name, profile=profile) ds_new_properties = read_result['result'] ret['comment'] = 'Datasource created.' else: ret['result'] = False ret['comment'] = 'Could not create datasource. 
Stdout: '+create_result['stdout'] else: raise CommandExecutionError('Unable to handle error: {0}'.format(ds_result['failure-description'])) if ret['result']: log.debug("ds_new_properties=%s", ds_new_properties) log.debug("ds_current_properties=%s", ds_current_properties) diff = dictdiffer.diff(ds_new_properties, ds_current_properties) added = diff.added() if added: has_changed = True ret['changes']['added'] = __format_ds_changes(added, ds_current_properties, ds_new_properties) removed = diff.removed() if removed: has_changed = True ret['changes']['removed'] = __format_ds_changes(removed, ds_current_properties, ds_new_properties) changed = diff.changed() if changed: has_changed = True ret['changes']['changed'] = __format_ds_changes(changed, ds_current_properties, ds_new_properties) if not has_changed: ret['comment'] = 'Datasource not changed.' return ret
python
def datasource_exists(name, jboss_config, datasource_properties, recreate=False, profile=None): ''' Ensures that a datasource with given properties exist on the jboss instance. If datasource doesn't exist, it is created, otherwise only the properties that are different will be updated. name Datasource property name jboss_config Dict with connection properties (see state description) datasource_properties Dict with datasource properties recreate : False If set to True and datasource exists it will be removed and created again. However, if there are deployments that depend on the datasource, it will not me possible to remove it. profile : None The profile name for this datasource (domain mode only) Example: .. code-block:: yaml sampleDS: jboss7.datasource_exists: - recreate: False - datasource_properties: driver-name: mysql connection-url: 'jdbc:mysql://localhost:3306/sampleDatabase' jndi-name: 'java:jboss/datasources/sampleDS' user-name: sampleuser password: secret min-pool-size: 3 use-java-context: True - jboss_config: {{ pillar['jboss'] }} - profile: full-ha ''' log.debug(" ======================== STATE: jboss7.datasource_exists (name: %s) ", name) ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} has_changed = False ds_current_properties = {} ds_result = __salt__['jboss7.read_datasource'](jboss_config=jboss_config, name=name, profile=profile) if ds_result['success']: ds_current_properties = ds_result['result'] if recreate: remove_result = __salt__['jboss7.remove_datasource'](jboss_config=jboss_config, name=name, profile=profile) if remove_result['success']: ret['changes']['removed'] = name else: ret['result'] = False ret['comment'] = 'Could not remove datasource. 
Stdout: '+remove_result['stdout'] return ret has_changed = True # if we are here, we have already made a change create_result = __salt__['jboss7.create_datasource'](jboss_config=jboss_config, name=name, datasource_properties=datasource_properties, profile=profile) if create_result['success']: ret['changes']['created'] = name else: ret['result'] = False ret['comment'] = 'Could not create datasource. Stdout: '+create_result['stdout'] return ret read_result = __salt__['jboss7.read_datasource'](jboss_config=jboss_config, name=name, profile=profile) if read_result['success']: ds_new_properties = read_result['result'] else: ret['result'] = False ret['comment'] = 'Could not read datasource. Stdout: '+read_result['stdout'] return ret else: update_result = __salt__['jboss7.update_datasource'](jboss_config=jboss_config, name=name, new_properties=datasource_properties, profile=profile) if not update_result['success']: ret['result'] = False ret['comment'] = 'Could not update datasource. '+update_result['comment'] # some changes to the datasource may have already been made, therefore we don't quit here else: ret['comment'] = 'Datasource updated.' read_result = __salt__['jboss7.read_datasource'](jboss_config=jboss_config, name=name, profile=profile) ds_new_properties = read_result['result'] else: if ds_result['err_code'] == 'JBAS014807': # ok, resource not exists: create_result = __salt__['jboss7.create_datasource'](jboss_config=jboss_config, name=name, datasource_properties=datasource_properties, profile=profile) if create_result['success']: read_result = __salt__['jboss7.read_datasource'](jboss_config=jboss_config, name=name, profile=profile) ds_new_properties = read_result['result'] ret['comment'] = 'Datasource created.' else: ret['result'] = False ret['comment'] = 'Could not create datasource. 
Stdout: '+create_result['stdout'] else: raise CommandExecutionError('Unable to handle error: {0}'.format(ds_result['failure-description'])) if ret['result']: log.debug("ds_new_properties=%s", ds_new_properties) log.debug("ds_current_properties=%s", ds_current_properties) diff = dictdiffer.diff(ds_new_properties, ds_current_properties) added = diff.added() if added: has_changed = True ret['changes']['added'] = __format_ds_changes(added, ds_current_properties, ds_new_properties) removed = diff.removed() if removed: has_changed = True ret['changes']['removed'] = __format_ds_changes(removed, ds_current_properties, ds_new_properties) changed = diff.changed() if changed: has_changed = True ret['changes']['changed'] = __format_ds_changes(changed, ds_current_properties, ds_new_properties) if not has_changed: ret['comment'] = 'Datasource not changed.' return ret
[ "def", "datasource_exists", "(", "name", ",", "jboss_config", ",", "datasource_properties", ",", "recreate", "=", "False", ",", "profile", "=", "None", ")", ":", "log", ".", "debug", "(", "\" ======================== STATE: jboss7.datasource_exists (name: %s) \"", ",", ...
Ensures that a datasource with given properties exist on the jboss instance. If datasource doesn't exist, it is created, otherwise only the properties that are different will be updated. name Datasource property name jboss_config Dict with connection properties (see state description) datasource_properties Dict with datasource properties recreate : False If set to True and datasource exists it will be removed and created again. However, if there are deployments that depend on the datasource, it will not me possible to remove it. profile : None The profile name for this datasource (domain mode only) Example: .. code-block:: yaml sampleDS: jboss7.datasource_exists: - recreate: False - datasource_properties: driver-name: mysql connection-url: 'jdbc:mysql://localhost:3306/sampleDatabase' jndi-name: 'java:jboss/datasources/sampleDS' user-name: sampleuser password: secret min-pool-size: 3 use-java-context: True - jboss_config: {{ pillar['jboss'] }} - profile: full-ha
[ "Ensures", "that", "a", "datasource", "with", "given", "properties", "exist", "on", "the", "jboss", "instance", ".", "If", "datasource", "doesn", "t", "exist", "it", "is", "created", "otherwise", "only", "the", "properties", "that", "are", "different", "will",...
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/jboss7.py#L56-L176
train
saltstack/salt
salt/states/jboss7.py
bindings_exist
def bindings_exist(name, jboss_config, bindings, profile=None): ''' Ensures that given JNDI binding are present on the server. If a binding doesn't exist on the server it will be created. If it already exists its value will be changed. jboss_config: Dict with connection properties (see state description) bindings: Dict with bindings to set. profile: The profile name (domain mode only) Example: .. code-block:: yaml jndi_entries_created: jboss7.bindings_exist: - bindings: 'java:global/sampleapp/environment': 'DEV' 'java:global/sampleapp/configurationFile': '/var/opt/sampleapp/config.properties' - jboss_config: {{ pillar['jboss'] }} ''' log.debug(" ======================== STATE: jboss7.bindings_exist (name: %s) (profile: %s) ", name, profile) log.debug('bindings=%s', bindings) ret = {'name': name, 'result': True, 'changes': {}, 'comment': 'Bindings not changed.'} has_changed = False for key in bindings: value = six.text_type(bindings[key]) query_result = __salt__['jboss7.read_simple_binding'](binding_name=key, jboss_config=jboss_config, profile=profile) if query_result['success']: current_value = query_result['result']['value'] if current_value != value: update_result = __salt__['jboss7.update_simple_binding'](binding_name=key, value=value, jboss_config=jboss_config, profile=profile) if update_result['success']: has_changed = True __log_binding_change(ret['changes'], 'changed', key, value, current_value) else: raise CommandExecutionError(update_result['failure-description']) else: if query_result['err_code'] == 'JBAS014807': # ok, resource not exists: create_result = __salt__['jboss7.create_simple_binding'](binding_name=key, value=value, jboss_config=jboss_config, profile=profile) if create_result['success']: has_changed = True __log_binding_change(ret['changes'], 'added', key, value) else: raise CommandExecutionError(create_result['failure-description']) else: raise CommandExecutionError(query_result['failure-description']) if has_changed: ret['comment'] = 'Bindings 
changed.' return ret
python
def bindings_exist(name, jboss_config, bindings, profile=None): ''' Ensures that given JNDI binding are present on the server. If a binding doesn't exist on the server it will be created. If it already exists its value will be changed. jboss_config: Dict with connection properties (see state description) bindings: Dict with bindings to set. profile: The profile name (domain mode only) Example: .. code-block:: yaml jndi_entries_created: jboss7.bindings_exist: - bindings: 'java:global/sampleapp/environment': 'DEV' 'java:global/sampleapp/configurationFile': '/var/opt/sampleapp/config.properties' - jboss_config: {{ pillar['jboss'] }} ''' log.debug(" ======================== STATE: jboss7.bindings_exist (name: %s) (profile: %s) ", name, profile) log.debug('bindings=%s', bindings) ret = {'name': name, 'result': True, 'changes': {}, 'comment': 'Bindings not changed.'} has_changed = False for key in bindings: value = six.text_type(bindings[key]) query_result = __salt__['jboss7.read_simple_binding'](binding_name=key, jboss_config=jboss_config, profile=profile) if query_result['success']: current_value = query_result['result']['value'] if current_value != value: update_result = __salt__['jboss7.update_simple_binding'](binding_name=key, value=value, jboss_config=jboss_config, profile=profile) if update_result['success']: has_changed = True __log_binding_change(ret['changes'], 'changed', key, value, current_value) else: raise CommandExecutionError(update_result['failure-description']) else: if query_result['err_code'] == 'JBAS014807': # ok, resource not exists: create_result = __salt__['jboss7.create_simple_binding'](binding_name=key, value=value, jboss_config=jboss_config, profile=profile) if create_result['success']: has_changed = True __log_binding_change(ret['changes'], 'added', key, value) else: raise CommandExecutionError(create_result['failure-description']) else: raise CommandExecutionError(query_result['failure-description']) if has_changed: ret['comment'] = 'Bindings 
changed.' return ret
[ "def", "bindings_exist", "(", "name", ",", "jboss_config", ",", "bindings", ",", "profile", "=", "None", ")", ":", "log", ".", "debug", "(", "\" ======================== STATE: jboss7.bindings_exist (name: %s) (profile: %s) \"", ",", "name", ",", "profile", ")", "log"...
Ensures that given JNDI binding are present on the server. If a binding doesn't exist on the server it will be created. If it already exists its value will be changed. jboss_config: Dict with connection properties (see state description) bindings: Dict with bindings to set. profile: The profile name (domain mode only) Example: .. code-block:: yaml jndi_entries_created: jboss7.bindings_exist: - bindings: 'java:global/sampleapp/environment': 'DEV' 'java:global/sampleapp/configurationFile': '/var/opt/sampleapp/config.properties' - jboss_config: {{ pillar['jboss'] }}
[ "Ensures", "that", "given", "JNDI", "binding", "are", "present", "on", "the", "server", ".", "If", "a", "binding", "doesn", "t", "exist", "on", "the", "server", "it", "will", "be", "created", ".", "If", "it", "already", "exists", "its", "value", "will", ...
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/jboss7.py#L203-L261
train
saltstack/salt
salt/states/jboss7.py
deployed
def deployed(name, jboss_config, salt_source=None): '''Ensures that the given application is deployed on server. jboss_config: Dict with connection properties (see state description) salt_source: How to find the artifact to be deployed. target_file: Where to look in the minion's file system for the artifact to be deployed (e.g. '/tmp/application-web-0.39.war'). When source is specified, also specifies where to save the retrieved file. source: (optional) File on salt master (e.g. salt://application-web-0.39.war). If absent, no files will be retrieved and the artifact in target_file will be used for the deployment. undeploy: (optional) Regular expression to match against existing deployments. When present, if there is a deployment that matches the regular expression, it will be undeployed before the new artifact is deployed. undeploy_force: (optional) If True, the artifact will be undeployed although it has not changed. Examples: Deployment of a file from minion's local file system: .. code-block:: yaml application_deployed: jboss7.deployed: - salt_source: target_file: '/tmp/webapp.war' - jboss_config: {{ pillar['jboss'] }} It is assumed that /tmp/webapp.war was made available by some other means. No applications will be undeployed; if an existing deployment that shares that name exists, then it will be replaced with the updated version. Deployment of a file from the Salt master's file system: .. code-block:: yaml application_deployed: jboss7.deployed: - salt_source: source: salt://application-web-0.39.war target_file: '/tmp/application-web-0.39.war' undeploy: 'application-web-.*' - jboss_config: {{ pillar['jboss'] }} Here, application-web-0.39.war file is downloaded from Salt file system to /tmp/application-web-0.39.war file on minion. Existing deployments are checked if any of them matches 'application-web-.*' regular expression, and if so then it is undeployed before deploying the application. This is useful to automate deployment of new application versions. 
If the source parameter of salt_source is specified, it can use any protocol that the file states use. This includes not only downloading from the master but also HTTP, HTTPS, FTP, Amazon S3, and OpenStack Swift. ''' log.debug(" ======================== STATE: jboss7.deployed (name: %s) ", name) ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} comment = '' validate_success, validate_comment = __validate_arguments(jboss_config, salt_source) if not validate_success: return _error(ret, validate_comment) resolved_source, get_artifact_comment, changed = __get_artifact(salt_source) log.debug('resolved_source=%s', resolved_source) log.debug('get_artifact_comment=%s', get_artifact_comment) comment = __append_comment(new_comment=get_artifact_comment, current_comment=comment) if resolved_source is None: return _error(ret, get_artifact_comment) find_success, deployment, find_comment = __find_deployment(jboss_config, salt_source) if not find_success: return _error(ret, find_comment) require_deployment = True log.debug('deployment=%s', deployment) if deployment is not None: if 'undeploy_force' in salt_source: if salt_source['undeploy_force']: ret['changes']['undeployed'] = __undeploy(jboss_config, deployment) else: if changed: ret['changes']['undeployed'] = __undeploy(jboss_config, deployment) else: require_deployment = False comment = __append_comment(new_comment='The artifact {} was already deployed'.format(deployment), current_comment=comment) else: ret['changes']['undeployed'] = __undeploy(jboss_config, deployment) if require_deployment: deploy_result = __salt__['jboss7.deploy'](jboss_config=jboss_config, source_file=resolved_source) log.debug('deploy_result=%s', str(deploy_result)) if deploy_result['success']: comment = __append_comment(new_comment='Deployment completed.', current_comment=comment) ret['changes']['deployed'] = resolved_source else: comment = __append_comment(new_comment='''Deployment failed\nreturn 
code={retcode}\nstdout='{stdout}'\nstderr='{stderr}'''.format(**deploy_result), current_comment=comment) _error(ret, comment) ret['comment'] = comment return ret
python
def deployed(name, jboss_config, salt_source=None): '''Ensures that the given application is deployed on server. jboss_config: Dict with connection properties (see state description) salt_source: How to find the artifact to be deployed. target_file: Where to look in the minion's file system for the artifact to be deployed (e.g. '/tmp/application-web-0.39.war'). When source is specified, also specifies where to save the retrieved file. source: (optional) File on salt master (e.g. salt://application-web-0.39.war). If absent, no files will be retrieved and the artifact in target_file will be used for the deployment. undeploy: (optional) Regular expression to match against existing deployments. When present, if there is a deployment that matches the regular expression, it will be undeployed before the new artifact is deployed. undeploy_force: (optional) If True, the artifact will be undeployed although it has not changed. Examples: Deployment of a file from minion's local file system: .. code-block:: yaml application_deployed: jboss7.deployed: - salt_source: target_file: '/tmp/webapp.war' - jboss_config: {{ pillar['jboss'] }} It is assumed that /tmp/webapp.war was made available by some other means. No applications will be undeployed; if an existing deployment that shares that name exists, then it will be replaced with the updated version. Deployment of a file from the Salt master's file system: .. code-block:: yaml application_deployed: jboss7.deployed: - salt_source: source: salt://application-web-0.39.war target_file: '/tmp/application-web-0.39.war' undeploy: 'application-web-.*' - jboss_config: {{ pillar['jboss'] }} Here, application-web-0.39.war file is downloaded from Salt file system to /tmp/application-web-0.39.war file on minion. Existing deployments are checked if any of them matches 'application-web-.*' regular expression, and if so then it is undeployed before deploying the application. This is useful to automate deployment of new application versions. 
If the source parameter of salt_source is specified, it can use any protocol that the file states use. This includes not only downloading from the master but also HTTP, HTTPS, FTP, Amazon S3, and OpenStack Swift. ''' log.debug(" ======================== STATE: jboss7.deployed (name: %s) ", name) ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} comment = '' validate_success, validate_comment = __validate_arguments(jboss_config, salt_source) if not validate_success: return _error(ret, validate_comment) resolved_source, get_artifact_comment, changed = __get_artifact(salt_source) log.debug('resolved_source=%s', resolved_source) log.debug('get_artifact_comment=%s', get_artifact_comment) comment = __append_comment(new_comment=get_artifact_comment, current_comment=comment) if resolved_source is None: return _error(ret, get_artifact_comment) find_success, deployment, find_comment = __find_deployment(jboss_config, salt_source) if not find_success: return _error(ret, find_comment) require_deployment = True log.debug('deployment=%s', deployment) if deployment is not None: if 'undeploy_force' in salt_source: if salt_source['undeploy_force']: ret['changes']['undeployed'] = __undeploy(jboss_config, deployment) else: if changed: ret['changes']['undeployed'] = __undeploy(jboss_config, deployment) else: require_deployment = False comment = __append_comment(new_comment='The artifact {} was already deployed'.format(deployment), current_comment=comment) else: ret['changes']['undeployed'] = __undeploy(jboss_config, deployment) if require_deployment: deploy_result = __salt__['jboss7.deploy'](jboss_config=jboss_config, source_file=resolved_source) log.debug('deploy_result=%s', str(deploy_result)) if deploy_result['success']: comment = __append_comment(new_comment='Deployment completed.', current_comment=comment) ret['changes']['deployed'] = resolved_source else: comment = __append_comment(new_comment='''Deployment failed\nreturn 
code={retcode}\nstdout='{stdout}'\nstderr='{stderr}'''.format(**deploy_result), current_comment=comment) _error(ret, comment) ret['comment'] = comment return ret
[ "def", "deployed", "(", "name", ",", "jboss_config", ",", "salt_source", "=", "None", ")", ":", "log", ".", "debug", "(", "\" ======================== STATE: jboss7.deployed (name: %s) \"", ",", "name", ")", "ret", "=", "{", "'name'", ":", "name", ",", "'result'...
Ensures that the given application is deployed on server. jboss_config: Dict with connection properties (see state description) salt_source: How to find the artifact to be deployed. target_file: Where to look in the minion's file system for the artifact to be deployed (e.g. '/tmp/application-web-0.39.war'). When source is specified, also specifies where to save the retrieved file. source: (optional) File on salt master (e.g. salt://application-web-0.39.war). If absent, no files will be retrieved and the artifact in target_file will be used for the deployment. undeploy: (optional) Regular expression to match against existing deployments. When present, if there is a deployment that matches the regular expression, it will be undeployed before the new artifact is deployed. undeploy_force: (optional) If True, the artifact will be undeployed although it has not changed. Examples: Deployment of a file from minion's local file system: .. code-block:: yaml application_deployed: jboss7.deployed: - salt_source: target_file: '/tmp/webapp.war' - jboss_config: {{ pillar['jboss'] }} It is assumed that /tmp/webapp.war was made available by some other means. No applications will be undeployed; if an existing deployment that shares that name exists, then it will be replaced with the updated version. Deployment of a file from the Salt master's file system: .. code-block:: yaml application_deployed: jboss7.deployed: - salt_source: source: salt://application-web-0.39.war target_file: '/tmp/application-web-0.39.war' undeploy: 'application-web-.*' - jboss_config: {{ pillar['jboss'] }} Here, application-web-0.39.war file is downloaded from Salt file system to /tmp/application-web-0.39.war file on minion. Existing deployments are checked if any of them matches 'application-web-.*' regular expression, and if so then it is undeployed before deploying the application. This is useful to automate deployment of new application versions. 
If the source parameter of salt_source is specified, it can use any protocol that the file states use. This includes not only downloading from the master but also HTTP, HTTPS, FTP, Amazon S3, and OpenStack Swift.
[ "Ensures", "that", "the", "given", "application", "is", "deployed", "on", "server", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/jboss7.py#L273-L380
train
saltstack/salt
salt/states/jboss7.py
reloaded
def reloaded(name, jboss_config, timeout=60, interval=5): ''' Reloads configuration of jboss server. jboss_config: Dict with connection properties (see state description) timeout: Time to wait until jboss is back in running state. Default timeout is 60s. interval: Interval between state checks. Default interval is 5s. Decreasing the interval may slightly decrease waiting time but be aware that every status check is a call to jboss-cli which is a java process. If interval is smaller than process cleanup time it may easily lead to excessive resource consumption. This step performs the following operations: * Ensures that server is in running or reload-required state (by reading server-state attribute) * Reloads configuration * Waits for server to reload and be in running state Example: .. code-block:: yaml configuration_reloaded: jboss7.reloaded: - jboss_config: {{ pillar['jboss'] }} ''' log.debug(" ======================== STATE: jboss7.reloaded (name: %s) ", name) ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} status = __salt__['jboss7.status'](jboss_config) if not status['success'] or status['result'] not in ('running', 'reload-required'): ret['result'] = False ret['comment'] = "Cannot reload server configuration, it should be up and in 'running' or 'reload-required' state." 
return ret result = __salt__['jboss7.reload'](jboss_config) if result['success'] or \ 'Operation failed: Channel closed' in result['stdout'] or \ 'Communication error: java.util.concurrent.ExecutionException: Operation failed' in result['stdout']: wait_time = 0 status = None while (status is None or not status['success'] or status['result'] != 'running') and wait_time < timeout: time.sleep(interval) wait_time += interval status = __salt__['jboss7.status'](jboss_config) if status['success'] and status['result'] == 'running': ret['result'] = True ret['comment'] = 'Configuration reloaded' ret['changes']['reloaded'] = 'configuration' else: ret['result'] = False ret['comment'] = 'Could not reload the configuration. Timeout ({0} s) exceeded. '.format(timeout) if not status['success']: ret['comment'] = __append_comment('Could not connect to JBoss controller.', ret['comment']) else: ret['comment'] = __append_comment(('Server is in {0} state'.format(status['result'])), ret['comment']) else: ret['result'] = False ret['comment'] = 'Could not reload the configuration, stdout:'+result['stdout'] return ret
python
def reloaded(name, jboss_config, timeout=60, interval=5): ''' Reloads configuration of jboss server. jboss_config: Dict with connection properties (see state description) timeout: Time to wait until jboss is back in running state. Default timeout is 60s. interval: Interval between state checks. Default interval is 5s. Decreasing the interval may slightly decrease waiting time but be aware that every status check is a call to jboss-cli which is a java process. If interval is smaller than process cleanup time it may easily lead to excessive resource consumption. This step performs the following operations: * Ensures that server is in running or reload-required state (by reading server-state attribute) * Reloads configuration * Waits for server to reload and be in running state Example: .. code-block:: yaml configuration_reloaded: jboss7.reloaded: - jboss_config: {{ pillar['jboss'] }} ''' log.debug(" ======================== STATE: jboss7.reloaded (name: %s) ", name) ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} status = __salt__['jboss7.status'](jboss_config) if not status['success'] or status['result'] not in ('running', 'reload-required'): ret['result'] = False ret['comment'] = "Cannot reload server configuration, it should be up and in 'running' or 'reload-required' state." 
return ret result = __salt__['jboss7.reload'](jboss_config) if result['success'] or \ 'Operation failed: Channel closed' in result['stdout'] or \ 'Communication error: java.util.concurrent.ExecutionException: Operation failed' in result['stdout']: wait_time = 0 status = None while (status is None or not status['success'] or status['result'] != 'running') and wait_time < timeout: time.sleep(interval) wait_time += interval status = __salt__['jboss7.status'](jboss_config) if status['success'] and status['result'] == 'running': ret['result'] = True ret['comment'] = 'Configuration reloaded' ret['changes']['reloaded'] = 'configuration' else: ret['result'] = False ret['comment'] = 'Could not reload the configuration. Timeout ({0} s) exceeded. '.format(timeout) if not status['success']: ret['comment'] = __append_comment('Could not connect to JBoss controller.', ret['comment']) else: ret['comment'] = __append_comment(('Server is in {0} state'.format(status['result'])), ret['comment']) else: ret['result'] = False ret['comment'] = 'Could not reload the configuration, stdout:'+result['stdout'] return ret
[ "def", "reloaded", "(", "name", ",", "jboss_config", ",", "timeout", "=", "60", ",", "interval", "=", "5", ")", ":", "log", ".", "debug", "(", "\" ======================== STATE: jboss7.reloaded (name: %s) \"", ",", "name", ")", "ret", "=", "{", "'name'", ":",...
Reloads configuration of jboss server. jboss_config: Dict with connection properties (see state description) timeout: Time to wait until jboss is back in running state. Default timeout is 60s. interval: Interval between state checks. Default interval is 5s. Decreasing the interval may slightly decrease waiting time but be aware that every status check is a call to jboss-cli which is a java process. If interval is smaller than process cleanup time it may easily lead to excessive resource consumption. This step performs the following operations: * Ensures that server is in running or reload-required state (by reading server-state attribute) * Reloads configuration * Waits for server to reload and be in running state Example: .. code-block:: yaml configuration_reloaded: jboss7.reloaded: - jboss_config: {{ pillar['jboss'] }}
[ "Reloads", "configuration", "of", "jboss", "server", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/jboss7.py#L484-L549
train
saltstack/salt
salt/utils/validate/user.py
valid_username
def valid_username(user): ''' Validates a username based on the guidelines in `useradd(8)` ''' if not isinstance(user, six.string_types): return False if len(user) > 32: return False return VALID_USERNAME.match(user) is not None
python
def valid_username(user): ''' Validates a username based on the guidelines in `useradd(8)` ''' if not isinstance(user, six.string_types): return False if len(user) > 32: return False return VALID_USERNAME.match(user) is not None
[ "def", "valid_username", "(", "user", ")", ":", "if", "not", "isinstance", "(", "user", ",", "six", ".", "string_types", ")", ":", "return", "False", "if", "len", "(", "user", ")", ">", "32", ":", "return", "False", "return", "VALID_USERNAME", ".", "ma...
Validates a username based on the guidelines in `useradd(8)`
[ "Validates", "a", "username", "based", "on", "the", "guidelines", "in", "useradd", "(", "8", ")" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/validate/user.py#L17-L27
train
saltstack/salt
salt/renderers/pyobjects.py
load_states
def load_states(): ''' This loads our states into the salt __context__ ''' states = {} # the loader expects to find pillar & grain data __opts__['grains'] = salt.loader.grains(__opts__) __opts__['pillar'] = __pillar__ lazy_utils = salt.loader.utils(__opts__) lazy_funcs = salt.loader.minion_mods(__opts__, utils=lazy_utils) lazy_serializers = salt.loader.serializers(__opts__) lazy_states = salt.loader.states(__opts__, lazy_funcs, lazy_utils, lazy_serializers) # TODO: some way to lazily do this? This requires loading *all* state modules for key, func in six.iteritems(lazy_states): if '.' not in key: continue mod_name, func_name = key.split('.', 1) if mod_name not in states: states[mod_name] = {} states[mod_name][func_name] = func __context__['pyobjects_states'] = states
python
def load_states(): ''' This loads our states into the salt __context__ ''' states = {} # the loader expects to find pillar & grain data __opts__['grains'] = salt.loader.grains(__opts__) __opts__['pillar'] = __pillar__ lazy_utils = salt.loader.utils(__opts__) lazy_funcs = salt.loader.minion_mods(__opts__, utils=lazy_utils) lazy_serializers = salt.loader.serializers(__opts__) lazy_states = salt.loader.states(__opts__, lazy_funcs, lazy_utils, lazy_serializers) # TODO: some way to lazily do this? This requires loading *all* state modules for key, func in six.iteritems(lazy_states): if '.' not in key: continue mod_name, func_name = key.split('.', 1) if mod_name not in states: states[mod_name] = {} states[mod_name][func_name] = func __context__['pyobjects_states'] = states
[ "def", "load_states", "(", ")", ":", "states", "=", "{", "}", "# the loader expects to find pillar & grain data", "__opts__", "[", "'grains'", "]", "=", "salt", ".", "loader", ".", "grains", "(", "__opts__", ")", "__opts__", "[", "'pillar'", "]", "=", "__pilla...
This loads our states into the salt __context__
[ "This", "loads", "our", "states", "into", "the", "salt", "__context__" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/renderers/pyobjects.py#L339-L365
train
saltstack/salt
salt/modules/google_chat.py
send_message
def send_message(url, message): ''' Send a message to the google chat room specified in the webhook url. .. code-block:: bash salt '*' google_chat.send_message "https://chat.googleapis.com/v1/spaces/example_space/messages?key=example_key" "This is a test message" ''' headers = {'Content-Type': 'application/json'} data = {'text': message} result = __utils__['http.query'](url, 'POST', data=json.dumps(data), header_dict=headers, decode=True, status=True) if result.get('status', 0) == 200: return True else: return False
python
def send_message(url, message): ''' Send a message to the google chat room specified in the webhook url. .. code-block:: bash salt '*' google_chat.send_message "https://chat.googleapis.com/v1/spaces/example_space/messages?key=example_key" "This is a test message" ''' headers = {'Content-Type': 'application/json'} data = {'text': message} result = __utils__['http.query'](url, 'POST', data=json.dumps(data), header_dict=headers, decode=True, status=True) if result.get('status', 0) == 200: return True else: return False
[ "def", "send_message", "(", "url", ",", "message", ")", ":", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", "data", "=", "{", "'text'", ":", "message", "}", "result", "=", "__utils__", "[", "'http.query'", "]", "(", "url", ",", "'...
Send a message to the google chat room specified in the webhook url. .. code-block:: bash salt '*' google_chat.send_message "https://chat.googleapis.com/v1/spaces/example_space/messages?key=example_key" "This is a test message"
[ "Send", "a", "message", "to", "the", "google", "chat", "room", "specified", "in", "the", "webhook", "url", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/google_chat.py#L36-L56
train
saltstack/salt
salt/utils/error.py
raise_error
def raise_error(name=None, args=None, message=''): ''' Raise an exception with __name__ from name, args from args If args is None Otherwise message from message\ If name is empty then use "Exception" ''' name = name or 'Exception' if hasattr(salt.exceptions, name): ex = getattr(salt.exceptions, name) elif hasattr(exceptions, name): ex = getattr(exceptions, name) else: name = 'SaltException' ex = getattr(salt.exceptions, name) if args is not None: raise ex(*args) else: raise ex(message)
python
def raise_error(name=None, args=None, message=''): ''' Raise an exception with __name__ from name, args from args If args is None Otherwise message from message\ If name is empty then use "Exception" ''' name = name or 'Exception' if hasattr(salt.exceptions, name): ex = getattr(salt.exceptions, name) elif hasattr(exceptions, name): ex = getattr(exceptions, name) else: name = 'SaltException' ex = getattr(salt.exceptions, name) if args is not None: raise ex(*args) else: raise ex(message)
[ "def", "raise_error", "(", "name", "=", "None", ",", "args", "=", "None", ",", "message", "=", "''", ")", ":", "name", "=", "name", "or", "'Exception'", "if", "hasattr", "(", "salt", ".", "exceptions", ",", "name", ")", ":", "ex", "=", "getattr", "...
Raise an exception with __name__ from name, args from args If args is None Otherwise message from message\ If name is empty then use "Exception"
[ "Raise", "an", "exception", "with", "__name__", "from", "name", "args", "from", "args", "If", "args", "is", "None", "Otherwise", "message", "from", "message", "\\", "If", "name", "is", "empty", "then", "use", "Exception" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/error.py#L16-L33
train
saltstack/salt
salt/utils/error.py
fire_exception
def fire_exception(exc, opts, job=None, node='minion'): ''' Fire raw exception across the event bus ''' if job is None: job = {} event = salt.utils.event.SaltEvent(node, opts=opts, listen=False) event.fire_event(pack_exception(exc), '_salt_error')
python
def fire_exception(exc, opts, job=None, node='minion'): ''' Fire raw exception across the event bus ''' if job is None: job = {} event = salt.utils.event.SaltEvent(node, opts=opts, listen=False) event.fire_event(pack_exception(exc), '_salt_error')
[ "def", "fire_exception", "(", "exc", ",", "opts", ",", "job", "=", "None", ",", "node", "=", "'minion'", ")", ":", "if", "job", "is", "None", ":", "job", "=", "{", "}", "event", "=", "salt", ".", "utils", ".", "event", ".", "SaltEvent", "(", "nod...
Fire raw exception across the event bus
[ "Fire", "raw", "exception", "across", "the", "event", "bus" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/error.py#L44-L51
train
saltstack/salt
salt/renderers/msgpack.py
render
def render(msgpack_data, saltenv='base', sls='', **kws): ''' Accepts a message pack string or a file object, renders said data back to a python dict. .. note: This renderer is NOT intended for use in creating sls files by hand, but exists to allow for data backends to serialize the highdata structure in an easily transportable way. This is to allow for more fluid fileserver backends that rely on pure data sources. :rtype: A Python data structure ''' if not isinstance(msgpack_data, six.string_types): msgpack_data = msgpack_data.read() if msgpack_data.startswith('#!'): msgpack_data = msgpack_data[(msgpack_data.find('\n') + 1):] if not msgpack_data.strip(): return {} return salt.utils.msgpack.loads(msgpack_data)
python
def render(msgpack_data, saltenv='base', sls='', **kws): ''' Accepts a message pack string or a file object, renders said data back to a python dict. .. note: This renderer is NOT intended for use in creating sls files by hand, but exists to allow for data backends to serialize the highdata structure in an easily transportable way. This is to allow for more fluid fileserver backends that rely on pure data sources. :rtype: A Python data structure ''' if not isinstance(msgpack_data, six.string_types): msgpack_data = msgpack_data.read() if msgpack_data.startswith('#!'): msgpack_data = msgpack_data[(msgpack_data.find('\n') + 1):] if not msgpack_data.strip(): return {} return salt.utils.msgpack.loads(msgpack_data)
[ "def", "render", "(", "msgpack_data", ",", "saltenv", "=", "'base'", ",", "sls", "=", "''", ",", "*", "*", "kws", ")", ":", "if", "not", "isinstance", "(", "msgpack_data", ",", "six", ".", "string_types", ")", ":", "msgpack_data", "=", "msgpack_data", ...
Accepts a message pack string or a file object, renders said data back to a python dict. .. note: This renderer is NOT intended for use in creating sls files by hand, but exists to allow for data backends to serialize the highdata structure in an easily transportable way. This is to allow for more fluid fileserver backends that rely on pure data sources. :rtype: A Python data structure
[ "Accepts", "a", "message", "pack", "string", "or", "a", "file", "object", "renders", "said", "data", "back", "to", "a", "python", "dict", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/renderers/msgpack.py#L9-L29
train
saltstack/salt
salt/daemons/masterapi.py
init_git_pillar
def init_git_pillar(opts): ''' Clear out the ext pillar caches, used when the master starts ''' ret = [] for opts_dict in [x for x in opts.get('ext_pillar', [])]: if 'git' in opts_dict: try: pillar = salt.utils.gitfs.GitPillar( opts, opts_dict['git'], per_remote_overrides=git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=git_pillar.PER_REMOTE_ONLY, global_only=git_pillar.GLOBAL_ONLY) ret.append(pillar) except salt.exceptions.FileserverConfigError: if opts.get('git_pillar_verify_config', True): raise else: log.critical('Could not initialize git_pillar') return ret
python
def init_git_pillar(opts): ''' Clear out the ext pillar caches, used when the master starts ''' ret = [] for opts_dict in [x for x in opts.get('ext_pillar', [])]: if 'git' in opts_dict: try: pillar = salt.utils.gitfs.GitPillar( opts, opts_dict['git'], per_remote_overrides=git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=git_pillar.PER_REMOTE_ONLY, global_only=git_pillar.GLOBAL_ONLY) ret.append(pillar) except salt.exceptions.FileserverConfigError: if opts.get('git_pillar_verify_config', True): raise else: log.critical('Could not initialize git_pillar') return ret
[ "def", "init_git_pillar", "(", "opts", ")", ":", "ret", "=", "[", "]", "for", "opts_dict", "in", "[", "x", "for", "x", "in", "opts", ".", "get", "(", "'ext_pillar'", ",", "[", "]", ")", "]", ":", "if", "'git'", "in", "opts_dict", ":", "try", ":",...
Clear out the ext pillar caches, used when the master starts
[ "Clear", "out", "the", "ext", "pillar", "caches", "used", "when", "the", "master", "starts" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L68-L88
train
saltstack/salt
salt/daemons/masterapi.py
clean_fsbackend
def clean_fsbackend(opts): ''' Clean out the old fileserver backends ''' # Clear remote fileserver backend caches so they get recreated for backend in ('git', 'hg', 'svn'): if backend in opts['fileserver_backend']: env_cache = os.path.join( opts['cachedir'], '{0}fs'.format(backend), 'envs.p' ) if os.path.isfile(env_cache): log.debug('Clearing %sfs env cache', backend) try: os.remove(env_cache) except OSError as exc: log.critical( 'Unable to clear env cache file %s: %s', env_cache, exc ) file_lists_dir = os.path.join( opts['cachedir'], 'file_lists', '{0}fs'.format(backend) ) try: file_lists_caches = os.listdir(file_lists_dir) except OSError: continue for file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'): cache_file = os.path.join(file_lists_dir, file_lists_cache) try: os.remove(cache_file) except OSError as exc: log.critical( 'Unable to file_lists cache file %s: %s', cache_file, exc )
python
def clean_fsbackend(opts): ''' Clean out the old fileserver backends ''' # Clear remote fileserver backend caches so they get recreated for backend in ('git', 'hg', 'svn'): if backend in opts['fileserver_backend']: env_cache = os.path.join( opts['cachedir'], '{0}fs'.format(backend), 'envs.p' ) if os.path.isfile(env_cache): log.debug('Clearing %sfs env cache', backend) try: os.remove(env_cache) except OSError as exc: log.critical( 'Unable to clear env cache file %s: %s', env_cache, exc ) file_lists_dir = os.path.join( opts['cachedir'], 'file_lists', '{0}fs'.format(backend) ) try: file_lists_caches = os.listdir(file_lists_dir) except OSError: continue for file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'): cache_file = os.path.join(file_lists_dir, file_lists_cache) try: os.remove(cache_file) except OSError as exc: log.critical( 'Unable to file_lists cache file %s: %s', cache_file, exc )
[ "def", "clean_fsbackend", "(", "opts", ")", ":", "# Clear remote fileserver backend caches so they get recreated", "for", "backend", "in", "(", "'git'", ",", "'hg'", ",", "'svn'", ")", ":", "if", "backend", "in", "opts", "[", "'fileserver_backend'", "]", ":", "env...
Clean out the old fileserver backends
[ "Clean", "out", "the", "old", "fileserver", "backends" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L91-L130
train
saltstack/salt
salt/daemons/masterapi.py
clean_expired_tokens
def clean_expired_tokens(opts): ''' Clean expired tokens from the master ''' loadauth = salt.auth.LoadAuth(opts) for tok in loadauth.list_tokens(): token_data = loadauth.get_tok(tok) if 'expire' not in token_data or token_data.get('expire', 0) < time.time(): loadauth.rm_token(tok)
python
def clean_expired_tokens(opts): ''' Clean expired tokens from the master ''' loadauth = salt.auth.LoadAuth(opts) for tok in loadauth.list_tokens(): token_data = loadauth.get_tok(tok) if 'expire' not in token_data or token_data.get('expire', 0) < time.time(): loadauth.rm_token(tok)
[ "def", "clean_expired_tokens", "(", "opts", ")", ":", "loadauth", "=", "salt", ".", "auth", ".", "LoadAuth", "(", "opts", ")", "for", "tok", "in", "loadauth", ".", "list_tokens", "(", ")", ":", "token_data", "=", "loadauth", ".", "get_tok", "(", "tok", ...
Clean expired tokens from the master
[ "Clean", "expired", "tokens", "from", "the", "master" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L133-L141
train
saltstack/salt
salt/daemons/masterapi.py
clean_old_jobs
def clean_old_jobs(opts): ''' Clean out the old jobs from the job cache ''' # TODO: better way to not require creating the masterminion every time? mminion = salt.minion.MasterMinion( opts, states=False, rend=False, ) # If the master job cache has a clean_old_jobs, call it fstr = '{0}.clean_old_jobs'.format(opts['master_job_cache']) if fstr in mminion.returners: mminion.returners[fstr]()
python
def clean_old_jobs(opts): ''' Clean out the old jobs from the job cache ''' # TODO: better way to not require creating the masterminion every time? mminion = salt.minion.MasterMinion( opts, states=False, rend=False, ) # If the master job cache has a clean_old_jobs, call it fstr = '{0}.clean_old_jobs'.format(opts['master_job_cache']) if fstr in mminion.returners: mminion.returners[fstr]()
[ "def", "clean_old_jobs", "(", "opts", ")", ":", "# TODO: better way to not require creating the masterminion every time?", "mminion", "=", "salt", ".", "minion", ".", "MasterMinion", "(", "opts", ",", "states", "=", "False", ",", "rend", "=", "False", ",", ")", "#...
Clean out the old jobs from the job cache
[ "Clean", "out", "the", "old", "jobs", "from", "the", "job", "cache" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L162-L175
train
saltstack/salt
salt/daemons/masterapi.py
clean_proc_dir
def clean_proc_dir(opts): ''' Clean out old tracked jobs running on the master Generally, anything tracking a job should remove the job once the job has finished. However, this will remove any jobs that for some reason were not properly removed when finished or errored. ''' serial = salt.payload.Serial(opts) proc_dir = os.path.join(opts['cachedir'], 'proc') for fn_ in os.listdir(proc_dir): proc_file = os.path.join(*[proc_dir, fn_]) data = salt.utils.master.read_proc_file(proc_file, opts) if not data: try: log.warning( "Found proc file %s without proper data. Removing from tracked proc files.", proc_file ) os.remove(proc_file) except (OSError, IOError) as err: log.error('Unable to remove proc file: %s.', err) continue if not salt.utils.master.is_pid_healthy(data['pid']): try: log.warning( "PID %s not owned by salt or no longer running. Removing tracked proc file %s", data['pid'], proc_file ) os.remove(proc_file) except (OSError, IOError) as err: log.error('Unable to remove proc file: %s.', err)
python
def clean_proc_dir(opts): ''' Clean out old tracked jobs running on the master Generally, anything tracking a job should remove the job once the job has finished. However, this will remove any jobs that for some reason were not properly removed when finished or errored. ''' serial = salt.payload.Serial(opts) proc_dir = os.path.join(opts['cachedir'], 'proc') for fn_ in os.listdir(proc_dir): proc_file = os.path.join(*[proc_dir, fn_]) data = salt.utils.master.read_proc_file(proc_file, opts) if not data: try: log.warning( "Found proc file %s without proper data. Removing from tracked proc files.", proc_file ) os.remove(proc_file) except (OSError, IOError) as err: log.error('Unable to remove proc file: %s.', err) continue if not salt.utils.master.is_pid_healthy(data['pid']): try: log.warning( "PID %s not owned by salt or no longer running. Removing tracked proc file %s", data['pid'], proc_file ) os.remove(proc_file) except (OSError, IOError) as err: log.error('Unable to remove proc file: %s.', err)
[ "def", "clean_proc_dir", "(", "opts", ")", ":", "serial", "=", "salt", ".", "payload", ".", "Serial", "(", "opts", ")", "proc_dir", "=", "os", ".", "path", ".", "join", "(", "opts", "[", "'cachedir'", "]", ",", "'proc'", ")", "for", "fn_", "in", "o...
Clean out old tracked jobs running on the master Generally, anything tracking a job should remove the job once the job has finished. However, this will remove any jobs that for some reason were not properly removed when finished or errored.
[ "Clean", "out", "old", "tracked", "jobs", "running", "on", "the", "master" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L178-L211
train
saltstack/salt
salt/daemons/masterapi.py
access_keys
def access_keys(opts): ''' A key needs to be placed in the filesystem with permissions 0400 so clients are required to run as root. ''' # TODO: Need a way to get all available users for systems not supported by pwd module. # For now users pattern matching will not work for publisher_acl. keys = {} publisher_acl = opts['publisher_acl'] acl_users = set(publisher_acl.keys()) if opts.get('user'): acl_users.add(opts['user']) acl_users.add(salt.utils.user.get_user()) for user in acl_users: log.info('Preparing the %s key for local communication', user) key = mk_key(opts, user) if key is not None: keys[user] = key # Check other users matching ACL patterns if opts['client_acl_verify'] and HAS_PWD: log.profile('Beginning pwd.getpwall() call in masterapi access_keys function') for user in pwd.getpwall(): user = user.pw_name if user not in keys and salt.utils.stringutils.check_whitelist_blacklist(user, whitelist=acl_users): keys[user] = mk_key(opts, user) log.profile('End pwd.getpwall() call in masterapi access_keys function') return keys
python
def access_keys(opts): ''' A key needs to be placed in the filesystem with permissions 0400 so clients are required to run as root. ''' # TODO: Need a way to get all available users for systems not supported by pwd module. # For now users pattern matching will not work for publisher_acl. keys = {} publisher_acl = opts['publisher_acl'] acl_users = set(publisher_acl.keys()) if opts.get('user'): acl_users.add(opts['user']) acl_users.add(salt.utils.user.get_user()) for user in acl_users: log.info('Preparing the %s key for local communication', user) key = mk_key(opts, user) if key is not None: keys[user] = key # Check other users matching ACL patterns if opts['client_acl_verify'] and HAS_PWD: log.profile('Beginning pwd.getpwall() call in masterapi access_keys function') for user in pwd.getpwall(): user = user.pw_name if user not in keys and salt.utils.stringutils.check_whitelist_blacklist(user, whitelist=acl_users): keys[user] = mk_key(opts, user) log.profile('End pwd.getpwall() call in masterapi access_keys function') return keys
[ "def", "access_keys", "(", "opts", ")", ":", "# TODO: Need a way to get all available users for systems not supported by pwd module.", "# For now users pattern matching will not work for publisher_acl.", "keys", "=", "{", "}", "publisher_acl", "=", "opts", "[", "'publisher_acl'...
A key needs to be placed in the filesystem with permissions 0400 so clients are required to run as root.
[ "A", "key", "needs", "to", "be", "placed", "in", "the", "filesystem", "with", "permissions", "0400", "so", "clients", "are", "required", "to", "run", "as", "root", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L259-L287
train
saltstack/salt
salt/daemons/masterapi.py
fileserver_update
def fileserver_update(fileserver): ''' Update the fileserver backends, requires that a salt.fileserver.Fileserver object be passed in ''' try: if not fileserver.servers: log.error( 'No fileservers loaded, the master will not be able to ' 'serve files to minions' ) raise salt.exceptions.SaltMasterError('No fileserver backends available') fileserver.update() except Exception as exc: log.error( 'Exception %s occurred in file server update', exc, exc_info_on_loglevel=logging.DEBUG )
python
def fileserver_update(fileserver): ''' Update the fileserver backends, requires that a salt.fileserver.Fileserver object be passed in ''' try: if not fileserver.servers: log.error( 'No fileservers loaded, the master will not be able to ' 'serve files to minions' ) raise salt.exceptions.SaltMasterError('No fileserver backends available') fileserver.update() except Exception as exc: log.error( 'Exception %s occurred in file server update', exc, exc_info_on_loglevel=logging.DEBUG )
[ "def", "fileserver_update", "(", "fileserver", ")", ":", "try", ":", "if", "not", "fileserver", ".", "servers", ":", "log", ".", "error", "(", "'No fileservers loaded, the master will not be able to '", "'serve files to minions'", ")", "raise", "salt", ".", "exception...
Update the fileserver backends, requires that a salt.fileserver.Fileserver object be passed in
[ "Update", "the", "fileserver", "backends", "requires", "that", "a", "salt", ".", "fileserver", ".", "Fileserver", "object", "be", "passed", "in" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L290-L307
train
saltstack/salt
salt/daemons/masterapi.py
AutoKey.check_permissions
def check_permissions(self, filename): ''' Check if the specified filename has correct permissions ''' if salt.utils.platform.is_windows(): return True # After we've ascertained we're not on windows groups = salt.utils.user.get_gid_list(self.opts['user'], include_default=False) fmode = os.stat(filename) if stat.S_IWOTH & fmode.st_mode: # don't allow others to write to the file return False if stat.S_IWGRP & fmode.st_mode: # if the group has write access only allow with permissive_pki_access if not self.opts.get('permissive_pki_access', False): return False elif os.getuid() == 0 and fmode.st_gid not in groups: # if salt is root it has to be in the group that has write access # this gives the group 'permission' to have write access return False return True
python
def check_permissions(self, filename): ''' Check if the specified filename has correct permissions ''' if salt.utils.platform.is_windows(): return True # After we've ascertained we're not on windows groups = salt.utils.user.get_gid_list(self.opts['user'], include_default=False) fmode = os.stat(filename) if stat.S_IWOTH & fmode.st_mode: # don't allow others to write to the file return False if stat.S_IWGRP & fmode.st_mode: # if the group has write access only allow with permissive_pki_access if not self.opts.get('permissive_pki_access', False): return False elif os.getuid() == 0 and fmode.st_gid not in groups: # if salt is root it has to be in the group that has write access # this gives the group 'permission' to have write access return False return True
[ "def", "check_permissions", "(", "self", ",", "filename", ")", ":", "if", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "return", "True", "# After we've ascertained we're not on windows", "groups", "=", "salt", ".", "utils", ".", "us...
Check if the specified filename has correct permissions
[ "Check", "if", "the", "specified", "filename", "has", "correct", "permissions" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L318-L342
train
saltstack/salt
salt/daemons/masterapi.py
AutoKey.check_signing_file
def check_signing_file(self, keyid, signing_file): ''' Check a keyid for membership in a signing file ''' if not signing_file or not os.path.exists(signing_file): return False if not self.check_permissions(signing_file): log.warning('Wrong permissions for %s, ignoring content', signing_file) return False mtime = os.path.getmtime(signing_file) if self.signing_files.get(signing_file, {}).get('mtime') != mtime: self.signing_files.setdefault(signing_file, {})['mtime'] = mtime with salt.utils.files.fopen(signing_file, 'r') as fp_: self.signing_files[signing_file]['data'] = [ entry for entry in [line.strip() for line in fp_] if not entry.strip().startswith('#') ] return any(salt.utils.stringutils.expr_match(keyid, line) for line in self.signing_files[signing_file].get('data', []))
python
def check_signing_file(self, keyid, signing_file): ''' Check a keyid for membership in a signing file ''' if not signing_file or not os.path.exists(signing_file): return False if not self.check_permissions(signing_file): log.warning('Wrong permissions for %s, ignoring content', signing_file) return False mtime = os.path.getmtime(signing_file) if self.signing_files.get(signing_file, {}).get('mtime') != mtime: self.signing_files.setdefault(signing_file, {})['mtime'] = mtime with salt.utils.files.fopen(signing_file, 'r') as fp_: self.signing_files[signing_file]['data'] = [ entry for entry in [line.strip() for line in fp_] if not entry.strip().startswith('#') ] return any(salt.utils.stringutils.expr_match(keyid, line) for line in self.signing_files[signing_file].get('data', []))
[ "def", "check_signing_file", "(", "self", ",", "keyid", ",", "signing_file", ")", ":", "if", "not", "signing_file", "or", "not", "os", ".", "path", ".", "exists", "(", "signing_file", ")", ":", "return", "False", "if", "not", "self", ".", "check_permission...
Check a keyid for membership in a signing file
[ "Check", "a", "keyid", "for", "membership", "in", "a", "signing", "file" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L344-L363
train
saltstack/salt
salt/daemons/masterapi.py
AutoKey.check_autosign_dir
def check_autosign_dir(self, keyid): ''' Check a keyid for membership in a autosign directory. ''' autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign') # cleanup expired files expire_minutes = self.opts.get('autosign_timeout', 120) if expire_minutes > 0: min_time = time.time() - (60 * int(expire_minutes)) for root, dirs, filenames in salt.utils.path.os_walk(autosign_dir): for f in filenames: stub_file = os.path.join(autosign_dir, f) mtime = os.path.getmtime(stub_file) if mtime < min_time: log.warning('Autosign keyid expired %s', stub_file) os.remove(stub_file) stub_file = os.path.join(autosign_dir, keyid) if not os.path.exists(stub_file): return False os.remove(stub_file) return True
python
def check_autosign_dir(self, keyid): ''' Check a keyid for membership in a autosign directory. ''' autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign') # cleanup expired files expire_minutes = self.opts.get('autosign_timeout', 120) if expire_minutes > 0: min_time = time.time() - (60 * int(expire_minutes)) for root, dirs, filenames in salt.utils.path.os_walk(autosign_dir): for f in filenames: stub_file = os.path.join(autosign_dir, f) mtime = os.path.getmtime(stub_file) if mtime < min_time: log.warning('Autosign keyid expired %s', stub_file) os.remove(stub_file) stub_file = os.path.join(autosign_dir, keyid) if not os.path.exists(stub_file): return False os.remove(stub_file) return True
[ "def", "check_autosign_dir", "(", "self", ",", "keyid", ")", ":", "autosign_dir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "opts", "[", "'pki_dir'", "]", ",", "'minions_autosign'", ")", "# cleanup expired files", "expire_minutes", "=", "self", ...
Check a keyid for membership in a autosign directory.
[ "Check", "a", "keyid", "for", "membership", "in", "a", "autosign", "directory", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L365-L387
train
saltstack/salt
salt/daemons/masterapi.py
AutoKey.check_autosign_grains
def check_autosign_grains(self, autosign_grains): ''' Check for matching grains in the autosign_grains_dir. ''' if not autosign_grains or 'autosign_grains_dir' not in self.opts: return False autosign_grains_dir = self.opts['autosign_grains_dir'] for root, dirs, filenames in os.walk(autosign_grains_dir): for grain in filenames: if grain in autosign_grains: grain_file = os.path.join(autosign_grains_dir, grain) if not self.check_permissions(grain_file): log.warning( 'Wrong permissions for %s, ignoring content', grain_file ) continue with salt.utils.files.fopen(grain_file, 'r') as f: for line in f: line = salt.utils.stringutils.to_unicode(line).strip() if line.startswith('#'): continue if autosign_grains[grain] == line: return True return False
python
def check_autosign_grains(self, autosign_grains): ''' Check for matching grains in the autosign_grains_dir. ''' if not autosign_grains or 'autosign_grains_dir' not in self.opts: return False autosign_grains_dir = self.opts['autosign_grains_dir'] for root, dirs, filenames in os.walk(autosign_grains_dir): for grain in filenames: if grain in autosign_grains: grain_file = os.path.join(autosign_grains_dir, grain) if not self.check_permissions(grain_file): log.warning( 'Wrong permissions for %s, ignoring content', grain_file ) continue with salt.utils.files.fopen(grain_file, 'r') as f: for line in f: line = salt.utils.stringutils.to_unicode(line).strip() if line.startswith('#'): continue if autosign_grains[grain] == line: return True return False
[ "def", "check_autosign_grains", "(", "self", ",", "autosign_grains", ")", ":", "if", "not", "autosign_grains", "or", "'autosign_grains_dir'", "not", "in", "self", ".", "opts", ":", "return", "False", "autosign_grains_dir", "=", "self", ".", "opts", "[", "'autosi...
Check for matching grains in the autosign_grains_dir.
[ "Check", "for", "matching", "grains", "in", "the", "autosign_grains_dir", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L389-L416
train
saltstack/salt
salt/daemons/masterapi.py
AutoKey.check_autosign
def check_autosign(self, keyid, autosign_grains=None): ''' Checks if the specified keyid should automatically be signed. ''' if self.opts['auto_accept']: return True if self.check_signing_file(keyid, self.opts.get('autosign_file', None)): return True if self.check_autosign_dir(keyid): return True if self.check_autosign_grains(autosign_grains): return True return False
python
def check_autosign(self, keyid, autosign_grains=None): ''' Checks if the specified keyid should automatically be signed. ''' if self.opts['auto_accept']: return True if self.check_signing_file(keyid, self.opts.get('autosign_file', None)): return True if self.check_autosign_dir(keyid): return True if self.check_autosign_grains(autosign_grains): return True return False
[ "def", "check_autosign", "(", "self", ",", "keyid", ",", "autosign_grains", "=", "None", ")", ":", "if", "self", ".", "opts", "[", "'auto_accept'", "]", ":", "return", "True", "if", "self", ".", "check_signing_file", "(", "keyid", ",", "self", ".", "opts...
Checks if the specified keyid should automatically be signed.
[ "Checks", "if", "the", "specified", "keyid", "should", "automatically", "be", "signed", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L427-L439
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs.__setup_fileserver
def __setup_fileserver(self): ''' Set the local file objects from the file server interface ''' fs_ = salt.fileserver.Fileserver(self.opts) self._serve_file = fs_.serve_file self._file_find = fs_._find_file self._file_hash = fs_.file_hash self._file_list = fs_.file_list self._file_list_emptydirs = fs_.file_list_emptydirs self._dir_list = fs_.dir_list self._symlink_list = fs_.symlink_list self._file_envs = fs_.envs
python
def __setup_fileserver(self): ''' Set the local file objects from the file server interface ''' fs_ = salt.fileserver.Fileserver(self.opts) self._serve_file = fs_.serve_file self._file_find = fs_._find_file self._file_hash = fs_.file_hash self._file_list = fs_.file_list self._file_list_emptydirs = fs_.file_list_emptydirs self._dir_list = fs_.dir_list self._symlink_list = fs_.symlink_list self._file_envs = fs_.envs
[ "def", "__setup_fileserver", "(", "self", ")", ":", "fs_", "=", "salt", ".", "fileserver", ".", "Fileserver", "(", "self", ".", "opts", ")", "self", ".", "_serve_file", "=", "fs_", ".", "serve_file", "self", ".", "_file_find", "=", "fs_", ".", "_find_fil...
Set the local file objects from the file server interface
[ "Set", "the", "local", "file", "objects", "from", "the", "file", "server", "interface" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L469-L481
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs.__verify_minion_publish
def __verify_minion_publish(self, load): ''' Verify that the passed information authorized a minion to execute ''' # Verify that the load is valid if 'peer' not in self.opts: return False if not isinstance(self.opts['peer'], dict): return False if any(key not in load for key in ('fun', 'arg', 'tgt', 'ret', 'id')): return False # If the command will make a recursive publish don't run if re.match('publish.*', load['fun']): return False # Check the permissions for this minion perms = [] for match in self.opts['peer']: if re.match(match, load['id']): # This is the list of funcs/modules! if isinstance(self.opts['peer'][match], list): perms.extend(self.opts['peer'][match]) if ',' in load['fun']: # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']] load['fun'] = load['fun'].split(',') arg_ = [] for arg in load['arg']: arg_.append(arg.split()) load['arg'] = arg_ return self.ckminions.auth_check( perms, load['fun'], load['arg'], load['tgt'], load.get('tgt_type', 'glob'), publish_validate=True)
python
def __verify_minion_publish(self, load): ''' Verify that the passed information authorized a minion to execute ''' # Verify that the load is valid if 'peer' not in self.opts: return False if not isinstance(self.opts['peer'], dict): return False if any(key not in load for key in ('fun', 'arg', 'tgt', 'ret', 'id')): return False # If the command will make a recursive publish don't run if re.match('publish.*', load['fun']): return False # Check the permissions for this minion perms = [] for match in self.opts['peer']: if re.match(match, load['id']): # This is the list of funcs/modules! if isinstance(self.opts['peer'][match], list): perms.extend(self.opts['peer'][match]) if ',' in load['fun']: # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']] load['fun'] = load['fun'].split(',') arg_ = [] for arg in load['arg']: arg_.append(arg.split()) load['arg'] = arg_ return self.ckminions.auth_check( perms, load['fun'], load['arg'], load['tgt'], load.get('tgt_type', 'glob'), publish_validate=True)
[ "def", "__verify_minion_publish", "(", "self", ",", "load", ")", ":", "# Verify that the load is valid", "if", "'peer'", "not", "in", "self", ".", "opts", ":", "return", "False", "if", "not", "isinstance", "(", "self", ".", "opts", "[", "'peer'", "]", ",", ...
Verify that the passed information authorized a minion to execute
[ "Verify", "that", "the", "passed", "information", "authorized", "a", "minion", "to", "execute" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L483-L517
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs._master_opts
def _master_opts(self, load): ''' Return the master options to the minion ''' mopts = {} file_roots = {} envs = self._file_envs() for saltenv in envs: if saltenv not in file_roots: file_roots[saltenv] = [] mopts['file_roots'] = file_roots mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy'] mopts['env_order'] = self.opts['env_order'] mopts['default_top'] = self.opts['default_top'] if load.get('env_only'): return mopts mopts['renderer'] = self.opts['renderer'] mopts['failhard'] = self.opts['failhard'] mopts['state_top'] = self.opts['state_top'] mopts['state_top_saltenv'] = self.opts['state_top_saltenv'] mopts['nodegroups'] = self.opts['nodegroups'] mopts['state_auto_order'] = self.opts['state_auto_order'] mopts['state_events'] = self.opts['state_events'] mopts['state_aggregate'] = self.opts['state_aggregate'] mopts['jinja_env'] = self.opts['jinja_env'] mopts['jinja_sls_env'] = self.opts['jinja_sls_env'] mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks'] mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks'] return mopts
python
def _master_opts(self, load): ''' Return the master options to the minion ''' mopts = {} file_roots = {} envs = self._file_envs() for saltenv in envs: if saltenv not in file_roots: file_roots[saltenv] = [] mopts['file_roots'] = file_roots mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy'] mopts['env_order'] = self.opts['env_order'] mopts['default_top'] = self.opts['default_top'] if load.get('env_only'): return mopts mopts['renderer'] = self.opts['renderer'] mopts['failhard'] = self.opts['failhard'] mopts['state_top'] = self.opts['state_top'] mopts['state_top_saltenv'] = self.opts['state_top_saltenv'] mopts['nodegroups'] = self.opts['nodegroups'] mopts['state_auto_order'] = self.opts['state_auto_order'] mopts['state_events'] = self.opts['state_events'] mopts['state_aggregate'] = self.opts['state_aggregate'] mopts['jinja_env'] = self.opts['jinja_env'] mopts['jinja_sls_env'] = self.opts['jinja_sls_env'] mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks'] mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks'] return mopts
[ "def", "_master_opts", "(", "self", ",", "load", ")", ":", "mopts", "=", "{", "}", "file_roots", "=", "{", "}", "envs", "=", "self", ".", "_file_envs", "(", ")", "for", "saltenv", "in", "envs", ":", "if", "saltenv", "not", "in", "file_roots", ":", ...
Return the master options to the minion
[ "Return", "the", "master", "options", "to", "the", "minion" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L519-L547
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs._master_tops
def _master_tops(self, load, skip_verify=False): ''' Return the results from master_tops if configured ''' if not skip_verify: if 'id' not in load: log.error('Received call for external nodes without an id') return {} if not salt.utils.verify.valid_id(self.opts, load['id']): return {} # Evaluate all configured master_tops interfaces opts = {} grains = {} ret = {} if 'opts' in load: opts = load['opts'] if 'grains' in load['opts']: grains = load['opts']['grains'] for fun in self.tops: if fun not in self.opts.get('master_tops', {}): continue try: ret = salt.utils.dictupdate.merge(ret, self.tops[fun](opts=opts, grains=grains), merge_lists=True) except Exception as exc: # If anything happens in the top generation, log it and move on log.error( 'Top function %s failed with error %s for minion %s', fun, exc, load['id'] ) return ret
python
def _master_tops(self, load, skip_verify=False): ''' Return the results from master_tops if configured ''' if not skip_verify: if 'id' not in load: log.error('Received call for external nodes without an id') return {} if not salt.utils.verify.valid_id(self.opts, load['id']): return {} # Evaluate all configured master_tops interfaces opts = {} grains = {} ret = {} if 'opts' in load: opts = load['opts'] if 'grains' in load['opts']: grains = load['opts']['grains'] for fun in self.tops: if fun not in self.opts.get('master_tops', {}): continue try: ret = salt.utils.dictupdate.merge(ret, self.tops[fun](opts=opts, grains=grains), merge_lists=True) except Exception as exc: # If anything happens in the top generation, log it and move on log.error( 'Top function %s failed with error %s for minion %s', fun, exc, load['id'] ) return ret
[ "def", "_master_tops", "(", "self", ",", "load", ",", "skip_verify", "=", "False", ")", ":", "if", "not", "skip_verify", ":", "if", "'id'", "not", "in", "load", ":", "log", ".", "error", "(", "'Received call for external nodes without an id'", ")", "return", ...
Return the results from master_tops if configured
[ "Return", "the", "results", "from", "master_tops", "if", "configured" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L549-L580
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs._mine_get
def _mine_get(self, load, skip_verify=False): ''' Gathers the data from the specified minions' mine ''' if not skip_verify: if any(key not in load for key in ('id', 'tgt', 'fun')): return {} if isinstance(load['fun'], six.string_types): functions = list(set(load['fun'].split(','))) _ret_dict = len(functions) > 1 elif isinstance(load['fun'], list): functions = load['fun'] _ret_dict = True else: return {} functions_allowed = [] if 'mine_get' in self.opts: # If master side acl defined. if not isinstance(self.opts['mine_get'], dict): return {} perms = set() for match in self.opts['mine_get']: if re.match(match, load['id']): if isinstance(self.opts['mine_get'][match], list): perms.update(self.opts['mine_get'][match]) for fun in functions: if any(re.match(perm, fun) for perm in perms): functions_allowed.append(fun) if not functions_allowed: return {} else: functions_allowed = functions ret = {} if not salt.utils.verify.valid_id(self.opts, load['id']): return ret expr_form = load.get('expr_form') if expr_form is not None and 'tgt_type' not in load: salt.utils.versions.warn_until( 'Neon', '_mine_get: minion {0} uses pre-Nitrogen API key ' '"expr_form". 
Accepting for backwards compatibility ' 'but this is not guaranteed ' 'after the Neon release'.format(load['id']) ) match_type = expr_form else: match_type = load.get('tgt_type', 'glob') if match_type.lower() == 'pillar': match_type = 'pillar_exact' if match_type.lower() == 'compound': match_type = 'compound_pillar_exact' checker = salt.utils.minions.CkMinions(self.opts) _res = checker.check_minions( load['tgt'], match_type, greedy=False ) minions = _res['minions'] for minion in minions: fdata = self.cache.fetch('minions/{0}'.format(minion), 'mine') if not isinstance(fdata, dict): continue if not _ret_dict and functions_allowed and functions_allowed[0] in fdata: ret[minion] = fdata.get(functions_allowed[0]) elif _ret_dict: for fun in list(set(functions_allowed) & set(fdata.keys())): ret.setdefault(fun, {})[minion] = fdata.get(fun) return ret
python
def _mine_get(self, load, skip_verify=False): ''' Gathers the data from the specified minions' mine ''' if not skip_verify: if any(key not in load for key in ('id', 'tgt', 'fun')): return {} if isinstance(load['fun'], six.string_types): functions = list(set(load['fun'].split(','))) _ret_dict = len(functions) > 1 elif isinstance(load['fun'], list): functions = load['fun'] _ret_dict = True else: return {} functions_allowed = [] if 'mine_get' in self.opts: # If master side acl defined. if not isinstance(self.opts['mine_get'], dict): return {} perms = set() for match in self.opts['mine_get']: if re.match(match, load['id']): if isinstance(self.opts['mine_get'][match], list): perms.update(self.opts['mine_get'][match]) for fun in functions: if any(re.match(perm, fun) for perm in perms): functions_allowed.append(fun) if not functions_allowed: return {} else: functions_allowed = functions ret = {} if not salt.utils.verify.valid_id(self.opts, load['id']): return ret expr_form = load.get('expr_form') if expr_form is not None and 'tgt_type' not in load: salt.utils.versions.warn_until( 'Neon', '_mine_get: minion {0} uses pre-Nitrogen API key ' '"expr_form". 
Accepting for backwards compatibility ' 'but this is not guaranteed ' 'after the Neon release'.format(load['id']) ) match_type = expr_form else: match_type = load.get('tgt_type', 'glob') if match_type.lower() == 'pillar': match_type = 'pillar_exact' if match_type.lower() == 'compound': match_type = 'compound_pillar_exact' checker = salt.utils.minions.CkMinions(self.opts) _res = checker.check_minions( load['tgt'], match_type, greedy=False ) minions = _res['minions'] for minion in minions: fdata = self.cache.fetch('minions/{0}'.format(minion), 'mine') if not isinstance(fdata, dict): continue if not _ret_dict and functions_allowed and functions_allowed[0] in fdata: ret[minion] = fdata.get(functions_allowed[0]) elif _ret_dict: for fun in list(set(functions_allowed) & set(fdata.keys())): ret.setdefault(fun, {})[minion] = fdata.get(fun) return ret
[ "def", "_mine_get", "(", "self", ",", "load", ",", "skip_verify", "=", "False", ")", ":", "if", "not", "skip_verify", ":", "if", "any", "(", "key", "not", "in", "load", "for", "key", "in", "(", "'id'", ",", "'tgt'", ",", "'fun'", ")", ")", ":", "...
Gathers the data from the specified minions' mine
[ "Gathers", "the", "data", "from", "the", "specified", "minions", "mine" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L582-L658
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs._mine
def _mine(self, load, skip_verify=False): ''' Return the mine data ''' if not skip_verify: if 'id' not in load or 'data' not in load: return False if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False): cbank = 'minions/{0}'.format(load['id']) ckey = 'mine' if not load.get('clear', False): data = self.cache.fetch(cbank, ckey) if isinstance(data, dict): data.update(load['data']) load['data'] = data self.cache.store(cbank, ckey, load['data']) return True
python
def _mine(self, load, skip_verify=False): ''' Return the mine data ''' if not skip_verify: if 'id' not in load or 'data' not in load: return False if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False): cbank = 'minions/{0}'.format(load['id']) ckey = 'mine' if not load.get('clear', False): data = self.cache.fetch(cbank, ckey) if isinstance(data, dict): data.update(load['data']) load['data'] = data self.cache.store(cbank, ckey, load['data']) return True
[ "def", "_mine", "(", "self", ",", "load", ",", "skip_verify", "=", "False", ")", ":", "if", "not", "skip_verify", ":", "if", "'id'", "not", "in", "load", "or", "'data'", "not", "in", "load", ":", "return", "False", "if", "self", ".", "opts", ".", "...
Return the mine data
[ "Return", "the", "mine", "data" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L660-L676
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs._mine_delete
def _mine_delete(self, load): ''' Allow the minion to delete a specific function from its own mine ''' if 'id' not in load or 'fun' not in load: return False if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False): cbank = 'minions/{0}'.format(load['id']) ckey = 'mine' try: data = self.cache.fetch(cbank, ckey) if not isinstance(data, dict): return False if load['fun'] in data: del data[load['fun']] self.cache.store(cbank, ckey, data) except OSError: return False return True
python
def _mine_delete(self, load): ''' Allow the minion to delete a specific function from its own mine ''' if 'id' not in load or 'fun' not in load: return False if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False): cbank = 'minions/{0}'.format(load['id']) ckey = 'mine' try: data = self.cache.fetch(cbank, ckey) if not isinstance(data, dict): return False if load['fun'] in data: del data[load['fun']] self.cache.store(cbank, ckey, data) except OSError: return False return True
[ "def", "_mine_delete", "(", "self", ",", "load", ")", ":", "if", "'id'", "not", "in", "load", "or", "'fun'", "not", "in", "load", ":", "return", "False", "if", "self", ".", "opts", ".", "get", "(", "'minion_data_cache'", ",", "False", ")", "or", "sel...
Allow the minion to delete a specific function from its own mine
[ "Allow", "the", "minion", "to", "delete", "a", "specific", "function", "from", "its", "own", "mine" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L678-L696
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs._mine_flush
def _mine_flush(self, load, skip_verify=False): ''' Allow the minion to delete all of its own mine contents ''' if not skip_verify and 'id' not in load: return False if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False): return self.cache.flush('minions/{0}'.format(load['id']), 'mine') return True
python
def _mine_flush(self, load, skip_verify=False): ''' Allow the minion to delete all of its own mine contents ''' if not skip_verify and 'id' not in load: return False if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False): return self.cache.flush('minions/{0}'.format(load['id']), 'mine') return True
[ "def", "_mine_flush", "(", "self", ",", "load", ",", "skip_verify", "=", "False", ")", ":", "if", "not", "skip_verify", "and", "'id'", "not", "in", "load", ":", "return", "False", "if", "self", ".", "opts", ".", "get", "(", "'minion_data_cache'", ",", ...
Allow the minion to delete all of its own mine contents
[ "Allow", "the", "minion", "to", "delete", "all", "of", "its", "own", "mine", "contents" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L698-L706
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs._file_recv
def _file_recv(self, load): ''' Allows minions to send files to the master, files are sent to the master file cache ''' if any(key not in load for key in ('id', 'path', 'loc')): return False if not self.opts['file_recv'] or os.path.isabs(load['path']): return False if os.path.isabs(load['path']) or '../' in load['path']: # Can overwrite master files!! return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False file_recv_max_size = 1024*1024 * self.opts['file_recv_max_size'] if 'loc' in load and load['loc'] < 0: log.error('Invalid file pointer: load[loc] < 0') return False if load.get('size', 0) > file_recv_max_size: log.error( 'Exceeding file_recv_max_size limit: %s', file_recv_max_size ) return False if len(load['data']) + load.get('loc', 0) > file_recv_max_size: log.error( 'Exceeding file_recv_max_size limit: %s', file_recv_max_size ) return False # Normalize Windows paths normpath = load['path'] if ':' in normpath: # make sure double backslashes are normalized normpath = normpath.replace('\\', '/') normpath = os.path.normpath(normpath) cpath = os.path.join( self.opts['cachedir'], 'minions', load['id'], 'files', normpath) cdir = os.path.dirname(cpath) if not os.path.isdir(cdir): try: os.makedirs(cdir) except os.error: pass if os.path.isfile(cpath) and load['loc'] != 0: mode = 'ab' else: mode = 'wb' with salt.utils.files.fopen(cpath, mode) as fp_: if load['loc']: fp_.seek(load['loc']) fp_.write(salt.utils.stringutils.to_str(load['data'])) return True
python
def _file_recv(self, load): ''' Allows minions to send files to the master, files are sent to the master file cache ''' if any(key not in load for key in ('id', 'path', 'loc')): return False if not self.opts['file_recv'] or os.path.isabs(load['path']): return False if os.path.isabs(load['path']) or '../' in load['path']: # Can overwrite master files!! return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False file_recv_max_size = 1024*1024 * self.opts['file_recv_max_size'] if 'loc' in load and load['loc'] < 0: log.error('Invalid file pointer: load[loc] < 0') return False if load.get('size', 0) > file_recv_max_size: log.error( 'Exceeding file_recv_max_size limit: %s', file_recv_max_size ) return False if len(load['data']) + load.get('loc', 0) > file_recv_max_size: log.error( 'Exceeding file_recv_max_size limit: %s', file_recv_max_size ) return False # Normalize Windows paths normpath = load['path'] if ':' in normpath: # make sure double backslashes are normalized normpath = normpath.replace('\\', '/') normpath = os.path.normpath(normpath) cpath = os.path.join( self.opts['cachedir'], 'minions', load['id'], 'files', normpath) cdir = os.path.dirname(cpath) if not os.path.isdir(cdir): try: os.makedirs(cdir) except os.error: pass if os.path.isfile(cpath) and load['loc'] != 0: mode = 'ab' else: mode = 'wb' with salt.utils.files.fopen(cpath, mode) as fp_: if load['loc']: fp_.seek(load['loc']) fp_.write(salt.utils.stringutils.to_str(load['data'])) return True
[ "def", "_file_recv", "(", "self", ",", "load", ")", ":", "if", "any", "(", "key", "not", "in", "load", "for", "key", "in", "(", "'id'", ",", "'path'", ",", "'loc'", ")", ")", ":", "return", "False", "if", "not", "self", ".", "opts", "[", "'file_r...
Allows minions to send files to the master, files are sent to the master file cache
[ "Allows", "minions", "to", "send", "files", "to", "the", "master", "files", "are", "sent", "to", "the", "master", "file", "cache" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L708-L767
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs._pillar
def _pillar(self, load): ''' Return the pillar data for the minion ''' if any(key not in load for key in ('id', 'grains')): return False # pillar = salt.pillar.Pillar( log.debug('Master _pillar using ext: %s', load.get('ext')) pillar = salt.pillar.get_pillar( self.opts, load['grains'], load['id'], load.get('saltenv', load.get('env')), load.get('ext'), self.mminion.functions, pillar_override=load.get('pillar_override', {})) data = pillar.compile_pillar() if self.opts.get('minion_data_cache', False): self.cache.store('minions/{0}'.format(load['id']), 'data', {'grains': load['grains'], 'pillar': data}) if self.opts.get('minion_data_cache_events') is True: self.event.fire_event({'comment': 'Minion data cache refresh'}, salt.utils.event.tagify(load['id'], 'refresh', 'minion')) return data
python
def _pillar(self, load): ''' Return the pillar data for the minion ''' if any(key not in load for key in ('id', 'grains')): return False # pillar = salt.pillar.Pillar( log.debug('Master _pillar using ext: %s', load.get('ext')) pillar = salt.pillar.get_pillar( self.opts, load['grains'], load['id'], load.get('saltenv', load.get('env')), load.get('ext'), self.mminion.functions, pillar_override=load.get('pillar_override', {})) data = pillar.compile_pillar() if self.opts.get('minion_data_cache', False): self.cache.store('minions/{0}'.format(load['id']), 'data', {'grains': load['grains'], 'pillar': data}) if self.opts.get('minion_data_cache_events') is True: self.event.fire_event({'comment': 'Minion data cache refresh'}, salt.utils.event.tagify(load['id'], 'refresh', 'minion')) return data
[ "def", "_pillar", "(", "self", ",", "load", ")", ":", "if", "any", "(", "key", "not", "in", "load", "for", "key", "in", "(", "'id'", ",", "'grains'", ")", ")", ":", "return", "False", "# pillar = salt.pillar.Pillar(", "log", ".", "debug", "(", "...
Return the pillar data for the minion
[ "Return", "the", "pillar", "data", "for", "the", "minion" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L769-L792
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs._minion_event
def _minion_event(self, load): ''' Receive an event from the minion and fire it on the master event interface ''' if 'id' not in load: return False if 'events' not in load and ('tag' not in load or 'data' not in load): return False if 'events' in load: for event in load['events']: if 'data' in event: event_data = event['data'] else: event_data = event self.event.fire_event(event_data, event['tag']) # old dup event if load.get('pretag') is not None: self.event.fire_event(event_data, salt.utils.event.tagify(event['tag'], base=load['pretag'])) else: tag = load['tag'] self.event.fire_event(load, tag) return True
python
def _minion_event(self, load): ''' Receive an event from the minion and fire it on the master event interface ''' if 'id' not in load: return False if 'events' not in load and ('tag' not in load or 'data' not in load): return False if 'events' in load: for event in load['events']: if 'data' in event: event_data = event['data'] else: event_data = event self.event.fire_event(event_data, event['tag']) # old dup event if load.get('pretag') is not None: self.event.fire_event(event_data, salt.utils.event.tagify(event['tag'], base=load['pretag'])) else: tag = load['tag'] self.event.fire_event(load, tag) return True
[ "def", "_minion_event", "(", "self", ",", "load", ")", ":", "if", "'id'", "not", "in", "load", ":", "return", "False", "if", "'events'", "not", "in", "load", "and", "(", "'tag'", "not", "in", "load", "or", "'data'", "not", "in", "load", ")", ":", "...
Receive an event from the minion and fire it on the master event interface
[ "Receive", "an", "event", "from", "the", "minion", "and", "fire", "it", "on", "the", "master", "event", "interface" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L794-L815
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs._return
def _return(self, load): ''' Handle the return data sent from the minions ''' # Generate EndTime endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid(self.opts)) # If the return data is invalid, just ignore it if any(key not in load for key in ('return', 'jid', 'id')): return False if load['jid'] == 'req': # The minion is returning a standalone job, request a jobid prep_fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid'] = self.mminion.returners[prep_fstr](nocache=load.get('nocache', False)) # save the load, since we don't have it saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[saveload_fstr](load['jid'], load) log.info('Got return from %s for job %s', load['id'], load['jid']) self.event.fire_event(load, load['jid']) # old dup event self.event.fire_event(load, salt.utils.event.tagify([load['jid'], 'ret', load['id']], 'job')) self.event.fire_ret_load(load) if not self.opts['job_cache'] or self.opts.get('ext_job_cache'): return fstr = '{0}.update_endtime'.format(self.opts['master_job_cache']) if (self.opts.get('job_cache_store_endtime') and fstr in self.mminion.returners): self.mminion.returners[fstr](load['jid'], endtime) fstr = '{0}.returner'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load)
python
def _return(self, load): ''' Handle the return data sent from the minions ''' # Generate EndTime endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid(self.opts)) # If the return data is invalid, just ignore it if any(key not in load for key in ('return', 'jid', 'id')): return False if load['jid'] == 'req': # The minion is returning a standalone job, request a jobid prep_fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid'] = self.mminion.returners[prep_fstr](nocache=load.get('nocache', False)) # save the load, since we don't have it saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[saveload_fstr](load['jid'], load) log.info('Got return from %s for job %s', load['id'], load['jid']) self.event.fire_event(load, load['jid']) # old dup event self.event.fire_event(load, salt.utils.event.tagify([load['jid'], 'ret', load['id']], 'job')) self.event.fire_ret_load(load) if not self.opts['job_cache'] or self.opts.get('ext_job_cache'): return fstr = '{0}.update_endtime'.format(self.opts['master_job_cache']) if (self.opts.get('job_cache_store_endtime') and fstr in self.mminion.returners): self.mminion.returners[fstr](load['jid'], endtime) fstr = '{0}.returner'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load)
[ "def", "_return", "(", "self", ",", "load", ")", ":", "# Generate EndTime", "endtime", "=", "salt", ".", "utils", ".", "jid", ".", "jid_to_time", "(", "salt", ".", "utils", ".", "jid", ".", "gen_jid", "(", "self", ".", "opts", ")", ")", "# If the retur...
Handle the return data sent from the minions
[ "Handle", "the", "return", "data", "sent", "from", "the", "minions" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L817-L848
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs._syndic_return
def _syndic_return(self, load): ''' Receive a syndic minion return and format it to look like returns from individual minions. ''' # Verify the load if any(key not in load for key in ('return', 'jid', 'id')): return None # if we have a load, save it if 'load' in load: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load['load']) # Format individual return loads for key, item in six.iteritems(load['return']): ret = {'jid': load['jid'], 'id': key, 'return': item} if 'out' in load: ret['out'] = load['out'] self._return(ret)
python
def _syndic_return(self, load): ''' Receive a syndic minion return and format it to look like returns from individual minions. ''' # Verify the load if any(key not in load for key in ('return', 'jid', 'id')): return None # if we have a load, save it if 'load' in load: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load['load']) # Format individual return loads for key, item in six.iteritems(load['return']): ret = {'jid': load['jid'], 'id': key, 'return': item} if 'out' in load: ret['out'] = load['out'] self._return(ret)
[ "def", "_syndic_return", "(", "self", ",", "load", ")", ":", "# Verify the load", "if", "any", "(", "key", "not", "in", "load", "for", "key", "in", "(", "'return'", ",", "'jid'", ",", "'id'", ")", ")", ":", "return", "None", "# if we have a load, save it",...
Receive a syndic minion return and format it to look like returns from individual minions.
[ "Receive", "a", "syndic", "minion", "return", "and", "format", "it", "to", "look", "like", "returns", "from", "individual", "minions", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L850-L870
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs.minion_runner
def minion_runner(self, load): ''' Execute a runner from a minion, return the runner's function data ''' if 'peer_run' not in self.opts: return {} if not isinstance(self.opts['peer_run'], dict): return {} if any(key not in load for key in ('fun', 'arg', 'id')): return {} perms = set() for match in self.opts['peer_run']: if re.match(match, load['id']): # This is the list of funcs/modules! if isinstance(self.opts['peer_run'][match], list): perms.update(self.opts['peer_run'][match]) good = False for perm in perms: if re.match(perm, load['fun']): good = True if not good: # The minion is not who it says it is! # We don't want to listen to it! log.warning('Minion id %s is not who it says it is!', load['id']) return {} # Prepare the runner object opts = {} opts.update(self.opts) opts.update({'fun': load['fun'], 'arg': salt.utils.args.parse_input( load['arg'], no_parse=load.get('no_parse', [])), 'id': load['id'], 'doc': False, 'conf_file': self.opts['conf_file']}) runner = salt.runner.Runner(opts) return runner.run()
python
def minion_runner(self, load): ''' Execute a runner from a minion, return the runner's function data ''' if 'peer_run' not in self.opts: return {} if not isinstance(self.opts['peer_run'], dict): return {} if any(key not in load for key in ('fun', 'arg', 'id')): return {} perms = set() for match in self.opts['peer_run']: if re.match(match, load['id']): # This is the list of funcs/modules! if isinstance(self.opts['peer_run'][match], list): perms.update(self.opts['peer_run'][match]) good = False for perm in perms: if re.match(perm, load['fun']): good = True if not good: # The minion is not who it says it is! # We don't want to listen to it! log.warning('Minion id %s is not who it says it is!', load['id']) return {} # Prepare the runner object opts = {} opts.update(self.opts) opts.update({'fun': load['fun'], 'arg': salt.utils.args.parse_input( load['arg'], no_parse=load.get('no_parse', [])), 'id': load['id'], 'doc': False, 'conf_file': self.opts['conf_file']}) runner = salt.runner.Runner(opts) return runner.run()
[ "def", "minion_runner", "(", "self", ",", "load", ")", ":", "if", "'peer_run'", "not", "in", "self", ".", "opts", ":", "return", "{", "}", "if", "not", "isinstance", "(", "self", ".", "opts", "[", "'peer_run'", "]", ",", "dict", ")", ":", "return", ...
Execute a runner from a minion, return the runner's function data
[ "Execute", "a", "runner", "from", "a", "minion", "return", "the", "runner", "s", "function", "data" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L872-L908
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs.pub_ret
def pub_ret(self, load, skip_verify=False): ''' Request the return data from a specific jid, only allowed if the requesting minion also initialted the execution. ''' if not skip_verify and any(key not in load for key in ('jid', 'id')): return {} else: auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') if not os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, load['jid']) with salt.utils.files.fopen(jid_fn, 'r') as fp_: if not load['id'] == salt.utils.stringutils.to_unicode(fp_.read()): return {} return self.local.get_cache_returns(load['jid'])
python
def pub_ret(self, load, skip_verify=False): ''' Request the return data from a specific jid, only allowed if the requesting minion also initialted the execution. ''' if not skip_verify and any(key not in load for key in ('jid', 'id')): return {} else: auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') if not os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, load['jid']) with salt.utils.files.fopen(jid_fn, 'r') as fp_: if not load['id'] == salt.utils.stringutils.to_unicode(fp_.read()): return {} return self.local.get_cache_returns(load['jid'])
[ "def", "pub_ret", "(", "self", ",", "load", ",", "skip_verify", "=", "False", ")", ":", "if", "not", "skip_verify", "and", "any", "(", "key", "not", "in", "load", "for", "key", "in", "(", "'jid'", ",", "'id'", ")", ")", ":", "return", "{", "}", "...
Request the return data from a specific jid, only allowed if the requesting minion also initialted the execution.
[ "Request", "the", "return", "data", "from", "a", "specific", "jid", "only", "allowed", "if", "the", "requesting", "minion", "also", "initialted", "the", "execution", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L910-L928
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs.minion_pub
def minion_pub(self, load): ''' Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions The config will look like this: peer: .*: - .* This configuration will enable all minions to execute all commands. peer: foo.example.com: - test.* This configuration will only allow the minion foo.example.com to execute commands from the test module ''' if not self.__verify_minion_publish(load): return {} # Set up the publication payload pub_load = { 'fun': load['fun'], 'arg': salt.utils.args.parse_input( load['arg'], no_parse=load.get('no_parse', [])), 'tgt_type': load.get('tgt_type', 'glob'), 'tgt': load['tgt'], 'ret': load['ret'], 'id': load['id'], } if 'tgt_type' in load: if load['tgt_type'].startswith('node'): if load['tgt'] in self.opts['nodegroups']: pub_load['tgt'] = self.opts['nodegroups'][load['tgt']] pub_load['tgt_type'] = 'compound' else: return {} else: pub_load['tgt_type'] = load['tgt_type'] ret = {} ret['jid'] = self.local.cmd_async(**pub_load) _res = self.ckminions.check_minions( load['tgt'], pub_load['tgt_type']) ret['minions'] = _res['minions'] auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') if not os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, six.text_type(ret['jid'])) with salt.utils.files.fopen(jid_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(load['id'])) return ret
python
def minion_pub(self, load): ''' Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions The config will look like this: peer: .*: - .* This configuration will enable all minions to execute all commands. peer: foo.example.com: - test.* This configuration will only allow the minion foo.example.com to execute commands from the test module ''' if not self.__verify_minion_publish(load): return {} # Set up the publication payload pub_load = { 'fun': load['fun'], 'arg': salt.utils.args.parse_input( load['arg'], no_parse=load.get('no_parse', [])), 'tgt_type': load.get('tgt_type', 'glob'), 'tgt': load['tgt'], 'ret': load['ret'], 'id': load['id'], } if 'tgt_type' in load: if load['tgt_type'].startswith('node'): if load['tgt'] in self.opts['nodegroups']: pub_load['tgt'] = self.opts['nodegroups'][load['tgt']] pub_load['tgt_type'] = 'compound' else: return {} else: pub_load['tgt_type'] = load['tgt_type'] ret = {} ret['jid'] = self.local.cmd_async(**pub_load) _res = self.ckminions.check_minions( load['tgt'], pub_load['tgt_type']) ret['minions'] = _res['minions'] auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') if not os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, six.text_type(ret['jid'])) with salt.utils.files.fopen(jid_fn, 'w+') as fp_: fp_.write(salt.utils.stringutils.to_str(load['id'])) return ret
[ "def", "minion_pub", "(", "self", ",", "load", ")", ":", "if", "not", "self", ".", "__verify_minion_publish", "(", "load", ")", ":", "return", "{", "}", "# Set up the publication payload", "pub_load", "=", "{", "'fun'", ":", "load", "[", "'fun'", "]", ",",...
Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions The config will look like this: peer: .*: - .* This configuration will enable all minions to execute all commands. peer: foo.example.com: - test.* This configuration will only allow the minion foo.example.com to execute commands from the test module
[ "Publish", "a", "command", "initiated", "from", "a", "minion", "this", "method", "executes", "minion", "restrictions", "so", "that", "the", "minion", "publication", "will", "only", "work", "if", "it", "is", "enabled", "in", "the", "config", ".", "The", "conf...
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L930-L984
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs.minion_publish
def minion_publish(self, load): ''' Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions The config will look like this: peer: .*: - .* This configuration will enable all minions to execute all commands. peer: foo.example.com: - test.* This configuration will only allow the minion foo.example.com to execute commands from the test module ''' if not self.__verify_minion_publish(load): return {} # Set up the publication payload pub_load = { 'fun': load['fun'], 'arg': salt.utils.args.parse_input( load['arg'], no_parse=load.get('no_parse', [])), 'tgt_type': load.get('tgt_type', 'glob'), 'tgt': load['tgt'], 'ret': load['ret'], 'id': load['id'], } if 'tmo' in load: try: pub_load['timeout'] = int(load['tmo']) except ValueError: msg = 'Failed to parse timeout value: {0}'.format( load['tmo']) log.warning(msg) return {} if 'timeout' in load: try: pub_load['timeout'] = int(load['timeout']) except ValueError: msg = 'Failed to parse timeout value: {0}'.format( load['timeout']) log.warning(msg) return {} if 'tgt_type' in load: if load['tgt_type'].startswith('node'): if load['tgt'] in self.opts['nodegroups']: pub_load['tgt'] = self.opts['nodegroups'][load['tgt']] pub_load['tgt_type'] = 'compound' else: return {} else: pub_load['tgt_type'] = load['tgt_type'] pub_load['raw'] = True ret = {} for minion in self.local.cmd_iter(**pub_load): if load.get('form', '') == 'full': data = minion if 'jid' in minion: ret['__jid__'] = minion['jid'] data['ret'] = data.pop('return') ret[minion['id']] = data else: ret[minion['id']] = minion['return'] if 'jid' in minion: ret['__jid__'] = minion['jid'] for key, val in six.iteritems(self.local.get_cache_returns(ret['__jid__'])): if key not in ret: ret[key] = val if load.get('form', '') != 'full': 
ret.pop('__jid__') return ret
python
def minion_publish(self, load): ''' Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions The config will look like this: peer: .*: - .* This configuration will enable all minions to execute all commands. peer: foo.example.com: - test.* This configuration will only allow the minion foo.example.com to execute commands from the test module ''' if not self.__verify_minion_publish(load): return {} # Set up the publication payload pub_load = { 'fun': load['fun'], 'arg': salt.utils.args.parse_input( load['arg'], no_parse=load.get('no_parse', [])), 'tgt_type': load.get('tgt_type', 'glob'), 'tgt': load['tgt'], 'ret': load['ret'], 'id': load['id'], } if 'tmo' in load: try: pub_load['timeout'] = int(load['tmo']) except ValueError: msg = 'Failed to parse timeout value: {0}'.format( load['tmo']) log.warning(msg) return {} if 'timeout' in load: try: pub_load['timeout'] = int(load['timeout']) except ValueError: msg = 'Failed to parse timeout value: {0}'.format( load['timeout']) log.warning(msg) return {} if 'tgt_type' in load: if load['tgt_type'].startswith('node'): if load['tgt'] in self.opts['nodegroups']: pub_load['tgt'] = self.opts['nodegroups'][load['tgt']] pub_load['tgt_type'] = 'compound' else: return {} else: pub_load['tgt_type'] = load['tgt_type'] pub_load['raw'] = True ret = {} for minion in self.local.cmd_iter(**pub_load): if load.get('form', '') == 'full': data = minion if 'jid' in minion: ret['__jid__'] = minion['jid'] data['ret'] = data.pop('return') ret[minion['id']] = data else: ret[minion['id']] = minion['return'] if 'jid' in minion: ret['__jid__'] = minion['jid'] for key, val in six.iteritems(self.local.get_cache_returns(ret['__jid__'])): if key not in ret: ret[key] = val if load.get('form', '') != 'full': 
ret.pop('__jid__') return ret
[ "def", "minion_publish", "(", "self", ",", "load", ")", ":", "if", "not", "self", ".", "__verify_minion_publish", "(", "load", ")", ":", "return", "{", "}", "# Set up the publication payload", "pub_load", "=", "{", "'fun'", ":", "load", "[", "'fun'", "]", ...
Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions The config will look like this: peer: .*: - .* This configuration will enable all minions to execute all commands. peer: foo.example.com: - test.* This configuration will only allow the minion foo.example.com to execute commands from the test module
[ "Publish", "a", "command", "initiated", "from", "a", "minion", "this", "method", "executes", "minion", "restrictions", "so", "that", "the", "minion", "publication", "will", "only", "work", "if", "it", "is", "enabled", "in", "the", "config", ".", "The", "conf...
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L986-L1060
train
saltstack/salt
salt/daemons/masterapi.py
RemoteFuncs.revoke_auth
def revoke_auth(self, load): ''' Allow a minion to request revocation of its own key ''' if 'id' not in load: return False keyapi = salt.key.Key(self.opts) keyapi.delete_key(load['id'], preserve_minions=load.get('preserve_minion_cache', False)) return True
python
def revoke_auth(self, load): ''' Allow a minion to request revocation of its own key ''' if 'id' not in load: return False keyapi = salt.key.Key(self.opts) keyapi.delete_key(load['id'], preserve_minions=load.get('preserve_minion_cache', False)) return True
[ "def", "revoke_auth", "(", "self", ",", "load", ")", ":", "if", "'id'", "not", "in", "load", ":", "return", "False", "keyapi", "=", "salt", ".", "key", ".", "Key", "(", "self", ".", "opts", ")", "keyapi", ".", "delete_key", "(", "load", "[", "'id'"...
Allow a minion to request revocation of its own key
[ "Allow", "a", "minion", "to", "request", "revocation", "of", "its", "own", "key" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L1062-L1072
train
saltstack/salt
salt/daemons/masterapi.py
LocalFuncs.runner
def runner(self, load): ''' Send a master control function back to the runner system ''' # All runner opts pass through eauth auth_type, err_name, key = self._prep_auth_info(load) # Authenticate auth_check = self.loadauth.check_authentication(load, auth_type) error = auth_check.get('error') if error: # Authentication error occurred: do not continue. return {'error': error} # Authorize runner_check = self.ckminions.runner_check( auth_check.get('auth_list', []), load['fun'], load['kwarg'] ) username = auth_check.get('username') if not runner_check: return {'error': {'name': err_name, 'message': 'Authentication failure of type "{0}" occurred ' 'for user {1}.'.format(auth_type, username)}} elif isinstance(runner_check, dict) and 'error' in runner_check: # A dictionary with an error name/message was handled by ckminions.runner_check return runner_check # Authorized. Do the job! try: fun = load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts) return runner_client.asynchronous(fun, load.get('kwarg', {}), username) except Exception as exc: log.exception('Exception occurred while introspecting %s') return {'error': {'name': exc.__class__.__name__, 'args': exc.args, 'message': six.text_type(exc)}}
python
def runner(self, load): ''' Send a master control function back to the runner system ''' # All runner opts pass through eauth auth_type, err_name, key = self._prep_auth_info(load) # Authenticate auth_check = self.loadauth.check_authentication(load, auth_type) error = auth_check.get('error') if error: # Authentication error occurred: do not continue. return {'error': error} # Authorize runner_check = self.ckminions.runner_check( auth_check.get('auth_list', []), load['fun'], load['kwarg'] ) username = auth_check.get('username') if not runner_check: return {'error': {'name': err_name, 'message': 'Authentication failure of type "{0}" occurred ' 'for user {1}.'.format(auth_type, username)}} elif isinstance(runner_check, dict) and 'error' in runner_check: # A dictionary with an error name/message was handled by ckminions.runner_check return runner_check # Authorized. Do the job! try: fun = load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts) return runner_client.asynchronous(fun, load.get('kwarg', {}), username) except Exception as exc: log.exception('Exception occurred while introspecting %s') return {'error': {'name': exc.__class__.__name__, 'args': exc.args, 'message': six.text_type(exc)}}
[ "def", "runner", "(", "self", ",", "load", ")", ":", "# All runner opts pass through eauth", "auth_type", ",", "err_name", ",", "key", "=", "self", ".", "_prep_auth_info", "(", "load", ")", "# Authenticate", "auth_check", "=", "self", ".", "loadauth", ".", "ch...
Send a master control function back to the runner system
[ "Send", "a", "master", "control", "function", "back", "to", "the", "runner", "system" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L1108-L1149
train
saltstack/salt
salt/daemons/masterapi.py
LocalFuncs.wheel
def wheel(self, load): ''' Send a master control function back to the wheel system ''' # All wheel ops pass through eauth auth_type, err_name, key = self._prep_auth_info(load) # Authenticate auth_check = self.loadauth.check_authentication( load, auth_type, key=key, show_username=True ) error = auth_check.get('error') if error: # Authentication error occurred: do not continue. return {'error': error} # Authorize username = auth_check.get('username') if auth_type != 'user': wheel_check = self.ckminions.wheel_check( auth_check.get('auth_list', []), load['fun'], load['kwarg'] ) if not wheel_check: return {'error': {'name': err_name, 'message': 'Authentication failure of type "{0}" occurred for ' 'user {1}.'.format(auth_type, username)}} elif isinstance(wheel_check, dict) and 'error' in wheel_check: # A dictionary with an error name/message was handled by ckminions.wheel_check return wheel_check # Authenticated. Do the job. jid = salt.utils.jid.gen_jid(self.opts) fun = load.pop('fun') tag = salt.utils.event.tagify(jid, prefix='wheel') data = {'fun': "wheel.{0}".format(fun), 'jid': jid, 'tag': tag, 'user': username} try: self.event.fire_event(data, salt.utils.event.tagify([jid, 'new'], 'wheel')) ret = self.wheel_.call_func(fun, **load) data['return'] = ret data['success'] = True self.event.fire_event(data, salt.utils.event.tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data} except Exception as exc: log.exception('Exception occurred while introspecting %s', fun) data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format( fun, exc.__class__.__name__, exc, ) data['success'] = False self.event.fire_event(data, salt.utils.event.tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data}
python
def wheel(self, load): ''' Send a master control function back to the wheel system ''' # All wheel ops pass through eauth auth_type, err_name, key = self._prep_auth_info(load) # Authenticate auth_check = self.loadauth.check_authentication( load, auth_type, key=key, show_username=True ) error = auth_check.get('error') if error: # Authentication error occurred: do not continue. return {'error': error} # Authorize username = auth_check.get('username') if auth_type != 'user': wheel_check = self.ckminions.wheel_check( auth_check.get('auth_list', []), load['fun'], load['kwarg'] ) if not wheel_check: return {'error': {'name': err_name, 'message': 'Authentication failure of type "{0}" occurred for ' 'user {1}.'.format(auth_type, username)}} elif isinstance(wheel_check, dict) and 'error' in wheel_check: # A dictionary with an error name/message was handled by ckminions.wheel_check return wheel_check # Authenticated. Do the job. jid = salt.utils.jid.gen_jid(self.opts) fun = load.pop('fun') tag = salt.utils.event.tagify(jid, prefix='wheel') data = {'fun': "wheel.{0}".format(fun), 'jid': jid, 'tag': tag, 'user': username} try: self.event.fire_event(data, salt.utils.event.tagify([jid, 'new'], 'wheel')) ret = self.wheel_.call_func(fun, **load) data['return'] = ret data['success'] = True self.event.fire_event(data, salt.utils.event.tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data} except Exception as exc: log.exception('Exception occurred while introspecting %s', fun) data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format( fun, exc.__class__.__name__, exc, ) data['success'] = False self.event.fire_event(data, salt.utils.event.tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data}
[ "def", "wheel", "(", "self", ",", "load", ")", ":", "# All wheel ops pass through eauth", "auth_type", ",", "err_name", ",", "key", "=", "self", ".", "_prep_auth_info", "(", "load", ")", "# Authenticate", "auth_check", "=", "self", ".", "loadauth", ".", "check...
Send a master control function back to the wheel system
[ "Send", "a", "master", "control", "function", "back", "to", "the", "wheel", "system" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L1151-L1213
train
saltstack/salt
salt/daemons/masterapi.py
LocalFuncs.mk_token
def mk_token(self, load): ''' Create and return an authentication token, the clear load needs to contain the eauth key and the needed authentication creds. ''' token = self.loadauth.mk_token(load) if not token: log.warning('Authentication failure of type "eauth" occurred.') return '' return token
python
def mk_token(self, load): ''' Create and return an authentication token, the clear load needs to contain the eauth key and the needed authentication creds. ''' token = self.loadauth.mk_token(load) if not token: log.warning('Authentication failure of type "eauth" occurred.') return '' return token
[ "def", "mk_token", "(", "self", ",", "load", ")", ":", "token", "=", "self", ".", "loadauth", ".", "mk_token", "(", "load", ")", "if", "not", "token", ":", "log", ".", "warning", "(", "'Authentication failure of type \"eauth\" occurred.'", ")", "return", "''...
Create and return an authentication token, the clear load needs to contain the eauth key and the needed authentication creds.
[ "Create", "and", "return", "an", "authentication", "token", "the", "clear", "load", "needs", "to", "contain", "the", "eauth", "key", "and", "the", "needed", "authentication", "creds", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L1215-L1224
train
saltstack/salt
salt/daemons/masterapi.py
LocalFuncs.publish
def publish(self, load): ''' This method sends out publications to the minions, it can only be used by the LocalClient. ''' extra = load.get('kwargs', {}) publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist']) if publisher_acl.user_is_blacklisted(load['user']) or \ publisher_acl.cmd_is_blacklisted(load['fun']): log.error( '%s does not have permissions to run %s. Please contact ' 'your local administrator if you believe this is in error.', load['user'], load['fun'] ) return {'error': {'name': 'AuthorizationError', 'message': 'Authorization error occurred.'}} # Retrieve the minions list delimiter = load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM) _res = self.ckminions.check_minions( load['tgt'], load.get('tgt_type', 'glob'), delimiter ) minions = _res['minions'] # Check for external auth calls and authenticate auth_type, err_name, key = self._prep_auth_info(extra) if auth_type == 'user': auth_check = self.loadauth.check_authentication(load, auth_type, key=key) else: auth_check = self.loadauth.check_authentication(extra, auth_type) # Setup authorization list variable and error information auth_list = auth_check.get('auth_list', []) error = auth_check.get('error') err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type) if error: # Authentication error occurred: do not continue. log.warning(err_msg) return {'error': {'name': 'AuthenticationError', 'message': 'Authentication error occurred.'}} # All Token, Eauth, and non-root users must pass the authorization check if auth_type != 'user' or (auth_type == 'user' and auth_list): # Authorize the request authorized = self.ckminions.auth_check( auth_list, load['fun'], load['arg'], load['tgt'], load.get('tgt_type', 'glob'), minions=minions, # always accept find_job whitelist=['saltutil.find_job'], ) if not authorized: # Authorization error occurred. Log warning and do not continue. 
log.warning(err_msg) return {'error': {'name': 'AuthorizationError', 'message': 'Authorization error occurred.'}} # Perform some specific auth_type tasks after the authorization check if auth_type == 'token': username = auth_check.get('username') load['user'] = username log.debug('Minion tokenized user = "%s"', username) elif auth_type == 'eauth': # The username we are attempting to auth with load['user'] = self.loadauth.load_name(extra) # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not minions: return { 'enc': 'clear', 'load': { 'jid': None, 'minions': minions } } # Retrieve the jid if not load['jid']: fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid'] = self.mminion.returners[fstr](nocache=extra.get('nocache', False)) self.event.fire_event({'minions': minions}, load['jid']) new_job_load = { 'jid': load['jid'], 'tgt_type': load['tgt_type'], 'tgt': load['tgt'], 'user': load['user'], 'fun': load['fun'], 'arg': salt.utils.args.parse_input( load['arg'], no_parse=load.get('no_parse', [])), 'minions': minions, } # Announce the job on the event bus self.event.fire_event(new_job_load, 'new_job') # old dup event self.event.fire_event(new_job_load, salt.utils.event.tagify([load['jid'], 'new'], 'job')) # Save the invocation information if self.opts['ext_job_cache']: try: fstr = '{0}.save_load'.format(self.opts['ext_job_cache']) self.mminion.returners[fstr](load['jid'], load) except KeyError: log.critical( 'The specified returner used for the external job cache ' '"%s" does not have a save_load function!', self.opts['ext_job_cache'] ) except Exception: log.critical( 'The specified returner threw a stack trace:', exc_info=True ) # always write out to the master job cache try: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load) except KeyError: log.critical( 'The specified returner used for the 
master job cache ' '"%s" does not have a save_load function!', self.opts['master_job_cache'] ) except Exception: log.critical( 'The specified returner threw a stack trace:', exc_info=True ) # Altering the contents of the publish load is serious!! Changes here # break compatibility with minion/master versions and even tiny # additions can have serious implications on the performance of the # publish commands. # # In short, check with Thomas Hatch before you even think about # touching this stuff, we can probably do what you want to do another # way that won't have a negative impact. pub_load = { 'fun': load['fun'], 'arg': salt.utils.args.parse_input( load['arg'], no_parse=load.get('no_parse', [])), 'tgt': load['tgt'], 'jid': load['jid'], 'ret': load['ret'], } if 'id' in extra: pub_load['id'] = extra['id'] if 'tgt_type' in load: pub_load['tgt_type'] = load['tgt_type'] if 'to' in load: pub_load['to'] = load['to'] if 'kwargs' in load: if 'ret_config' in load['kwargs']: pub_load['ret_config'] = load['kwargs'].get('ret_config') if 'metadata' in load['kwargs']: pub_load['metadata'] = load['kwargs'].get('metadata') if 'ret_kwargs' in load['kwargs']: pub_load['ret_kwargs'] = load['kwargs'].get('ret_kwargs') if 'user' in load: log.info( 'User %s Published command %s with jid %s', load['user'], load['fun'], load['jid'] ) pub_load['user'] = load['user'] else: log.info( 'Published command %s with jid %s', load['fun'], load['jid'] ) log.debug('Published command details %s', pub_load) return {'ret': { 'jid': load['jid'], 'minions': minions }, 'pub': pub_load }
python
def publish(self, load): ''' This method sends out publications to the minions, it can only be used by the LocalClient. ''' extra = load.get('kwargs', {}) publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist']) if publisher_acl.user_is_blacklisted(load['user']) or \ publisher_acl.cmd_is_blacklisted(load['fun']): log.error( '%s does not have permissions to run %s. Please contact ' 'your local administrator if you believe this is in error.', load['user'], load['fun'] ) return {'error': {'name': 'AuthorizationError', 'message': 'Authorization error occurred.'}} # Retrieve the minions list delimiter = load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM) _res = self.ckminions.check_minions( load['tgt'], load.get('tgt_type', 'glob'), delimiter ) minions = _res['minions'] # Check for external auth calls and authenticate auth_type, err_name, key = self._prep_auth_info(extra) if auth_type == 'user': auth_check = self.loadauth.check_authentication(load, auth_type, key=key) else: auth_check = self.loadauth.check_authentication(extra, auth_type) # Setup authorization list variable and error information auth_list = auth_check.get('auth_list', []) error = auth_check.get('error') err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type) if error: # Authentication error occurred: do not continue. log.warning(err_msg) return {'error': {'name': 'AuthenticationError', 'message': 'Authentication error occurred.'}} # All Token, Eauth, and non-root users must pass the authorization check if auth_type != 'user' or (auth_type == 'user' and auth_list): # Authorize the request authorized = self.ckminions.auth_check( auth_list, load['fun'], load['arg'], load['tgt'], load.get('tgt_type', 'glob'), minions=minions, # always accept find_job whitelist=['saltutil.find_job'], ) if not authorized: # Authorization error occurred. Log warning and do not continue. 
log.warning(err_msg) return {'error': {'name': 'AuthorizationError', 'message': 'Authorization error occurred.'}} # Perform some specific auth_type tasks after the authorization check if auth_type == 'token': username = auth_check.get('username') load['user'] = username log.debug('Minion tokenized user = "%s"', username) elif auth_type == 'eauth': # The username we are attempting to auth with load['user'] = self.loadauth.load_name(extra) # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not minions: return { 'enc': 'clear', 'load': { 'jid': None, 'minions': minions } } # Retrieve the jid if not load['jid']: fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid'] = self.mminion.returners[fstr](nocache=extra.get('nocache', False)) self.event.fire_event({'minions': minions}, load['jid']) new_job_load = { 'jid': load['jid'], 'tgt_type': load['tgt_type'], 'tgt': load['tgt'], 'user': load['user'], 'fun': load['fun'], 'arg': salt.utils.args.parse_input( load['arg'], no_parse=load.get('no_parse', [])), 'minions': minions, } # Announce the job on the event bus self.event.fire_event(new_job_load, 'new_job') # old dup event self.event.fire_event(new_job_load, salt.utils.event.tagify([load['jid'], 'new'], 'job')) # Save the invocation information if self.opts['ext_job_cache']: try: fstr = '{0}.save_load'.format(self.opts['ext_job_cache']) self.mminion.returners[fstr](load['jid'], load) except KeyError: log.critical( 'The specified returner used for the external job cache ' '"%s" does not have a save_load function!', self.opts['ext_job_cache'] ) except Exception: log.critical( 'The specified returner threw a stack trace:', exc_info=True ) # always write out to the master job cache try: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load) except KeyError: log.critical( 'The specified returner used for the 
master job cache ' '"%s" does not have a save_load function!', self.opts['master_job_cache'] ) except Exception: log.critical( 'The specified returner threw a stack trace:', exc_info=True ) # Altering the contents of the publish load is serious!! Changes here # break compatibility with minion/master versions and even tiny # additions can have serious implications on the performance of the # publish commands. # # In short, check with Thomas Hatch before you even think about # touching this stuff, we can probably do what you want to do another # way that won't have a negative impact. pub_load = { 'fun': load['fun'], 'arg': salt.utils.args.parse_input( load['arg'], no_parse=load.get('no_parse', [])), 'tgt': load['tgt'], 'jid': load['jid'], 'ret': load['ret'], } if 'id' in extra: pub_load['id'] = extra['id'] if 'tgt_type' in load: pub_load['tgt_type'] = load['tgt_type'] if 'to' in load: pub_load['to'] = load['to'] if 'kwargs' in load: if 'ret_config' in load['kwargs']: pub_load['ret_config'] = load['kwargs'].get('ret_config') if 'metadata' in load['kwargs']: pub_load['metadata'] = load['kwargs'].get('metadata') if 'ret_kwargs' in load['kwargs']: pub_load['ret_kwargs'] = load['kwargs'].get('ret_kwargs') if 'user' in load: log.info( 'User %s Published command %s with jid %s', load['user'], load['fun'], load['jid'] ) pub_load['user'] = load['user'] else: log.info( 'Published command %s with jid %s', load['fun'], load['jid'] ) log.debug('Published command details %s', pub_load) return {'ret': { 'jid': load['jid'], 'minions': minions }, 'pub': pub_load }
[ "def", "publish", "(", "self", ",", "load", ")", ":", "extra", "=", "load", ".", "get", "(", "'kwargs'", ",", "{", "}", ")", "publisher_acl", "=", "salt", ".", "acl", ".", "PublisherACL", "(", "self", ".", "opts", "[", "'publisher_acl_blacklist'", "]",...
This method sends out publications to the minions, it can only be used by the LocalClient.
[ "This", "method", "sends", "out", "publications", "to", "the", "minions", "it", "can", "only", "be", "used", "by", "the", "LocalClient", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L1234-L1428
train
saltstack/salt
salt/engines/slack.py
start
def start(token, control=False, trigger='!', groups=None, groups_pillar_name=None, fire_all=False, tag='salt/engines/slack'): ''' Listen to slack events and forward them to salt, new version ''' if (not token) or (not token.startswith('xoxb')): time.sleep(2) # don't respawn too quickly log.error('Slack bot token not found, bailing...') raise UserWarning('Slack Engine bot token not configured') try: client = SlackClient(token=token) message_generator = client.generate_triggered_messages(token, trigger, groups, groups_pillar_name) client.run_commands_from_slack_async(message_generator, fire_all, tag, control) except Exception: raise Exception('{}'.format(traceback.format_exc()))
python
def start(token, control=False, trigger='!', groups=None, groups_pillar_name=None, fire_all=False, tag='salt/engines/slack'): ''' Listen to slack events and forward them to salt, new version ''' if (not token) or (not token.startswith('xoxb')): time.sleep(2) # don't respawn too quickly log.error('Slack bot token not found, bailing...') raise UserWarning('Slack Engine bot token not configured') try: client = SlackClient(token=token) message_generator = client.generate_triggered_messages(token, trigger, groups, groups_pillar_name) client.run_commands_from_slack_async(message_generator, fire_all, tag, control) except Exception: raise Exception('{}'.format(traceback.format_exc()))
[ "def", "start", "(", "token", ",", "control", "=", "False", ",", "trigger", "=", "'!'", ",", "groups", "=", "None", ",", "groups_pillar_name", "=", "None", ",", "fire_all", "=", "False", ",", "tag", "=", "'salt/engines/slack'", ")", ":", "if", "(", "no...
Listen to slack events and forward them to salt, new version
[ "Listen", "to", "slack", "events", "and", "forward", "them", "to", "salt", "new", "version" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L828-L849
train
saltstack/salt
salt/engines/slack.py
SlackClient.get_slack_users
def get_slack_users(self, token): ''' Get all users from Slack ''' ret = salt.utils.slack.query(function='users', api_key=token, opts=__opts__) users = {} if 'message' in ret: for item in ret['message']: if 'is_bot' in item: if not item['is_bot']: users[item['name']] = item['id'] users[item['id']] = item['name'] return users
python
def get_slack_users(self, token): ''' Get all users from Slack ''' ret = salt.utils.slack.query(function='users', api_key=token, opts=__opts__) users = {} if 'message' in ret: for item in ret['message']: if 'is_bot' in item: if not item['is_bot']: users[item['name']] = item['id'] users[item['id']] = item['name'] return users
[ "def", "get_slack_users", "(", "self", ",", "token", ")", ":", "ret", "=", "salt", ".", "utils", ".", "slack", ".", "query", "(", "function", "=", "'users'", ",", "api_key", "=", "token", ",", "opts", "=", "__opts__", ")", "users", "=", "{", "}", "...
Get all users from Slack
[ "Get", "all", "users", "from", "Slack" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L197-L212
train
saltstack/salt
salt/engines/slack.py
SlackClient.get_slack_channels
def get_slack_channels(self, token): ''' Get all channel names from Slack ''' ret = salt.utils.slack.query( function='rooms', api_key=token, # These won't be honored until https://github.com/saltstack/salt/pull/41187/files is merged opts={ 'exclude_archived': True, 'exclude_members': True }) channels = {} if 'message' in ret: for item in ret['message']: channels[item['id']] = item['name'] return channels
python
def get_slack_channels(self, token): ''' Get all channel names from Slack ''' ret = salt.utils.slack.query( function='rooms', api_key=token, # These won't be honored until https://github.com/saltstack/salt/pull/41187/files is merged opts={ 'exclude_archived': True, 'exclude_members': True }) channels = {} if 'message' in ret: for item in ret['message']: channels[item['id']] = item['name'] return channels
[ "def", "get_slack_channels", "(", "self", ",", "token", ")", ":", "ret", "=", "salt", ".", "utils", ".", "slack", ".", "query", "(", "function", "=", "'rooms'", ",", "api_key", "=", "token", ",", "# These won't be honored until https://github.com/saltstack/salt/pu...
Get all channel names from Slack
[ "Get", "all", "channel", "names", "from", "Slack" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L214-L231
train
saltstack/salt
salt/engines/slack.py
SlackClient.get_config_groups
def get_config_groups(self, groups_conf, groups_pillar_name): ''' get info from groups in config, and from the named pillar todo: add specification for the minion to use to recover pillar ''' # Get groups # Default to returning something that'll never match ret_groups = { 'default': { 'users': set(), 'commands': set(), 'aliases': {}, 'default_target': {}, 'targets': {} } } # allow for empty groups in the config file, and instead let some/all of this come # from pillar data. if not groups_conf: use_groups = {} else: use_groups = groups_conf # First obtain group lists from pillars, then in case there is any overlap, iterate over the groups # that come from pillars. The configuration in files on disk/from startup # will override any configs from pillars. They are meant to be complementary not to provide overrides. log.debug('use_groups %s', use_groups) try: groups_gen = itertools.chain(self._groups_from_pillar(groups_pillar_name).items(), use_groups.items()) except AttributeError: log.warning('Failed to get groups from %s: %s or from config: %s', groups_pillar_name, self._groups_from_pillar(groups_pillar_name), use_groups ) groups_gen = [] for name, config in groups_gen: log.info('Trying to get %s and %s to be useful', name, config) ret_groups.setdefault(name, { 'users': set(), 'commands': set(), 'aliases': {}, 'default_target': {}, 'targets': {} }) try: ret_groups[name]['users'].update(set(config.get('users', []))) ret_groups[name]['commands'].update(set(config.get('commands', []))) ret_groups[name]['aliases'].update(config.get('aliases', {})) ret_groups[name]['default_target'].update(config.get('default_target', {})) ret_groups[name]['targets'].update(config.get('targets', {})) except (IndexError, AttributeError): log.warning("Couldn't use group %s. Check that targets is a dictionary and not a list", name) log.debug('Got the groups: %s', ret_groups) return ret_groups
python
def get_config_groups(self, groups_conf, groups_pillar_name): ''' get info from groups in config, and from the named pillar todo: add specification for the minion to use to recover pillar ''' # Get groups # Default to returning something that'll never match ret_groups = { 'default': { 'users': set(), 'commands': set(), 'aliases': {}, 'default_target': {}, 'targets': {} } } # allow for empty groups in the config file, and instead let some/all of this come # from pillar data. if not groups_conf: use_groups = {} else: use_groups = groups_conf # First obtain group lists from pillars, then in case there is any overlap, iterate over the groups # that come from pillars. The configuration in files on disk/from startup # will override any configs from pillars. They are meant to be complementary not to provide overrides. log.debug('use_groups %s', use_groups) try: groups_gen = itertools.chain(self._groups_from_pillar(groups_pillar_name).items(), use_groups.items()) except AttributeError: log.warning('Failed to get groups from %s: %s or from config: %s', groups_pillar_name, self._groups_from_pillar(groups_pillar_name), use_groups ) groups_gen = [] for name, config in groups_gen: log.info('Trying to get %s and %s to be useful', name, config) ret_groups.setdefault(name, { 'users': set(), 'commands': set(), 'aliases': {}, 'default_target': {}, 'targets': {} }) try: ret_groups[name]['users'].update(set(config.get('users', []))) ret_groups[name]['commands'].update(set(config.get('commands', []))) ret_groups[name]['aliases'].update(config.get('aliases', {})) ret_groups[name]['default_target'].update(config.get('default_target', {})) ret_groups[name]['targets'].update(config.get('targets', {})) except (IndexError, AttributeError): log.warning("Couldn't use group %s. Check that targets is a dictionary and not a list", name) log.debug('Got the groups: %s', ret_groups) return ret_groups
[ "def", "get_config_groups", "(", "self", ",", "groups_conf", ",", "groups_pillar_name", ")", ":", "# Get groups", "# Default to returning something that'll never match", "ret_groups", "=", "{", "'default'", ":", "{", "'users'", ":", "set", "(", ")", ",", "'commands'",...
get info from groups in config, and from the named pillar todo: add specification for the minion to use to recover pillar
[ "get", "info", "from", "groups", "in", "config", "and", "from", "the", "named", "pillar" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L233-L286
train
saltstack/salt
salt/engines/slack.py
SlackClient._groups_from_pillar
def _groups_from_pillar(self, pillar_name): ''' pillar_prefix is the pillar.get syntax for the pillar to be queried. Group name is gotten via the equivalent of using ``salt['pillar.get']('{}:{}'.format(pillar_prefix, group_name))`` in a jinja template. returns a dictionary (unless the pillar is mis-formatted) XXX: instead of using Caller, make the minion to use configurable so there could be some restrictions placed on what pillars can be used. ''' if pillar_name and __opts__['__role'] == 'minion': pillar_groups = __salt__['pillar.get'](pillar_name, {}) log.debug('Got pillar groups %s from pillar %s', pillar_groups, pillar_name) log.debug('pillar groups is %s', pillar_groups) log.debug('pillar groups type is %s', type(pillar_groups)) else: pillar_groups = {} return pillar_groups
python
def _groups_from_pillar(self, pillar_name): ''' pillar_prefix is the pillar.get syntax for the pillar to be queried. Group name is gotten via the equivalent of using ``salt['pillar.get']('{}:{}'.format(pillar_prefix, group_name))`` in a jinja template. returns a dictionary (unless the pillar is mis-formatted) XXX: instead of using Caller, make the minion to use configurable so there could be some restrictions placed on what pillars can be used. ''' if pillar_name and __opts__['__role'] == 'minion': pillar_groups = __salt__['pillar.get'](pillar_name, {}) log.debug('Got pillar groups %s from pillar %s', pillar_groups, pillar_name) log.debug('pillar groups is %s', pillar_groups) log.debug('pillar groups type is %s', type(pillar_groups)) else: pillar_groups = {} return pillar_groups
[ "def", "_groups_from_pillar", "(", "self", ",", "pillar_name", ")", ":", "if", "pillar_name", "and", "__opts__", "[", "'__role'", "]", "==", "'minion'", ":", "pillar_groups", "=", "__salt__", "[", "'pillar.get'", "]", "(", "pillar_name", ",", "{", "}", ")", ...
pillar_prefix is the pillar.get syntax for the pillar to be queried. Group name is gotten via the equivalent of using ``salt['pillar.get']('{}:{}'.format(pillar_prefix, group_name))`` in a jinja template. returns a dictionary (unless the pillar is mis-formatted) XXX: instead of using Caller, make the minion to use configurable so there could be some restrictions placed on what pillars can be used.
[ "pillar_prefix", "is", "the", "pillar", ".", "get", "syntax", "for", "the", "pillar", "to", "be", "queried", ".", "Group", "name", "is", "gotten", "via", "the", "equivalent", "of", "using", "salt", "[", "pillar", ".", "get", "]", "(", "{}", ":", "{}", ...
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L288-L306
train
saltstack/salt
salt/engines/slack.py
SlackClient.fire
def fire(self, tag, msg): ''' This replaces a function in main called 'fire' It fires an event into the salt bus. ''' if __opts__.get('__role') == 'master': fire_master = salt.utils.event.get_master_event( __opts__, __opts__['sock_dir']).fire_master else: fire_master = None if fire_master: fire_master(msg, tag) else: __salt__['event.send'](tag, msg)
python
def fire(self, tag, msg): ''' This replaces a function in main called 'fire' It fires an event into the salt bus. ''' if __opts__.get('__role') == 'master': fire_master = salt.utils.event.get_master_event( __opts__, __opts__['sock_dir']).fire_master else: fire_master = None if fire_master: fire_master(msg, tag) else: __salt__['event.send'](tag, msg)
[ "def", "fire", "(", "self", ",", "tag", ",", "msg", ")", ":", "if", "__opts__", ".", "get", "(", "'__role'", ")", "==", "'master'", ":", "fire_master", "=", "salt", ".", "utils", ".", "event", ".", "get_master_event", "(", "__opts__", ",", "__opts__", ...
This replaces a function in main called 'fire' It fires an event into the salt bus.
[ "This", "replaces", "a", "function", "in", "main", "called", "fire" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L308-L324
train
saltstack/salt
salt/engines/slack.py
SlackClient.can_user_run
def can_user_run(self, user, command, groups): ''' Break out the permissions into the following: Check whether a user is in any group, including whether a group has the '*' membership :type user: str :param user: The username being checked against :type command: str :param command: The command that is being invoked (e.g. test.ping) :type groups: dict :param groups: the dictionary with groups permissions structure. :rtype: tuple :returns: On a successful permitting match, returns 2-element tuple that contains the name of the group that successfully matched, and a dictionary containing the configuration of the group so it can be referenced. On failure it returns an empty tuple ''' log.info('%s wants to run %s with groups %s', user, command, groups) for key, val in groups.items(): if user not in val['users']: if '*' not in val['users']: continue # this doesn't grant permissions, pass if (command not in val['commands']) and (command not in val.get('aliases', {}).keys()): if '*' not in val['commands']: continue # again, pass log.info('Slack user %s permitted to run %s', user, command) return (key, val,) # matched this group, return the group log.info('Slack user %s denied trying to run %s', user, command) return ()
python
def can_user_run(self, user, command, groups): ''' Break out the permissions into the following: Check whether a user is in any group, including whether a group has the '*' membership :type user: str :param user: The username being checked against :type command: str :param command: The command that is being invoked (e.g. test.ping) :type groups: dict :param groups: the dictionary with groups permissions structure. :rtype: tuple :returns: On a successful permitting match, returns 2-element tuple that contains the name of the group that successfully matched, and a dictionary containing the configuration of the group so it can be referenced. On failure it returns an empty tuple ''' log.info('%s wants to run %s with groups %s', user, command, groups) for key, val in groups.items(): if user not in val['users']: if '*' not in val['users']: continue # this doesn't grant permissions, pass if (command not in val['commands']) and (command not in val.get('aliases', {}).keys()): if '*' not in val['commands']: continue # again, pass log.info('Slack user %s permitted to run %s', user, command) return (key, val,) # matched this group, return the group log.info('Slack user %s denied trying to run %s', user, command) return ()
[ "def", "can_user_run", "(", "self", ",", "user", ",", "command", ",", "groups", ")", ":", "log", ".", "info", "(", "'%s wants to run %s with groups %s'", ",", "user", ",", "command", ",", "groups", ")", "for", "key", ",", "val", "in", "groups", ".", "ite...
Break out the permissions into the following: Check whether a user is in any group, including whether a group has the '*' membership :type user: str :param user: The username being checked against :type command: str :param command: The command that is being invoked (e.g. test.ping) :type groups: dict :param groups: the dictionary with groups permissions structure. :rtype: tuple :returns: On a successful permitting match, returns 2-element tuple that contains the name of the group that successfully matched, and a dictionary containing the configuration of the group so it can be referenced. On failure it returns an empty tuple
[ "Break", "out", "the", "permissions", "into", "the", "following", ":" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L326-L360
train
saltstack/salt
salt/engines/slack.py
SlackClient.commandline_to_list
def commandline_to_list(self, cmdline_str, trigger_string): ''' cmdline_str is the string of the command line trigger_string is the trigger string, to be removed ''' cmdline = salt.utils.args.shlex_split(cmdline_str[len(trigger_string):]) # Remove slack url parsing # Translate target=<http://host.domain.net|host.domain.net> # to target=host.domain.net cmdlist = [] for cmditem in cmdline: pattern = r'(?P<begin>.*)(<.*\|)(?P<url>.*)(>)(?P<remainder>.*)' mtch = re.match(pattern, cmditem) if mtch: origtext = mtch.group('begin') + mtch.group('url') + mtch.group('remainder') cmdlist.append(origtext) else: cmdlist.append(cmditem) return cmdlist
python
def commandline_to_list(self, cmdline_str, trigger_string): ''' cmdline_str is the string of the command line trigger_string is the trigger string, to be removed ''' cmdline = salt.utils.args.shlex_split(cmdline_str[len(trigger_string):]) # Remove slack url parsing # Translate target=<http://host.domain.net|host.domain.net> # to target=host.domain.net cmdlist = [] for cmditem in cmdline: pattern = r'(?P<begin>.*)(<.*\|)(?P<url>.*)(>)(?P<remainder>.*)' mtch = re.match(pattern, cmditem) if mtch: origtext = mtch.group('begin') + mtch.group('url') + mtch.group('remainder') cmdlist.append(origtext) else: cmdlist.append(cmditem) return cmdlist
[ "def", "commandline_to_list", "(", "self", ",", "cmdline_str", ",", "trigger_string", ")", ":", "cmdline", "=", "salt", ".", "utils", ".", "args", ".", "shlex_split", "(", "cmdline_str", "[", "len", "(", "trigger_string", ")", ":", "]", ")", "# Remove slack ...
cmdline_str is the string of the command line trigger_string is the trigger string, to be removed
[ "cmdline_str", "is", "the", "string", "of", "the", "command", "line", "trigger_string", "is", "the", "trigger", "string", "to", "be", "removed" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L362-L380
train
saltstack/salt
salt/engines/slack.py
SlackClient.control_message_target
def control_message_target(self, slack_user_name, text, loaded_groups, trigger_string): '''Returns a tuple of (target, cmdline,) for the response Raises IndexError if a user can't be looked up from all_slack_users Returns (False, False) if the user doesn't have permission These are returned together because the commandline and the targeting interact with the group config (specifically aliases and targeting configuration) so taking care of them together works out. The cmdline that is returned is the actual list that should be processed by salt, and not the alias. ''' # Trim the trigger string from the front # cmdline = _text[1:].split(' ', 1) cmdline = self.commandline_to_list(text, trigger_string) permitted_group = self.can_user_run(slack_user_name, cmdline[0], loaded_groups) log.debug('slack_user_name is %s and the permitted group is %s', slack_user_name, permitted_group) if not permitted_group: return (False, None, cmdline[0]) if not slack_user_name: return (False, None, cmdline[0]) # maybe there are aliases, so check on that if cmdline[0] in permitted_group[1].get('aliases', {}).keys(): use_cmdline = self.commandline_to_list(permitted_group[1]['aliases'][cmdline[0]].get('cmd', ''), '') # Include any additional elements from cmdline use_cmdline.extend(cmdline[1:]) else: use_cmdline = cmdline target = self.get_target(permitted_group, cmdline, use_cmdline) # Remove target and tgt_type from commandline # that is sent along to Salt use_cmdline = [item for item in use_cmdline if all(not item.startswith(x) for x in ('target', 'tgt_type'))] return (True, target, use_cmdline)
python
def control_message_target(self, slack_user_name, text, loaded_groups, trigger_string): '''Returns a tuple of (target, cmdline,) for the response Raises IndexError if a user can't be looked up from all_slack_users Returns (False, False) if the user doesn't have permission These are returned together because the commandline and the targeting interact with the group config (specifically aliases and targeting configuration) so taking care of them together works out. The cmdline that is returned is the actual list that should be processed by salt, and not the alias. ''' # Trim the trigger string from the front # cmdline = _text[1:].split(' ', 1) cmdline = self.commandline_to_list(text, trigger_string) permitted_group = self.can_user_run(slack_user_name, cmdline[0], loaded_groups) log.debug('slack_user_name is %s and the permitted group is %s', slack_user_name, permitted_group) if not permitted_group: return (False, None, cmdline[0]) if not slack_user_name: return (False, None, cmdline[0]) # maybe there are aliases, so check on that if cmdline[0] in permitted_group[1].get('aliases', {}).keys(): use_cmdline = self.commandline_to_list(permitted_group[1]['aliases'][cmdline[0]].get('cmd', ''), '') # Include any additional elements from cmdline use_cmdline.extend(cmdline[1:]) else: use_cmdline = cmdline target = self.get_target(permitted_group, cmdline, use_cmdline) # Remove target and tgt_type from commandline # that is sent along to Salt use_cmdline = [item for item in use_cmdline if all(not item.startswith(x) for x in ('target', 'tgt_type'))] return (True, target, use_cmdline)
[ "def", "control_message_target", "(", "self", ",", "slack_user_name", ",", "text", ",", "loaded_groups", ",", "trigger_string", ")", ":", "# Trim the trigger string from the front", "# cmdline = _text[1:].split(' ', 1)", "cmdline", "=", "self", ".", "commandline_to_list", "...
Returns a tuple of (target, cmdline,) for the response Raises IndexError if a user can't be looked up from all_slack_users Returns (False, False) if the user doesn't have permission These are returned together because the commandline and the targeting interact with the group config (specifically aliases and targeting configuration) so taking care of them together works out. The cmdline that is returned is the actual list that should be processed by salt, and not the alias.
[ "Returns", "a", "tuple", "of", "(", "target", "cmdline", ")", "for", "the", "response" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L382-L424
train
saltstack/salt
salt/engines/slack.py
SlackClient.message_text
def message_text(self, m_data): ''' Raises ValueError if a value doesn't work out, and TypeError if this isn't a message type ''' if m_data.get('type') != 'message': raise TypeError('This is not a message') # Edited messages have text in message _text = m_data.get('text', None) or m_data.get('message', {}).get('text', None) try: log.info('Message is %s', _text) # this can violate the ascii codec except UnicodeEncodeError as uee: log.warning('Got a message that I could not log. The reason is: %s', uee) # Convert UTF to string _text = salt.utils.json.dumps(_text) _text = salt.utils.yaml.safe_load(_text) if not _text: raise ValueError('_text has no value') return _text
python
def message_text(self, m_data): ''' Raises ValueError if a value doesn't work out, and TypeError if this isn't a message type ''' if m_data.get('type') != 'message': raise TypeError('This is not a message') # Edited messages have text in message _text = m_data.get('text', None) or m_data.get('message', {}).get('text', None) try: log.info('Message is %s', _text) # this can violate the ascii codec except UnicodeEncodeError as uee: log.warning('Got a message that I could not log. The reason is: %s', uee) # Convert UTF to string _text = salt.utils.json.dumps(_text) _text = salt.utils.yaml.safe_load(_text) if not _text: raise ValueError('_text has no value') return _text
[ "def", "message_text", "(", "self", ",", "m_data", ")", ":", "if", "m_data", ".", "get", "(", "'type'", ")", "!=", "'message'", ":", "raise", "TypeError", "(", "'This is not a message'", ")", "# Edited messages have text in message", "_text", "=", "m_data", ".",...
Raises ValueError if a value doesn't work out, and TypeError if this isn't a message type
[ "Raises", "ValueError", "if", "a", "value", "doesn", "t", "work", "out", "and", "TypeError", "if", "this", "isn", "t", "a", "message", "type" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L426-L446
train
saltstack/salt
salt/engines/slack.py
SlackClient.generate_triggered_messages
def generate_triggered_messages(self, token, trigger_string, groups, groups_pillar_name): ''' slack_token = string trigger_string = string input_valid_users = set input_valid_commands = set When the trigger_string prefixes the message text, yields a dictionary of:: { 'message_data': m_data, 'cmdline': cmdline_list, # this is a list 'channel': channel, 'user': m_data['user'], 'slack_client': sc } else yields {'message_data': m_data} and the caller can handle that When encountering an error (e.g. invalid message), yields {}, the caller can proceed to the next message When the websocket being read from has given up all its messages, yields {'done': True} to indicate that the caller has read all of the relevant data for now, and should continue its own processing and check back for more data later. This relies on the caller sleeping between checks, otherwise this could flood ''' all_slack_users = self.get_slack_users(token) # re-checks this if we have an negative lookup result all_slack_channels = self.get_slack_channels(token) # re-checks this if we have an negative lookup result def just_data(m_data): '''Always try to return the user and channel anyway''' if 'user' not in m_data: if 'message' in m_data and 'user' in m_data['message']: log.debug('Message was edited, ' 'so we look for user in ' 'the original message.') user_id = m_data['message']['user'] elif 'comment' in m_data and 'user' in m_data['comment']: log.debug('Comment was added, ' 'so we look for user in ' 'the comment.') user_id = m_data['comment']['user'] else: user_id = m_data.get('user') channel_id = m_data.get('channel') if channel_id.startswith('D'): # private chate with bot user channel_name = 'private chat' else: channel_name = all_slack_channels.get(channel_id) data = { 'message_data': m_data, 'user_id': user_id, 'user_name': all_slack_users.get(user_id), 'channel_name': channel_name } if not data['user_name']: all_slack_users.clear() all_slack_users.update(self.get_slack_users(token)) 
data['user_name'] = all_slack_users.get(user_id) if not data['channel_name']: all_slack_channels.clear() all_slack_channels.update(self.get_slack_channels(token)) data['channel_name'] = all_slack_channels.get(channel_id) return data for sleeps in (5, 10, 30, 60): if self.slack_connect: break else: # see https://api.slack.com/docs/rate-limits log.warning('Slack connection is invalid. Server: %s, sleeping %s', self.sc.server, sleeps) time.sleep(sleeps) # respawning too fast makes the slack API unhappy about the next reconnection else: raise UserWarning('Connection to slack is still invalid, giving up: {}'.format(self.slack_connect)) # Boom! while True: msg = self.sc.rtm_read() for m_data in msg: try: msg_text = self.message_text(m_data) except (ValueError, TypeError) as msg_err: log.debug('Got an error from trying to get the message text %s', msg_err) yield {'message_data': m_data} # Not a message type from the API? continue # Find the channel object from the channel name channel = self.sc.server.channels.find(m_data['channel']) data = just_data(m_data) if msg_text.startswith(trigger_string): loaded_groups = self.get_config_groups(groups, groups_pillar_name) if not data.get('user_name'): log.error( 'The user %s can not be looked up via slack. What has happened here?', m_data.get('user') ) channel.send_message('The user {} can not be looked up via slack. Not running {}'.format( data['user_id'], msg_text)) yield {'message_data': m_data} continue (allowed, target, cmdline) = self.control_message_target( data['user_name'], msg_text, loaded_groups, trigger_string) log.debug('Got target: %s, cmdline: %s', target, cmdline) if allowed: yield { 'message_data': m_data, 'channel': m_data['channel'], 'user': data['user_id'], 'user_name': data['user_name'], 'cmdline': cmdline, 'target': target } continue else: channel.send_message('{0} is not allowed to use command {1}.'.format( data['user_name'], cmdline)) yield data continue else: yield data continue yield {'done': True}
python
def generate_triggered_messages(self, token, trigger_string, groups, groups_pillar_name): ''' slack_token = string trigger_string = string input_valid_users = set input_valid_commands = set When the trigger_string prefixes the message text, yields a dictionary of:: { 'message_data': m_data, 'cmdline': cmdline_list, # this is a list 'channel': channel, 'user': m_data['user'], 'slack_client': sc } else yields {'message_data': m_data} and the caller can handle that When encountering an error (e.g. invalid message), yields {}, the caller can proceed to the next message When the websocket being read from has given up all its messages, yields {'done': True} to indicate that the caller has read all of the relevant data for now, and should continue its own processing and check back for more data later. This relies on the caller sleeping between checks, otherwise this could flood ''' all_slack_users = self.get_slack_users(token) # re-checks this if we have an negative lookup result all_slack_channels = self.get_slack_channels(token) # re-checks this if we have an negative lookup result def just_data(m_data): '''Always try to return the user and channel anyway''' if 'user' not in m_data: if 'message' in m_data and 'user' in m_data['message']: log.debug('Message was edited, ' 'so we look for user in ' 'the original message.') user_id = m_data['message']['user'] elif 'comment' in m_data and 'user' in m_data['comment']: log.debug('Comment was added, ' 'so we look for user in ' 'the comment.') user_id = m_data['comment']['user'] else: user_id = m_data.get('user') channel_id = m_data.get('channel') if channel_id.startswith('D'): # private chate with bot user channel_name = 'private chat' else: channel_name = all_slack_channels.get(channel_id) data = { 'message_data': m_data, 'user_id': user_id, 'user_name': all_slack_users.get(user_id), 'channel_name': channel_name } if not data['user_name']: all_slack_users.clear() all_slack_users.update(self.get_slack_users(token)) 
data['user_name'] = all_slack_users.get(user_id) if not data['channel_name']: all_slack_channels.clear() all_slack_channels.update(self.get_slack_channels(token)) data['channel_name'] = all_slack_channels.get(channel_id) return data for sleeps in (5, 10, 30, 60): if self.slack_connect: break else: # see https://api.slack.com/docs/rate-limits log.warning('Slack connection is invalid. Server: %s, sleeping %s', self.sc.server, sleeps) time.sleep(sleeps) # respawning too fast makes the slack API unhappy about the next reconnection else: raise UserWarning('Connection to slack is still invalid, giving up: {}'.format(self.slack_connect)) # Boom! while True: msg = self.sc.rtm_read() for m_data in msg: try: msg_text = self.message_text(m_data) except (ValueError, TypeError) as msg_err: log.debug('Got an error from trying to get the message text %s', msg_err) yield {'message_data': m_data} # Not a message type from the API? continue # Find the channel object from the channel name channel = self.sc.server.channels.find(m_data['channel']) data = just_data(m_data) if msg_text.startswith(trigger_string): loaded_groups = self.get_config_groups(groups, groups_pillar_name) if not data.get('user_name'): log.error( 'The user %s can not be looked up via slack. What has happened here?', m_data.get('user') ) channel.send_message('The user {} can not be looked up via slack. Not running {}'.format( data['user_id'], msg_text)) yield {'message_data': m_data} continue (allowed, target, cmdline) = self.control_message_target( data['user_name'], msg_text, loaded_groups, trigger_string) log.debug('Got target: %s, cmdline: %s', target, cmdline) if allowed: yield { 'message_data': m_data, 'channel': m_data['channel'], 'user': data['user_id'], 'user_name': data['user_name'], 'cmdline': cmdline, 'target': target } continue else: channel.send_message('{0} is not allowed to use command {1}.'.format( data['user_name'], cmdline)) yield data continue else: yield data continue yield {'done': True}
[ "def", "generate_triggered_messages", "(", "self", ",", "token", ",", "trigger_string", ",", "groups", ",", "groups_pillar_name", ")", ":", "all_slack_users", "=", "self", ".", "get_slack_users", "(", "token", ")", "# re-checks this if we have an negative lookup result", ...
slack_token = string trigger_string = string input_valid_users = set input_valid_commands = set When the trigger_string prefixes the message text, yields a dictionary of:: { 'message_data': m_data, 'cmdline': cmdline_list, # this is a list 'channel': channel, 'user': m_data['user'], 'slack_client': sc } else yields {'message_data': m_data} and the caller can handle that When encountering an error (e.g. invalid message), yields {}, the caller can proceed to the next message When the websocket being read from has given up all its messages, yields {'done': True} to indicate that the caller has read all of the relevant data for now, and should continue its own processing and check back for more data later. This relies on the caller sleeping between checks, otherwise this could flood
[ "slack_token", "=", "string", "trigger_string", "=", "string", "input_valid_users", "=", "set", "input_valid_commands", "=", "set" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L448-L569
train
saltstack/salt
salt/engines/slack.py
SlackClient.get_target
def get_target(self, permitted_group, cmdline, alias_cmdline): ''' When we are permitted to run a command on a target, look to see what the default targeting is for that group, and for that specific command (if provided). It's possible for None or False to be the result of either, which means that it's expected that the caller provide a specific target. If no configured target is provided, the command line will be parsed for target=foo and tgt_type=bar Test for this:: h = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'}, 'default_target': {'target': '*', 'tgt_type': 'glob'}, 'targets': {'pillar.get': {'target': 'you_momma', 'tgt_type': 'list'}}, 'users': {'dmangot', 'jmickle', 'pcn'}} f = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'}, 'default_target': {}, 'targets': {},'users': {'dmangot', 'jmickle', 'pcn'}} g = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'}, 'default_target': {'target': '*', 'tgt_type': 'glob'}, 'targets': {}, 'users': {'dmangot', 'jmickle', 'pcn'}} Run each of them through ``get_configured_target(('foo', f), 'pillar.get')`` and confirm a valid target ''' # Default to targeting all minions with a type of glob null_target = {'target': '*', 'tgt_type': 'glob'} def check_cmd_against_group(cmd): ''' Validate cmd against the group to return the target, or a null target ''' name, group_config = permitted_group target = group_config.get('default_target') if not target: # Empty, None, or False target = null_target if group_config.get('targets'): if group_config['targets'].get(cmd): target = group_config['targets'][cmd] if not target.get('target'): log.debug('Group %s is not configured to have a target for cmd %s.', name, cmd) return target for this_cl in cmdline, alias_cmdline: _, kwargs = self.parse_args_and_kwargs(this_cl) if 'target' in kwargs: log.debug('target is in kwargs %s.', kwargs) if 'tgt_type' in kwargs: log.debug('tgt_type is in kwargs %s.', kwargs) return {'target': kwargs['target'], 'tgt_type': kwargs['tgt_type']} 
return {'target': kwargs['target'], 'tgt_type': 'glob'} for this_cl in cmdline, alias_cmdline: checked = check_cmd_against_group(this_cl[0]) log.debug('this cmdline has target %s.', this_cl) if checked.get('target'): return checked return null_target
python
def get_target(self, permitted_group, cmdline, alias_cmdline): ''' When we are permitted to run a command on a target, look to see what the default targeting is for that group, and for that specific command (if provided). It's possible for None or False to be the result of either, which means that it's expected that the caller provide a specific target. If no configured target is provided, the command line will be parsed for target=foo and tgt_type=bar Test for this:: h = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'}, 'default_target': {'target': '*', 'tgt_type': 'glob'}, 'targets': {'pillar.get': {'target': 'you_momma', 'tgt_type': 'list'}}, 'users': {'dmangot', 'jmickle', 'pcn'}} f = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'}, 'default_target': {}, 'targets': {},'users': {'dmangot', 'jmickle', 'pcn'}} g = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'}, 'default_target': {'target': '*', 'tgt_type': 'glob'}, 'targets': {}, 'users': {'dmangot', 'jmickle', 'pcn'}} Run each of them through ``get_configured_target(('foo', f), 'pillar.get')`` and confirm a valid target ''' # Default to targeting all minions with a type of glob null_target = {'target': '*', 'tgt_type': 'glob'} def check_cmd_against_group(cmd): ''' Validate cmd against the group to return the target, or a null target ''' name, group_config = permitted_group target = group_config.get('default_target') if not target: # Empty, None, or False target = null_target if group_config.get('targets'): if group_config['targets'].get(cmd): target = group_config['targets'][cmd] if not target.get('target'): log.debug('Group %s is not configured to have a target for cmd %s.', name, cmd) return target for this_cl in cmdline, alias_cmdline: _, kwargs = self.parse_args_and_kwargs(this_cl) if 'target' in kwargs: log.debug('target is in kwargs %s.', kwargs) if 'tgt_type' in kwargs: log.debug('tgt_type is in kwargs %s.', kwargs) return {'target': kwargs['target'], 'tgt_type': kwargs['tgt_type']} 
return {'target': kwargs['target'], 'tgt_type': 'glob'} for this_cl in cmdline, alias_cmdline: checked = check_cmd_against_group(this_cl[0]) log.debug('this cmdline has target %s.', this_cl) if checked.get('target'): return checked return null_target
[ "def", "get_target", "(", "self", ",", "permitted_group", ",", "cmdline", ",", "alias_cmdline", ")", ":", "# Default to targeting all minions with a type of glob", "null_target", "=", "{", "'target'", ":", "'*'", ",", "'tgt_type'", ":", "'glob'", "}", "def", "check_...
When we are permitted to run a command on a target, look to see what the default targeting is for that group, and for that specific command (if provided). It's possible for None or False to be the result of either, which means that it's expected that the caller provide a specific target. If no configured target is provided, the command line will be parsed for target=foo and tgt_type=bar Test for this:: h = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'}, 'default_target': {'target': '*', 'tgt_type': 'glob'}, 'targets': {'pillar.get': {'target': 'you_momma', 'tgt_type': 'list'}}, 'users': {'dmangot', 'jmickle', 'pcn'}} f = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'}, 'default_target': {}, 'targets': {},'users': {'dmangot', 'jmickle', 'pcn'}} g = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'}, 'default_target': {'target': '*', 'tgt_type': 'glob'}, 'targets': {}, 'users': {'dmangot', 'jmickle', 'pcn'}} Run each of them through ``get_configured_target(('foo', f), 'pillar.get')`` and confirm a valid target
[ "When", "we", "are", "permitted", "to", "run", "a", "command", "on", "a", "target", "look", "to", "see", "what", "the", "default", "targeting", "is", "for", "that", "group", "and", "for", "that", "specific", "command", "(", "if", "provided", ")", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L571-L631
train
saltstack/salt
salt/engines/slack.py
SlackClient.format_return_text
def format_return_text(self, data, function, **kwargs): # pylint: disable=unused-argument ''' Print out YAML using the block mode ''' # emulate the yaml_out output formatter. It relies on a global __opts__ object which # we can't obviously pass in try: try: outputter = data[next(iter(data))].get('out') except (StopIteration, AttributeError): outputter = None return salt.output.string_format( {x: y['return'] for x, y in six.iteritems(data)}, out=outputter, opts=__opts__, ) except Exception as exc: import pprint log.exception( 'Exception encountered when trying to serialize %s', pprint.pformat(data) ) return 'Got an error trying to serialze/clean up the response'
python
def format_return_text(self, data, function, **kwargs): # pylint: disable=unused-argument ''' Print out YAML using the block mode ''' # emulate the yaml_out output formatter. It relies on a global __opts__ object which # we can't obviously pass in try: try: outputter = data[next(iter(data))].get('out') except (StopIteration, AttributeError): outputter = None return salt.output.string_format( {x: y['return'] for x, y in six.iteritems(data)}, out=outputter, opts=__opts__, ) except Exception as exc: import pprint log.exception( 'Exception encountered when trying to serialize %s', pprint.pformat(data) ) return 'Got an error trying to serialze/clean up the response'
[ "def", "format_return_text", "(", "self", ",", "data", ",", "function", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=unused-argument", "# emulate the yaml_out output formatter. It relies on a global __opts__ object which", "# we can't obviously pass in", "try", ":", "t...
Print out YAML using the block mode
[ "Print", "out", "YAML", "using", "the", "block", "mode" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L633-L655
train
saltstack/salt
salt/engines/slack.py
SlackClient.parse_args_and_kwargs
def parse_args_and_kwargs(self, cmdline): ''' cmdline: list returns tuple of: args (list), kwargs (dict) ''' # Parse args and kwargs args = [] kwargs = {} if len(cmdline) > 1: for item in cmdline[1:]: if '=' in item: (key, value) = item.split('=', 1) kwargs[key] = value else: args.append(item) return (args, kwargs)
python
def parse_args_and_kwargs(self, cmdline): ''' cmdline: list returns tuple of: args (list), kwargs (dict) ''' # Parse args and kwargs args = [] kwargs = {} if len(cmdline) > 1: for item in cmdline[1:]: if '=' in item: (key, value) = item.split('=', 1) kwargs[key] = value else: args.append(item) return (args, kwargs)
[ "def", "parse_args_and_kwargs", "(", "self", ",", "cmdline", ")", ":", "# Parse args and kwargs", "args", "=", "[", "]", "kwargs", "=", "{", "}", "if", "len", "(", "cmdline", ")", ">", "1", ":", "for", "item", "in", "cmdline", "[", "1", ":", "]", ":"...
cmdline: list returns tuple of: args (list), kwargs (dict)
[ "cmdline", ":", "list" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L657-L674
train
saltstack/salt
salt/engines/slack.py
SlackClient.get_jobs_from_runner
def get_jobs_from_runner(self, outstanding_jids): ''' Given a list of job_ids, return a dictionary of those job_ids that have completed and their results. Query the salt event bus via the jobs runner. jobs.list_job will show a job in progress, jobs.lookup_jid will return a job that has completed. returns a dictionary of job id: result ''' # Can't use the runner because of https://github.com/saltstack/salt/issues/40671 runner = salt.runner.RunnerClient(__opts__) source = __opts__.get('ext_job_cache') if not source: source = __opts__.get('master_job_cache') results = {} for jid in outstanding_jids: # results[jid] = runner.cmd('jobs.lookup_jid', [jid]) if self.master_minion.returners['{}.get_jid'.format(source)](jid): job_result = runner.cmd('jobs.list_job', [jid]) jid_result = job_result.get('Result', {}) jid_function = job_result.get('Function', {}) # emulate lookup_jid's return, which is just minion:return results[jid] = { 'data': salt.utils.json.loads(salt.utils.json.dumps(jid_result)), 'function': jid_function } return results
python
def get_jobs_from_runner(self, outstanding_jids): ''' Given a list of job_ids, return a dictionary of those job_ids that have completed and their results. Query the salt event bus via the jobs runner. jobs.list_job will show a job in progress, jobs.lookup_jid will return a job that has completed. returns a dictionary of job id: result ''' # Can't use the runner because of https://github.com/saltstack/salt/issues/40671 runner = salt.runner.RunnerClient(__opts__) source = __opts__.get('ext_job_cache') if not source: source = __opts__.get('master_job_cache') results = {} for jid in outstanding_jids: # results[jid] = runner.cmd('jobs.lookup_jid', [jid]) if self.master_minion.returners['{}.get_jid'.format(source)](jid): job_result = runner.cmd('jobs.list_job', [jid]) jid_result = job_result.get('Result', {}) jid_function = job_result.get('Function', {}) # emulate lookup_jid's return, which is just minion:return results[jid] = { 'data': salt.utils.json.loads(salt.utils.json.dumps(jid_result)), 'function': jid_function } return results
[ "def", "get_jobs_from_runner", "(", "self", ",", "outstanding_jids", ")", ":", "# Can't use the runner because of https://github.com/saltstack/salt/issues/40671", "runner", "=", "salt", ".", "runner", ".", "RunnerClient", "(", "__opts__", ")", "source", "=", "__opts__", "...
Given a list of job_ids, return a dictionary of those job_ids that have completed and their results. Query the salt event bus via the jobs runner. jobs.list_job will show a job in progress, jobs.lookup_jid will return a job that has completed. returns a dictionary of job id: result
[ "Given", "a", "list", "of", "job_ids", "return", "a", "dictionary", "of", "those", "job_ids", "that", "have", "completed", "and", "their", "results", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L676-L706
train
saltstack/salt
salt/engines/slack.py
SlackClient.run_commands_from_slack_async
def run_commands_from_slack_async(self, message_generator, fire_all, tag, control, interval=1): ''' Pull any pending messages from the message_generator, sending each one to either the event bus, the command_async or both, depending on the values of fire_all and command ''' outstanding = {} # set of job_id that we need to check for while True: log.trace('Sleeping for interval of %s', interval) time.sleep(interval) # Drain the slack messages, up to 10 messages at a clip count = 0 for msg in message_generator: # The message_generator yields dicts. Leave this loop # on a dict that looks like {'done': True} or when we've done it # 10 times without taking a break. log.trace('Got a message from the generator: %s', msg.keys()) if count > 10: log.warning('Breaking in getting messages because count is exceeded') break if not msg: count += 1 log.warning('Skipping an empty message.') continue # This one is a dud, get the next message if msg.get('done'): log.trace('msg is done') break if fire_all: log.debug('Firing message to the bus with tag: %s', tag) log.debug('%s %s', tag, msg) self.fire('{0}/{1}'.format(tag, msg['message_data'].get('type')), msg) if control and (len(msg) > 1) and msg.get('cmdline'): channel = self.sc.server.channels.find(msg['channel']) jid = self.run_command_async(msg) log.debug('Submitted a job and got jid: %s', jid) outstanding[jid] = msg # record so we can return messages to the caller channel.send_message("@{}'s job is submitted as salt jid {}".format(msg['user_name'], jid)) count += 1 start_time = time.time() job_status = self.get_jobs_from_runner(outstanding.keys()) # dict of job_ids:results are returned log.trace('Getting %s jobs status took %s seconds', len(job_status), time.time() - start_time) for jid in job_status: result = job_status[jid]['data'] function = job_status[jid]['function'] if result: log.debug('ret to send back is %s', result) # formatting function? 
this_job = outstanding[jid] channel = self.sc.server.channels.find(this_job['channel']) return_text = self.format_return_text(result, function) return_prefix = "@{}'s job `{}` (id: {}) (target: {}) returned".format( this_job['user_name'], this_job['cmdline'], jid, this_job['target']) channel.send_message(return_prefix) ts = time.time() st = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d%H%M%S%f') filename = 'salt-results-{0}.yaml'.format(st) r = self.sc.api_call( 'files.upload', channels=channel.id, filename=filename, content=return_text) # Handle unicode return log.debug('Got back %s via the slack client', r) resp = salt.utils.yaml.safe_load(salt.utils.json.dumps(r)) if 'ok' in resp and resp['ok'] is False: this_job['channel'].send_message('Error: {0}'.format(resp['error'])) del outstanding[jid]
python
def run_commands_from_slack_async(self, message_generator, fire_all, tag, control, interval=1): ''' Pull any pending messages from the message_generator, sending each one to either the event bus, the command_async or both, depending on the values of fire_all and command ''' outstanding = {} # set of job_id that we need to check for while True: log.trace('Sleeping for interval of %s', interval) time.sleep(interval) # Drain the slack messages, up to 10 messages at a clip count = 0 for msg in message_generator: # The message_generator yields dicts. Leave this loop # on a dict that looks like {'done': True} or when we've done it # 10 times without taking a break. log.trace('Got a message from the generator: %s', msg.keys()) if count > 10: log.warning('Breaking in getting messages because count is exceeded') break if not msg: count += 1 log.warning('Skipping an empty message.') continue # This one is a dud, get the next message if msg.get('done'): log.trace('msg is done') break if fire_all: log.debug('Firing message to the bus with tag: %s', tag) log.debug('%s %s', tag, msg) self.fire('{0}/{1}'.format(tag, msg['message_data'].get('type')), msg) if control and (len(msg) > 1) and msg.get('cmdline'): channel = self.sc.server.channels.find(msg['channel']) jid = self.run_command_async(msg) log.debug('Submitted a job and got jid: %s', jid) outstanding[jid] = msg # record so we can return messages to the caller channel.send_message("@{}'s job is submitted as salt jid {}".format(msg['user_name'], jid)) count += 1 start_time = time.time() job_status = self.get_jobs_from_runner(outstanding.keys()) # dict of job_ids:results are returned log.trace('Getting %s jobs status took %s seconds', len(job_status), time.time() - start_time) for jid in job_status: result = job_status[jid]['data'] function = job_status[jid]['function'] if result: log.debug('ret to send back is %s', result) # formatting function? 
this_job = outstanding[jid] channel = self.sc.server.channels.find(this_job['channel']) return_text = self.format_return_text(result, function) return_prefix = "@{}'s job `{}` (id: {}) (target: {}) returned".format( this_job['user_name'], this_job['cmdline'], jid, this_job['target']) channel.send_message(return_prefix) ts = time.time() st = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d%H%M%S%f') filename = 'salt-results-{0}.yaml'.format(st) r = self.sc.api_call( 'files.upload', channels=channel.id, filename=filename, content=return_text) # Handle unicode return log.debug('Got back %s via the slack client', r) resp = salt.utils.yaml.safe_load(salt.utils.json.dumps(r)) if 'ok' in resp and resp['ok'] is False: this_job['channel'].send_message('Error: {0}'.format(resp['error'])) del outstanding[jid]
[ "def", "run_commands_from_slack_async", "(", "self", ",", "message_generator", ",", "fire_all", ",", "tag", ",", "control", ",", "interval", "=", "1", ")", ":", "outstanding", "=", "{", "}", "# set of job_id that we need to check for", "while", "True", ":", "log",...
Pull any pending messages from the message_generator, sending each one to either the event bus, the command_async or both, depending on the values of fire_all and command
[ "Pull", "any", "pending", "messages", "from", "the", "message_generator", "sending", "each", "one", "to", "either", "the", "event", "bus", "the", "command_async", "or", "both", "depending", "on", "the", "values", "of", "fire_all", "and", "command" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L708-L774
train
saltstack/salt
salt/engines/slack.py
SlackClient.run_command_async
def run_command_async(self, msg): ''' :type message_generator: generator of dict :param message_generator: Generates messages from slack that should be run :type fire_all: bool :param fire_all: Whether to also fire messages to the event bus :type tag: str :param tag: The tag to send to use to send to the event bus :type interval: int :param interval: time to wait between ending a loop and beginning the next ''' log.debug('Going to run a command asynchronous') runner_functions = sorted(salt.runner.Runner(__opts__).functions) # Parse args and kwargs cmd = msg['cmdline'][0] args, kwargs = self.parse_args_and_kwargs(msg['cmdline']) # Check for pillar string representation of dict and convert it to dict if 'pillar' in kwargs: kwargs.update(pillar=ast.literal_eval(kwargs['pillar'])) # Check for target. Otherwise assume None target = msg['target']['target'] # Check for tgt_type. Otherwise assume glob tgt_type = msg['target']['tgt_type'] log.debug('target_type is: %s', tgt_type) if cmd in runner_functions: runner = salt.runner.RunnerClient(__opts__) log.debug('Command %s will run via runner_functions', cmd) # pylint is tripping # pylint: disable=missing-whitespace-after-comma job_id_dict = runner.asynchronous(cmd, {'args': args, 'kwargs': kwargs}) job_id = job_id_dict['jid'] # Default to trying to run as a client module. else: local = salt.client.LocalClient() log.debug('Command %s will run via local.cmd_async, targeting %s', cmd, target) log.debug('Running %s, %s, %s, %s, %s', target, cmd, args, kwargs, tgt_type) # according to https://github.com/saltstack/salt-api/issues/164, tgt_type has changed to expr_form job_id = local.cmd_async(six.text_type(target), cmd, arg=args, kwarg=kwargs, tgt_type=six.text_type(tgt_type)) log.info('ret from local.cmd_async is %s', job_id) return job_id
python
def run_command_async(self, msg): ''' :type message_generator: generator of dict :param message_generator: Generates messages from slack that should be run :type fire_all: bool :param fire_all: Whether to also fire messages to the event bus :type tag: str :param tag: The tag to send to use to send to the event bus :type interval: int :param interval: time to wait between ending a loop and beginning the next ''' log.debug('Going to run a command asynchronous') runner_functions = sorted(salt.runner.Runner(__opts__).functions) # Parse args and kwargs cmd = msg['cmdline'][0] args, kwargs = self.parse_args_and_kwargs(msg['cmdline']) # Check for pillar string representation of dict and convert it to dict if 'pillar' in kwargs: kwargs.update(pillar=ast.literal_eval(kwargs['pillar'])) # Check for target. Otherwise assume None target = msg['target']['target'] # Check for tgt_type. Otherwise assume glob tgt_type = msg['target']['tgt_type'] log.debug('target_type is: %s', tgt_type) if cmd in runner_functions: runner = salt.runner.RunnerClient(__opts__) log.debug('Command %s will run via runner_functions', cmd) # pylint is tripping # pylint: disable=missing-whitespace-after-comma job_id_dict = runner.asynchronous(cmd, {'args': args, 'kwargs': kwargs}) job_id = job_id_dict['jid'] # Default to trying to run as a client module. else: local = salt.client.LocalClient() log.debug('Command %s will run via local.cmd_async, targeting %s', cmd, target) log.debug('Running %s, %s, %s, %s, %s', target, cmd, args, kwargs, tgt_type) # according to https://github.com/saltstack/salt-api/issues/164, tgt_type has changed to expr_form job_id = local.cmd_async(six.text_type(target), cmd, arg=args, kwarg=kwargs, tgt_type=six.text_type(tgt_type)) log.info('ret from local.cmd_async is %s', job_id) return job_id
[ "def", "run_command_async", "(", "self", ",", "msg", ")", ":", "log", ".", "debug", "(", "'Going to run a command asynchronous'", ")", "runner_functions", "=", "sorted", "(", "salt", ".", "runner", ".", "Runner", "(", "__opts__", ")", ".", "functions", ")", ...
:type message_generator: generator of dict :param message_generator: Generates messages from slack that should be run :type fire_all: bool :param fire_all: Whether to also fire messages to the event bus :type tag: str :param tag: The tag to send to use to send to the event bus :type interval: int :param interval: time to wait between ending a loop and beginning the next
[ ":", "type", "message_generator", ":", "generator", "of", "dict", ":", "param", "message_generator", ":", "Generates", "messages", "from", "slack", "that", "should", "be", "run" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L776-L825
train
saltstack/salt
salt/returners/postgres.py
_get_serv
def _get_serv(ret=None, commit=False): ''' Return a Pg cursor ''' _options = _get_options(ret) try: conn = psycopg2.connect(host=_options.get('host'), user=_options.get('user'), password=_options.get('passwd'), database=_options.get('db'), port=_options.get('port')) except psycopg2.OperationalError as exc: raise salt.exceptions.SaltMasterError('postgres returner could not connect to database: {exc}'.format(exc=exc)) cursor = conn.cursor() try: yield cursor except psycopg2.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err else: if commit: cursor.execute("COMMIT") else: cursor.execute("ROLLBACK") finally: conn.close()
python
def _get_serv(ret=None, commit=False): ''' Return a Pg cursor ''' _options = _get_options(ret) try: conn = psycopg2.connect(host=_options.get('host'), user=_options.get('user'), password=_options.get('passwd'), database=_options.get('db'), port=_options.get('port')) except psycopg2.OperationalError as exc: raise salt.exceptions.SaltMasterError('postgres returner could not connect to database: {exc}'.format(exc=exc)) cursor = conn.cursor() try: yield cursor except psycopg2.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err else: if commit: cursor.execute("COMMIT") else: cursor.execute("ROLLBACK") finally: conn.close()
[ "def", "_get_serv", "(", "ret", "=", "None", ",", "commit", "=", "False", ")", ":", "_options", "=", "_get_options", "(", "ret", ")", "try", ":", "conn", "=", "psycopg2", ".", "connect", "(", "host", "=", "_options", ".", "get", "(", "'host'", ")", ...
Return a Pg cursor
[ "Return", "a", "Pg", "cursor" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/postgres.py#L190-L220
train
saltstack/salt
salt/returners/postgres.py
returner
def returner(ret): ''' Return data to a postgres server ''' try: with _get_serv(ret, commit=True) as cur: sql = '''INSERT INTO salt_returns (fun, jid, return, id, success, full_ret) VALUES (%s, %s, %s, %s, %s, %s)''' cur.execute( sql, ( ret['fun'], ret['jid'], salt.utils.json.dumps(ret['return']), ret['id'], ret.get('success', False), salt.utils.json.dumps(ret))) except salt.exceptions.SaltMasterError: log.critical('Could not store return with postgres returner. PostgreSQL server unavailable.')
python
def returner(ret): ''' Return data to a postgres server ''' try: with _get_serv(ret, commit=True) as cur: sql = '''INSERT INTO salt_returns (fun, jid, return, id, success, full_ret) VALUES (%s, %s, %s, %s, %s, %s)''' cur.execute( sql, ( ret['fun'], ret['jid'], salt.utils.json.dumps(ret['return']), ret['id'], ret.get('success', False), salt.utils.json.dumps(ret))) except salt.exceptions.SaltMasterError: log.critical('Could not store return with postgres returner. PostgreSQL server unavailable.')
[ "def", "returner", "(", "ret", ")", ":", "try", ":", "with", "_get_serv", "(", "ret", ",", "commit", "=", "True", ")", "as", "cur", ":", "sql", "=", "'''INSERT INTO salt_returns\n (fun, jid, return, id, success, full_ret)\n VALUES (%s...
Return data to a postgres server
[ "Return", "data", "to", "a", "postgres", "server" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/postgres.py#L223-L241
train
saltstack/salt
salt/returners/postgres.py
get_jids
def get_jids(): ''' Return a list of all job ids ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT jid, load FROM jids''' cur.execute(sql) data = cur.fetchall() ret = {} for jid, load in data: ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load)) return ret
python
def get_jids(): ''' Return a list of all job ids ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT jid, load FROM jids''' cur.execute(sql) data = cur.fetchall() ret = {} for jid, load in data: ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load)) return ret
[ "def", "get_jids", "(", ")", ":", "with", "_get_serv", "(", "ret", "=", "None", ",", "commit", "=", "True", ")", "as", "cur", ":", "sql", "=", "'''SELECT jid, load\n FROM jids'''", "cur", ".", "execute", "(", "sql", ")", "data", "=", "cur", ...
Return a list of all job ids
[ "Return", "a", "list", "of", "all", "job", "ids" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/postgres.py#L344-L359
train
saltstack/salt
salt/states/zabbix_hostgroup.py
present
def present(name, **kwargs): ''' Ensures that the host group exists, eventually creates new host group. .. versionadded:: 2016.3.0 :param name: name of the host group :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml create_testing_host_group: zabbix_hostgroup.present: - name: 'My hostgroup name' ''' connection_args = {} if '_connection_user' in kwargs: connection_args['_connection_user'] = kwargs['_connection_user'] if '_connection_password' in kwargs: connection_args['_connection_password'] = kwargs['_connection_password'] if '_connection_url' in kwargs: connection_args['_connection_url'] = kwargs['_connection_url'] ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} # Comment and change messages comment_hostgroup_created = 'Host group {0} created.'.format(name) comment_hostgroup_notcreated = 'Unable to create host group: {0}. 
'.format(name) comment_hostgroup_exists = 'Host group {0} already exists.'.format(name) changes_hostgroup_created = {name: {'old': 'Host group {0} does not exist.'.format(name), 'new': 'Host group {0} created.'.format(name), } } hostgroup_exists = __salt__['zabbix.hostgroup_exists'](name, **connection_args) # Dry run, test=true mode if __opts__['test']: if hostgroup_exists: ret['result'] = True ret['comment'] = comment_hostgroup_exists else: ret['result'] = None ret['comment'] = comment_hostgroup_created ret['changes'] = changes_hostgroup_created return ret if hostgroup_exists: ret['result'] = True ret['comment'] = comment_hostgroup_exists else: hostgroup_create = __salt__['zabbix.hostgroup_create'](name, **connection_args) if 'error' not in hostgroup_create: ret['result'] = True ret['comment'] = comment_hostgroup_created ret['changes'] = changes_hostgroup_created else: ret['result'] = False ret['comment'] = comment_hostgroup_notcreated + six.text_type(hostgroup_create['error']) return ret
python
def present(name, **kwargs): ''' Ensures that the host group exists, eventually creates new host group. .. versionadded:: 2016.3.0 :param name: name of the host group :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml create_testing_host_group: zabbix_hostgroup.present: - name: 'My hostgroup name' ''' connection_args = {} if '_connection_user' in kwargs: connection_args['_connection_user'] = kwargs['_connection_user'] if '_connection_password' in kwargs: connection_args['_connection_password'] = kwargs['_connection_password'] if '_connection_url' in kwargs: connection_args['_connection_url'] = kwargs['_connection_url'] ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} # Comment and change messages comment_hostgroup_created = 'Host group {0} created.'.format(name) comment_hostgroup_notcreated = 'Unable to create host group: {0}. 
'.format(name) comment_hostgroup_exists = 'Host group {0} already exists.'.format(name) changes_hostgroup_created = {name: {'old': 'Host group {0} does not exist.'.format(name), 'new': 'Host group {0} created.'.format(name), } } hostgroup_exists = __salt__['zabbix.hostgroup_exists'](name, **connection_args) # Dry run, test=true mode if __opts__['test']: if hostgroup_exists: ret['result'] = True ret['comment'] = comment_hostgroup_exists else: ret['result'] = None ret['comment'] = comment_hostgroup_created ret['changes'] = changes_hostgroup_created return ret if hostgroup_exists: ret['result'] = True ret['comment'] = comment_hostgroup_exists else: hostgroup_create = __salt__['zabbix.hostgroup_create'](name, **connection_args) if 'error' not in hostgroup_create: ret['result'] = True ret['comment'] = comment_hostgroup_created ret['changes'] = changes_hostgroup_created else: ret['result'] = False ret['comment'] = comment_hostgroup_notcreated + six.text_type(hostgroup_create['error']) return ret
[ "def", "present", "(", "name", ",", "*", "*", "kwargs", ")", ":", "connection_args", "=", "{", "}", "if", "'_connection_user'", "in", "kwargs", ":", "connection_args", "[", "'_connection_user'", "]", "=", "kwargs", "[", "'_connection_user'", "]", "if", "'_co...
Ensures that the host group exists, eventually creates new host group. .. versionadded:: 2016.3.0 :param name: name of the host group :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml create_testing_host_group: zabbix_hostgroup.present: - name: 'My hostgroup name'
[ "Ensures", "that", "the", "host", "group", "exists", "eventually", "creates", "new", "host", "group", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/zabbix_hostgroup.py#L24-L88
train
saltstack/salt
salt/states/zabbix_hostgroup.py
absent
def absent(name, **kwargs): ''' Ensures that the host group does not exist, eventually delete host group. .. versionadded:: 2016.3.0 :param name: name of the host group :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml delete_testing_host_group: zabbix_hostgroup.absent: - name: 'My hostgroup name' ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} # Comment and change messages comment_hostgroup_deleted = 'Host group {0} deleted.'.format(name) comment_hostgroup_notdeleted = 'Unable to delete host group: {0}. '.format(name) comment_hostgroup_notexists = 'Host group {0} does not exist.'.format(name) changes_hostgroup_deleted = {name: {'old': 'Host group {0} exists.'.format(name), 'new': 'Host group {0} deleted.'.format(name), } } connection_args = {} if '_connection_user' in kwargs: connection_args['_connection_user'] = kwargs['_connection_user'] if '_connection_password' in kwargs: connection_args['_connection_password'] = kwargs['_connection_password'] if '_connection_url' in kwargs: connection_args['_connection_url'] = kwargs['_connection_url'] hostgroup_exists = __salt__['zabbix.hostgroup_exists'](name, **connection_args) # Dry run, test=true mode if __opts__['test']: if not hostgroup_exists: ret['result'] = True ret['comment'] = comment_hostgroup_notexists else: ret['result'] = None ret['comment'] = comment_hostgroup_deleted ret['changes'] = changes_hostgroup_deleted return ret hostgroup_get = __salt__['zabbix.hostgroup_get'](name, **connection_args) if not hostgroup_get: ret['result'] = True ret['comment'] = comment_hostgroup_notexists else: try: groupid = hostgroup_get[0]['groupid'] hostgroup_delete = 
__salt__['zabbix.hostgroup_delete'](groupid, **connection_args) except KeyError: hostgroup_delete = False if hostgroup_delete and 'error' not in hostgroup_delete: ret['result'] = True ret['comment'] = comment_hostgroup_deleted ret['changes'] = changes_hostgroup_deleted else: ret['result'] = False ret['comment'] = comment_hostgroup_notdeleted + six.text_type(hostgroup_delete['error']) return ret
python
def absent(name, **kwargs): ''' Ensures that the host group does not exist, eventually delete host group. .. versionadded:: 2016.3.0 :param name: name of the host group :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml delete_testing_host_group: zabbix_hostgroup.absent: - name: 'My hostgroup name' ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} # Comment and change messages comment_hostgroup_deleted = 'Host group {0} deleted.'.format(name) comment_hostgroup_notdeleted = 'Unable to delete host group: {0}. '.format(name) comment_hostgroup_notexists = 'Host group {0} does not exist.'.format(name) changes_hostgroup_deleted = {name: {'old': 'Host group {0} exists.'.format(name), 'new': 'Host group {0} deleted.'.format(name), } } connection_args = {} if '_connection_user' in kwargs: connection_args['_connection_user'] = kwargs['_connection_user'] if '_connection_password' in kwargs: connection_args['_connection_password'] = kwargs['_connection_password'] if '_connection_url' in kwargs: connection_args['_connection_url'] = kwargs['_connection_url'] hostgroup_exists = __salt__['zabbix.hostgroup_exists'](name, **connection_args) # Dry run, test=true mode if __opts__['test']: if not hostgroup_exists: ret['result'] = True ret['comment'] = comment_hostgroup_notexists else: ret['result'] = None ret['comment'] = comment_hostgroup_deleted ret['changes'] = changes_hostgroup_deleted return ret hostgroup_get = __salt__['zabbix.hostgroup_get'](name, **connection_args) if not hostgroup_get: ret['result'] = True ret['comment'] = comment_hostgroup_notexists else: try: groupid = hostgroup_get[0]['groupid'] hostgroup_delete = 
__salt__['zabbix.hostgroup_delete'](groupid, **connection_args) except KeyError: hostgroup_delete = False if hostgroup_delete and 'error' not in hostgroup_delete: ret['result'] = True ret['comment'] = comment_hostgroup_deleted ret['changes'] = changes_hostgroup_deleted else: ret['result'] = False ret['comment'] = comment_hostgroup_notdeleted + six.text_type(hostgroup_delete['error']) return ret
[ "def", "absent", "(", "name", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", "}", "# Comment and change messages", "comment_hostgroup...
Ensures that the host group does not exist, eventually delete host group. .. versionadded:: 2016.3.0 :param name: name of the host group :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml delete_testing_host_group: zabbix_hostgroup.absent: - name: 'My hostgroup name'
[ "Ensures", "that", "the", "host", "group", "does", "not", "exist", "eventually", "delete", "host", "group", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/zabbix_hostgroup.py#L91-L162
train
saltstack/salt
salt/states/glassfish.py
_json_to_unicode
def _json_to_unicode(data): ''' Encode json values in unicode to match that of the API ''' ret = {} for key, value in data.items(): if not isinstance(value, six.text_type): if isinstance(value, dict): ret[key] = _json_to_unicode(value) else: ret[key] = six.text_type(value).lower() else: ret[key] = value return ret
python
def _json_to_unicode(data): ''' Encode json values in unicode to match that of the API ''' ret = {} for key, value in data.items(): if not isinstance(value, six.text_type): if isinstance(value, dict): ret[key] = _json_to_unicode(value) else: ret[key] = six.text_type(value).lower() else: ret[key] = value return ret
[ "def", "_json_to_unicode", "(", "data", ")", ":", "ret", "=", "{", "}", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "value", ",", "six", ".", "text_type", ")", ":", "if", "isinstance", "(",...
Encode json values in unicode to match that of the API
[ "Encode", "json", "values", "in", "unicode", "to", "match", "that", "of", "the", "API" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/glassfish.py#L36-L49
train
saltstack/salt
salt/states/glassfish.py
_is_updated
def _is_updated(old_conf, new_conf): ''' Compare the API results to the current statefile data ''' changed = {} # Dirty json hacking to get parameters in the same format new_conf = _json_to_unicode(salt.utils.json.loads( salt.utils.json.dumps(new_conf, ensure_ascii=False))) old_conf = salt.utils.json.loads(salt.utils.json.dumps(old_conf, ensure_ascii=False)) for key, value in old_conf.items(): oldval = six.text_type(value).lower() if key in new_conf: newval = six.text_type(new_conf[key]).lower() if oldval == 'null' or oldval == 'none': oldval = '' if key in new_conf and newval != oldval: changed[key] = {'old': oldval, 'new': newval} return changed
python
def _is_updated(old_conf, new_conf): ''' Compare the API results to the current statefile data ''' changed = {} # Dirty json hacking to get parameters in the same format new_conf = _json_to_unicode(salt.utils.json.loads( salt.utils.json.dumps(new_conf, ensure_ascii=False))) old_conf = salt.utils.json.loads(salt.utils.json.dumps(old_conf, ensure_ascii=False)) for key, value in old_conf.items(): oldval = six.text_type(value).lower() if key in new_conf: newval = six.text_type(new_conf[key]).lower() if oldval == 'null' or oldval == 'none': oldval = '' if key in new_conf and newval != oldval: changed[key] = {'old': oldval, 'new': newval} return changed
[ "def", "_is_updated", "(", "old_conf", ",", "new_conf", ")", ":", "changed", "=", "{", "}", "# Dirty json hacking to get parameters in the same format", "new_conf", "=", "_json_to_unicode", "(", "salt", ".", "utils", ".", "json", ".", "loads", "(", "salt", ".", ...
Compare the API results to the current statefile data
[ "Compare", "the", "API", "results", "to", "the", "current", "statefile", "data" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/glassfish.py#L52-L71
train
saltstack/salt
salt/states/glassfish.py
_do_element_present
def _do_element_present(name, elem_type, data, server=None): ''' Generic function to create or update an element ''' ret = {'changes': {}, 'update': False, 'create': False, 'error': None} try: elements = __salt__['glassfish.enum_{0}'.format(elem_type)]() except requests.ConnectionError as error: if __opts__['test']: ret['changes'] = {'Name': name, 'Params': data} ret['create'] = True return ret else: ret['error'] = "Can't connect to the server" return ret if not elements or name not in elements: ret['changes'] = {'Name': name, 'Params': data} ret['create'] = True if not __opts__['test']: try: __salt__['glassfish.create_{0}'.format(elem_type)](name, server=server, **data) except CommandExecutionError as error: ret['error'] = error return ret elif elements and any(data): current_data = __salt__['glassfish.get_{0}'.format(elem_type)](name, server=server) data_diff = _is_updated(current_data, data) if data_diff: ret['update'] = True ret['changes'] = data_diff if not __opts__['test']: try: __salt__['glassfish.update_{0}'.format(elem_type)](name, server=server, **data) except CommandExecutionError as error: ret['error'] = error return ret
python
def _do_element_present(name, elem_type, data, server=None): ''' Generic function to create or update an element ''' ret = {'changes': {}, 'update': False, 'create': False, 'error': None} try: elements = __salt__['glassfish.enum_{0}'.format(elem_type)]() except requests.ConnectionError as error: if __opts__['test']: ret['changes'] = {'Name': name, 'Params': data} ret['create'] = True return ret else: ret['error'] = "Can't connect to the server" return ret if not elements or name not in elements: ret['changes'] = {'Name': name, 'Params': data} ret['create'] = True if not __opts__['test']: try: __salt__['glassfish.create_{0}'.format(elem_type)](name, server=server, **data) except CommandExecutionError as error: ret['error'] = error return ret elif elements and any(data): current_data = __salt__['glassfish.get_{0}'.format(elem_type)](name, server=server) data_diff = _is_updated(current_data, data) if data_diff: ret['update'] = True ret['changes'] = data_diff if not __opts__['test']: try: __salt__['glassfish.update_{0}'.format(elem_type)](name, server=server, **data) except CommandExecutionError as error: ret['error'] = error return ret
[ "def", "_do_element_present", "(", "name", ",", "elem_type", ",", "data", ",", "server", "=", "None", ")", ":", "ret", "=", "{", "'changes'", ":", "{", "}", ",", "'update'", ":", "False", ",", "'create'", ":", "False", ",", "'error'", ":", "None", "}...
Generic function to create or update an element
[ "Generic", "function", "to", "create", "or", "update", "an", "element" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/glassfish.py#L74-L110
train
saltstack/salt
salt/states/glassfish.py
_do_element_absent
def _do_element_absent(name, elem_type, data, server=None): ''' Generic function to delete an element ''' ret = {'delete': False, 'error': None} try: elements = __salt__['glassfish.enum_{0}'.format(elem_type)]() except requests.ConnectionError as error: if __opts__['test']: ret['create'] = True return ret else: ret['error'] = "Can't connect to the server" return ret if elements and name in elements: ret['delete'] = True if not __opts__['test']: try: __salt__['glassfish.delete_{0}'.format(elem_type)](name, server=server, **data) except CommandExecutionError as error: ret['error'] = error return ret
python
def _do_element_absent(name, elem_type, data, server=None): ''' Generic function to delete an element ''' ret = {'delete': False, 'error': None} try: elements = __salt__['glassfish.enum_{0}'.format(elem_type)]() except requests.ConnectionError as error: if __opts__['test']: ret['create'] = True return ret else: ret['error'] = "Can't connect to the server" return ret if elements and name in elements: ret['delete'] = True if not __opts__['test']: try: __salt__['glassfish.delete_{0}'.format(elem_type)](name, server=server, **data) except CommandExecutionError as error: ret['error'] = error return ret
[ "def", "_do_element_absent", "(", "name", ",", "elem_type", ",", "data", ",", "server", "=", "None", ")", ":", "ret", "=", "{", "'delete'", ":", "False", ",", "'error'", ":", "None", "}", "try", ":", "elements", "=", "__salt__", "[", "'glassfish.enum_{0}...
Generic function to delete an element
[ "Generic", "function", "to", "delete", "an", "element" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/glassfish.py#L113-L135
train
saltstack/salt
salt/states/glassfish.py
connection_factory_present
def connection_factory_present(name, restype='connection_factory', description='', enabled=True, min_size=1, max_size=250, resize_quantity=2, idle_timeout=300, wait_timeout=60, reconnect_on_failure=False, transaction_support='', connection_validation=False, server=None): ''' Ensures that the Connection Factory is present name Name of the connection factory restype Type of the connection factory, can be either ``connection_factory``, ``queue_connection_factory` or ``topic_connection_factory``, defaults to ``connection_factory`` description Description of the connection factory enabled Is the connection factory enabled? defaults to ``true`` min_size Minimum and initial number of connections in the pool, defaults to ``1`` max_size Maximum number of connections that can be created in the pool, defaults to ``250`` resize_quantity Number of connections to be removed when idle_timeout expires, defaults to ``2`` idle_timeout Maximum time a connection can remain idle in the pool, in seconds, defaults to ``300`` wait_timeout Maximum time a caller can wait before timeout, in seconds, defaults to ``60`` reconnect_on_failure Close all connections and reconnect on failure (or reconnect only when used), defaults to ``false`` transaction_support Level of transaction support, can be either ``XATransaction``, ``LocalTransaction`` or ``NoTransaction`` connection_validation Connection validation is required, defaults to ``false`` ''' ret = {'name': name, 'result': None, 'comment': None, 'changes': {}} # Manage parameters pool_data = {} res_data = {} pool_name = '{0}-Connection-Pool'.format(name) if restype == 'topic_connection_factory': pool_data['connectionDefinitionName'] = 'javax.jms.TopicConnectionFactory' elif restype == 'queue_connection_factory': pool_data['connectionDefinitionName'] = 'javax.jms.QueueConnectionFactory' elif restype == 'connection_factory': pool_data['connectionDefinitionName'] = 'javax.jms.ConnectionFactory' else: ret['result'] = False ret['comment'] = 
'Invalid restype' return ret pool_data['description'] = description res_data['description'] = description res_data['enabled'] = enabled res_data['poolName'] = pool_name pool_data['steadyPoolSize'] = min_size pool_data['maxPoolSize'] = max_size pool_data['poolResizeQuantity'] = resize_quantity pool_data['idleTimeoutInSeconds'] = idle_timeout pool_data['maxWaitTimeInMillis'] = wait_timeout*1000 pool_data['failAllConnections'] = reconnect_on_failure if transaction_support: if transaction_support == 'xa_transaction': pool_data['transactionSupport'] = 'XATransaction' elif transaction_support == 'local_transaction': pool_data['transactionSupport'] = 'LocalTransaction' elif transaction_support == 'no_transaction': pool_data['transactionSupport'] = 'NoTransaction' else: ret['result'] = False ret['comment'] = 'Invalid transaction_support' return ret pool_data['isConnectionValidationRequired'] = connection_validation pool_ret = _do_element_present(pool_name, 'connector_c_pool', pool_data, server) res_ret = _do_element_present(name, 'connector_resource', res_data, server) if not pool_ret['error'] and not res_ret['error']: if not __opts__['test']: ret['result'] = True if pool_ret['create'] or res_ret['create']: ret['changes']['pool'] = pool_ret['changes'] ret['changes']['resource'] = res_ret['changes'] if __opts__['test']: ret['comment'] = 'Connection factory set to be created' else: ret['comment'] = 'Connection factory created' elif pool_ret['update'] or res_ret['update']: ret['changes']['pool'] = pool_ret['changes'] ret['changes']['resource'] = res_ret['changes'] if __opts__['test']: ret['comment'] = 'Connection factory set to be updated' else: ret['comment'] = 'Connection factory updated' else: ret['result'] = True ret['changes'] = {} ret['comment'] = 'Connection factory is already up-to-date' else: ret['result'] = False ret['comment'] = 'ERROR: {0} // {1}'.format(pool_ret['error'], res_ret['error']) return ret
python
def connection_factory_present(name, restype='connection_factory', description='', enabled=True, min_size=1, max_size=250, resize_quantity=2, idle_timeout=300, wait_timeout=60, reconnect_on_failure=False, transaction_support='', connection_validation=False, server=None): ''' Ensures that the Connection Factory is present name Name of the connection factory restype Type of the connection factory, can be either ``connection_factory``, ``queue_connection_factory` or ``topic_connection_factory``, defaults to ``connection_factory`` description Description of the connection factory enabled Is the connection factory enabled? defaults to ``true`` min_size Minimum and initial number of connections in the pool, defaults to ``1`` max_size Maximum number of connections that can be created in the pool, defaults to ``250`` resize_quantity Number of connections to be removed when idle_timeout expires, defaults to ``2`` idle_timeout Maximum time a connection can remain idle in the pool, in seconds, defaults to ``300`` wait_timeout Maximum time a caller can wait before timeout, in seconds, defaults to ``60`` reconnect_on_failure Close all connections and reconnect on failure (or reconnect only when used), defaults to ``false`` transaction_support Level of transaction support, can be either ``XATransaction``, ``LocalTransaction`` or ``NoTransaction`` connection_validation Connection validation is required, defaults to ``false`` ''' ret = {'name': name, 'result': None, 'comment': None, 'changes': {}} # Manage parameters pool_data = {} res_data = {} pool_name = '{0}-Connection-Pool'.format(name) if restype == 'topic_connection_factory': pool_data['connectionDefinitionName'] = 'javax.jms.TopicConnectionFactory' elif restype == 'queue_connection_factory': pool_data['connectionDefinitionName'] = 'javax.jms.QueueConnectionFactory' elif restype == 'connection_factory': pool_data['connectionDefinitionName'] = 'javax.jms.ConnectionFactory' else: ret['result'] = False ret['comment'] = 
'Invalid restype' return ret pool_data['description'] = description res_data['description'] = description res_data['enabled'] = enabled res_data['poolName'] = pool_name pool_data['steadyPoolSize'] = min_size pool_data['maxPoolSize'] = max_size pool_data['poolResizeQuantity'] = resize_quantity pool_data['idleTimeoutInSeconds'] = idle_timeout pool_data['maxWaitTimeInMillis'] = wait_timeout*1000 pool_data['failAllConnections'] = reconnect_on_failure if transaction_support: if transaction_support == 'xa_transaction': pool_data['transactionSupport'] = 'XATransaction' elif transaction_support == 'local_transaction': pool_data['transactionSupport'] = 'LocalTransaction' elif transaction_support == 'no_transaction': pool_data['transactionSupport'] = 'NoTransaction' else: ret['result'] = False ret['comment'] = 'Invalid transaction_support' return ret pool_data['isConnectionValidationRequired'] = connection_validation pool_ret = _do_element_present(pool_name, 'connector_c_pool', pool_data, server) res_ret = _do_element_present(name, 'connector_resource', res_data, server) if not pool_ret['error'] and not res_ret['error']: if not __opts__['test']: ret['result'] = True if pool_ret['create'] or res_ret['create']: ret['changes']['pool'] = pool_ret['changes'] ret['changes']['resource'] = res_ret['changes'] if __opts__['test']: ret['comment'] = 'Connection factory set to be created' else: ret['comment'] = 'Connection factory created' elif pool_ret['update'] or res_ret['update']: ret['changes']['pool'] = pool_ret['changes'] ret['changes']['resource'] = res_ret['changes'] if __opts__['test']: ret['comment'] = 'Connection factory set to be updated' else: ret['comment'] = 'Connection factory updated' else: ret['result'] = True ret['changes'] = {} ret['comment'] = 'Connection factory is already up-to-date' else: ret['result'] = False ret['comment'] = 'ERROR: {0} // {1}'.format(pool_ret['error'], res_ret['error']) return ret
[ "def", "connection_factory_present", "(", "name", ",", "restype", "=", "'connection_factory'", ",", "description", "=", "''", ",", "enabled", "=", "True", ",", "min_size", "=", "1", ",", "max_size", "=", "250", ",", "resize_quantity", "=", "2", ",", "idle_ti...
Ensures that the Connection Factory is present name Name of the connection factory restype Type of the connection factory, can be either ``connection_factory``, ``queue_connection_factory` or ``topic_connection_factory``, defaults to ``connection_factory`` description Description of the connection factory enabled Is the connection factory enabled? defaults to ``true`` min_size Minimum and initial number of connections in the pool, defaults to ``1`` max_size Maximum number of connections that can be created in the pool, defaults to ``250`` resize_quantity Number of connections to be removed when idle_timeout expires, defaults to ``2`` idle_timeout Maximum time a connection can remain idle in the pool, in seconds, defaults to ``300`` wait_timeout Maximum time a caller can wait before timeout, in seconds, defaults to ``60`` reconnect_on_failure Close all connections and reconnect on failure (or reconnect only when used), defaults to ``false`` transaction_support Level of transaction support, can be either ``XATransaction``, ``LocalTransaction`` or ``NoTransaction`` connection_validation Connection validation is required, defaults to ``false``
[ "Ensures", "that", "the", "Connection", "Factory", "is", "present" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/glassfish.py#L138-L259
train
saltstack/salt
salt/states/glassfish.py
connection_factory_absent
def connection_factory_absent(name, both=True, server=None): ''' Ensures the transaction factory is absent. name Name of the connection factory both Delete both the pool and the resource, defaults to ``true`` ''' ret = {'name': name, 'result': None, 'comment': None, 'changes': {}} pool_name = '{0}-Connection-Pool'.format(name) pool_ret = _do_element_absent(pool_name, 'connector_c_pool', {'cascade': both}, server) if not pool_ret['error']: if __opts__['test'] and pool_ret['delete']: ret['comment'] = 'Connection Factory set to be deleted' elif pool_ret['delete']: ret['result'] = True ret['comment'] = 'Connection Factory deleted' else: ret['result'] = True ret['comment'] = 'Connection Factory doesn\'t exist' else: ret['result'] = False ret['comment'] = 'Error: {0}'.format(pool_ret['error']) return ret
python
def connection_factory_absent(name, both=True, server=None): ''' Ensures the transaction factory is absent. name Name of the connection factory both Delete both the pool and the resource, defaults to ``true`` ''' ret = {'name': name, 'result': None, 'comment': None, 'changes': {}} pool_name = '{0}-Connection-Pool'.format(name) pool_ret = _do_element_absent(pool_name, 'connector_c_pool', {'cascade': both}, server) if not pool_ret['error']: if __opts__['test'] and pool_ret['delete']: ret['comment'] = 'Connection Factory set to be deleted' elif pool_ret['delete']: ret['result'] = True ret['comment'] = 'Connection Factory deleted' else: ret['result'] = True ret['comment'] = 'Connection Factory doesn\'t exist' else: ret['result'] = False ret['comment'] = 'Error: {0}'.format(pool_ret['error']) return ret
[ "def", "connection_factory_absent", "(", "name", ",", "both", "=", "True", ",", "server", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "None", ",", "'comment'", ":", "None", ",", "'changes'", ":", "{", "}", "}"...
Ensures the transaction factory is absent. name Name of the connection factory both Delete both the pool and the resource, defaults to ``true``
[ "Ensures", "the", "transaction", "factory", "is", "absent", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/glassfish.py#L262-L288
train
saltstack/salt
salt/states/glassfish.py
destination_present
def destination_present(name, physical, restype='queue', description='', enabled=True, server=None): ''' Ensures that the JMS Destination Resource (queue or topic) is present name The JMS Queue/Topic name physical The Physical destination name restype The JMS Destination resource type, either ``queue`` or ``topic``, defaults is ``queue`` description A description of the resource enabled Defaults to ``True`` ''' ret = {'name': name, 'result': None, 'comment': None, 'changes': {}} params = {} # Set parameters dict if restype == 'queue': params['resType'] = 'javax.jms.Queue' params['className'] = 'com.sun.messaging.Queue' elif restype == 'topic': params['resType'] = 'javax.jms.Topic' params['className'] = 'com.sun.messaging.Topic' else: ret['result'] = False ret['comment'] = 'Invalid restype' return ret params['properties'] = {'Name': physical} params['description'] = description params['enabled'] = enabled jms_ret = _do_element_present(name, 'admin_object_resource', params, server) if not jms_ret['error']: if not __opts__['test']: ret['result'] = True if jms_ret['create'] and __opts__['test']: ret['comment'] = 'JMS Queue set to be created' elif jms_ret['create']: ret['changes'] = jms_ret['changes'] ret['comment'] = 'JMS queue created' elif jms_ret['update'] and __opts__['test']: ret['comment'] = 'JMS Queue set to be updated' elif jms_ret['update']: ret['changes'] = jms_ret['changes'] ret['comment'] = 'JMS Queue updated' else: ret['result'] = True ret['comment'] = 'JMS Queue already up-to-date' else: ret['result'] = False ret['comment'] = 'Error from API: {0}'.format(jms_ret['error']) return ret
python
def destination_present(name, physical, restype='queue', description='', enabled=True, server=None): ''' Ensures that the JMS Destination Resource (queue or topic) is present name The JMS Queue/Topic name physical The Physical destination name restype The JMS Destination resource type, either ``queue`` or ``topic``, defaults is ``queue`` description A description of the resource enabled Defaults to ``True`` ''' ret = {'name': name, 'result': None, 'comment': None, 'changes': {}} params = {} # Set parameters dict if restype == 'queue': params['resType'] = 'javax.jms.Queue' params['className'] = 'com.sun.messaging.Queue' elif restype == 'topic': params['resType'] = 'javax.jms.Topic' params['className'] = 'com.sun.messaging.Topic' else: ret['result'] = False ret['comment'] = 'Invalid restype' return ret params['properties'] = {'Name': physical} params['description'] = description params['enabled'] = enabled jms_ret = _do_element_present(name, 'admin_object_resource', params, server) if not jms_ret['error']: if not __opts__['test']: ret['result'] = True if jms_ret['create'] and __opts__['test']: ret['comment'] = 'JMS Queue set to be created' elif jms_ret['create']: ret['changes'] = jms_ret['changes'] ret['comment'] = 'JMS queue created' elif jms_ret['update'] and __opts__['test']: ret['comment'] = 'JMS Queue set to be updated' elif jms_ret['update']: ret['changes'] = jms_ret['changes'] ret['comment'] = 'JMS Queue updated' else: ret['result'] = True ret['comment'] = 'JMS Queue already up-to-date' else: ret['result'] = False ret['comment'] = 'Error from API: {0}'.format(jms_ret['error']) return ret
[ "def", "destination_present", "(", "name", ",", "physical", ",", "restype", "=", "'queue'", ",", "description", "=", "''", ",", "enabled", "=", "True", ",", "server", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", ...
Ensures that the JMS Destination Resource (queue or topic) is present name The JMS Queue/Topic name physical The Physical destination name restype The JMS Destination resource type, either ``queue`` or ``topic``, defaults is ``queue`` description A description of the resource enabled Defaults to ``True``
[ "Ensures", "that", "the", "JMS", "Destination", "Resource", "(", "queue", "or", "topic", ")", "is", "present" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/glassfish.py#L291-L353
train
saltstack/salt
salt/states/glassfish.py
destination_absent
def destination_absent(name, server=None): ''' Ensures that the JMS Destination doesn't exists name Name of the JMS Destination ''' ret = {'name': name, 'result': None, 'comment': None, 'changes': {}} jms_ret = _do_element_absent(name, 'admin_object_resource', {}, server) if not jms_ret['error']: if __opts__['test'] and jms_ret['delete']: ret['comment'] = 'JMS Queue set to be deleted' elif jms_ret['delete']: ret['result'] = True ret['comment'] = 'JMS Queue deleted' else: ret['result'] = True ret['comment'] = 'JMS Queue doesn\'t exist' else: ret['result'] = False ret['comment'] = 'Error: {0}'.format(jms_ret['error']) return ret
python
def destination_absent(name, server=None): ''' Ensures that the JMS Destination doesn't exists name Name of the JMS Destination ''' ret = {'name': name, 'result': None, 'comment': None, 'changes': {}} jms_ret = _do_element_absent(name, 'admin_object_resource', {}, server) if not jms_ret['error']: if __opts__['test'] and jms_ret['delete']: ret['comment'] = 'JMS Queue set to be deleted' elif jms_ret['delete']: ret['result'] = True ret['comment'] = 'JMS Queue deleted' else: ret['result'] = True ret['comment'] = 'JMS Queue doesn\'t exist' else: ret['result'] = False ret['comment'] = 'Error: {0}'.format(jms_ret['error']) return ret
[ "def", "destination_absent", "(", "name", ",", "server", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "None", ",", "'comment'", ":", "None", ",", "'changes'", ":", "{", "}", "}", "jms_ret", "=", "_do_element_abse...
Ensures that the JMS Destination doesn't exists name Name of the JMS Destination
[ "Ensures", "that", "the", "JMS", "Destination", "doesn", "t", "exists" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/glassfish.py#L356-L377
train
saltstack/salt
salt/states/glassfish.py
jdbc_datasource_present
def jdbc_datasource_present(name, description='', enabled=True, restype='datasource', vendor='mysql', sql_url='', sql_user='', sql_password='', min_size=8, max_size=32, resize_quantity=2, idle_timeout=300, wait_timeout=60, non_transactional=False, transaction_isolation='', isolation_guaranteed=True, server=None): ''' Ensures that the JDBC Datasource exists name Name of the datasource description Description of the datasource enabled Is the datasource enabled? defaults to ``true`` restype Resource type, can be ``datasource``, ``xa_datasource``, ``connection_pool_datasource`` or ``driver``, defaults to ``datasource`` vendor SQL Server type, currently supports ``mysql``, ``postgresql`` and ``mssql``, defaults to ``mysql`` sql_url URL of the server in jdbc form sql_user Username for the server sql_password Password for that username min_size Minimum and initial number of connections in the pool, defaults to ``8`` max_size Maximum number of connections that can be created in the pool, defaults to ``32`` resize_quantity Number of connections to be removed when idle_timeout expires, defaults to ``2`` idle_timeout Maximum time a connection can remain idle in the pool, in seconds, defaults to ``300`` wait_timeout Maximum time a caller can wait before timeout, in seconds, defaults to ``60`` non_transactional Return non-transactional connections transaction_isolation Defaults to the JDBC driver default isolation_guaranteed All connections use the same isolation level ''' ret = {'name': name, 'result': None, 'comment': None, 'changes': {}} # Manage parameters res_name = 'jdbc/{0}'.format(name) pool_data = {} pool_data_properties = {} res_data = {} if restype == 'datasource': pool_data['resType'] = 'javax.sql.DataSource' elif restype == 'xa_datasource': pool_data['resType'] = 'javax.sql.XADataSource' elif restype == 'connection_pool_datasource': pool_data['resType'] = 'javax.sql.ConnectionPoolDataSource' elif restype == 'driver': pool_data['resType'] = 'javax.sql.Driver' 
datasources = {} datasources['mysql'] = { 'driver': 'com.mysql.jdbc.Driver', 'datasource': 'com.mysql.jdbc.jdbc2.optional.MysqlDataSource', 'xa_datasource': 'com.mysql.jdbc.jdbc2.optional.MysqlXADataSource', 'connection_pool_datasource': 'com.mysql.jdbc.jdbc2.optional.MysqlConnectionPoolDataSource' } datasources['postgresql'] = { 'driver': 'org.postgresql.Driver', 'datasource': 'org.postgresql.ds.PGSimpleDataSource', 'xa_datasource': 'org.postgresql.xa.PGXADataSource', 'connection_pool_datasource': 'org.postgresql.ds.PGConnectionPoolDataSource' } datasources['mssql'] = { 'driver': 'com.microsoft.sqlserver.jdbc.SQLServerDriver', 'datasource': 'com.microsoft.sqlserver.jdbc.SQLServerDataSource', 'xa_datasource': 'com.microsoft.sqlserver.jdbc.SQLServerXADataSource', 'connection_pool_datasource': 'com.microsoft.sqlserver.jdbc.SQLServerConnectionPoolDataSource' } if restype == 'driver': pool_data['driverClassname'] = datasources[vendor]['driver'] else: pool_data['datasourceClassname'] = datasources[vendor][restype] pool_data_properties['url'] = sql_url pool_data_properties['user'] = sql_user pool_data_properties['password'] = sql_password pool_data['properties'] = pool_data_properties pool_data['description'] = description res_data['description'] = description res_data['poolName'] = name res_data['enabled'] = enabled pool_data['steadyPoolSize'] = min_size pool_data['maxPoolSize'] = max_size pool_data['poolResizeQuantity'] = resize_quantity pool_data['idleTimeoutInSeconds'] = idle_timeout pool_data['maxWaitTimeInMillis'] = wait_timeout*1000 pool_data['nonTransactionalConnections'] = non_transactional pool_data['transactionIsolationLevel'] = transaction_isolation pool_data['isIsolationLevelGuaranteed'] = isolation_guaranteed pool_ret = _do_element_present(name, 'jdbc_connection_pool', pool_data, server) res_ret = _do_element_present(res_name, 'jdbc_resource', res_data, server) if not pool_ret['error'] and not res_ret['error']: if not __opts__['test']: ret['result'] = True 
if pool_ret['create'] or res_ret['create']: ret['changes']['pool'] = pool_ret['changes'] ret['changes']['resource'] = res_ret['changes'] if __opts__['test']: ret['comment'] = 'JDBC Datasource set to be created' else: ret['comment'] = 'JDBC Datasource created' elif pool_ret['update'] or res_ret['update']: ret['changes']['pool'] = pool_ret['changes'] ret['changes']['resource'] = res_ret['changes'] if __opts__['test']: ret['comment'] = 'JDBC Datasource set to be updated' else: ret['comment'] = 'JDBC Datasource updated' else: ret['result'] = True ret['changes'] = {} ret['comment'] = 'JDBC Datasource is already up-to-date' else: ret['result'] = False ret['comment'] = 'ERROR: {0} // {1}'.format(pool_ret['error'], res_ret['error']) return ret
python
def jdbc_datasource_present(name, description='', enabled=True, restype='datasource', vendor='mysql', sql_url='', sql_user='', sql_password='', min_size=8, max_size=32, resize_quantity=2, idle_timeout=300, wait_timeout=60, non_transactional=False, transaction_isolation='', isolation_guaranteed=True, server=None): ''' Ensures that the JDBC Datasource exists name Name of the datasource description Description of the datasource enabled Is the datasource enabled? defaults to ``true`` restype Resource type, can be ``datasource``, ``xa_datasource``, ``connection_pool_datasource`` or ``driver``, defaults to ``datasource`` vendor SQL Server type, currently supports ``mysql``, ``postgresql`` and ``mssql``, defaults to ``mysql`` sql_url URL of the server in jdbc form sql_user Username for the server sql_password Password for that username min_size Minimum and initial number of connections in the pool, defaults to ``8`` max_size Maximum number of connections that can be created in the pool, defaults to ``32`` resize_quantity Number of connections to be removed when idle_timeout expires, defaults to ``2`` idle_timeout Maximum time a connection can remain idle in the pool, in seconds, defaults to ``300`` wait_timeout Maximum time a caller can wait before timeout, in seconds, defaults to ``60`` non_transactional Return non-transactional connections transaction_isolation Defaults to the JDBC driver default isolation_guaranteed All connections use the same isolation level ''' ret = {'name': name, 'result': None, 'comment': None, 'changes': {}} # Manage parameters res_name = 'jdbc/{0}'.format(name) pool_data = {} pool_data_properties = {} res_data = {} if restype == 'datasource': pool_data['resType'] = 'javax.sql.DataSource' elif restype == 'xa_datasource': pool_data['resType'] = 'javax.sql.XADataSource' elif restype == 'connection_pool_datasource': pool_data['resType'] = 'javax.sql.ConnectionPoolDataSource' elif restype == 'driver': pool_data['resType'] = 'javax.sql.Driver' 
datasources = {} datasources['mysql'] = { 'driver': 'com.mysql.jdbc.Driver', 'datasource': 'com.mysql.jdbc.jdbc2.optional.MysqlDataSource', 'xa_datasource': 'com.mysql.jdbc.jdbc2.optional.MysqlXADataSource', 'connection_pool_datasource': 'com.mysql.jdbc.jdbc2.optional.MysqlConnectionPoolDataSource' } datasources['postgresql'] = { 'driver': 'org.postgresql.Driver', 'datasource': 'org.postgresql.ds.PGSimpleDataSource', 'xa_datasource': 'org.postgresql.xa.PGXADataSource', 'connection_pool_datasource': 'org.postgresql.ds.PGConnectionPoolDataSource' } datasources['mssql'] = { 'driver': 'com.microsoft.sqlserver.jdbc.SQLServerDriver', 'datasource': 'com.microsoft.sqlserver.jdbc.SQLServerDataSource', 'xa_datasource': 'com.microsoft.sqlserver.jdbc.SQLServerXADataSource', 'connection_pool_datasource': 'com.microsoft.sqlserver.jdbc.SQLServerConnectionPoolDataSource' } if restype == 'driver': pool_data['driverClassname'] = datasources[vendor]['driver'] else: pool_data['datasourceClassname'] = datasources[vendor][restype] pool_data_properties['url'] = sql_url pool_data_properties['user'] = sql_user pool_data_properties['password'] = sql_password pool_data['properties'] = pool_data_properties pool_data['description'] = description res_data['description'] = description res_data['poolName'] = name res_data['enabled'] = enabled pool_data['steadyPoolSize'] = min_size pool_data['maxPoolSize'] = max_size pool_data['poolResizeQuantity'] = resize_quantity pool_data['idleTimeoutInSeconds'] = idle_timeout pool_data['maxWaitTimeInMillis'] = wait_timeout*1000 pool_data['nonTransactionalConnections'] = non_transactional pool_data['transactionIsolationLevel'] = transaction_isolation pool_data['isIsolationLevelGuaranteed'] = isolation_guaranteed pool_ret = _do_element_present(name, 'jdbc_connection_pool', pool_data, server) res_ret = _do_element_present(res_name, 'jdbc_resource', res_data, server) if not pool_ret['error'] and not res_ret['error']: if not __opts__['test']: ret['result'] = True 
if pool_ret['create'] or res_ret['create']: ret['changes']['pool'] = pool_ret['changes'] ret['changes']['resource'] = res_ret['changes'] if __opts__['test']: ret['comment'] = 'JDBC Datasource set to be created' else: ret['comment'] = 'JDBC Datasource created' elif pool_ret['update'] or res_ret['update']: ret['changes']['pool'] = pool_ret['changes'] ret['changes']['resource'] = res_ret['changes'] if __opts__['test']: ret['comment'] = 'JDBC Datasource set to be updated' else: ret['comment'] = 'JDBC Datasource updated' else: ret['result'] = True ret['changes'] = {} ret['comment'] = 'JDBC Datasource is already up-to-date' else: ret['result'] = False ret['comment'] = 'ERROR: {0} // {1}'.format(pool_ret['error'], res_ret['error']) return ret
[ "def", "jdbc_datasource_present", "(", "name", ",", "description", "=", "''", ",", "enabled", "=", "True", ",", "restype", "=", "'datasource'", ",", "vendor", "=", "'mysql'", ",", "sql_url", "=", "''", ",", "sql_user", "=", "''", ",", "sql_password", "=", ...
Ensures that the JDBC Datasource exists name Name of the datasource description Description of the datasource enabled Is the datasource enabled? defaults to ``true`` restype Resource type, can be ``datasource``, ``xa_datasource``, ``connection_pool_datasource`` or ``driver``, defaults to ``datasource`` vendor SQL Server type, currently supports ``mysql``, ``postgresql`` and ``mssql``, defaults to ``mysql`` sql_url URL of the server in jdbc form sql_user Username for the server sql_password Password for that username min_size Minimum and initial number of connections in the pool, defaults to ``8`` max_size Maximum number of connections that can be created in the pool, defaults to ``32`` resize_quantity Number of connections to be removed when idle_timeout expires, defaults to ``2`` idle_timeout Maximum time a connection can remain idle in the pool, in seconds, defaults to ``300`` wait_timeout Maximum time a caller can wait before timeout, in seconds, defaults to ``60`` non_transactional Return non-transactional connections transaction_isolation Defaults to the JDBC driver default isolation_guaranteed All connections use the same isolation level
[ "Ensures", "that", "the", "JDBC", "Datasource", "exists" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/glassfish.py#L380-L537
train
saltstack/salt
salt/states/glassfish.py
jdbc_datasource_absent
def jdbc_datasource_absent(name, both=True, server=None): ''' Ensures the JDBC Datasource doesn't exists name Name of the datasource both Delete both the pool and the resource, defaults to ``true`` ''' ret = {'name': name, 'result': None, 'comment': None, 'changes': {}} pool_ret = _do_element_absent(name, 'jdbc_connection_pool', {'cascade': both}, server) if not pool_ret['error']: if __opts__['test'] and pool_ret['delete']: ret['comment'] = 'JDBC Datasource set to be deleted' elif pool_ret['delete']: ret['result'] = True ret['comment'] = 'JDBC Datasource deleted' else: ret['result'] = True ret['comment'] = 'JDBC Datasource doesn\'t exist' else: ret['result'] = False ret['comment'] = 'Error: {0}'.format(pool_ret['error']) return ret
python
def jdbc_datasource_absent(name, both=True, server=None): ''' Ensures the JDBC Datasource doesn't exists name Name of the datasource both Delete both the pool and the resource, defaults to ``true`` ''' ret = {'name': name, 'result': None, 'comment': None, 'changes': {}} pool_ret = _do_element_absent(name, 'jdbc_connection_pool', {'cascade': both}, server) if not pool_ret['error']: if __opts__['test'] and pool_ret['delete']: ret['comment'] = 'JDBC Datasource set to be deleted' elif pool_ret['delete']: ret['result'] = True ret['comment'] = 'JDBC Datasource deleted' else: ret['result'] = True ret['comment'] = 'JDBC Datasource doesn\'t exist' else: ret['result'] = False ret['comment'] = 'Error: {0}'.format(pool_ret['error']) return ret
[ "def", "jdbc_datasource_absent", "(", "name", ",", "both", "=", "True", ",", "server", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "None", ",", "'comment'", ":", "None", ",", "'changes'", ":", "{", "}", "}", ...
Ensures the JDBC Datasource doesn't exists name Name of the datasource both Delete both the pool and the resource, defaults to ``true``
[ "Ensures", "the", "JDBC", "Datasource", "doesn", "t", "exists" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/glassfish.py#L540-L564
train
saltstack/salt
salt/states/glassfish.py
system_properties_present
def system_properties_present(server=None, **kwargs): ''' Ensures that the system properties are present properties The system properties ''' ret = {'name': '', 'result': None, 'comment': None, 'changes': {}} del kwargs['name'] try: data = __salt__['glassfish.get_system_properties'](server=server) except requests.ConnectionError as error: if __opts__['test']: ret['changes'] = kwargs ret['result'] = None return ret else: ret['error'] = "Can't connect to the server" return ret ret['changes'] = {'data': data, 'kwargs': kwargs} if not data == kwargs: data.update(kwargs) if not __opts__['test']: try: __salt__['glassfish.update_system_properties'](data, server=server) ret['changes'] = kwargs ret['result'] = True ret['comment'] = 'System properties updated' except CommandExecutionError as error: ret['comment'] = error ret['result'] = False else: ret['result'] = None ret['changes'] = kwargs ret['coment'] = 'System properties would have been updated' else: ret['changes'] = {} ret['result'] = True ret['comment'] = 'System properties are already up-to-date' return ret
python
def system_properties_present(server=None, **kwargs): ''' Ensures that the system properties are present properties The system properties ''' ret = {'name': '', 'result': None, 'comment': None, 'changes': {}} del kwargs['name'] try: data = __salt__['glassfish.get_system_properties'](server=server) except requests.ConnectionError as error: if __opts__['test']: ret['changes'] = kwargs ret['result'] = None return ret else: ret['error'] = "Can't connect to the server" return ret ret['changes'] = {'data': data, 'kwargs': kwargs} if not data == kwargs: data.update(kwargs) if not __opts__['test']: try: __salt__['glassfish.update_system_properties'](data, server=server) ret['changes'] = kwargs ret['result'] = True ret['comment'] = 'System properties updated' except CommandExecutionError as error: ret['comment'] = error ret['result'] = False else: ret['result'] = None ret['changes'] = kwargs ret['coment'] = 'System properties would have been updated' else: ret['changes'] = {} ret['result'] = True ret['comment'] = 'System properties are already up-to-date' return ret
[ "def", "system_properties_present", "(", "server", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "''", ",", "'result'", ":", "None", ",", "'comment'", ":", "None", ",", "'changes'", ":", "{", "}", "}", "del", "kwargs...
Ensures that the system properties are present properties The system properties
[ "Ensures", "that", "the", "system", "properties", "are", "present" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/glassfish.py#L567-L608
train
saltstack/salt
salt/states/glassfish.py
system_properties_absent
def system_properties_absent(name, server=None): ''' Ensures that the system property doesn't exists name Name of the system property ''' ret = {'name': '', 'result': None, 'comment': None, 'changes': {}} try: data = __salt__['glassfish.get_system_properties'](server=server) except requests.ConnectionError as error: if __opts__['test']: ret['changes'] = {'Name': name} ret['result'] = None return ret else: ret['error'] = "Can't connect to the server" return ret if name in data: if not __opts__['test']: try: __salt__['glassfish.delete_system_properties'](name, server=server) ret['result'] = True ret['comment'] = 'System properties deleted' except CommandExecutionError as error: ret['comment'] = error ret['result'] = False else: ret['result'] = None ret['comment'] = 'System properties would have been deleted' ret['changes'] = {'Name': name} else: ret['result'] = True ret['comment'] = 'System properties are already absent' return ret
python
def system_properties_absent(name, server=None): ''' Ensures that the system property doesn't exists name Name of the system property ''' ret = {'name': '', 'result': None, 'comment': None, 'changes': {}} try: data = __salt__['glassfish.get_system_properties'](server=server) except requests.ConnectionError as error: if __opts__['test']: ret['changes'] = {'Name': name} ret['result'] = None return ret else: ret['error'] = "Can't connect to the server" return ret if name in data: if not __opts__['test']: try: __salt__['glassfish.delete_system_properties'](name, server=server) ret['result'] = True ret['comment'] = 'System properties deleted' except CommandExecutionError as error: ret['comment'] = error ret['result'] = False else: ret['result'] = None ret['comment'] = 'System properties would have been deleted' ret['changes'] = {'Name': name} else: ret['result'] = True ret['comment'] = 'System properties are already absent' return ret
[ "def", "system_properties_absent", "(", "name", ",", "server", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "''", ",", "'result'", ":", "None", ",", "'comment'", ":", "None", ",", "'changes'", ":", "{", "}", "}", "try", ":", "data", "=", ...
Ensures that the system property doesn't exists name Name of the system property
[ "Ensures", "that", "the", "system", "property", "doesn", "t", "exists" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/glassfish.py#L611-L647
train