code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
import logging


class Logger(object):
    """
    A wrapped logger class that allows us to implement a singleton.

    Note: ``__new__`` returns the shared inner ``__Logger`` instance
    directly, so every ``Logger(...)`` call yields the same object and
    ``logger_name`` is only honoured on the very first call.
    """

    class __Logger:
        """
        Class that enables singleton behaviour
        """

        # Name of the underlying stdlib logger this wrapper targets.
        logger_name = ''

        def __init__(self, logger_name):
            """
            Initialise singleton

            Args:
                logger_name(string): The name of the logger
            """
            self.logger_name = logger_name

        def __str__(self):
            """
            Get a string representation of this instance

            Returns:
                (string): String representation of this instance
            """
            return repr(self) + self.logger_name

        def setLevel(self, level):
            """
            Set logging to be at level

            Args:
                level(logging.LEVEL): The logging level to set
            """
            logging.info("Logger::setLevel: Logging level '%s' enabled" % level)
            logging.getLogger(self.logger_name).setLevel(level)

        def info(self, msg):
            """
            Log message at level info

            Args:
                msg(string): The message to log
            """
            logging.getLogger(self.logger_name).info(msg)

        def warning(self, msg):
            """
            Log message at level warning

            Args:
                msg(string): The message to log
            """
            logging.getLogger(self.logger_name).warning(msg)

        def error(self, msg):
            """
            Log message at level error

            Args:
                msg(string): The message to log
            """
            logging.getLogger(self.logger_name).error(msg)

        def critical(self, msg):
            """
            Log message at level critical

            Args:
                msg(string): The message to log
            """
            logging.getLogger(self.logger_name).critical(msg)

        def debug(self, msg):
            """
            Log message at level debug

            Args:
                msg(string): The message to log
            """
            logging.getLogger(self.logger_name).debug(msg)

    # Wrapping singleton class begins
    instance = None

    def __new__(cls, logger_name="default"):
        # Lazily create the shared inner instance on first construction;
        # later calls return it unchanged (their logger_name is ignored).
        if not Logger.instance:
            Logger.instance = Logger.__Logger(logger_name)
        return Logger.instance

    def __getattr__(self, name):
        # Delegate attribute reads to the shared singleton instance.
        return getattr(self.instance, name)

    def __setattr__(self, name, value):
        # BUG FIX: the original signature was ``__setattr__(self, name)``
        # and called ``setattr(self.instance, name)`` -- both missing the
        # ``value`` argument, so any attribute assignment raised TypeError.
        # Python's __setattr__ protocol requires (self, name, value).
        return setattr(self.instance, name, value)
/salt-shaker-1.0.4.tar.gz/salt-shaker-1.0.4/shaker/libs/logger.py
0.887717
0.211478
logger.py
pypi
import shaker.libs.logger

import paramiko
import pygit2


class Pygit2SSHUnsupportedError(Exception):
    """Raised when libgit2 was built without SSH transport support."""
    pass


class Pygit2KepairFromAgentUnsupportedError(Exception):
    """Raised when pygit2 lacks the 'KeypairFromAgent' credential type."""
    pass


class Pygit2SSHAgentMissingKeysError(Exception):
    """Raised when the local ssh agent appears to hold no usable keys."""
    pass


link_installation = "http://www.pygit2.org/install.html"
error_message_ssh_support = ("shaker.libs.util:check_pygit2: No SSH support found in libgit2. "
                             "Please install a version with ssh enabled (%s).\n"
                             "Note, MacOS users using brew should check the output of 'brew info libgit2' "
                             "for ssh support" % (link_installation))
error_message_credentials_support = ("shaker.libs.util:check_pygit2: Module 'KeypairFromAgent' "
                                     "not found in pygit2.features. "
                                     "Please check your pygit installation (%s)." % (link_installation))
error_message_ssh_missing_keys = ("shaker.libs.util:check_pygit2: The ssh agent doesnt appear to know "
                                  " your github key. "
                                  "Make sure you've added your key with 'ssh-add ~/.id_rsa' or similar. "
                                  " A list of the keys the agent know about can be seen with 'ssh-add -L'.")


def pygit2_parse_error(e):
    """
    Parse a pygit2 specific error into a more understandable context.
    Will also run some checks to try and help with the problem.

    Args:
        e(Exception): The exception that was raised

    Raises:
        Pygit2SSHUnsupportedError: When libgit2 reports 'Unsupported URL protocol'
        Pygit2KepairFromAgentUnsupportedError: When pygit2 lacks KeypairFromAgent
        Pygit2SSHAgentMissingKeysError: For any other error
    """
    # Common errors to look for are,
    # AttributeError: 'module' object has no attribute 'KeypairFromAgent'
    # _pygit2.GitError: Unsupported URL protocol
    #
    # BUG FIX: exceptions only carry a ``.message`` attribute on Python 2;
    # fall back to str(e) so this handler does not itself raise
    # AttributeError under Python 3.
    message = getattr(e, "message", None) or str(e)
    if (isinstance(e, pygit2.GitError)
            and message == "Unsupported URL protocol"):
        # BUG FIX: the original raised with the exception *class* as the
        # argument (Pygit2SSHUnsupportedError(Pygit2SSHUnsupportedError));
        # use the prepared human-readable message instead.
        raise Pygit2SSHUnsupportedError(error_message_ssh_support)
    elif (isinstance(e, AttributeError)
          and message == "'module' object has no attribute 'KeypairFromAgent'"):
        raise Pygit2KepairFromAgentUnsupportedError(error_message_credentials_support)
    else:
        raise Pygit2SSHAgentMissingKeysError(error_message_ssh_missing_keys)


def pygit2_info():
    """
    Output key pygit2/libgit2 information
    """
    link_versions = "http://www.pygit2.org/install.html#version-numbers"
    message_versions = ("shaker.libs.util:check_pygit2: pygit2 *requires* the correct "
                        "version of libgit2, this version was built against libgit2 version '%s'. "
                        "Please check the versions on your system if you experience "
                        "problems. (For compatibility, please refer to %s)"
                        % (pygit2.LIBGIT2_VERSION, link_versions))
    shaker.libs.logger.Logger().warning(message_versions)


def pygit2_check():
    """
    Run all checks for pygit2 sanity and raise exceptions if checks fail

    Raises:
        Pygit2SSHUnsupportedError: On ssh support check failed
        Pygit2KepairFromAgentUnsupportedError: On credential support check failed
        Pygit2SSHAgentMissingKeysError: On ssh agent key check failed
    """
    if not pygit2_check_ssh():
        raise Pygit2SSHUnsupportedError(error_message_ssh_support)
    elif not pygit2_check_credentials():
        raise Pygit2KepairFromAgentUnsupportedError(error_message_credentials_support)
    elif not pygit2_agent_has_keys():
        raise Pygit2SSHAgentMissingKeysError(error_message_ssh_missing_keys)


def pygit2_check_ssh():
    """
    Check for common pygit2 ssh problems

    Return:
        bool: True if no problems found, False otherwise
    """
    # Check for ssh support in libgit2
    if not (pygit2.features & pygit2.GIT_FEATURE_SSH):
        shaker.libs.logger.Logger().critical(error_message_ssh_support)
        return False
    message_ok = ("shaker.libs.util:pygit2_check_ssh: No ssh problems found. ")
    shaker.libs.logger.Logger().debug(message_ok)
    return True


def pygit2_check_credentials():
    """
    Check for common pygit2 credentials problems

    Return:
        bool: True if no problems found, False otherwise
    """
    # Check for KeypairFromAgent support in pygit2.
    # (An unused local ``link_installation`` that shadowed the module-level
    # constant was removed here.)
    if "KeypairFromAgent" not in vars(pygit2.credentials):
        shaker.libs.logger.Logger().critical(error_message_credentials_support)
        return False
    message_ok = ("shaker.libs.util:pygit2_check_credentials: No credential problems found. ")
    shaker.libs.logger.Logger().debug(message_ok)
    return True


def pygit2_agent_has_keys():
    """
    Check for common pygit2 ssh agent problems

    Return:
        bool: True if no problems found, False otherwise
    """
    agent = paramiko.Agent()
    keys = agent.get_keys()
    if len(keys) < 1:
        return False
    shaker.libs.logger.Logger().debug("shaker.libs.util:check_pygit2: "
                                      "Please check that the keys listed contain your github key...")
    for key in keys:
        shaker.libs.logger.Logger().debug("shaker.libs.util:check_pygit2: "
                                          "Found ssh agent key: %s" % key.get_base64())
    return True
/salt-shaker-1.0.4.tar.gz/salt-shaker-1.0.4/shaker/libs/pygit2_utils.py
0.492188
0.16944
pygit2_utils.py
pypi
# Salt Integration Testing (SIT) [![Build Status](https://travis-ci.org/dandb/salt-integration-testing.svg)](https://travis-ci.org/dandb/salt-integration-testing) [![Coverage Status](https://coveralls.io/repos/dandb/salt-integration-testing/badge.svg?branch=master&service=github)](https://coveralls.io/github/dandb/salt-integration-testing?branch=master) Open source project that allows users to test applying states to roles using AWS ECS (Docker). Great for integrating within your CI/CD environment! Technologies used: * Python2.6/2.7 * Docker * AWS ECS ## Before we begin Setting up a CI/CD pipeline is beyond the scope of this README. There are many tools you can use. The following assumptions are made in utilizing this project: 1. You are using AWS and your region contains a(n): * VPC that houses your CI resources * Security group that is used by your CI nodes * Subnet that your CI nodes are using * Key pair you have access to * IAM key credentials with permissions including: AmazonEC2FullAccess and AmazonEC2ContainerServiceFullAccess ## Running SIT from root directory of SIT project ```bash python -m sit.launch_review_job <job_name> <build_number> <ci_node_private_ip_address> </path/to/configs_directory> ``` * Job name and build number are used to generate a naming convention for the Docker image minion. * Private IP is used by the minion to point to its salt-master, the CI node. For this to work, you’ll need to do a few things... ## Let's begin! 1. Clone the repository ```bash git clone git@github.com:dandb/salt-integration-testing.git ``` 2. Install the project ```bash python setup.py install ``` 3. Set your configs 1. Configuration files can either be stored within the SIT repository, or anywhere else with your file system. We recommend storing the config file in /etc/sit/configs and managing via a salt state. 2. boto3 AWS credentials Time to set a profile. In this example, we are using “sit” as the profile. 
You may use the default profile if you like ~/.aws/credentials ```python [sit] aws_access_key_id=<access_key> aws_secret_access_key=<secret_key> ``` ~/.aws/configs ```python [profile sit] region=<region> ``` If you are using a profile other than default, you will have to change the default inside configs/sit.yml 3. /path/to/configs/troposphere.yml You will need to find the following values and add them: * Security group that your CI instances are using * Subnet that your CI instances are provisioned within * key pair you have access to * Ami_id from: [AWS Marketplace](https://aws.amazon.com/marketplace/search/results/ref=dtl_navgno_search_box?page=1&searchTerms=Amazon+ECS-Optimized+Amazon+Linux+AMI) 4. Launch the infrastructure in root of SIT project: ``` python setup.py troposphere ``` Errors, if any, will be shown in the terminal. Once you fix the problems (most likely an error in configs from above steps), re-run this step. 5. SIT Salt states You are most likely setting up SIT within a CI/CD environment. Your CI nodes will require the SIT repository and your configs. Here are some sample states to help you get set up! ``` {%- set sitdir = '/location/you/want/sit/to/reside/' %} {{ sitdir }}: file.directory: - user: {{ user}} - group: {{ user }} - dir_mode: 755 - makedirs: True sit: git.latest: - name: git@github.com:dandb/salt-integration-testing.git - rev: master - target: {{ sitdir }} - force: True - require: - file: {{ sitdir }} ``` 6. Configs for SIT 1. Create a directory called "configs" with these three files: * sit.yml * troposphere.yml * roles.yml You can copy these files form the SIT repository and edit them as necessary 2. Replace troposphere.yml with the followowing four variables. 
You should use private pillar to store sensitive information inside: configs/troposphere.yml ```python security_groups: {{ pillar['sit']['security_group_names'] }} key_name: {{ pillar['sit']['key_name'] }} ami_id: {{ pillar['sit']['ecs_ami_id'] }} subnet: {{ pillar['sit']['subnet'] }} ``` configs/roles.yml: Add the roles and states you would like to test in this file. A commented out example can be found within the file for guidelines. 3. configs state: ```python /location/of/where/to/keep/user/generated/sit/configs: file.recurse: - source: salt://location/of/user/generated/sit/configs - template: jinja - user: {{ user }} - group: {{ user }} - file_mode: '0755' - makedirs: True ``` 7. Now you can highstate your CI node(s) with these configurations ## Initiate/Teardown SIT You will have to do the following (potentially each build, but depends on your setup) before and after running SIT: 1. Initiation Script: create an initiation script that will run inside the CI node before any SIT-like job is to be run * Edit CI Node's master configs (file_roots and pillar_roots) to point to the workspace of the salt repo. (via SED) * Start salt-master on the CI node * Start Redis service on the CI node * Copy down the roles.yml file from your salt project workspace into the sit configs directory. * Install the SIT requirements. ```python pip install -r /path/to/sit/requirements.txt ``` Once your job is done, you will want the CI node to be able to run other jobs, and even run SIT-like jobs again. This resets the work that the Initiate did. 2. 
Teardown script: create a teardown script that will run after SIT tool is done running * Delete all keys accepted by salt-master (CI node) * Flush Redis of data * Stop salt-master service * Stop redis * Return CI node salt-master configs (file_roots and pillar_roots) to an easy to edit target (via SED) ## Contributing to SIT * Please create a pull-request (preferably referring to an issue) with a single, meaningful commit message stating what you are accomplishing. * Add unit tests to new code * Ensure all unit tests pass and coverage has not dropped ``` python setup.py test ``` * To check coverage, you can run: ``` python setup.py coverage ```
/salt-sit-0.0.3.tar.gz/salt-sit-0.0.3/README.md
0.866104
0.911456
README.md
pypi
# Import Python libs from __future__ import absolute_import, print_function, unicode_literals import copy import json import logging # Import Salt libs from salt.roster import get_roster_file try: from salt.utils.path import which as utils_which from salt.utils.stringutils import to_str as utils_to_str except ImportError: from salt.utils import which as utils_which from salt.utils import to_str as utils_to_str # Import salt-sproxy modules import salt_sproxy._roster log = logging.getLogger(__name__) CONVERSION = { "ansible_ssh_host": "host", "ansible_ssh_port": "port", "ansible_ssh_user": "user", "ansible_ssh_pass": "passwd", "ansible_sudo_pass": "sudo", "ansible_ssh_private_key_file": "priv", } __virtualname__ = "ansible" def __virtual__(): return ( utils_which("ansible-inventory") and __virtualname__, "Install `ansible` to use inventory", ) def targets(tgt, tgt_type="glob", **kwargs): """ Return the targets from the ansible inventory_file Default: /etc/salt/roster """ inventory = __runner__["salt.cmd"]( "cmd.run", "ansible-inventory -i {0} --list".format(get_roster_file(__opts__)) ) __context__["inventory"] = json.loads(utils_to_str(inventory)) if tgt_type == "nodegroup": hosts = _get_hosts_from_group(tgt) return {host: _get_hostvars(host) for host in hosts} pool = {host: _get_hostvars(host) for host in _get_hosts_from_group("all")} pool = salt_sproxy._roster.load_cache( pool, __runner__, __opts__, tgt, tgt_type=tgt_type ) log.debug("Ansible devices pool") log.debug(pool) engine = salt_sproxy._roster.TGT_FUN[tgt_type] return engine(pool, tgt, opts=__opts__) def _get_hosts_from_group(group): inventory = __context__["inventory"] hosts = [host for host in inventory.get(group, {}).get("hosts", [])] for child in inventory.get(group, {}).get("children", []): hosts.extend(_get_hosts_from_group(child)) return hosts def _get_hostvars(host): hostvars = __context__["inventory"]["_meta"].get("hostvars", {}).get(host, {}) ret = copy.deepcopy(__opts__.get("roster_defaults", 
{})) for key, value in CONVERSION.items(): if key in hostvars: ret[value] = hostvars.pop(key) ret["minion_opts"] = hostvars if "host" not in ret: ret["host"] = host return ret
/salt-sproxy-2023.8.0.tar.gz/salt-sproxy-2023.8.0/salt_sproxy/_roster/ansible.py
0.64131
0.164819
ansible.py
pypi
# Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. ## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. 
Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at conduct@saltstack.com. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. 
Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.
/salt-ssh-9000.tar.gz/salt-ssh-9000/CODE_OF_CONDUCT.md
0.577495
0.685755
CODE_OF_CONDUCT.md
pypi
.. _glossary: ======== Glossary ======== .. glossary:: Auto-Order The evaluation of states in the order that they are defined in a SLS file. *See also*: :ref:`ordering <ordering_auto_order>`. Bootstrap A stand-alone Salt project which can download and install a Salt master and/or a Salt minion onto a host. *See also*: `salt-bootstrap <https://github.com/saltstack/salt-bootstrap>`_. Compound Matcher A combination of many target definitions that can be combined with boolean operators. *See also*: :ref:`targeting <targeting-compound>`. EAuth Shorthand for 'external authentication'. A system for calling to a system outside of Salt in order to authenticate users and determine if they are allowed to issue particular commands to Salt. *See also*: :ref:`external auth<acl-eauth>`. Environment A directory tree containing state files which can be applied to minions. *See also*: :ref:`top file<states-top-environments>`. Execution Function A Python function inside an Execution Module that may take arguments and performs specific system-management tasks. *See also*: :ref:`the list of execution modules <all-salt.modules>`. External Job Cache An external data-store that can archive information about jobs that have been run. A default returner. *See also*: :conf_master:`ext_job_cache`, :ref:`the list of returners <all-salt.returners>`. Execution Module A Python module that contains execution functions which directly perform various system-management tasks on a server. Salt ships with a number of execution modules but users can also write their own execution modules to perform specialized tasks. *See also*: :ref:`the list of execution modules <all-salt.modules>`. External Pillar A module that accepts arbitrary arguments and returns a dictionary. The dictionary is automatically added to a pillar for a minion. Event A notice emitted onto an event bus. Events are often driven by requests for actions to occur on a minion or master and the results of those actions. 
*See also*: :ref:`Salt Reactor <reactor>`. File Server A local or remote location for storing both Salt-specific files such as top files or SLS files as well as files that can be distributed to minions, such as system configuration files. *See also*: :ref:`Salt's file server <file-server>`. Grain A key-value pair which contains a fact about a system, such as its hostname, network addresses. *See also*: :ref:`targeting with grains <targeting-grains>`. Highdata The data structure in a SLS file that represents a set of state declarations. *See also*: :ref:`state layers <state-layers-high-data>`. Highstate The collection of states to be applied to a system. *See also*: :ref:`state layers <state-layers-highstate>`. Idempotent An action that ensures the system is in a well-known state regardless of the system's state before the action is applied. A corollary to this is that applying the action multiple times results in no changes to the system. State module functions should be idempotent. Some state module functions, such as :mod:`cmd.run <salt.states.cmd.run>` are not idempotent by default but can be made idempotent with the proper use of requisites such as :ref:`unless <unless-requisite>` and :ref:`onlyif <onlyif-requisite>`. For more information, *see* `wikipedia <https://en.wikipedia.org/wiki/Idempotent>`_. Jinja A templating language which allows variables and simple logic to be dynamically inserted into static text files when they are rendered. *See also*: :py:mod:`Salt's Jinja documentation <salt.renderers.jinja>`. Job The complete set of tasks to be performed by the execution of a Salt command are a single job. *See also*: :py:mod:`jobs runner <salt.runners.jobs>`. Job Cache A storage location for job results, which may then be queried by a salt runner or an external system. May be local to a salt master or stored externally. Job ID A unique identifier to represent a given :term:`job <Job>`. This is often shortened to JID. 
Low State The collection of processed states after requisites and order are evaluated. *See also*: :ref:`state layers <state-layers-low-state>`. Master A central Salt daemon from which commands can be issued to listening minions. Masterless A minion which does not require a Salt master to operate. All configuration is local. *See also*: :conf_minion:`file_client`. Master Tops A system for the master that allows hooks into external systems to generate top file data. Mine A facility to collect arbitrary data from minions and store that data on the master. This data is then available to all other minions. (Sometimes referred to as Salt Mine.) *See also*: :ref:`Salt Mine <salt-mine>`. Minion A server running a Salt minion daemon which can listen to commands from a master and perform the requested tasks. Generally, minions are servers which are to be controlled using Salt. Minion ID A globally unique identifier for a minion. *See also*: :conf_minion:`id`. Multi-Master The ability for a minion to be actively connected to multiple Salt masters at the same time in high-availability environments. Node Group A pre-defined group of minions declared in the master configuration file. *See also*: :ref:`targeting <targeting-nodegroups>`. Outputter A formatter for defining the characteristics of output data from a Salt command. *See also*: :ref:`list of outputters <all-salt.output>`. Peer Communication The ability for minions to communicate directly with other minions instead of brokering commands through the Salt master. *See also*: :ref:`peer communication <peer>`. Pillar A simple key-value store for user-defined data to be made available to a minion. Often used to store and distribute sensitive data to minions. *See also*: :ref:`Pillar <salt-pillars>`, :ref:`list of Pillar modules <all-salt.pillars>`. Proxy Minion A minion which can control devices that are unable to run a Salt minion locally, such as routers and switches. 
PyDSL A Pythonic domain-specific-language used as a Salt renderer. PyDSL can be used in cases where adding pure Python into SLS files is beneficial. *See also*: :py:mod:`PyDSL <salt.renderers.pydsl>`. Reactor An interface for listening to events and defining actions that Salt should taken upon receipt of given events. *See also*: :ref:`Reactor <reactor>`. Render Pipe Allows SLS files to be rendered by multiple renderers, with each renderer receiving the output of the previous. *See also*: :ref:`composing renderers <renderers-composing>`. Renderer Responsible for translating a given data serialization format such as YAML or JSON into a Python data structure that can be consumed by Salt. *See also*: :ref:`list of renderers <all-salt.renderers>`. Returner Allows for the results of a Salt command to be sent to a given data-store such as a database or log file for archival. *See also*: :ref:`list of returners <all-salt.returners>`. Roster A flat-file list of target hosts. (Currently only used by salt-ssh.) Runner Module A module containing a set of runner functions. *See also*: :ref:`list of runner modules <all-salt.runners>`. Runner Function A function which is called by the :command:`salt-run` command and executes on the master instead of on a minion. *See also*: :term:`Runner Module`. Salt Cloud A suite of tools used to create and deploy systems on many hosted cloud providers. *See also*: :ref:`salt-cloud <salt-cloud>`. Salt SSH A configuration management and remote orchestration system that does not require that any software besides SSH be installed on systems to be controlled. Salt Thin A subset of the normal Salt distribution that does not include any transport routines. A Salt Thin bundle can be dropped onto a host and used directly without any requirement that the host be connected to a network. Used by Salt SSH. *See also*: :py:mod:`thin runner <salt.runners.thin>`. 
Salt Virt Used to manage the creation and deployment of virtual machines onto a set of host machines. Often used to create and deploy private clouds. *See also*: :py:mod:`virt runner <salt.runners.virt>`. SLS Module Contains a set of :term:`state declarations <State Declaration>`. State Compiler Translates :term:`highdata <Highdata>` into lowdata. State Declaration A data structure which contains a unique ID and describes one or more states of a system such as ensuring that a package is installed or a user is defined. *See also*: :ref:`highstate structure <state-declaration>`. State Function A function contained inside a :term:`state module <State Module>` which can manages the application of a particular state to a system. State functions frequently call out to one or more :term:`execution modules <Execution Module>` to perform a given task. State Module A module which contains a set of state functions. *See also*: :ref:`list of state modules <all-salt.states>`. State Run The application of a set of states on a set of systems. Syndic A forwarder which can relay messages between tiered masters. **See also**: :ref:`Syndic <syndic>`. Target Minion(s) to which a given salt command will apply. *See also*: :ref:`targeting <targeting>`. Top File Determines which SLS files should be applied to various systems and organizes those groups of systems into environments. *See also*: :ref:`top file <states-top>`, :ref:`list of master top modules <all-salt.tops>`. __virtual__ A function in a module that is called on module load to determine whether or not the module should be available to a minion. This function commonly contains logic to determine if all requirements for a module are available, such as external libraries. Worker A master process which can send notices and receive replies from minions. *See also*: :conf_master:`worker_threads`.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/glossary.rst
0.930561
0.730972
glossary.rst
pypi
=============================== SLS Template Variable Reference =============================== .. warning:: In the 3005 release ``sls_path``, ``tplfile``, and ``tpldir`` have had some significant improvements which have the potential to break states that rely on old and broken functionality. The template engines available to sls files and file templates come loaded with a number of context variables. These variables contain information and functions to assist in the generation of templates. See each variable below for its availability -- not all variables are available in all templating contexts. Salt ==== The `salt` variable is available to abstract the salt library functions. This variable is a python dictionary containing all of the functions available to the running salt minion. It is available in all salt templates. .. code-block:: jinja {% for file in salt['cmd.run']('ls -1 /opt/to_remove').splitlines() %} /opt/to_remove/{{ file }}: file.absent {% endfor %} Opts ==== The `opts` variable abstracts the contents of the minion's configuration file directly to the template. The `opts` variable is a dictionary. It is available in all templates. .. code-block:: jinja {{ opts['cachedir'] }} The ``config.get`` function also searches for values in the `opts` dictionary. Pillar ====== The `pillar` dictionary can be referenced directly, and is available in all templates: .. code-block:: jinja {{ pillar['key'] }} Using the ``pillar.get`` function via the `salt` variable is generally recommended since a default can be safely set in the event that the value is not available in pillar and dictionaries can be traversed directly: .. code-block:: jinja {{ salt['pillar.get']('key', 'failover_value') }} {{ salt['pillar.get']('stuff:more:deeper') }} Grains ====== The `grains` dictionary makes the minion's grains directly available, and is available in all templates: .. 
code-block:: jinja {{ grains['os'] }} The ``grains.get`` function can be used to traverse deeper grains and set defaults: .. code-block:: jinja {{ salt['grains.get']('os') }} saltenv ======= The `saltenv` variable is available only in sls files when gathering the sls from an environment. .. code-block:: jinja {{ saltenv }} SLS Only Variables ================== The following are only available when processing sls files. If you need these in other templates, you can usually pass them in as template context. sls --- The `sls` variable contains the sls reference value, and is only available in the actual SLS file (not in any files referenced in that SLS). The sls reference value is the value used to include the sls in top files or via the include option. .. code-block:: jinja {{ sls }} slspath ------- The `slspath` variable contains the path to the directory of the current sls file. The value of `slspath` in files referenced in the current sls depends on the reference method. For jinja includes `slspath` is the path to the current directory of the file. For salt includes `slspath` is the path to the directory of the included file. If current sls file is in root of the file roots, this will return "" .. code-block:: jinja {{ slspath }} sls_path -------- A version of `slspath` with underscores as path separators instead of slashes. So, if `slspath` is `path/to/state` then `sls_path` is `path_to_state` .. code-block:: jinja {{ sls_path }} slsdotpath ---------- A version of `slspath` with dots as path separators instead of slashes. So, if `slspath` is `path/to/state` then `slsdotpath` is `path.to.state`. This is the same as `sls` if `sls` points to a directory instead of a file. .. code-block:: jinja {{ slsdotpath }} slscolonpath ------------ A version of `slspath` with colons (`:`) as path separators instead of slashes. So, if `slspath` is `path/to/state` then `slscolonpath` is `path:to:state`. .. 
code-block:: jinja {{ slscolonpath }} tplpath ------- Full path to sls template file being processed on local disk. This is usually pointing to a copy of the sls file in a cache directory. This will be in OS specific format (Windows vs POSIX). (It is probably best not to use this.) .. code-block:: jinja {{ tplpath }} tplfile ------- Relative path to exact sls template file being processed relative to file roots. .. code-block:: jinja {{ tplfile }} tpldir ------ Directory, relative to file roots, of the current sls file. If current sls file is in root of the file roots, this will return ".". This is usually identical to `slspath` except in case of root-level sls, where this will return a "`.`". A common use case for this variable is to generate relative salt urls like: .. code-block:: jinja my-file: file.managed: source: salt://{{ tpldir }}/files/my-template tpldot ------ A version of `tpldir` with dots as path separators instead of slashes. So, if `tpldir` is `path/to/state` then `tpldot` is `path.to.state`. NOTE: if `tpldir` is `.`, this will be set to "" .. code-block:: jinja {{ tpldot }}
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/ref/states/vars.rst
0.814422
0.690331
vars.rst
pypi
.. _compiler-ordering: ===================================== Understanding State Compiler Ordering ===================================== .. note:: This tutorial is an intermediate level tutorial. Some basic understanding of the state system and writing Salt Formulas is assumed. Salt's state system is built to deliver all of the power of configuration management systems without sacrificing simplicity. This tutorial is made to help users understand in detail just how the order is defined for state executions in Salt. This tutorial is written to represent the behavior of Salt as of version 0.17.0. Compiler Basics =============== To understand ordering in depth some very basic knowledge about the state compiler is very helpful. No need to worry though, this is very high level! High Data and Low Data ---------------------- When defining Salt Formulas in YAML the data that is being represented is referred to by the compiler as High Data. When the data is initially loaded into the compiler it is a single large python dictionary, this dictionary can be viewed raw by running: .. code-block:: bash salt '*' state.show_highstate This "High Data" structure is then compiled down to "Low Data". The Low Data is what is matched up to create individual executions in Salt's configuration management system. The low data is an ordered list of single state calls to execute. Once the low data is compiled the evaluation order can be seen. The low data can be viewed by running: .. code-block:: bash salt '*' state.show_lowstate .. note:: The state execution module contains MANY functions for evaluating the state system and is well worth a read! These routines can be very useful when debugging states or to help deepen one's understanding of Salt's state system. As an example, a state written thusly: .. 
code-block:: yaml apache: pkg.installed: - name: httpd service.running: - name: httpd - watch: - file: apache_conf - pkg: apache apache_conf: file.managed: - name: /etc/httpd/conf.d/httpd.conf - source: salt://apache/httpd.conf Will have High Data which looks like this represented in json: .. code-block:: json { "apache": { "pkg": [ { "name": "httpd" }, "installed", { "order": 10000 } ], "service": [ { "name": "httpd" }, { "watch": [ { "file": "apache_conf" }, { "pkg": "apache" } ] }, "running", { "order": 10001 } ], "__sls__": "blah", "__env__": "base" }, "apache_conf": { "file": [ { "name": "/etc/httpd/conf.d/httpd.conf" }, { "source": "salt://apache/httpd.conf" }, "managed", { "order": 10002 } ], "__sls__": "blah", "__env__": "base" } } The subsequent Low Data will look like this: .. code-block:: json [ { "name": "httpd", "state": "pkg", "__id__": "apache", "fun": "installed", "__env__": "base", "__sls__": "blah", "order": 10000 }, { "name": "httpd", "watch": [ { "file": "apache_conf" }, { "pkg": "apache" } ], "state": "service", "__id__": "apache", "fun": "running", "__env__": "base", "__sls__": "blah", "order": 10001 }, { "name": "/etc/httpd/conf.d/httpd.conf", "source": "salt://apache/httpd.conf", "state": "file", "__id__": "apache_conf", "fun": "managed", "__env__": "base", "__sls__": "blah", "order": 10002 } ] This tutorial discusses the Low Data evaluation and the state runtime. Ordering Layers =============== Salt defines 2 order interfaces which are evaluated in the state runtime and defines these orders in a number of passes. Definition Order ---------------- .. note:: The Definition Order system can be disabled by turning the option ``state_auto_order`` to ``False`` in the master configuration file. The top level of ordering is the `Definition Order`. The `Definition Order` is the order in which states are defined in salt formulas. 
This is very straightforward on basic states which do not contain ``include`` statements or a ``top`` file, as the states are just ordered from the top of the file, but the include system starts to bring in some simple rules for how the `Definition Order` is defined. Looking back at the "Low Data" and "High Data" shown above, the order key has been transparently added to the data to enable the `Definition Order`. The Include Statement ~~~~~~~~~~~~~~~~~~~~~ Basically, if there is an include statement in a formula, then the formulas which are included will be run BEFORE the contents of the formula which is including them. Also, the include statement is a list, so they will be loaded in the order in which they are included. In the following case: ``foo.sls`` .. code-block:: yaml include: - bar - baz ``bar.sls`` .. code-block:: yaml include: - quo ``baz.sls`` .. code-block:: yaml include: - qux In the above case if ``state.apply foo`` were called then the formulas will be loaded in the following order: 1. quo 2. bar 3. qux 4. baz 5. foo The `order` Flag ---------------- The `Definition Order` happens transparently in the background, but the ordering can be explicitly overridden using the ``order`` flag in states: .. code-block:: yaml apache: pkg.installed: - name: httpd - order: 1 This order flag will override the definition order, which makes it very simple to create states that are always executed first, last or in specific stages, a great example is defining a number of package repositories that need to be set up before anything else, or final checks that need to be run at the end of a state run by using ``order: last`` or ``order: -1``. When the order flag is explicitly set the `Definition Order` system will omit setting an order for that state and directly use the order flag defined. Lexicographical Fall-back ------------------------- Salt states were written to ALWAYS execute in the same order. 
Before the introduction of `Definition Order` in version 0.17.0 everything was ordered lexicographically according to the name of the state, then function then id. This is the way Salt has always ensured that states always run in the same order regardless of where they are deployed, the addition of the `Definition Order` method merely makes this finite ordering easier to follow. The lexicographical ordering is still applied but it only has any effect when two order statements collide. This means that if multiple states are assigned the same order number that they will fall back to lexicographical ordering to ensure that every execution still happens in a finite order. .. note:: If running with ``state_auto_order: False`` the ``order`` key is not set automatically, since the Lexicographical order can be derived from other keys. Requisite Ordering ------------------ Salt states are fully declarative, in that they are written to declare the state in which a system should be. This means that components can require that other components have been set up successfully. Unlike the other ordering systems, the `Requisite` system in Salt is evaluated at runtime. The requisite system is also built to ensure that the ordering of execution never changes, but is always the same for a given set of states. This is accomplished by using a runtime that processes states in a completely predictable order instead of using an event loop based system like other declarative configuration management systems. Runtime Requisite Evaluation ---------------------------- The requisite system is evaluated as the components are found, and the requisites are always evaluated in the same order. This explanation will be followed by an example, as the raw explanation may be a little dizzying at first as it creates a linear dependency evaluation sequence. The "Low Data" is an ordered list of dictionaries, the state runtime evaluates each dictionary in the order in which they are arranged in the list. 
When evaluating a single dictionary it is checked for requisites, requisites are evaluated in order, ``require`` then ``watch`` then ``prereq``. .. note:: If using requisite in statements like require_in and watch_in these will be compiled down to require and watch statements before runtime evaluation. Each requisite contains an ordered list of requisites, these requisites are looked up in the list of dictionaries and then executed. Once all requisites have been evaluated and executed then the requiring state can safely be run (or not run if requisites have not been met). This means that the requisites are always evaluated in the same order, again ensuring one of the core design principles of Salt's State system to ensure that execution is always finite is intact. Simple Runtime Evaluation Example --------------------------------- Given the above "Low Data" the states will be evaluated in the following order: 1. The pkg.installed is executed ensuring that the apache package is installed, it contains no requisites and is therefore the first defined state to execute. 2. The service.running state is evaluated but NOT executed, a watch requisite is found, therefore they are read in order, the runtime first checks for the file, sees that it has not been executed and calls for the file state to be evaluated. 3. The file state is evaluated AND executed, since it, like the pkg state does not contain any requisites. 4. The evaluation of the service state continues, it next checks the pkg requisite and sees that it is met, with all requisites met the service state is now executed. Best Practice ------------- The best practice in Salt is to choose a method and stick with it, official states are written using requisites for all associations since requisites create clean, traceable dependency trails and make for the most portable formulas. 
To accomplish something similar to how classical imperative systems function, all requisites can be omitted and the ``failhard`` option then set to ``True`` in the master configuration, this will stop all state runs at the first instance of a failure. In the end, using requisites creates very tight and fine grained states; not using requisites makes for full sequence runs which, while slightly easier to write, give much less control over the executions.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/ref/states/compiler_ordering.rst
0.904661
0.669924
compiler_ordering.rst
pypi
.. _state-system-reference: ====================== State System Reference ====================== Salt offers an interface to manage the configuration or "state" of the Salt minions. This interface is a fully capable mechanism used to enforce the state of systems from a central manager. .. toctree:: :glob: * State Management ================ State management, also frequently called Software Configuration Management (SCM), is a program that puts and keeps a system into a predetermined state. It installs software packages, starts or restarts services or puts configuration files in place and watches them for changes. Having a state management system in place allows one to easily and reliably configure and manage a few servers or a few thousand servers. It allows configurations to be kept under version control. Salt States is an extension of the Salt Modules that we discussed in the previous :ref:`remote execution <tutorial-remote-execution-modules>` tutorial. Instead of calling one-off executions the state of a system can be easily defined and then enforced. Understanding the Salt State System Components ============================================== The Salt state system is comprised of a number of components. As a user, an understanding of the SLS and renderer systems are needed. But as a developer, an understanding of Salt states and how to write the states is needed as well. .. note:: States are compiled and executed only on minions that have been targeted. To execute functions directly on masters, see :ref:`runners <runners>`. Salt SLS System --------------- The primary system used by the Salt state system is the SLS system. SLS stands for **S**\ a\ **L**\ t **S**\ tate. The Salt States are files which contain the information about how to configure Salt minions. The states are laid out in a directory tree and can be written in many different formats. 
The contents of the files and the way they are laid out is intended to be as simple as possible while allowing for maximum flexibility. The files are laid out in states and contain information about how the minion needs to be configured. SLS File Layout ``````````````` SLS files are laid out in the Salt file server. A simple layout can look like this: .. code-block:: yaml top.sls ssh.sls sshd_config users/init.sls users/admin.sls salt/master.sls web/init.sls The ``top.sls`` file is a key component. The ``top.sls`` file is used to determine which SLS files should be applied to which minions. The rest of the files with the ``.sls`` extension in the above example are state files. Files without a ``.sls`` extension are seen by the Salt master as files that can be downloaded to a Salt minion. States are translated into dot notation. For example, the ``ssh.sls`` file is seen as the ssh state and the ``users/admin.sls`` file is seen as the users.admin state. Files named ``init.sls`` are translated to be the state name of the parent directory, so the ``web/init.sls`` file translates to the ``web`` state. In Salt, everything is a file; there is no "magic translation" of files and file types. This means that a state file can be distributed to minions just like a plain text or binary file. SLS Files ````````` The Salt state files are simple sets of data. Since SLS files are just data they can be represented in a number of different ways. The default format is YAML generated from a Jinja template. This allows for the state files to have all the language constructs of Python and the simplicity of YAML. State files can then be complicated Jinja templates that translate down to YAML, or just plain and simple YAML files. The State files are simply common data structures such as dictionaries and lists, constructed using a templating language such as YAML. Here is an example of a Salt State: .. 
code-block:: yaml vim: pkg.installed: [] salt: pkg.latest: - name: salt service.running: - names: - salt-master - salt-minion - require: - pkg: salt - watch: - file: /etc/salt/minion /etc/salt/minion: file.managed: - source: salt://salt/minion - user: root - group: root - mode: 644 - require: - pkg: salt This short stanza will ensure that vim is installed, Salt is installed and up to date, the salt-master and salt-minion daemons are running and the Salt minion configuration file is in place. It will also ensure everything is deployed in the right order and that the Salt services are restarted when the watched file is updated. The Top File ```````````` The top file controls the mapping between minions and the states which should be applied to them. The top file specifies which minions should have which SLS files applied and which environments they should draw those SLS files from. The top file works by specifying environments on the top-level. Each environment contains :ref:`target expressions <targeting>` to match minions. Finally, each target expression contains a list of Salt states to apply to matching minions: .. code-block:: yaml base: '*': - salt - users - users.admin 'saltmaster.*': - match: pcre - salt.master The above example uses the base environment which is built into the default Salt setup. The base environment has target expressions. The first one matches all minions, and the SLS files below it apply to all minions. The second expression is a regular expression that will match all minions with an ID matching ``saltmaster.*`` and specifies that for those minions, the salt.master state should be applied. .. important:: Since version 2014.7.0, the default matcher (when one is not explicitly defined as in the second expression in the above example) is the :ref:`compound <targeting-compound>` matcher. Since this matcher parses individual words in the expression, minion IDs containing spaces will not match properly using this matcher. 
Therefore, if your target expression is designed to match a minion ID containing spaces, it will be necessary to specify a different match type (such as ``glob``). For example: .. code-block:: yaml base: 'test minion': - match: glob - foo - bar - baz A full table of match types available in the top file can be found :ref:`here <top-file-match-types>`. .. _reloading-modules: Reloading Modules ----------------- Some Salt states require that specific packages be installed in order for the module to load. As an example the :mod:`pip <salt.states.pip_state>` state module requires the `pip`_ package for proper name and version parsing. In most of the common cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require just that package which was installed. On some edge-cases salt might need to be told to reload the modules. Consider the following state file which we'll call ``pep8.sls``: .. code-block:: yaml python-pip: cmd.run: - name: | easy_install --script-dir=/usr/bin -U pip - cwd: / pep8: pip.installed: - require: - cmd: python-pip The above example installs `pip`_ using ``easy_install`` from `setuptools`_ and installs `pep8`_ using :mod:`pip <salt.states.pip_state>`, which, as told earlier, requires `pip`_ to be installed system-wide. Let's execute this state: .. code-block:: bash salt-call state.apply pep8 The execution output would be something like: .. code-block:: text ---------- State: - pip Name: pep8 Function: installed Result: False Comment: State pip.installed found in sls pep8 is unavailable Changes: Summary ------------ Succeeded: 1 Failed: 1 ------------ Total: 2 If we executed the state again the output would be: .. 
code-block:: text ---------- State: - pip Name: pep8 Function: installed Result: True Comment: Package was successfully installed Changes: pep8==1.4.6: Installed Summary ------------ Succeeded: 2 Failed: 0 ------------ Total: 2 Since we installed `pip`_ using :mod:`cmd <salt.states.cmd>`, Salt has no way to know that a system-wide package was installed. On the second execution, since the required `pip`_ package was installed, the state executed correctly. .. note:: Salt does not reload modules on every state run because doing so would greatly slow down state execution. So how do we solve this *edge-case*? ``reload_modules``! ``reload_modules`` is a boolean option recognized by salt on **all** available states which forces salt to reload its modules once a given state finishes. The modified state file would now be: .. code-block:: yaml python-pip: cmd.run: - name: | easy_install --script-dir=/usr/bin -U pip - cwd: / - reload_modules: true pep8: pip.installed: - require: - cmd: python-pip Let's run it, once: .. code-block:: bash salt-call state.apply pep8 The output is: .. code-block:: text ---------- State: - pip Name: pep8 Function: installed Result: True Comment: Package was successfully installed Changes: pep8==1.4.6: Installed Summary ------------ Succeeded: 2 Failed: 0 ------------ Total: 2 .. _`pip`: https://pypi.org/project/pip/ .. _`pep8`: https://pypi.org/project/pep8/ .. _`setuptools`: https://pypi.org/project/setuptools/ .. _`runners`: /ref/runners
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/ref/states/index.rst
0.778144
0.878419
index.rst
pypi
.. _requisites: =========================================== Requisites and Other Global State Arguments =========================================== Requisites ========== The Salt requisite system is used to create relationships between states. This provides a method to easily define inter-dependencies between states. These dependencies are expressed by declaring the relationships using state names and IDs or names. The generalized form of a requisite target is ``<state name>: <ID or name>``. The specific form is defined as a :ref:`Requisite Reference <requisite-reference>`. A common use-case for requisites is ensuring a package has been installed before trying to ensure the service is running. In the following example, Salt will ensure nginx has been installed before trying to manage the service. If the package could not be installed, Salt will not try to manage the service. .. code-block:: yaml nginx: pkg.installed: - name: nginx-light service.running: - enable: True - require: - pkg: nginx Without the requisite defined, salt would attempt to install the package and then attempt to manage the service even if the installation failed. These requisites always form dependencies in a predictable single direction. Each requisite has an alternate :ref:`<requisite>_in <requisites-in>` form that can be used to establish a "reverse" dependency--useful in for loops. In the end, a single dependency map is created and everything is executed in a finite and predictable order. .. _requisites-matching: Requisite matching ------------------ Requisites typically need two pieces of information for matching: * The state module name (e.g. ``pkg`` or ``service``) * The state identifier (e.g. ``nginx`` or ``/etc/nginx/nginx.conf``) .. code-block:: yaml nginx: pkg.installed: [] file.managed: - name: /etc/nginx/nginx.conf service.running: - require: - pkg: nginx - file: /etc/nginx/nginx.conf Glob matching in requisites ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
versionadded:: 0.9.8 Glob matching is supported in requisites. This is mostly useful for file changes. In the example below, a change in ``/etc/apache2/httpd.conf`` or ``/etc/apache2/sites-available/default.conf`` will reload/restart the service: .. code-block:: yaml apache2: service.running: - watch: - file: /etc/apache2/* Omitting state module in requisites ----------------------------------- .. versionadded:: 2016.3.0 In version 2016.3.0, the state module name was made optional. If the state module is omitted, all states matching the ID will be required, regardless of which module they are using. .. code-block:: yaml - require: - vim State target matching ~~~~~~~~~~~~~~~~~~~~~ In order to understand how state targets are matched, it is helpful to know :ref:`how the state compiler is working <compiler-ordering>`. Consider the following example: .. code-block:: yaml Deploy server package: file.managed: - name: /usr/local/share/myapp.tar.xz - source: salt://myapp.tar.xz Extract server package: archive.extracted: - name: /usr/local/share/myapp - source: /usr/local/share/myapp.tar.xz - archive_format: tar - onchanges: - file: Deploy server package The first formula is converted to a dictionary which looks as follows (represented as YAML, some properties omitted for simplicity) as `High Data`: .. code-block:: yaml Deploy server package: file: - managed - name: /usr/local/share/myapp.tar.xz - source: salt://myapp.tar.xz The ``file.managed`` format used in the formula is essentially syntactic sugar: at the end, the target is ``file``, which is used in the ``Extract server package`` state above. Identifier matching ~~~~~~~~~~~~~~~~~~~ Requisites match on both the ID Declaration and the ``name`` parameter. This means that, in the "Deploy server package" example above, a ``require`` requisite would match with ``Deploy server package`` *or* ``/usr/local/share/myapp.tar.xz``, so either of the following versions for "Extract server package" is correct: .. 
code-block:: yaml # (Archive arguments omitted for simplicity) # Match by ID declaration Extract server package: archive.extracted: - onchanges: - file: Deploy server package # Match by name parameter Extract server package: archive.extracted: - onchanges: - file: /usr/local/share/myapp.tar.xz Omitting state module in requisites ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 2016.3.0 In version 2016.3.0, the state module name was made optional. If the state module is omitted, all states matching the ID will be required, regardless of which module they are using. .. code-block:: yaml - require: - vim Requisites Types ---------------- All requisite types have a corresponding :ref:`<requisite>_in <requisites-in>` form: * :ref:`require <requisites-require>`: Requires that a list of target states succeed before execution * :ref:`onchanges <requisites-onchanges>`: Execute if any target states succeed with changes * :ref:`watch <requisites-watch>`: Similar to ``onchanges``; modifies state behavior using ``mod_watch`` * :ref:`listen <requisites-listen>`: Similar to ``onchanges``; delays execution to end of state run using ``mod_watch`` * :ref:`prereq <requisites-prereq>`: Execute prior to target state if target state expects to produce changes * :ref:`onfail <requisites-onfail>`: Execute only if a target state fails * :ref:`use <requisites-use>`: Copy arguments from another state Several requisite types have a corresponding :ref:`requisite_any <requisites-any>` form: * ``require_any`` * ``watch_any`` * ``onchanges_any`` * ``onfail_any`` Lastly, onfail has one special ``onfail_all`` form to account for when `AND` logic is desired instead of the default `OR` logic of onfail/onfail_any (which are equivalent). All requisites define specific relationships and always work with the dependency logic defined :ref:`above <requisites-matching>`. .. 
_requisites-require: require ~~~~~~~ The use of ``require`` builds a dependency that prevents a state from executing until all required states execute successfully. If any required state fails, then the state will fail due to requisites. In the following example, the ``service`` state will not be checked unless both ``file`` states execute without failure. .. code-block:: yaml nginx: service.running: - require: - file: /etc/nginx/nginx.conf - file: /etc/nginx/conf.d/ssl.conf Require SLS File ++++++++++++++++ As of Salt 0.16.0, it is possible to require an entire sls file. Do this by first including the sls file and then setting a state to ``require`` the included sls file: .. code-block:: yaml include: - foo bar: pkg.installed: - require: - sls: foo This will add a ``require`` to all of the state declarations found in the given sls file. This means that ``bar`` will ``require`` every state within ``foo``. This makes it very easy to batch large groups of states easily in any requisite statement. .. _requisites-onchanges: onchanges ~~~~~~~~~ .. versionadded:: 2014.7.0 The ``onchanges`` requisite makes a state only apply if the required states generate changes, and if the watched state's "result" is ``True`` (does not fail). This can be a useful way to execute a post hook after changing aspects of a system. If a state has multiple ``onchanges`` requisites then the state will trigger if any of the watched states changes. .. code-block:: yaml myservice: file.managed: - name: /etc/myservice/myservice.conf - source: salt://myservice/files/myservice.conf cmd.run: - name: /usr/local/sbin/run-build - onchanges: - file: /etc/myservice/myservice.conf In the example above, ``cmd.run`` will run only if there are changes in the ``file.managed`` state. An easy mistake to make is using ``onchanges_in`` when ``onchanges`` is the correct choice, as seen in this next example. .. 
code-block:: yaml myservice: file.managed: - name: /etc/myservice/myservice.conf - source: salt://myservice/files/myservice.conf cmd.run: - name: /usr/local/sbin/run-build - onchanges_in: # <-- broken logic - file: /etc/myservice/myservice.conf This will set up a requisite relationship in which the ``cmd.run`` state always executes, and the ``file.managed`` state only executes if the ``cmd.run`` state has changes (which it always will, since the ``cmd.run`` state includes the command results as changes). It may semantically seem like the ``cmd.run`` state should only run when there are changes in the file state, but remember that requisite relationships involve one state watching another state, and a :ref:`requisite_in <requisites-onchanges-in>` does the opposite: it forces the specified state to watch the state with the ``requisite_in``. .. _requisites-watch: .. note:: An ``onchanges`` requisite has no effect on SLS requisites (monitoring for changes in an included SLS). Only the individual state IDs from an included SLS can be monitored. watch ~~~~~ A ``watch`` requisite is used to add additional behavior when there are changes in other states. This is done using the ``mod_watch`` function available from the execution module and will execute any time a watched state changes. .. note:: If a state should only execute when another state has changes, and otherwise do nothing, the ``onchanges`` requisite should be used instead of ``watch``. ``watch`` is designed to add *additional* behavior when there are changes, but otherwise the state executes normally. .. note:: A ``watch`` requisite has no effect on SLS requisites (watching for changes in an included SLS). Only the individual state IDs from an included SLS can be watched. A good example of using ``watch`` is with a :mod:`service.running <salt.states.service.running>` state. 
When a service watches a state, then the service is reloaded/restarted when the watched state changes, in addition to Salt ensuring that the service is running. .. code-block:: yaml ntpd: service.running: - watch: - file: /etc/ntp.conf file.managed: - name: /etc/ntp.conf - source: salt://ntp/files/ntp.conf Another useful example of ``watch`` is using salt to ensure a configuration file is present and in a correct state, ensure the service is running, and trigger ``service nginx reload`` instead of ``service nginx restart`` in order to avoid dropping any connections. .. code-block:: yaml nginx: service.running: - reload: True - watch: - file: nginx file.managed: - name: /etc/nginx/conf.d/tls-settings.conf - source: salt://nginx/files/tls-settings.conf .. note:: Not all state modules contain ``mod_watch``. If ``mod_watch`` is absent from the watching state module, the ``watch`` requisite behaves exactly like a ``require`` requisite. The state containing the ``watch`` requisite is defined as the watching state. The state specified in the ``watch`` statement is defined as the watched state. When the watched state executes, it will return a dictionary containing a key named "changes". Here are two examples of state return dictionaries, shown in json for clarity: .. code-block:: json { "local": { "file_|-/tmp/foo_|-/tmp/foo_|-directory": { "comment": "Directory /tmp/foo updated", "__run_num__": 0, "changes": { "user": "bar" }, "name": "/tmp/foo", "result": true } } } .. code-block:: json { "local": { "pkgrepo_|-salt-minion_|-salt-minion_|-managed": { "comment": "Package repo 'salt-minion' already configured", "__run_num__": 0, "changes": {}, "name": "salt-minion", "result": true } } } If the "result" of the watched state is ``True``, the watching state *will execute normally*, and if it is ``False``, the watching state will never run. This part of ``watch`` mirrors the functionality of the ``require`` requisite. 
If the "result" of the watched state is ``True`` *and* the "changes" key contains a populated dictionary (changes occurred in the watched state), then the ``watch`` requisite can add additional behavior. This additional behavior is defined by the ``mod_watch`` function within the watching state module. If the ``mod_watch`` function exists in the watching state module, it will be called *in addition to* the normal watching state. The return data from the ``mod_watch`` function is what will be returned to the master in this case; the return data from the main watching function is discarded. If the "changes" key contains an empty dictionary, the ``watch`` requisite acts exactly like the ``require`` requisite (the watching state will execute if "result" is ``True``, and fail if "result" is ``False`` in the watched state). .. note:: If the watching state ``changes`` key contains values, then ``mod_watch`` will not be called. If you're using ``watch`` or ``watch_in`` then it's a good idea to have a state that only enforces one attribute - such as splitting out ``service.running`` into its own state and have ``service.enabled`` in another. One common source of confusion is expecting ``mod_watch`` to be called for every necessary change. You might be tempted to write something like this: .. code-block:: yaml httpd: service.running: - enable: True - watch: - file: httpd-config httpd-config: file.managed: - name: /etc/httpd/conf/httpd.conf - source: salt://httpd/files/apache.conf If your service is already running but not enabled, you might expect that Salt will be able to tell that since the config file changed your service needs to be restarted. This is not the case. Because the service needs to be enabled, that change will be made and ``mod_watch`` will never be triggered. In this case, changes to your ``apache.conf`` will fail to be loaded. 
If you want to ensure that your service always reloads, the correct way to handle this is to either ensure that your service is not running before applying your state, or simply make sure that ``service.running`` is in a state on its own:
The best way to define how ``prereq`` operates is displayed in the following practical example: When a service should be shut down because underlying code is going to change, the service should be off-line while the update occurs. In this example, ``graceful-down`` is the pre-requiring state and ``site-code`` is the pre-required state. .. code-block:: yaml graceful-down: cmd.run: - name: service apache graceful - prereq: - file: site-code site-code: file.recurse: - name: /opt/site_code - source: salt://site/code In this case, the apache server will only be shut down if the site-code state expects to deploy fresh code via the file.recurse call. The site-code deployment will only be executed if the graceful-down run completes successfully. When a ``prereq`` requisite is evaluated, the pre-required state reports if it expects to have any changes. It does this by running the pre-required single state as a test-run by enabling ``test=True``. This test-run will return a dictionary containing a key named "changes". (See the ``watch`` section above for examples of "changes" dictionaries.) If the "changes" key contains a populated dictionary, it means that the pre-required state expects changes to occur when the state is actually executed, as opposed to the test-run. The pre-requiring state will now run. If the pre-requiring state executes successfully, the pre-required state will then execute. If the pre-requiring state fails, the pre-required state will not execute. If the "changes" key contains an empty dictionary, this means that changes are not expected by the pre-required state. Neither the pre-required state nor the pre-requiring state will run. .. _requisites-onfail: onfail ~~~~~~ .. versionadded:: 2014.7.0 The ``onfail`` requisite allows for reactions to happen strictly as a response to the failure of another state. This can be used in a number of ways, such as sending a notification or attempting an alternate task or thread of tasks when an important state fails. 
The ``onfail`` requisite is applied in the same way as ``require`` and ``watch``: .. code-block:: yaml primary_mount: mount.mounted: - name: /mnt/share - device: 10.0.0.45:/share - fstype: nfs backup_mount: mount.mounted: - name: /mnt/share - device: 192.168.40.34:/share - fstype: nfs - onfail: - mount: primary_mount .. code-block:: yaml build_site: cmd.run: - name: /srv/web/app/build_site notify-build_failure: hipchat.send_message: - room_id: 123456 - message: "Building website fail on {{ salt.grains.get('id') }}" The default behavior of the ``onfail`` when multiple requisites are listed is the opposite of other requisites in the salt state engine, it acts by default like ``any()`` instead of ``all()``. This means that when you list multiple onfail requisites on a state, if *any* fail the requisite will be satisfied. If you instead need *all* logic to be applied, you can use ``onfail_all`` form: .. code-block:: yaml test_site_a: cmd.run: - name: ping -c1 10.0.0.1 test_site_b: cmd.run: - name: ping -c1 10.0.0.2 notify_site_down: hipchat.send_message: - room_id: 123456 - message: "Both primary and backup sites are down!" - onfail_all: - cmd: test_site_a - cmd: test_site_b In this contrived example `notify_site_down` will run when both 10.0.0.1 and 10.0.0.2 fail to respond to ping. .. note:: Setting failhard (:ref:`globally <global-failhard>` or in :ref:`the failing state <state-level-failhard>`) to ``True`` will cause ``onfail``, ``onfail_in`` and ``onfail_any`` requisites to be ignored. If you want to combine a global failhard set to True with ``onfail``, ``onfail_in`` or ``onfail_any``, you will have to explicitly set failhard to ``False`` (overriding the global setting) in the state that could fail. .. note:: Beginning in the ``2016.11.0`` release of Salt, ``onfail`` uses OR logic for multiple listed ``onfail`` requisites. Prior to the ``2016.11.0`` release, ``onfail`` used AND logic. See `Issue #22370`_ for more information. 
Beginning in the ``Neon`` release of Salt, a new ``onfail_all`` requisite form is available if AND logic is desired. .. _Issue #22370: https://github.com/saltstack/salt/issues/22370 .. _requisites-use: use ~~~ The ``use`` requisite is used to inherit the arguments passed in another id declaration. This is useful when many files need to have the same defaults. .. code-block:: yaml /etc/foo.conf: file.managed: - source: salt://foo.conf - template: jinja - mkdirs: True - user: apache - group: apache - mode: 755 /etc/bar.conf: file.managed: - source: salt://bar.conf - use: - file: /etc/foo.conf The ``use`` statement was developed primarily for the networking states but can be used on any states in Salt. This makes sense for the networking state because it can define a long list of options that need to be applied to multiple network interfaces. The ``use`` statement does not inherit the requisites arguments of the targeted state. This means also a chain of ``use`` requisites would not inherit inherited options. .. _requisites-in: .. _requisites-require-in: .. _requisites-watch-in: .. _requisites-onchanges-in: The _in version of requisites ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Direct requisites form a dependency in a single direction. This makes it possible for Salt to detect cyclical dependencies and helps prevent faulty logic. In some cases, often in loops, it is desirable to establish a dependency in the opposite direction. All direct requisites have an ``_in`` counterpart that behaves the same but forms the dependency in the opposite direction. The following sls examples will produce the exact same dependency mapping. .. code-block:: yaml httpd: pkg.installed: [] service.running: - require: - pkg: httpd .. code-block:: yaml httpd: pkg.installed: - require_in: - service: httpd service.running: [] In the following example, Salt will not try to manage the nginx service or any configuration files unless the nginx package is installed because of the ``pkg: nginx`` requisite. .. 
code-block:: yaml nginx: pkg.installed: [] service.running: - enable: True - reload: True - require: - pkg: nginx php.sls .. code-block:: yaml include: - http php: pkg.installed: - require_in: - service: httpd mod_python.sls .. code-block:: yaml include: - http mod_python: pkg.installed: - require_in: - service: httpd Now the httpd server will only start if both php and mod_python are first verified to be installed. Thus allowing for a requisite to be defined "after the fact". .. code-block:: sls {% for cfile in salt.pillar.get('nginx:config_files') %} /etc/nginx/conf.d/{{ cfile }}: file.managed: - source: salt://nginx/configs/{{ cfile }} - require: - pkg: nginx - listen_in: - service: nginx {% endfor %} In this scenario, ``listen_in`` is a better choice than ``require_in`` because the ``listen`` requisite will trigger ``mod_watch`` behavior which will wait until the end of state execution and then reload the service. .. _requisites-any: .. _requisites-onchanges_any: .. _requisites-require_any: .. _requisites-onfail_any: The _any version of requisites ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 2018.3.0 Some requisites have an ``_any`` counterpart that changes the requisite behavior from ``all()`` to ``any()``. .. code-block:: yaml A: cmd.run: - name: echo A - require_any: - cmd: B - cmd: C B: cmd.run: - name: echo B C: cmd.run: - name: /bin/false In this example `A` will run because at least one of the requirements specified, `B` or `C`, will succeed. .. 
code-block:: yaml myservice: pkg.installed /etc/myservice/myservice.conf: file.managed: - source: salt://myservice/files/myservice.conf /etc/yourservice/yourservice.conf: file.managed: - source: salt://yourservice/files/yourservice.conf /usr/local/sbin/myservice/post-changes-hook.sh cmd.run: - onchanges_any: - file: /etc/myservice/myservice.conf - file: /etc/your_service/yourservice.conf - require: - pkg: myservice In this example, `cmd.run` would be run only if either of the `file.managed` states generated changes and at least one of the watched state's "result" is ``True``. .. _requisites-fire-event: Altering States --------------- The state altering system is used to make sure that states are evaluated exactly as the user expects. It can be used to double check that a state preformed exactly how it was expected to, or to make 100% sure that a state only runs under certain conditions. The use of unless or onlyif options help make states even more stateful. The ``check_cmd`` option helps ensure that the result of a state is evaluated correctly. reload ~~~~~~ ``reload_modules`` is a boolean option that forces salt to reload its modules after a state finishes. ``reload_pillar`` and ``reload_grains`` can also be set. See :ref:`Reloading Modules <reloading-modules>`. .. code-block:: yaml grains_refresh: module.run: - name: saltutil.refresh_grains - reload_grains: true grains_read: module.run: - name: grains.items .. _unless-requisite: unless ~~~~~~ .. versionadded:: 2014.7.0 The ``unless`` requisite specifies that a state should only run when any of the specified commands return ``False``. The ``unless`` requisite operates as NAND and is useful in giving more granular control over when a state should execute. **NOTE**: Under the hood ``unless`` calls ``cmd.retcode`` with ``python_shell=True``. This means the commands referenced by ``unless`` will be parsed by a shell, so beware of side-effects as this shell will be run with the same privileges as the salt-minion. 
Also be aware that the boolean value is determined by the shell's concept of ``True`` and ``False``, rather than Python's concept of ``True`` and ``False``. .. code-block:: yaml vim: pkg.installed: - unless: - rpm -q vim-enhanced - ls /usr/bin/vim In the example above, the state will only run if either the vim-enhanced package is not installed (returns ``False``) or if /usr/bin/vim does not exist (returns ``False``). The state will run if both commands return ``False``. However, the state will not run if both commands return ``True``. Unless checks are resolved for each name to which they are associated. For example: .. code-block:: yaml deploy_app: cmd.run: - names: - first_deploy_cmd - second_deploy_cmd - unless: some_check In the above case, ``some_check`` will be run prior to _each_ name -- once for ``first_deploy_cmd`` and a second time for ``second_deploy_cmd``. .. versionchanged:: 3000 The ``unless`` requisite can take a module as a dictionary field in unless. The dictionary must contain an argument ``fun`` which is the module that is being run, and everything else must be passed in under the args key or will be passed as individual kwargs to the module function. .. code-block:: yaml install apache on debian based distros: cmd.run: - name: make install - cwd: /path/to/dir/whatever-2.1.5/ - unless: - fun: file.file_exists path: /usr/local/bin/whatever .. code-block:: yaml set mysql root password: debconf.set: - name: mysql-server-5.7 - data: 'mysql-server/root_password': {'type': 'password', 'value': {{pillar['mysql.pass']}} } - unless: - fun: pkg.version args: - mysql-server-5.7 .. versionchanged:: sodium For modules which return a deeper data structure, the ``get_return`` key can be used to access results. .. code-block:: yaml test: test.nop: - name: foo - unless: - fun: consul.get consul_url: http://127.0.0.1:8500 key: not-existing get_return: res .. _onlyif-requisite: onlyif ~~~~~~ .. 
versionadded:: 2014.7.0 The ``onlyif`` requisite specifies that if each command listed in ``onlyif`` returns ``True``, then the state is run. If any of the specified commands return ``False``, the state will not run. **NOTE**: Under the hood ``onlyif`` calls ``cmd.retcode`` with ``python_shell=True``. This means the commands referenced by ``onlyif`` will be parsed by a shell, so beware of side-effects as this shell will be run with the same privileges as the salt-minion. Also be aware that the boolean value is determined by the shell's concept of ``True`` and ``False``, rather than Python's concept of ``True`` and ``False``. .. code-block:: yaml stop-volume: module.run: - name: glusterfs.stop_volume - m_name: work - onlyif: - gluster volume status work - order: 1 remove-volume: module.run: - name: glusterfs.delete - m_name: work - onlyif: - gluster volume info work - watch: - cmd: stop-volume The above example ensures that the stop_volume and delete modules only run if the gluster commands return a 0 ret value. .. versionchanged:: 3000 The ``onlyif`` requisite can take a module as a dictionary field in onlyif. The dictionary must contain an argument ``fun`` which is the module that is being run, and everything else must be passed in under the args key or will be passed as individual kwargs to the module function. .. code-block:: yaml install apache on redhat based distros: pkg.latest: - name: httpd - onlyif: - fun: match.grain tgt: 'os_family:RedHat' install apache on debian based distros: pkg.latest: - name: apache2 - onlyif: - fun: match.grain tgt: 'os_family:Debian' .. code-block:: yaml arbitrary file example: file.touch: - name: /path/to/file - onlyif: - fun: file.search args: - /etc/crontab - 'entry1' .. versionchanged:: sodium For modules which return a deeper data structure, the ``get_return`` key can be used to access results. .. 
code-block:: yaml test: test.nop: - name: foo - onlyif: - fun: consul.get consul_url: http://127.0.0.1:8500 key: does-exist get_return: res .. _creates-requisite: Creates ------- .. versionadded:: 3001 The ``creates`` requisite specifies that a state should only run when any of the specified files do not already exist. Like ``unless``, ``creates`` requisite operates as NAND and is useful in giving more granular control over when a state should execute. This was previously used by the :mod:`cmd <salt.states.cmd>` and :mod:`docker_container <salt.states.docker_container>` states. .. code-block:: yaml contrived creates example: file.touch: - name: /path/to/file - creates: /path/to/file ``creates`` also accepts a list of files, in which case this state will run if **any** of the files do not exist: .. code-block:: yaml creates list: file.cmd: - name: /path/to/command - creates: - /path/file - /path/file2 runas ~~~~~ .. versionadded:: 2017.7.0 The ``runas`` global option is used to set the user which will be used to run the command in the ``cmd.run`` module. .. code-block:: yaml django: pip.installed: - name: django >= 1.6, <= 1.7 - runas: daniel - require: - pkg: python-pip In the above state, the pip command run by ``cmd.run`` will be run by the daniel user. runas_password ~~~~~~~~~~~~~~ .. versionadded:: 2017.7.2 The ``runas_password`` global option is used to set the password used by the runas global option. This is required by ``cmd.run`` on Windows when ``runas`` is specified. It will be set when ``runas_password`` is defined in the state. .. code-block:: yaml run_script: cmd.run: - name: Powershell -NonInteractive -ExecutionPolicy Bypass -File C:\\Temp\\script.ps1 - runas: frank - runas_password: supersecret In the above state, the Powershell script run by ``cmd.run`` will be run by the frank user with the password ``supersecret``. check_cmd ~~~~~~~~~ .. versionadded:: 2014.7.0 Check Command is used for determining that a state did or did not run as expected. 
**NOTE**: Under the hood ``check_cmd`` calls ``cmd.retcode`` with ``python_shell=True``. This means the command will be parsed by a shell, so beware of side-effects as this shell will be run with the same privileges as the salt-minion. .. code-block:: yaml comment-repo: file.replace: - name: /etc/yum.repos.d/fedora.repo - pattern: '^enabled=0' - repl: enabled=1 - check_cmd: - "! grep 'enabled=0' /etc/yum.repos.d/fedora.repo" This will attempt to do a replace on all ``enabled=0`` in the .repo file, and replace them with ``enabled=1``. The ``check_cmd`` is just a bash command. It will do a grep for ``enabled=0`` in the file, and if it finds any, it will return a 0, which will be inverted by the leading ``!``, causing ``check_cmd`` to set the state as failed. If it returns a 1, meaning it didn't find any ``enabled=0``, it will be inverted by the leading ``!``, returning a 0, and declaring the function succeeded. **NOTE**: This requisite ``check_cmd`` functions differently than the ``check_cmd`` of the ``file.managed`` state. Overriding Checks ~~~~~~~~~~~~~~~~~ There are two commands used for the above checks. ``mod_run_check`` is used to check for ``onlyif`` and ``unless``. If the goal is to override the global check for these to variables, include a ``mod_run_check`` in the salt/states/ file. ``mod_run_check_cmd`` is used to check for the check_cmd options. To override this one, include a ``mod_run_check_cmd`` in the states file for the state. Fire Event Notifications ======================== .. versionadded:: 2015.8.0 The `fire_event` option in a state will cause the minion to send an event to the Salt Master upon completion of that individual state. The following example will cause the minion to send an event to the Salt Master with a tag of `salt/state_result/20150505121517276431/dasalt/nano` and the result of the state will be the data field of the event. Notice that the `name` of the state gets added to the tag. .. 
code-block:: yaml nano_stuff: pkg.installed: - name: nano - fire_event: True In the following example instead of setting `fire_event` to `True`, `fire_event` is set to an arbitrary string, which will cause the event to be sent with this tag: `salt/state_result/20150505121725642845/dasalt/custom/tag/nano/finished` .. code-block:: yaml nano_stuff: pkg.installed: - name: nano - fire_event: custom/tag/nano/finished Retrying States =============== .. versionadded:: 2017.7.0 The retry option in a state allows it to be executed multiple times until a desired result is obtained or the maximum number of attempts have been made. The retry option can be configured by the ``attempts``, ``until``, ``interval``, and ``splay`` parameters. The ``attempts`` parameter controls the maximum number of times the state will be run. If not specified or if an invalid value is specified, ``attempts`` will default to ``2``. The ``until`` parameter defines the result that is required to stop retrying the state. If not specified or if an invalid value is specified, ``until`` will default to ``True`` The ``interval`` parameter defines the amount of time, in seconds, that the system will wait between attempts. If not specified or if an invalid value is specified, ``interval`` will default to ``30``. The ``splay`` parameter allows the ``interval`` to be additionally spread out. If not specified or if an invalid value is specified, ``splay`` defaults to ``0`` (i.e. no splaying will occur). The following example will run the pkg.installed state until it returns ``True`` or it has been run ``5`` times. Each attempt will be ``60`` seconds apart and the interval will be splayed up to an additional ``10`` seconds: .. code-block:: yaml my_retried_state: pkg.installed: - name: nano - retry: attempts: 5 until: True interval: 60 splay: 10 The following example will run the pkg.installed state with all the defaults for ``retry``. 
The state will run up to ``2`` times, each attempt being ``30`` seconds apart, or until it returns ``True``. .. code-block:: yaml install_nano: pkg.installed: - name: nano - retry: True The following example will run the file.exists state every ``30`` seconds up to ``15`` times or until the file exists (i.e. the state returns ``True``). .. code-block:: yaml wait_for_file: file.exists: - name: /path/to/file - retry: attempts: 15 interval: 30 Return data from a retried state -------------------------------- When a state is retried, the returned output is as follows: The ``result`` return value is the ``result`` from the final run. For example, imagine a state set to ``retry`` up to three times or ``until`` ``True``. If the state returns ``False`` on the first run and then ``True`` on the second, the ``result`` of the state will be ``True``. The ``started`` return value is the ``started`` from the first run. The ``duration`` return value is the total duration of all attempts plus the retry intervals. The ``comment`` return value will include the result and comment from all previous attempts. For example: .. code-block:: yaml wait_for_file: file.exists: - name: /path/to/file - retry: attempts: 10 interval: 2 splay: 5 Would return similar to the following. The state result in this case is ``False`` (file.exist was run 10 times with a 2 second interval, but the file specified did not exist on any run). .. 
code-block:: none ID: wait_for_file Function: file.exists Result: False Comment: Attempt 1: Returned a result of "False", with the following comment: "Specified path /path/to/file does not exist" Attempt 2: Returned a result of "False", with the following comment: "Specified path /path/to/file does not exist" Attempt 3: Returned a result of "False", with the following comment: "Specified path /path/to/file does not exist" Attempt 4: Returned a result of "False", with the following comment: "Specified path /path/to/file does not exist" Attempt 5: Returned a result of "False", with the following comment: "Specified path /path/to/file does not exist" Attempt 6: Returned a result of "False", with the following comment: "Specified path /path/to/file does not exist" Attempt 7: Returned a result of "False", with the following comment: "Specified path /path/to/file does not exist" Attempt 8: Returned a result of "False", with the following comment: "Specified path /path/to/file does not exist" Attempt 9: Returned a result of "False", with the following comment: "Specified path /path/to/file does not exist" Specified path /path/to/file does not exist Started: 09:08:12.903000 Duration: 47000.0 ms Changes:
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/ref/states/requisites.rst
0.882731
0.739422
requisites.rst
pypi
.. _ordering: =============== Ordering States =============== The way in which configuration management systems are executed is a hotly debated topic in the configuration management world. Two major philosophies exist on the subject, to either execute in an imperative fashion where things are executed in the order in which they are defined, or in a declarative fashion where dependencies need to be mapped between objects. Imperative ordering is finite and generally considered easier to write, but declarative ordering is much more powerful and flexible but generally considered more difficult to create. Salt has been created to get the best of both worlds. States are evaluated in a finite order, which guarantees that states are always executed in the same order, and the states runtime is declarative, making Salt fully aware of dependencies via the `requisite` system. .. _ordering_auto_order: State Auto Ordering =================== .. versionadded: 0.17.0 Salt always executes states in a finite manner, meaning that they will always execute in the same order regardless of the system that is executing them. This evaluation order makes it easy to know what order the states will be executed in, but it is important to note that the requisite system will override the ordering defined in the files, and the ``order`` option, described below, will also override the order in which states are executed. This ordering system can be disabled in preference of lexicographic (classic) ordering by setting the ``state_auto_order`` option to ``False`` in the master configuration file. Otherwise, ``state_auto_order`` defaults to ``True``. How compiler ordering is managed is described further in :ref:`compiler-ordering`. .. _ordering_requisites: Requisite Statements ==================== .. note:: The behavior of requisites changed in version 0.9.7 of Salt. This documentation applies to requisites in version 0.9.7 and later. 
Often when setting up states any single action will require or depend on another action. Salt allows for the building of relationships between states with requisite statements. A requisite statement ensures that the named state is evaluated before the state requiring it. There are three types of requisite statements in Salt, **require**, **watch**, and **prereq**. These requisite statements are applied to a specific state declaration: .. code-block:: yaml httpd: pkg.installed: [] file.managed: - name: /etc/httpd/conf/httpd.conf - source: salt://httpd/httpd.conf - require: - pkg: httpd In this example, the **require** requisite is used to declare that the file /etc/httpd/conf/httpd.conf should only be set up if the pkg state executes successfully. The requisite system works by finding the states that are required and executing them before the state that requires them. Then the required states can be evaluated to see if they have executed correctly. Require statements can refer to any state defined in Salt. The basic examples are `pkg`, `service`, and `file`, but any used state can be referenced. In addition to state declarations such as pkg, file, etc., **sls** type requisites are also recognized, and essentially allow 'chaining' of states. This provides a mechanism to ensure the proper sequence for complex state formulas, especially when the discrete states are split or groups into separate sls files: .. code-block:: yaml include: - network httpd: pkg.installed: [] service.running: - require: - pkg: httpd - sls: network In this example, the httpd service running state will not be applied (i.e., the httpd service will not be started) unless both the httpd package is installed AND the network state is satisfied. .. note:: Requisite matching Requisites match on both the ID Declaration and the ``name`` parameter. 
Therefore, if using the ``pkgs`` or ``sources`` argument to install a list of packages in a pkg state, it's important to note that it is impossible to match an individual package in the list, since all packages are installed as a single state. Multiple Requisites ------------------- The requisite statement is passed as a list, allowing for the easy addition of more requisites. Both requisite types can also be separately declared: .. code-block:: yaml httpd: pkg.installed: [] service.running: - enable: True - watch: - file: /etc/httpd/conf/httpd.conf - require: - pkg: httpd - user: httpd - group: httpd file.managed: - name: /etc/httpd/conf/httpd.conf - source: salt://httpd/httpd.conf - require: - pkg: httpd user.present: [] group.present: [] In this example, the httpd service is only going to be started if the package, user, group, and file are executed successfully. Requisite Documentation ----------------------- For detailed information on each of the individual requisites, :ref:`please look here. <requisites>` The Order Option ================ Before using the `order` option, remember that the majority of state ordering should be done with a :ref:`requisite-declaration`, and that a requisite declaration will override an `order` option, so a state with order option should not require or required by other states. The order option is used by adding an order number to a state declaration with the option `order`: .. code-block:: yaml vim: pkg.installed: - order: 1 By adding the order option to `1` this ensures that the vim package will be installed in tandem with any other state declaration set to the order `1`. Any state declared without an order option will be executed after all states with order options are executed. But this construct can only handle ordering states from the beginning. Certain circumstances will present a situation where it is desirable to send a state to the end of the line. To do this, set the order to ``last``: .. 
code-block:: yaml vim: pkg.installed: - order: last
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/ref/states/ordering.rst
0.924308
0.763726
ordering.rst
pypi
.. _file-server: ================ Salt File Server ================ Salt comes with a simple file server suitable for distributing files to the Salt minions. The file server is a stateless ZeroMQ server that is built into the Salt master. The main intent of the Salt file server is to present files for use in the Salt state system. With this said, the Salt file server can be used for any general file transfer from the master to the minions. .. toctree:: :glob: * The cp Module ------------- The cp module is the home of minion side file server operations. The cp module is used by the Salt state system, salt-cp, and can be used to distribute files presented by the Salt file server. Escaping Special Characters ``````````````````````````` The ``salt://`` url format can potentially contain a query string, for example ``salt://dir/file.txt?saltenv=base``. You can prevent the fileclient/fileserver from interpreting ``?`` as the initial token of a query string by referencing the file with ``salt://|`` rather than ``salt://``. .. code-block:: yaml /etc/marathon/conf/?checkpoint: file.managed: - source: salt://|hw/config/?checkpoint - makedirs: True Environments ```````````` Since the file server is made to work with the Salt state system, it supports environments. The environments are defined in the master config file and when referencing an environment the file specified will be based on the root directory of the environment. get_file ```````` The cp.get_file function can be used on the minion to download a file from the master, the syntax looks like this: .. code-block:: bash salt '*' cp.get_file salt://vimrc /etc/vimrc This will instruct all Salt minions to download the vimrc file and copy it to /etc/vimrc Template rendering can be enabled on both the source and destination file names like so: .. 
code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their OS grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). To use compression, use the ``gzip`` named argument. Valid values are integers from 1 to 9, where 1 is the lightest compression and 9 the heaviest. In other words, 1 uses the least CPU on the master (and minion), while 9 uses the most. .. code-block:: bash salt '*' cp.get_file salt://vimrc /etc/vimrc gzip=5 Finally, note that by default cp.get_file does *not* create new destination directories if they do not exist. To change this, use the ``makedirs`` argument: .. code-block:: bash salt '*' cp.get_file salt://vimrc /etc/vim/vimrc makedirs=True In this example, /etc/vim/ would be created if it didn't already exist. get_dir ``````` The cp.get_dir function can be used on the minion to download an entire directory from the master. The syntax is very similar to get_file: .. code-block:: bash salt '*' cp.get_dir salt://etc/apache2 /etc cp.get_dir supports template rendering and gzip compression arguments just like get_file: .. code-block:: bash salt '*' cp.get_dir salt://etc/{{pillar.webserver}} /etc gzip=5 template=jinja File Server Client Instance --------------------------- A client instance is available which allows for modules and applications to be written which make use of the Salt file server. The file server uses the same authentication and encryption used by the rest of the Salt system for network communication. fileclient Module ````````````````` The ``salt/fileclient.py`` module is used to set up the communication from the minion to the master. 
When creating a client instance using the fileclient module, the minion configuration needs to be passed in. When using the fileclient module from within a minion module the built in ``__opts__`` data can be passed: .. code-block:: python import salt.minion import salt.fileclient def get_file(path, dest, saltenv="base"): """ Used to get a single file from the Salt master CLI Example: salt '*' cp.get_file salt://vimrc /etc/vimrc """ # Get the fileclient object client = salt.fileclient.get_file_client(__opts__) # Call get_file return client.get_file(path, dest, False, saltenv) Creating a fileclient instance outside of a minion module where the ``__opts__`` data is not available, it needs to be generated: .. code-block:: python import salt.fileclient import salt.config def get_file(path, dest, saltenv="base"): """ Used to get a single file from the Salt master """ # Get the configuration data opts = salt.config.minion_config("/etc/salt/minion") # Get the fileclient object client = salt.fileclient.get_file_client(opts) # Call get_file return client.get_file(path, dest, False, saltenv)
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/ref/file_server/index.rst
0.840652
0.785391
index.rst
pypi
.. _delta-proxy-information: .. _delta-proxy-intro: =================== Delta proxy minions =================== Welcome to the delta proxy minion installation guide. This installation guide explains the process for installing and using delta proxy minion which is available beginning in version 3004. This guide is intended for system and network administrators with the general knowledge and experience required in the field. This guide is also intended for users that have ideally already tested and used standard Salt proxy minions in their environment before deciding to move to a delta proxy minion environment. See `Salt proxy minions <https://docs.saltproject.io/en/latest/topics/proxyminion/index.html>`_ for more information. .. Note:: If you have not used standard Salt proxy minions before, consider testing and deploying standard Salt proxy minions in your environment first. Proxy minions vs. delta proxy minions ===================================== Salt can target network devices through `Salt proxy minions <https://docs.saltproject.io/en/latest/topics/proxyminion/index.html>`_. Proxy minions allow you to control network devices that, for whatever reason, cannot run the standard Salt minion. Examples include: * Network gear that has an API but runs a proprietary operating system * Devices with limited CPU or memory * Devices that could run a minion but will not for security reasons A proxy minion acts as an intermediary between the Salt master and the device it represents. The proxy minion runs on the Salt master and then translates commands from the Salt master to the device as needed. By acting as an intermediary for the actual minion, proxy minions eliminate the need to establish a constant connection from a Salt master to a minion. Proxy minions generally only open a connection to the actual minion when necessary. Proxy minions also reduce the amount of CPU or memory the minion must spend checking for commands from the Salt master. 
Proxy minions use the Salt master's CPU or memory to check for commands. The actual minion only needs to use CPU or memory to run commands when needed. .. Note:: For more information about Salt proxy minions, see: * `Salt proxy minions <https://docs.saltproject.io/en/latest/topics/proxyminion/index.html>`_ * `Salt proxy modules <https://docs.saltproject.io/en/latest/ref/proxy/all/index.html#all-salt-proxy>`_ When delta proxy minions are needed ----------------------------------- Normally, you would create a separate instance of proxy minion for each device that needs to be managed. However, this doesn't always scale well if you have thousands of devices. Running several thousand proxy minions can require a lot of memory and CPU. A delta proxy minion can solve this problem: it makes it possible to run one minion that acts as the intermediary between the Salt master and the many network devices it can represent. In this scenario, one device (the delta proxy minion on the Salt master) runs several proxies. This configuration boosts performance and improves the overall scalability of the network. Key terms ========= The following lists some important terminology that is used throughout this guide: .. list-table:: :widths: 25 75 :header-rows: 1 * - Term - Definition * - Salt master - The Salt master is a central node running the Salt master server. The Salt master issues commands to minions. * - minion - Minions are nodes running the Salt minion service. Minions listen to commands from a Salt master and perform the requested tasks, then return data back to the Salt master as needed. * - proxy minion - A Salt master that is running the proxy-minion service. The proxy minion acts as an intermediary between the Salt master and the device it represents. The proxy minion runs on the Salt master and then translates commands from the Salt master to the device. A separate instance of proxy minion is needed for each device that is managed. 
* - delta proxy minion - A Salt master that is running the delta proxy-minion service. The delta proxy minion acts as the intermediary between the Salt master and the many network devices it can represent. Only one instance of the delta proxy service is needed to run several proxies. * - control proxy - The control proxy runs on the Salt master. It manages a list of devices and issues commands to the network devices it represents. The Salt master needs at least one control proxy, but it is possible to have more than one control proxy, each managing a different set of devices. * - managed device - A device (such as Netmiko) that is managed by proxy minions or by a control proxy minion. The proxy minion or control proxy only creates a connection to the actual minion it needs to issue a command. * - pillar file - Pillars are structures of data (files) defined on the Salt master and passed through to one or more minions when the minion needs access to the pillar file. Pillars allow confidential, targeted data to be securely sent only to the relevant minion. Because all configurations for delta proxy minions are done on the Salt master (not on the minions), you use pillar files to configure the delta proxy-minion service. * - top file - The top file is a pillar file that maps which states should be applied to different minions in certain environments. .. _delta-proxy-preinstall: Pre-installation ================ Before you start ---------------- Before installing the delta proxy minion, ensure that: * Your network device and firmware are supported. * The Salt master that is acting as the control proxy minion has network access to the devices it is managing. * You have installed, configured, and tested standard Salt proxy minions in your environment before introducing delta proxy minions into your environment. Install or upgrade Salt ----------------------- Ensure your Salt masters are running at least Salt version 3004. 
For instructions on installing or upgrading Salt, see `repo.saltproject.io <http://repo.saltproject.io/>`_. For RedHat systems, see `Install or Upgrade Salt <https://enterprise.saltproject.io/en/latest/docs/install-salt.html>`_. .. _delta-proxy-install: Installation ============ Before you begin the delta proxy minion installation process, ensure you have read and completed the :ref:`delta-proxy-preinstall` steps. Overview of the installation process ------------------------------------ Similar to proxy minions, all the delta proxy minion configurations are done on the Salt master rather than on the minions that will be managed. The installation process has the following phases: #. `Configure the master to use delta proxy`_ - Create a configuration file on the Salt master that defines its proxy settings. #. `Create a pillar file for each managed device`_ - Create a pillar file for each device that will be managed by the delta proxy minion and reference these minions in the top file. #. `Create a control proxy configuration file`_ - Create a control proxy file that lists the devices that it will manage. Then, reference this file in the top file. #. `Start the delta proxy minion`_ - Start the delta proxy-minion service and validate that it has been set up correctly. Configure the master to use delta proxy --------------------------------------- In this step, you'll create a configuration file on the Salt master that defines its proxy settings. This is a general configuration file that tells the Salt master how to handle all proxy minions. To create this configuration: #. On the Salt master, navigate to the ``/etc/salt`` directory. In this directory, create a file named ``proxy`` if one doesn't already exist. #. Open the file in your preferred editor and add the following configuration information: .. 
code-block:: yaml # Use delta proxy metaproxy metaproxy: deltaproxy # Disable the FQDNS grain enable_fqdns_grains: False # Enable multiprocessing multiprocessing: True .. Note:: See the following section about `delta proxy configuration options`_ for a more detailed description of these configuration options. #. Save the file. Your Salt master is now configured to use delta proxy. Next, you need to `Create a pillar file for each managed device`_. Delta proxy configuration options --------------------------------- The following table describes the configuration options used in the delta proxy configuration file: .. list-table:: :widths: 25 75 :header-rows: 1 * - Field - Description * - metaproxy - Set this configuration option to ``deltaproxy``. If this option is set to ``proxy`` or if this line is not included in the file, the Salt master will use the standard proxy service instead of the delta proxy service. * - enable_fqdns_grains - If your router does not have the ability to use Reverse DNS lookup to obtain the Fully Qualified Domain Name (fqdn) for an IP address, you'll need to change the ``enable_fqdns_grains`` setting in the pillar configuration file to ``False`` instead. * - multiprocessing - Multi-processing is the ability to run more than one task or process at the same time. A delta proxy minion has the ability to run with multi-processing turned off. If you plan to run with multi-processing enabled, you should also set the ``skip_connect_on_init`` setting to ``True``. * - skip_connect_on_init - This setting tells the control proxy whether or not it should make a connection to the managed device when it starts. When set to ``True``, the delta proxy minion will only connect when it needs to issue commands to the managed devices. Create a pillar file for each managed device -------------------------------------------- Each device that needs to be managed by delta proxy needs a separate pillar file on the Salt master. To create this file: #. 
Navigate to the ``/srv/pillar`` directory. #. In this directory create a new pillar file for a minion. For example, ``my_managed_device_pillar_file_01.sls``. #. Open the new file in your preferred editor and add the necessary configuration information for that minion and your environment. The following is an example pillar file for a Netmiko device: .. code-block:: yaml proxy: proxytype: netmiko device_type: arista_eos host: 192.0.2.1 username: myusername password: mypassword always_alive: True .. Note:: The available configuration options vary depending on the proxy type (in other words, the type of device it is). To read a detailed explanation of the configuration options, refer to the proxy module documentation for the type of device you need to manage. See: * `Salt proxy modules <https://docs.saltproject.io/en/latest/ref/proxy/all/index.html#all-salt-proxy>`_ * `Netmiko Salt proxy module <https://docs.saltproject.io/en/latest/ref/proxy/all/salt.proxy.netmiko_px.html#module-salt.proxy.netmiko_px>`_ #. Save the file. #. In an editor, open the top file: ``/srv/pillar/top.sls``. #. Add a section to the top file that indicates the minion ID of the device that will be managed. Then, list the name of the pillar file you created in the previous steps. For example: .. code-block:: yaml my_managed_device_minion_ID: - my_managed_device_pillar_file_01 #. Repeat the previous steps for each minion that needs to be managed. You've now created the pillar file for the minions that will be managed by the delta proxy minion and you have referenced these files in the top file. Proceed to the next section. Create a control proxy configuration file ----------------------------------------- On the Salt master, you'll need to create or edit a control proxy file for each control proxy. The control proxy manages several devices and issues commands to the network devices it represents. 
The Salt master needs at least one control proxy, but it is possible to have more than one control proxy, each managing a different set of devices. To configure a control proxy, you'll create a file that lists the minion IDs of the minions that it will manage. Then you will reference this control proxy configuration file in the top file. To create a control proxy configuration file: #. On the Salt master, navigate to the ``/srv/pillar`` directory. In this directory, create a new proxy configuration file. Give this file a descriptive name, such as ``control_proxy_01_configuration.sls``. #. Open the file in your preferred editor and add a list of the minion IDs for each device that needs to be managed. For example: .. code-block:: yaml proxy: proxytype: deltaproxy ids: - my_managed_device_01 - my_managed_device_02 - my_managed_device_03 #. Save the file. #. In an editor, open the top file: ``/srv/pillar/top.sls``. #. Add a section to the top file that references the delta proxy control proxy. For example: .. code-block:: yaml base: my_managed_device_minion_01: - my_managed_device_pillar_file_01 my_managed_device_minion_02: - my_managed_device_pillar_file_02 my_managed_device_minion_03: - my_managed_device_pillar_file_03 delta_proxy_control: - control_proxy_01_configuration #. Repeat the previous steps for each control proxy if needed. Now that you have created the necessary configurations, proceed to the next section. Start the delta proxy minion ---------------------------- After you've successfully configured the delta proxy minion, you need to start the proxy minion service for each managed device and validate that it is working correctly. .. Note:: This step explains the process for starting a single instance of a delta proxy minion. Because starting each minion individually can potentially be very time-consuming, most organizations use a script to start their delta proxy minions since there are typically many devices being managed. 
Consider implementing a similar script for your environment to save time in deployment. To start a single instance of a delta proxy minion and test that it is configured correctly: #. In the terminal for the Salt master, run the following command, replacing the placeholder text with the actual minion ID: .. code-block:: bash sudo salt-proxy --proxyid=my_managed_device_minion_ID #. To test the delta proxy minion, run the following ``test.version`` command on the Salt master and target a specific minion. For example: .. code-block:: bash salt my_managed_device_minion_ID test.version This command returns an output similar to the following: .. code-block:: bash local: 3004 After you've successfully started the delta proxy minions and verified that they are working correctly, you can now use these minions the same as standard proxy minions. .. _delta-proxy-additional-resources: Additional resources ==================== This reference section includes additional resources for delta proxy minions. For reference, see: * `Salt proxy minions <https://docs.saltproject.io/en/latest/topics/proxyminion/index.html>`_ * `Salt proxy modules <https://docs.saltproject.io/en/latest/ref/proxy/all/index.html#all-salt-proxy>`_ * `Netmiko Salt proxy module <https://docs.saltproject.io/en/latest/ref/proxy/all/salt.proxy.netmiko_px.html#module-salt.proxy.netmiko_px>`_
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/ref/configuration/delta_proxy.rst
0.943034
0.726717
delta_proxy.rst
pypi
import re

from docutils import nodes
from docutils.parsers.rst.roles import set_classes
from pygments.lexer import RegexLexer, bygroups
from pygments.lexers import get_lexer_by_name
from pygments.token import Keyword, Literal, Name, Number, Operator, Text
from pygments.util import ClassNotFound
from sphinx import addnodes
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, Index, ObjType
from sphinx.roles import XRefRole
from sphinx.util.docfields import GroupedField, TypedField
from sphinx.util.nodes import make_refnode


class DocRef:
    """Represents a link to an RFC which defines an HTTP method."""

    def __init__(self, base_url: str, anchor: str, section) -> None:
        """Store the pieces of a URL which links to the section of an RFC
        that defines an HTTP method.

        base_url -- URL of the RFC document itself
        anchor   -- prefix used for section anchors inside that document
        section  -- section number (int or float, e.g. ``2`` or ``9.3``)
        """
        self.base_url = base_url
        self.anchor = anchor
        self.section = section

    def __repr__(self) -> str:
        """Return the full URL (``<base_url>#<anchor><section>``) which
        points to the location of the RFC section defining the method.
        """
        return "{}#{}{}".format(self.base_url, self.anchor, self.section)


#: The URL of the HTTP/1.1 RFC which defines the HTTP methods OPTIONS, GET,
#: HEAD, POST, PUT, DELETE, TRACE, and CONNECT.
RFC2616 = "http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html"

#: The name to use for section anchors in RFC2616.
RFC2616ANCHOR = "sec"

#: The URL of the RFC which defines the HTTP PATCH method.
RFC5789 = "http://tools.ietf.org/html/rfc5789"

#: The name to use for section anchors in RFC5789.
RFC5789ANCHOR = "section-"

#: Mapping from lowercase HTTP method name to :class:`DocRef` object which
#: maintains the URL which points to the section of the RFC which defines that
#: HTTP method.
DOCREFS = {
    "patch": DocRef(RFC5789, RFC5789ANCHOR, 2),
    "options": DocRef(RFC2616, RFC2616ANCHOR, 9.2),
    "get": DocRef(RFC2616, RFC2616ANCHOR, 9.3),
    "head": DocRef(RFC2616, RFC2616ANCHOR, 9.4),
    "post": DocRef(RFC2616, RFC2616ANCHOR, 9.5),
    "put": DocRef(RFC2616, RFC2616ANCHOR, 9.6),
    "delete": DocRef(RFC2616, RFC2616ANCHOR, 9.7),
    "trace": DocRef(RFC2616, RFC2616ANCHOR, 9.8),
    "connect": DocRef(RFC2616, RFC2616ANCHOR, 9.9),
}

#: Reason phrases for the HTTP status codes understood by the
#: ``:http:statuscode:`` role.
HTTP_STATUS_CODES = {
    100: "Continue",
    101: "Switching Protocols",
    102: "Processing",
    200: "OK",
    201: "Created",
    202: "Accepted",
    203: "Non Authoritative Information",
    204: "No Content",
    205: "Reset Content",
    206: "Partial Content",
    207: "Multi Status",
    226: "IM Used",  # see RFC 3229
    300: "Multiple Choices",
    301: "Moved Permanently",
    302: "Found",
    303: "See Other",
    304: "Not Modified",
    305: "Use Proxy",
    307: "Temporary Redirect",
    400: "Bad Request",
    401: "Unauthorized",
    402: "Payment Required",  # unused
    403: "Forbidden",
    404: "Not Found",
    405: "Method Not Allowed",
    406: "Not Acceptable",
    407: "Proxy Authentication Required",
    408: "Request Timeout",
    409: "Conflict",
    410: "Gone",
    411: "Length Required",
    412: "Precondition Failed",
    413: "Request Entity Too Large",
    414: "Request URI Too Long",
    415: "Unsupported Media Type",
    416: "Requested Range Not Satisfiable",
    417: "Expectation Failed",
    418: "I'm a teapot",  # see RFC 2324
    422: "Unprocessable Entity",
    423: "Locked",
    424: "Failed Dependency",
    426: "Upgrade Required",
    449: "Retry With",  # proprietary MS extension
    500: "Internal Server Error",
    501: "Not Implemented",
    502: "Bad Gateway",
    503: "Service Unavailable",
    504: "Gateway Timeout",
    505: "HTTP Version Not Supported",
    507: "Insufficient Storage",
    510: "Not Extended",
}

#: Matches a path parameter such as ``(name)`` or ``(type:name)`` inside an
#: HTTP resource signature.
http_sig_param_re = re.compile(
    r"\((?:(?P<type>[^:)]+):)?(?P<name>[\w_]+)\)", re.VERBOSE
)


def http_resource_anchor(method, path):
    """Return a document-unique anchor name for *method* on *path*.

    Characters not usable in anchors (``<``, ``>``, ``:``, ``/``) are
    replaced with dashes, e.g. ``("GET", "/users/<id>")`` ->
    ``"get--users--id-"``.
    """
    path = re.sub(r"[<>:/]", "-", path)
    return method.lower() + "-" + path


class HTTPResource(ObjectDescription):
    """Base directive describing a single HTTP resource (method + path).

    Concrete subclasses only set :attr:`method`; signature parsing, the
    field lists and index registration are all shared here.
    """

    doc_field_types = [
        TypedField(
            "parameter",
            label="Parameters",
            names=("param", "parameter", "arg", "argument"),
            typerolename="obj",
            typenames=("paramtype", "type"),
        ),
        TypedField(
            "jsonparameter",
            label="JSON Parameters",
            names=("jsonparameter", "jsonparam", "json"),
            typerolename="obj",
            typenames=("jsonparamtype", "jsontype"),
        ),
        TypedField(
            "queryparameter",
            label="Query Parameters",
            names=("queryparameter", "queryparam", "qparam", "query"),
            typerolename="obj",
            typenames=("queryparamtype", "querytype", "qtype"),
        ),
        GroupedField(
            "formparameter",
            label="Form Parameters",
            names=("formparameter", "formparam", "fparam", "form"),
        ),
        GroupedField(
            "requestheader",
            label="Request Headers",
            rolename="mailheader",
            names=("reqheader", "requestheader"),
        ),
        GroupedField(
            "responseheader",
            label="Response Headers",
            rolename="mailheader",
            names=("resheader", "responseheader"),
        ),
        GroupedField(
            "statuscode",
            label="Status Codes",
            rolename="statuscode",
            names=("statuscode", "status", "code"),
        ),
    ]

    #: Lowercase HTTP method implemented by the concrete subclass.
    method = NotImplemented

    def handle_signature(self, sig, signode):
        """Parse *sig* (a path such as ``/users/(int:id)``) into nodes.

        Path parameters matched by :data:`http_sig_param_re` are rendered
        as parameter lists; the literal path segments around them as names.
        Returns ``(fullname, method, sig)``.
        """
        method = self.method.upper() + " "
        signode += addnodes.desc_name(method, method)
        offset = 0
        # Initialised so an empty signature cannot leave `path` unbound
        # (previously a NameError).
        path = ""
        for match in http_sig_param_re.finditer(sig):
            path = sig[offset : match.start()]
            signode += addnodes.desc_name(path, path)
            params = addnodes.desc_parameterlist()
            typ = match.group("type")
            if typ:
                typ = typ + ": "
                params += addnodes.desc_annotation(typ, typ)
            name = match.group("name")
            params += addnodes.desc_parameter(name, name)
            signode += params
            offset = match.end()
        if offset < len(sig):
            path = sig[offset : len(sig)]
            signode += addnodes.desc_name(path, path)
        fullname = self.method.upper() + " " + path
        signode["method"] = self.method
        signode["path"] = sig
        signode["fullname"] = fullname
        return (fullname, self.method, sig)

    def needs_arglist(self):
        return False

    def add_target_and_index(self, name_cls, sig, signode):
        """Register an anchor for this route in the ``http`` domain data."""
        signode["ids"].append(http_resource_anchor(*name_cls[1:]))
        self.env.domaindata["http"][self.method][sig] = (self.env.docname, "")

    def get_index_text(self, modname, name):
        # Routes are listed via HTTPIndex, not the general index.
        return ""


class HTTPOptions(HTTPResource):
    method = "options"


class HTTPHead(HTTPResource):
    method = "head"


class HTTPPatch(HTTPResource):
    method = "patch"


class HTTPPost(HTTPResource):
    method = "post"


class HTTPGet(HTTPResource):
    method = "get"


class HTTPPut(HTTPResource):
    method = "put"


class HTTPDelete(HTTPResource):
    method = "delete"


class HTTPTrace(HTTPResource):
    method = "trace"


def http_statuscode_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Role ``:http:statuscode:`` -- render a status code linked to its RFC.

    Accepts either a bare code (``200``) or a code plus reason phrase
    (``200 OK``).  Unknown input produces a docutils problematic node.

    NOTE: the mutable ``options``/``content`` defaults mirror the signature
    docutils expects for role functions.
    """
    if text.isdigit():
        code = int(text)
        try:
            status = HTTP_STATUS_CODES[code]
        except KeyError:
            msg = inliner.reporter.error(
                "%d is invalid HTTP status code" % code, line=lineno
            )
            prb = inliner.problematic(rawtext, rawtext, msg)
            return [prb], [msg]
    else:
        try:
            code, status = re.split(r"\s", text.strip(), maxsplit=1)
            code = int(code)
        except ValueError:
            msg = inliner.reporter.error(
                "HTTP status code must be an integer (e.g. `200`) or "
                "start with an integer (e.g. `200 OK`); %r is invalid" % text,
                line=lineno,
            )
            prb = inliner.problematic(rawtext, rawtext, msg)
            return [prb], [msg]
    # Codes defined outside RFC 2616 get their own URL; any other known code
    # links to the matching section of RFC 2616.  This must be one if/elif
    # chain: previously the 226/418 URLs were immediately overwritten by the
    # generic RFC 2616 branch because the branches were independent `if`s.
    if code == 226:
        url = "http://www.ietf.org/rfc/rfc3229.txt"
    elif code == 418:
        url = "http://www.ietf.org/rfc/rfc2324.txt"
    elif code == 449:
        url = "http://msdn.microsoft.com/en-us/library/dd891478(v=prot.10).aspx"
    elif code in HTTP_STATUS_CODES:
        url = (
            "http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10."
            + "%d.%d" % (code // 100, 1 + code % 100)
        )
    else:
        url = ""
    set_classes(options)
    node = nodes.reference(rawtext, "%d %s" % (code, status), refuri=url, **options)
    return [node], []


def http_method_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Role ``:http:method:`` -- link an HTTP method name to its RFC."""
    method = str(text).lower()
    if method not in DOCREFS:
        msg = inliner.reporter.error(
            "%s is not valid HTTP method" % method, line=lineno
        )
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    url = str(DOCREFS[method])
    node = nodes.reference(rawtext, method.upper(), refuri=url, **options)
    return [node], []


class HTTPXRefRole(XRefRole):
    """Cross-reference role (``:http:get:`` etc.) bound to one method."""

    def __init__(self, method, **kwargs):
        XRefRole.__init__(self, **kwargs)
        self.method = method

    def process_link(self, env, refnode, has_explicit_title, title, target):
        # Prefix implicit titles with the method, e.g. "GET /users".
        if not has_explicit_title:
            title = self.method.upper() + " " + title
        return title, target


class HTTPIndex(Index):
    """Domain index listing every documented route, grouped by path prefix."""

    name = "routingtable"
    localname = "HTTP Routing Table"
    shortname = "routing table"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Configured path prefixes (split into components) that should be
        # skipped when grouping; longest prefixes are tried first.
        self.ignore = [
            [seg for seg in x.split("/") if seg]
            for x in self.domain.env.config["http_index_ignore_prefixes"]
        ]
        self.ignore.sort(key=lambda x: -len(x))

    def grouping_prefix(self, path):
        """Return the grouping key for *path*, honouring ignored prefixes."""
        letters = [x for x in path.split("/") if x]
        for prefix in self.ignore:
            if letters[: len(prefix)] == prefix:
                return "/" + "/".join(letters[: len(prefix) + 1])
        return "/{}".format(letters[0] if letters else "")

    def generate(self, docnames=None):
        """Build the (grouped, sorted) index entry structure Sphinx expects."""
        content = {}
        items = (
            (method, path, info)
            for method, routes in self.domain.routes.items()
            for path, info in routes.items()
        )
        items = sorted(items, key=lambda item: item[1])
        for method, path, info in items:
            entries = content.setdefault(self.grouping_prefix(path), [])
            entries.append(
                [
                    method.upper() + " " + path,
                    0,
                    info[0],
                    http_resource_anchor(method, path),
                    "",
                    "",
                    info[1],
                ]
            )
        content = sorted(content.items(), key=lambda k: k[0])
        return (content, True)


class HTTPDomain(Domain):
    """HTTP domain: directives, roles and data for documenting routes."""

    name = "http"
    label = "HTTP"

    object_types = {
        "options": ObjType("options", "options", "obj"),
        "head": ObjType("head", "head", "obj"),
        "post": ObjType("post", "post", "obj"),
        "get": ObjType("get", "get", "obj"),
        "put": ObjType("put", "put", "obj"),
        "patch": ObjType("patch", "patch", "obj"),
        "delete": ObjType("delete", "delete", "obj"),
        "trace": ObjType("trace", "trace", "obj"),
    }

    directives = {
        "options": HTTPOptions,
        "head": HTTPHead,
        "post": HTTPPost,
        "get": HTTPGet,
        "put": HTTPPut,
        "patch": HTTPPatch,
        "delete": HTTPDelete,
        "trace": HTTPTrace,
    }

    roles = {
        "options": HTTPXRefRole("options"),
        "head": HTTPXRefRole("head"),
        "post": HTTPXRefRole("post"),
        "get": HTTPXRefRole("get"),
        "put": HTTPXRefRole("put"),
        "patch": HTTPXRefRole("patch"),
        "delete": HTTPXRefRole("delete"),
        "trace": HTTPXRefRole("trace"),
        "statuscode": http_statuscode_role,
        "method": http_method_role,
    }

    initial_data = {
        "options": {},  # path: (docname, synopsis)
        "head": {},
        "post": {},
        "get": {},
        "put": {},
        "patch": {},
        "delete": {},
        "trace": {},
    }

    # indices = [HTTPIndex]
    indices = []

    @property
    def routes(self):
        """Per-method route tables, keyed by method name."""
        return {key: self.data[key] for key in self.object_types}

    def clear_doc(self, docname):
        """Drop all routes recorded for *docname* (called on re-read)."""
        for typ, routes in self.routes.items():
            for path, info in list(routes.items()):
                if info[0] == docname:
                    del routes[path]

    def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
        """Resolve an ``:http:<method>:`` cross-reference to its anchor."""
        try:
            info = self.data[str(typ)][target]
        except KeyError:
            return
        else:
            anchor = http_resource_anchor(typ, target)
            title = typ.upper() + " " + target
            return make_refnode(builder, fromdocname, info[0], anchor, contnode, title)

    def get_objects(self):
        for method, routes in self.routes.items():
            for path, info in routes.items():
                anchor = http_resource_anchor(method, path)
                yield (path, path, method, info[0], anchor, 1)


class HTTPLexer(RegexLexer):
    """Pygments lexer for HTTP request/response sessions."""

    name = "HTTP"
    aliases = ["http"]
    flags = re.DOTALL

    def header_callback(self, match):
        # Remember the Content-Type value (sans parameters) so the message
        # body can later be sub-lexed with the matching lexer.
        if match.group(1).lower() == "content-type":
            content_type = match.group(5).strip()
            if ";" in content_type:
                content_type = content_type[: content_type.find(";")].strip()
            self.content_type = content_type
        yield match.start(1), Name.Attribute, match.group(1)
        yield match.start(2), Text, match.group(2)
        yield match.start(3), Operator, match.group(3)
        yield match.start(4), Text, match.group(4)
        yield match.start(5), Literal, match.group(5)
        yield match.start(6), Text, match.group(6)

    def continuous_header_callback(self, match):
        yield match.start(1), Text, match.group(1)
        yield match.start(2), Literal, match.group(2)
        yield match.start(3), Text, match.group(3)

    def content_callback(self, match):
        """Lex the body with the lexer for the recorded Content-Type,
        falling back to plain text when no lexer is available."""
        content_type = getattr(self, "content_type", None)
        content = match.group()
        offset = match.start()
        if content_type:
            from pygments.lexers import get_lexer_for_mimetype

            try:
                lexer = get_lexer_for_mimetype(content_type)
            except ClassNotFound:
                pass
            else:
                for idx, token, value in lexer.get_tokens_unprocessed(content):
                    yield offset + idx, token, value
                return
        yield offset, Text, content

    tokens = {
        "root": [
            (
                r"(GET|POST|PUT|PATCH|DELETE|HEAD|OPTIONS|TRACE)( +)([^ ]+)( +)"
                r"(HTTPS?)(/)(1\.[01])(\r?\n|$)",
                bygroups(
                    Name.Function,
                    Text,
                    Name.Namespace,
                    Text,
                    Keyword.Reserved,
                    Operator,
                    Number,
                    Text,
                ),
                "headers",
            ),
            (
                r"(HTTPS?)(/)(1\.[01])( +)(\d{3})( +)([^\r\n]+)(\r?\n|$)",
                bygroups(
                    Keyword.Reserved,
                    Operator,
                    Number,
                    Text,
                    Number,
                    Text,
                    Name.Exception,
                    Text,
                ),
                "headers",
            ),
        ],
        "headers": [
            (r"([^\s:]+)( *)(:)( *)([^\r\n]+)(\r?\n|$)", header_callback),
            (r"([\t ]+)([^\r\n]+)(\r?\n|$)", continuous_header_callback),
            (r"\r?\n", Text, "content"),
        ],
        "content": [(r".+", content_callback)],
    }


def setup(app):
    """Sphinx extension entry point: register domain, lexer and config."""
    app.add_domain(HTTPDomain)
    try:
        get_lexer_by_name("http")
    except ClassNotFound:
        app.add_lexer("http", HTTPLexer())
    app.add_config_value("http_index_ignore_prefixes", [], None)
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/_ext/httpdomain.py
0.732687
0.205475
httpdomain.py
pypi
.. _salt-system-architecture: ======================== Salt system architecture ======================== Overview ======== This page provides a high-level overview of the Salt system architecture and its different components. What is Salt? ============= Salt is a Python-based open-source remote execution framework used for: * Configuration management * Automation * Provisioning * Orchestration The Salt system architecture ============================ The following diagram shows the primary components of the basic Salt architecture: .. image:: /_static/salt-architecture.png :width: 80% The following sections describe some of the core components of the Salt architecture. Salt Masters and Salt Minions ----------------------------- Salt uses the master-client model in which a master issues commands to a client and the client executes the command. In the Salt ecosystem, the Salt Master is a server that is running the ``salt-master`` service. It issues commands to one or more Salt Minions, which are servers that are running the ``salt-minion`` service and that are registered with that particular Salt Master. Another way to describe Salt is as a publisher-subscriber model. The master publishes jobs that need to be executed and Salt Minions subscribe to those jobs. When a specific job applies to that minion, it will execute the job. When a minion finishes executing a job, it sends job return data back to the master. Salt has two ports used by default for the minions to communicate with their master(s). These ports work in concert to receive and deliver data to the Message Bus. Salt’s message bus is ZeroMQ, which creates an asynchronous network topology to provide the fastest communication possible. Targets and grains ------------------ The master indicates which minions should execute the job by defining a *target*. A target is the group of minions, across one or many masters, that a job's Salt command applies to. .. 
Note:: A master can also be managed like a minion and can be a target if it is running the ``salt-minion`` service. The following is an example of one of the many kinds of commands that a master might issue to a minion. This command indicates that all minions should install the Vim application: .. code-block:: bash salt -v '*' pkg.install vim In this case the glob ``'*'`` is the target, which indicates that all minions should execute this command. Many other targeting options are available, including targeting a specific minion by its ID or targeting minions by their shared traits or characteristics (called *grains* in Salt). Salt comes with an interface to derive information about the underlying system. This is called the *grains interface*, because it presents Salt with grains of information. Grains are collected for the operating system, domain name, IP address, kernel, OS type, memory, and many other system properties. You can also create your own custom grain data. Grain data is relatively static. However, grain data is refreshed when system information changes (such as network settings) or when a new value is assigned to a custom grain. Open event system (event bus) ----------------------------- The event system is used for inter-process communication between the Salt Master and Salt Minions. In the event system: * Events are seen by both the master and minions. * Events can be monitored and evaluated by both. The event bus lays the groundwork for orchestration and real-time monitoring. All minions see jobs and results by subscribing to events published on the event system. Salt uses a pluggable event system with two layers: * **ZeroMQ (0MQ)** - The current default socket-level library providing a flexible transport layer. * **Tornado** - Full TCP-based transport layer event system. One of the greatest strengths of Salt is the speed of execution. The event system’s communication bus is more efficient than running a higher-level web service (http). 
The remote execution system is the component that all components are built upon, allowing for decentralized remote execution to spread load across resources. Salt states ----------- In addition to remote execution, Salt provides another method for configuring minions by declaring which *state* a minion should be in, otherwise referred to as *Salt states*. Salt states make configuration management possible. You can use Salt states to deploy and manage infrastructure with simple YAML files. Using states, you can automate recursive and predictable tasks by queueing jobs for Salt to implement without needing user input. You can also add more complex conditional logic to state files with Jinja. To illustrate the subtle differences between remote execution and configuration management, take the command referenced in the previous section about `Targets and grains`_ in which Salt installed the application Vim on all minions: .. list-table:: :widths: 25 25 50 :header-rows: 1 * - Methodology - Implementation - Result * - Remote execution - * Run ``salt -v '*' pkg.install vim`` from the terminal - * Remotely installs Vim on the targeted minions * - Configuration management - * Write a YAML state file that checks whether Vim is installed * This state file is then applied to the targeted minions - * Ensures that Vim is always installed on the targeted minions * Salt analyzes the state file and determines what actions need to be taken to ensure the minion complies with the state declarations * If Vim is not installed, it automates the processes to install Vim on the targeted minions The state file that verifies Vim is installed might look like the following example: .. code-block:: yaml # File:/srv/salt/vim_install.sls install_vim_now: pkg.installed: - pkgs: - vim To apply this state to a minion, you would use the ``state.apply`` module, such as in the following example: .. 
code-block:: bash salt '*' state.apply vim_install This command applies the ``vim_install`` state to all minions. *Formulas* are collections of states that work in harmony to configure a minion or application. For example, one state might trigger another state. The Top file ------------ It is not practical to manually run each state individually targeting specific minions each time. Some environments have hundreds of state files targeting thousands of minions. Salt offers two features to help with this scaling problem: * **The top.sls file** - Maps Salt states to their applicable minions. * **Highstate execution** - Runs all Salt states outlined in ``top.sls`` in a single execution. The top file maps which states should be applied to different minions in certain environments. The following is an example of a simple top file: .. code-block:: yaml # File: /srv/salt/top.sls base: '*': - all_server_setup '01webserver': - web_server_setup In this example, ``base`` refers to the Salt environment, which is the default. You can specify more than one environment as needed, such as prod, dev, QA, etc. Groups of minions are specified under the environment, and states are listed for each set of minions. This top file indicates that a state called ``all_server_setup`` should be applied to all minions ``'*'`` and the state called ``web_server_setup`` should be applied to the ``01webserver`` minion. To run the Salt command, you would use the state.highstate function: .. code-block:: bash salt \* state.highstate This command applies the top file to the targeted minions. Salt pillar ----------- Salt’s pillar feature takes data defined on the Salt Master and distributes it to minions as needed. Pillar is primarily used to store secrets or other highly sensitive data, such as account credentials, cryptographic keys, or passwords. Pillar is also useful for storing non-secret data that you don't want to place directly in your state files, such as configuration data. 
Salt pillar brings data into the cluster from the opposite direction as grains. While grains are data generated from the minion, the pillar is data generated from the master. Pillars are organized similarly to states in a Pillar state tree, where ``top.sls`` acts to coordinate pillar data to environments and minions privy to the data. Information transferred using pillar has a dictionary generated for the targeted minion and encrypted with that minion’s key for secure data transfer. Pillar data is encrypted on a per-minion basis, which makes it useful for storing sensitive data specific to a particular minion. Beacons and reactors -------------------- The beacon system is a monitoring tool that can listen for a variety of system processes on Salt Minions. Beacons can trigger reactors which can then help implement a change or troubleshoot an issue. For example, if a service’s response times out, the reactor system can restart the service. Beacons are used for a variety of purposes, including: * Automated reporting * Error log delivery * Microservice monitoring * User shell activity * Resource monitoring When coupled with reactors, beacons can create automated pre-written responses to infrastructure and application issues. Reactors expand Salt with automated responses using pre-written remediation states. Reactors can be applied in a variety of scenarios: * Infrastructure scaling * Notifying administrators * Restarting failed applications * Automatic rollback When both beacons and reactors are used together, you can create unique states customized to your specific needs. Salt runners and orchestration ------------------------------ Salt runners are convenience applications executed with the ``salt-run`` command. Salt runners work similarly to Salt execution modules. However, they execute on the Salt Master instead of the Salt Minions. A Salt runner can be a simple client call or a complex application. 
Salt provides the ability to orchestrate system administrative tasks throughout the enterprise. Orchestration makes it possible to coordinate the activities of multiple machines from a central place. It has the added advantage of being able to control the sequence of when certain configuration events occur. Orchestration states execute on the master using the state runner module.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/salt_system_architecture.rst
0.923178
0.824533
salt_system_architecture.rst
pypi
.. _release-2015-5-0: ============================================== Salt 2015.5.0 Release Notes - Codename Lithium ============================================== The 2015.5.0 feature release of Salt is focused on hardening Salt and mostly on improving existing systems. A few major additions are present, primarily the new Beacon system. Most enhancements have been focused around improving existing features and interfaces. As usual the release notes are not exhaustive and primarily include the most notable additions and improvements. Hundreds of bugs have been fixed and many modules have been substantially updated and added. .. warning:: In order to fix potential shell injection vulnerabilities in salt modules, a change has been made to the various ``cmd`` module functions. These functions now default to ``python_shell=False``, which means that the commands will not be sent to an actual shell. The largest side effect of this change is that "shellisms", such as pipes, will not work by default. The modules shipped with salt have been audited to fix any issues that might have arisen from this change. Additionally, the ``cmd`` state module has been unaffected, and use of ``cmd.run`` in jinja is also unaffected. ``cmd.run`` calls on the CLI will also allow shellisms. However, custom execution modules which use shellisms in ``cmd`` calls will break, unless you pass ``python_shell=True`` to these calls. As a temporary workaround, you can set ``cmd_safe: False`` in your minion and master configs. This will revert the default, but is also less secure, as it will allow shell injection vulnerabilities to be written in custom code. We recommend you only set this setting for as long as it takes to resolve these issues in your custom code, then remove the override. .. note:: Starting in this version of salt, ``pillar_opts`` defaults to False instead of True. 
This means that master opts will not be present in minion pillar, and as a result, ``config.get`` calls will not include master opts. We recommend pillar is used for configuration options which need to make it to the minion. Beacons ======= The beacon system allows the minion to hook into system processes and continually translate external events into the salt event bus. The primary example of this is the :py:mod:`~salt.beacons.inotify` beacon. This beacon uses inotify to watch configured files or directories on the minion for changes, creation, deletion etc. This allows for the changes to be sent up to the master where the reactor can respond to changes. Sudo Minion Settings ==================== It is now possible to run the minion as a non-root user and for the minion to execute commands via sudo. Simply add `sudo_user: root` to the minion config, run the minion as a non-root user and grant that user sudo rights to execute salt-call. Lazy Loader =========== The Lazy Loader is a significant overhaul of Salt's module loader system. The Lazy Loader will lazily load modules on access instead of all on start. In addition to a major performance improvement, this "sandboxes" modules so a bad/broken import of a single module will only affect jobs that require accessing the broken module. (:issue: `20274`) Enhanced Active Directory Support ================================= The eauth system for LDAP has been extended to support Microsoft Active Directory out of the box. This includes Active Directory and LDAP group support for eauth. Salt LXC Enhancements ===================== The LXC systems have been overhauled to be more consistent and to fix many bugs. This overhaul makes using LXC with Salt much easier and substantially improves the underlying capabilities of Salt's LXC integration. 
Salt SSH ======== - Additional configuration options and command line flags have been added to configure the scan roster on the fly - Added support for ``state.single`` in ``salt-ssh`` - Added support for ``publish.publish``, ``publish.full_data``, and ``publish.runner`` in ``salt-ssh`` - Added support for ``mine.get`` in ``salt-ssh`` New Windows Installer ===================== The new Windows installer changes how Salt is installed on Windows. The old installer used bbfreeze to create an isolated python environment to execute in. This made adding modules and python libraries difficult. The new installer sets up a more flexible python environment making it easy to manage the python install and add python modules. Instead of frozen packages, a full python implementation resides in the bin directory (``C:\salt\bin``). By executing pip or easy_install from within the Scripts directory (``C:\salt\bin\Scripts``) you can install any additional python modules you may need for your custom environment. The .exe's that once resided at the root of the salt directory (``C:\salt``) have been replaced by .bat files and should function the same way as the .exe's in previous versions. The new Windows Installer will not replace the minion config file and key if they already exist on the target system. Only the salt program files will be replaced. ``C:\salt\conf`` and ``C:\salt\var`` will remain unchanged. Removed Requests Dependency =========================== The hard dependency on the requests library has been removed. Requests is still required by a number of cloud modules but is no longer required for normal Salt operations. This removal fixes issues that were introduced with requests and salt-ssh, as well as issues users experienced from the many different packaging methods used by requests package maintainers. Python 3 Updates ================ While Salt does not YET run on Python 3 it has been updated to INSTALL on Python 3, taking us one step closer. 
What remains is getting the test suite to the point where it can run on Python 3 so that we can verify compatibility. RAET Additions ============== The RAET support continues to improve. RAET now supports multi-master and many bugs and performance issues have been fixed. RAET is much closer to being a first class citizen. Modified File Detection ======================= A number of functions have been added to the RPM-based package managers to detect and diff files that are modified from the original package installs. This can be found in the new pkg.modified functions. Reactor Update ============== Fix an infinite recursion problem for runner/wheel reactor jobs by passing a "user" (Reactor) to all jobs that the reactor starts. The reactor skips all events created by that username -- thereby only reacting to events not caused by itself. Because of this, runner and wheel executions from the runner will have user "Reactor" in the job cache. Misc Fixes/Additions ==================== - SDB driver for etcd. (:issue: `22043`) - Add ``only_upgrade`` argument to apt-based ``pkg.install`` to only install a package version if the package is already installed. (Great for security updates!) - Joyent now requires a ``keyname`` to be specified in the provider configuration. This change was necessitated upstream by the 7.0+ API. - Add ``args`` argument to ``cmd.script_retcode`` to match ``cmd.script`` in the :py:mod:`cmd module <salt.cmd.cmdmod>`. (:issue: `21122`) - Fixed bug where TCP keepalive was not being sent on the defined interval on the return port (4506) from minion to master. (:issue: `21465`) - LocalClient may now optionally raise SaltClientError exceptions. If using this class directly, checking for and handling this exception is recommended. (:issue: `21501`) - The SAuth object is now a singleton, meaning authentication state is global (per master) on each minion. This reduces sign-ins of minions from 3->1 per startup. 
- Nested outputter has been optimized, it is now much faster. - Extensive fileserver backend updates. Deprecations ============ - Removed ``parameter`` keyword argument from ``eselect.exec_action`` execution module. - Removed ``runas`` parameter from the following ``pip``` execution module functions: ``install``, ``uninstall``, ``freeze``, ``list_``, ``list_upgrades``, ``upgrade_available``, ``upgrade``. Please migrate to ``user``. - Removed ``runas`` parameter from the following ``pip`` state module functions: ``installed``, ``removed``, ``uptodate`` . Please migrate to ``user``. - Removed ``quiet`` option from all functions in ``cmdmod`` execution module. Please use ``output_loglevel=quiet`` instead. - Removed ``parameter`` argument from ``eselect.set_`` state. Please migrate to ``module_parameter`` or ``action_parameter``. - The ``salt_events`` table schema has changed to include an additional field called ``master_id`` to distinguish between events flowing into a database from multiple masters. If ``event_return`` is enabled in the master config, the database schema must first be updated to add the ``master_id`` field. This alteration can be accomplished as follows: ``ALTER TABLE salt_events ADD master_id VARCHAR(255) NOT NULL;`` Known Issues ============ - In multi-master mode, a minion may become temporarily unresponsive if modules or pillars are refreshed at the same time that one or more masters are down. This can be worked around by setting 'auth_timeout' and 'auth_tries' down to shorter periods.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/releases/2015.5.0.rst
0.794982
0.764935
2015.5.0.rst
pypi
=========================== Salt 2016.3.5 Release Notes =========================== Version 2016.3.5 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`. Statistics ========== - Total Merges: **190** - Total Issue References: **112** - Total PR References: **281** - Contributors: **74** (`Ch3LL`_, `DmitryKuzmenko`_, `Firewire2002`_, `Mrten`_, `Talkless`_, `TronPaul`_, `UtahDave`_, `aaronm-cloudtek`_, `alex-zel`_, `alexandr-orlov`_, `alexbleotu`_, `attiasr`_, `basepi`_, `bdrung`_, `bshelton229`_, `cachedout`_, `calve`_, `clan`_, `clinta`_, `cro`_, `dere`_, `dereckson`_, `dhaines`_, `dincamihai`_, `do3meli`_, `dragon788`_, `edgan`_, `fedusia`_, `fj40crawler`_, `genuss`_, `gtmanfred`_, `haeac`_, `heewa`_, `hu-dabao`_, `jeanpralo`_, `jfindlay`_, `jinm`_, `kevinquinnyo`_, `kontrolld`_, `laleocen`_, `lorengordon`_, `m03`_, `mcalmer`_, `mchugh19`_, `meaksh`_, `mikejford`_, `moio`_, `multani`_, `nevins-b`_, `pass-by-value`_, `rallytime`_, `rbjorklin`_, `siccrusher`_, `silenius`_, `sjmh`_, `sjorge`_, `skizunov`_, `slinn0`_, `sofixa`_, `techhat`_, `tedski`_, `terminalmage`_, `thatch45`_, `thusoy`_, `toanju`_, `tobithiel`_, `twangboy`_, `tyhunt99`_, `vutny`_, `wanparo`_, `whiteinge`_, `xiaoanyunfei`_, `yhekma`_, `zwo-bot`_) Security Fixes ============== **CVE-2017-5192** local_batch client external authentication not respected The ``LocalClient.cmd_batch()`` method client does not accept ``external_auth`` credentials and so access to it from salt-api has been removed for now. This vulnerability allows code execution for already-authenticated users and is only in effect when running salt-api as the ``root`` user. **CVE-2017-5200** Salt-api allows arbitrary command execution on a salt-master via Salt's ssh_client Users of Salt-API and salt-ssh could execute a command on the salt master via a hole when both systems were enabled. We recommend everyone on the 2016.3 branch upgrade to a patched release as soon as possible. 
Improved Checksum Handling in :py:func:`file.managed <salt.states.file.managed>`, :py:func:`archive.extracted <salt.states.archive.extracted>` States ===================================================================================================================================================== When the ``source_hash`` argument for these states refers to a file containing checksums, Salt now looks for checksums matching the name of the source URI, as well as the file being managed. Prior releases only looked for checksums matching the filename being managed. Additionally, a new argument (``source_hash_name``) has been added, which allows the user to disambiguate ambiguous matches when more than one matching checksum is found in the ``source_hash`` file. A more detailed explanation of this functionality can be found in the :py:func:`file.managed <salt.states.file.managed>` documentation, in the section for the new ``source_hash_name`` argument. Changelog for v2016.3.4..v2016.3.5 ================================== *Generated at: 2018-05-27 05:09:33 UTC* * **PR** `#38833`_: (`Ch3LL`_) add 2016.3.5 changelog to release notes @ *2017-01-19 23:27:26 UTC* * a04ab86da1 Merge pull request `#38833`_ from Ch3LL/add_release_notes_2016.3.5 * 374dc1ab88 skip 2016.3.5 due to :doc: references * 31f324c4ff add 2016.3.5 changelog to release notes * **PR** `#38812`_: (`rallytime`_) Update pyobjects test to be a list @ *2017-01-18 21:06:01 UTC* * d14f0c64eb Merge pull request `#38812`_ from rallytime/pyobjects-test * f3e84c1ab7 Update pyobjects test to be a list * **ISSUE** `#36598`_: (`ikkaro`_) CloudClient vmware driver reusing SI bug (refs: `#38813`_) * **PR** `#38813`_: (`gtmanfred`_) catch SIGPIPE in vmware connection @ *2017-01-18 21:05:42 UTC* * 50f03f8057 Merge pull request `#38813`_ from gtmanfred/2016.3 * ce3472cec2 catch SIGPIPE in vmware connection * **PR** `#38809`_: (`twangboy`_) Fix get_hostname to handle longer computer names @ *2017-01-18 19:32:00 UTC* * 
23b8b47258 Merge pull request `#38809`_ from twangboy/fix_hostname_2016.3 * d57a51f9f9 Fix tests for get_hostname * 7ca3fd7484 Fix get_hostname to handle longer computer names * **ISSUE** `#38388`_: (`johje349`_) No INFO logs in minion log file (refs: `#38808`_) * **PR** `#38808`_: (`vutny`_) Fix `#38388`_ @ *2017-01-18 18:19:36 UTC* * 1033bbdde8 Merge pull request `#38808`_ from vutny/fix-38388 * 9bd203ffcc Fix `#38388`_ * **ISSUE** `#38604`_: (`jsandas`_) Using "batch" with saltmod errors with "ValueError: need more than 2 values to unpack" (refs: `#38668`_) * **PR** `#38668`_: (`terminalmage`_) Fix proposal for `#38604`_ @ *2017-01-18 17:53:09 UTC* * f3ae3cd5c8 Merge pull request `#38668`_ from terminalmage/issue38604 * 0ea97cdad9 Merge pull request `#10`_ from cachedout/pr-38668 * db81afc035 Munge retcode into return data for batching * a642a995dc Return the ret data from batch execution instead of raw data * **ISSUE** `#38622`_: (`mikejford`_) Incorrect saltenv argument documentation in salt.modules.state (refs: `#38789`_) * **PR** `#38789`_: (`rallytime`_) Update some saltenv refs to environment in salt.modules.state docs @ *2017-01-18 15:39:22 UTC* * c6a19a9e5a Merge pull request `#38789`_ from rallytime/fix-38622 * af41fe0c6e Update some saltenv refs to environment in salt.modules.state docs * **PR** `#38790`_: (`cachedout`_) Fix typo in pyobjects test @ *2017-01-18 15:38:57 UTC* * e0bf700020 Merge pull request `#38790`_ from cachedout/fix_pyobjects_test_typo * a66afb5f0f Fix typo in pyobjects test * **ISSUE** `#38629`_: (`Arabus`_) Conflicting documentation about default value of pillar_opts (refs: `#38792`_) * **PR** `#38792`_: (`rallytime`_) Update pillar tutorial lanuage regarding pillar_opts settings @ *2017-01-18 15:38:19 UTC* * 6e9785edea Merge pull request `#38792`_ from rallytime/fix-38629 * 1e125e2844 Update pillar tutorial lanuage regarding pillar_opts settings * **PR** `saltstack/salt#38707`_: (`alexbleotu`_) Fixed prepending of root_dir 
override to the other paths (refs: `#38796`_) * **PR** `#38796`_: (`cachedout`_) Revert "Fixed prepending of root_dir override to the other paths" @ *2017-01-17 23:18:18 UTC* * 3417adc617 Merge pull request `#38796`_ from saltstack/revert-38707-root_dir_fix-gh * cb080f3bbe Revert "Fixed prepending of root_dir override to the other paths" * **ISSUE** `#38524`_: (`rbjorklin`_) salt-api seems to ignore rest_timeout since 2016.11.0 (refs: `#38585`_, `#38527`_) * **ISSUE** `#38479`_: (`tyeapple`_) api_logfile setting takes no effect (refs: `#38585`_) * **PR** `#38585`_: (`rallytime`_) Follow up to PR `#38527`_ @ *2017-01-17 18:40:01 UTC* * **PR** `#38570`_: (`rallytime`_) [2016.11] Merge forward from 2016.3 to 2016.11 (refs: `#38585`_) * **PR** `#38560`_: (`Ch3LL`_) fix api logfile (refs: `#38585`_) * **PR** `#38527`_: (`rbjorklin`_) salt-api no longer forces the default timeout (refs: `#38585`_) * bab3479a3c Merge pull request `#38585`_ from rallytime/follow-up-38527 * 05587201b6 Pylint fix: add line at end of file * fa01367599 Keep a copy of the DEFAULT_API_OPTS and restore them after the test run * 2ad07634d9 Test clean up * fd2ee7db30 Add some simple unit tests for salt.config.api_config function * 3d2fefc83b Make sure the pidfile and log_file values are overridden by api opts * 1f6b540e46 Make sure the pidfile and log_file values are overridden by api opts * 04d307f917 salt-api no longer forces the default timeout * **PR** `#38707`_: (`alexbleotu`_) Fixed prepending of root_dir override to the other paths @ *2017-01-17 15:40:13 UTC* * 0fb6bb7b77 Merge pull request `#38707`_ from alexbleotu/root_dir_fix-gh * 0bac8c8be3 Fixed prepending of root_dir override to the other paths * **PR** `#38774`_: (`vutny`_) DOCS: add C++ compiler installation on RHEL required for bundled 0mq @ *2017-01-17 15:21:00 UTC* * 96c9dc10f7 Merge pull request `#38774`_ from vutny/dev-test-docs * 4620dc4afa DOCS: add C++ compiler installation on RHEL required for bundled 0mq * **PR** `#38749`_: 
(`vutny`_) pkg build modules throw better exception message if keyid wasn't found @ *2017-01-17 02:13:08 UTC* * aedfbb7a43 Merge pull request `#38749`_ from vutny/pkg-build-better-exception-msg * 53f2be5b21 pkg build modules throw better exception message if keyid wasn't found * **PR** `#38743`_: (`rallytime`_) [2016.3] Merge forward from 2015.8 to 2016.3 @ *2017-01-17 01:46:01 UTC* * 8466b34e82 Merge pull request `#38743`_ from rallytime/merge-2016.3 * d24776f5e9 Merge branch '2015.8' into '2016.3' * 6869621ed1 Merge pull request `#38731`_ from rallytime/merge-2015.8 * 9eb191b6ac Pylint fix * b910499dbe Various follow up fixes * e8309a6bbf Add release notes for 2015.8.13 * f881f366b7 Merge pull request `#20`_ from rallytime/2015.8.12_follow_up-batch-tests * 34282322c0 Clean up tests and docs for batch execution * c80b20b957 Merge pull request `#19`_ from whiteinge/batchclient * 3d8f3d18f6 Remove batch execution from NetapiClient and Saltnado * 97b0f64923 Lintfix * d1516664f7 Add explanation comment * 62f2c87080 Add docstring * 9b0a786aeb Explain what it is about and how to configure that * 5ea3579e10 Pick up a specified roster file from the configured locations * 3a8614c5df Disable custom rosters in API * c0e5a1171d Add roster disable flag * e9c59e9b8f Merge pull request `#38602`_ from terminalmage/fix-boto-test * 3424a108ac Fix failing unit.states.boto_vpc_test.BotoVpcRouteTableTestCase.test_present_with_routes * **ISSUE** `#38674`_: (`jackywu`_) There is no code to use parameter 'event_publisher_pub_hwm' in saltstack-2016.3 (refs: `#38723`_) * **PR** `#38723`_: (`rallytime`_) Remove "event_publisher_pub_hwm" and "salt_event_pub_hwm" from config/__init__.py @ *2017-01-15 18:36:14 UTC* * **PR** `#29294`_: (`skizunov`_) ZeroMQ no longer required when transport is TCP (refs: `#38723`_) * a642cdef79 Merge pull request `#38723`_ from rallytime/fix-38674 * 706c885f55 Remove "event_publisher_pub_hwm" and "salt_event_pub_hwm" from config/__init__.py * **PR** `#38669`_: 
(`rallytime`_) Update bootstrap script verstion to latest release @ *2017-01-15 18:03:27 UTC* * fc545af10b Merge pull request `#38669`_ from rallytime/update-bootstrap-script * 78ba76e34c Update bootstrap script verstion to latest release * **PR** `#38693`_: (`twangboy`_) Update jinja2 to 2.9.4 @ *2017-01-15 14:40:46 UTC* * 50d417f267 Merge pull request `#38693`_ from twangboy/update_jinja * e0c7e5549b Update jinja2 to 2.9.4 * **PR** `#38739`_: (`vutny`_) DOCS: correct examples of running test suite @ *2017-01-15 14:35:47 UTC* * f4233bb18d Merge pull request `#38739`_ from vutny/fix-runtests-doc * b872bb63f6 DOCS: correct examples of running test suite * **PR** `#38735`_: (`vutny`_) DOCS: add links to File State Backups page where necessary * **PR** `#38720`_: (`dereckson`_) Proofread jinja_to_execution_module tutorial * **ISSUE** `#36548`_: (`abonillasuse`_) openstack auth with nova driver (refs: `#38647`_) * **PR** `#38647`_: (`gtmanfred`_) Allow novaclient to use keystoneauth1 sessions for authentication @ *2017-01-10 17:48:26 UTC* * 7b850d472d Merge pull request `#38647`_ from gtmanfred/nova * 5be9b60851 add documentation about using keystoneauth for v3 * 7b657ca4ae add the ability to use keystone v2 and v3 * 5646ae1b34 add ability to use keystoneauth to authenticate in nova driver * **ISSUE** `#38648`_: (`ericuldall`_) No release file error from PPA on Ubuntu (refs: `#38650`_) * **ISSUE** `#38572`_: (`COLABORATI`_) ppa:saltstack/salt failure (refs: `#38650`_) * **ISSUE** `#34504`_: (`AvinashDeluxeVR`_) Installation documentation for Ubuntu server and Windows minion leads the user to use different salt versions. 
(refs: `#38650`_) * **PR** `#38650`_: (`rallytime`_) Remove the installation instructions for out-of-date community ppa @ *2017-01-10 17:47:45 UTC* * 383768d838 Merge pull request `#38650`_ from rallytime/remove-ubuntu-ppa-docs * 30429b2e44 Remove the installation instructions for out-of-date community ppa * **ISSUE** `#38087`_: (`UtahDave`_) The 'data' field in the return from a minion below a syndic is wrapped in an extra 'data' field. (refs: `#38657`_) * **PR** `#38657`_: (`DmitryKuzmenko`_) Publish the 'data' field content for Syndic evets @ *2017-01-10 16:59:33 UTC* * 7d9f56e3b5 Merge pull request `#38657`_ from DSRCorporation/bugs/38087_syndic_event_format_fix * 594c33f396 Publish the 'data' field content for Syndic evets * **PR** `#38649`_: (`Ch3LL`_) fix unit.modules.file_test @ *2017-01-10 16:44:45 UTC* * 83987511fd Merge pull request `#38649`_ from Ch3LL/test_apply_template * 47f8b68e0b fix unit.modules.file_test * **ISSUE** `#37355`_: (`Firewire2002`_) salt-ssh - ImportError: No module named backports.ssl_match_hostname (refs: `#38626`_, #`saltstack/salt`#37358`_`_, `#37358`_) * **ISSUE** `#34600`_: (`davidpsv17`_) Error trying a salt-ssh test.ping (refs: #`saltstack/salt`#37358`_`_, `#37358`_) * **ISSUE** `#27355`_: (`jerob`_) salt ssh error with debian 7 on target (refs: #`saltstack/salt`#37358`_`_, `#37358`_) * **PR** `saltstack/salt#37358`_: (`Firewire2002`_) Fix/workaround for issue `#37355`_ (refs: `#38626`_) * **PR** `#38626`_: (`cachedout`_) Revert "Fix/workaround for issue `#37355`_" @ *2017-01-06 21:28:09 UTC* * 74ddc71be3 Merge pull request `#38626`_ from saltstack/revert-37358-2016.3.3_issue37355 * e912ac99c2 Revert "Fix/workaround for issue `#37355`_" * **ISSUE** `#37355`_: (`Firewire2002`_) salt-ssh - ImportError: No module named backports.ssl_match_hostname (refs: `#38626`_, #`saltstack/salt`#37358`_`_, `#37358`_) * **ISSUE** `#34600`_: (`davidpsv17`_) Error trying a salt-ssh test.ping (refs: #`saltstack/salt`#37358`_`_, `#37358`_) * 
**ISSUE** `#27355`_: (`jerob`_) salt ssh error with debian 7 on target (refs: #`saltstack/salt`#37358`_`_, `#37358`_) * **PR** `#37358`_: (`Firewire2002`_) Fix/workaround for issue `#37355`_ @ *2017-01-06 18:58:47 UTC* * 5e58b32934 Merge pull request `#37358`_ from Firewire2002/2016.3.3_issue37355 * 910da18bfd fixed typo * 4fbc5ddd06 fixed wrong renamed variable and spaces * 92366e646c issue `#37355`_ * 7dc87ab7b8 issue `#37355`_ * 2878180405 issue `#37355`_ * **PR** `#35390`_: (`alexandr-orlov`_) Returns back missed proper grains dictionary for file module @ *2017-01-06 18:02:13 UTC* * 6c2fe615aa Merge pull request `#35390`_ from alexandr-orlov/2016.3 * cd5ae17e8d fxd missed proper grains dictionary * **ISSUE** `#38558`_: (`multani`_) pillar.get("...", default=var, merge=true) updates default value (refs: `#38579`_) * **PR** `#38618`_: (`rallytime`_) Back-port `#38579`_ to 2016.3 @ *2017-01-06 17:37:56 UTC* * **PR** `#38579`_: (`zwo-bot`_) Fix `#38558`_ - pillar.get with default= ...,merge=true influence subsequent calls of pillar.get (refs: `#38618`_) * 2579cfa42d Merge pull request `#38618`_ from rallytime/bp-38579 * 2052ecee2c Add copy import * 2c8845aaa0 add test for pillar.get() + default value * c2f98d2f04 ticket 38558: add unit test, deepcopy() only if necessary * 30ae0a1958 added deepcopy of default if merge=True * **PR** `#38601`_: (`terminalmage`_) pillar.get: Raise exception when merge=True and default is not a dict @ *2017-01-05 23:15:51 UTC* * da676cebd6 Merge pull request `#38601`_ from terminalmage/pillar-get * 8613d7254d pillar.get: Raise exception when merge=True and default is not a dict * **PR** `#38600`_: (`terminalmage`_) Avoid errors when sudo_user is set (2016.3 branch) @ *2017-01-05 20:57:09 UTC* * **PR** `#38598`_: (`terminalmage`_) Avoid errors when sudo_user is set (refs: `#38600`_) * 224fc7712a Merge pull request `#38600`_ from terminalmage/issue38459-2016.3 * 8a45b13e76 Avoid errors when sudo_user is set * **PR** `#38589`_: 
(`tobithiel`_) State Gem: fix incorrect warning about missing rvm/rbenv @ *2017-01-05 20:12:15 UTC* * a376970f88 Merge pull request `#38589`_ from tobithiel/fix_rvm_rbenv_warning * 9ec470b4a5 State Gem: fix incorrect warning about missing rvm/rbenv * **PR** `#38567`_: (`pass-by-value`_) Create queue if one doesn't exist @ *2017-01-05 18:46:11 UTC* * 02e6a78254 Merge pull request `#38567`_ from pass-by-value/pgjsonb_queue_changes_2016.3 * 67879ebe65 Create queue if one doesn't exist * **ISSUE** `#37498`_: (`githubcdr`_) service.restart salt-minion fails on Ubuntu 14.04.5 LTS (refs: `#37748`_, `#38587`_) * **PR** `#38587`_: (`rallytime`_) Change daemontools __virtualname__ from service to daemontools @ *2017-01-05 18:06:01 UTC* * 0889cbdb31 Merge pull request `#38587`_ from rallytime/fix-37498 * 2a5880966f Change daemontools __virtualname__ from service to daemontools * **PR** `#38562`_: (`rallytime`_) Update arch installation docs with correct package name @ *2017-01-04 20:04:28 UTC* * 7b74436d13 Merge pull request `#38562`_ from rallytime/arch-install-docs * 8b1897ace9 Update arch installation docs with correct package name * **PR** `#38560`_: (`Ch3LL`_) fix api logfile (refs: `#38585`_) @ *2017-01-04 19:03:17 UTC* * 01860702cb Merge pull request `#38560`_ from Ch3LL/fix_api_log * 1b45e9670b fix api logfile * **PR** `#38531`_: (`rallytime`_) Back-port `#33601`_ to 2016.3 @ *2017-01-04 16:56:53 UTC* * **PR** `#33601`_: (`mchugh19`_) Fix slack engine to run on python2.6 (refs: `#38531`_) * 0056620a53 Merge pull request `#38531`_ from rallytime/bp-33601 * c36cb39825 remove the unnecessary double trigger * 38414493bf fix spacing lint error * 8c1defc710 Remove uncessary type from alias commands. Deduplicate alias handling to autodetect function selection. Add error reporting to slack connectivty problems. 
Cleanup slack's unicode conversion * c2f23bc45e Fix slack engine to run on python2.6 * **ISSUE** `#38187`_: (`curiositycasualty`_) username/password saved as cleartext when using URIs with user:pass@ format (refs: `#38541`_) * **PR** `#38541`_: (`techhat`_) Strip user:pass from cached URLs @ *2017-01-04 15:39:57 UTC* * 50242c7f17 Merge pull request `#38541`_ from techhat/issue38187 * eae3a435dd Strip user:pass from cached URLs * **ISSUE** `#30454`_: (`favoretti`_) Using yaml serializer inside jinja template results in unicode being prepended by '!!python/unicode' (refs: `#30481`_, `#38554`_) * **PR** `#38554`_: (`multani`_) Fix YAML deserialization of unicode @ *2017-01-04 15:31:16 UTC* * **PR** `#30481`_: (`basepi`_) Add yaml_safe jinja filter (refs: `#38554`_) * 325dc56e59 Merge pull request `#38554`_ from multani/fix/30454 * 2e7f743371 yaml: support unicode serialization/deserialization * df76113c5c jinja: test the "yaml" filter with ordered dicts * f7712d417f Revert "Add yaml_safe filter" * **PR** `#38536`_: (`UtahDave`_) add note about pyVmomi locale workaround * **ISSUE** `#38353`_: (`Ch3LL`_) salt-cloud gce specifying (refs: `#38542`_) * **PR** `#38542`_: (`Ch3LL`_) fix gce image bug * **ISSUE** `#38449`_: (`swalladge`_) Parsing issues in `list_tab` (salt/modules/cron.py) (refs: `#38487`_) * **PR** `#38487`_: (`gtmanfred`_) Fix crontab issues with spaces @ *2017-01-01 20:33:29 UTC* * ec60f9c721 Merge pull request `#38487`_ from gtmanfred/2016.3 * 048b9f6b9d add test * c480c11528 allow spaces in cron env * c529ec8c34 allow crons to have multiple spaces * **ISSUE** `#37684`_: (`thusoy`_) State execution duration is timezone-dependent (refs: `#38491`_) * **PR** `#38491`_: (`gtmanfred`_) Use UTC for timing in case timezone changes @ *2017-01-01 20:30:57 UTC* * c5ba11b5e0 Merge pull request `#38491`_ from gtmanfred/timing * 79368c7528 Use UTC for timing in case timezone changes * **ISSUE** `#38472`_: (`jinm`_) file.managed Unable to manage file: 'hash_type' 
(2016.3.4) (refs: `#38503`_) * **PR** `#38503`_: (`jinm`_) Hash type fallback for file management @ *2017-01-01 17:36:51 UTC* * 86f0aa0bb3 Merge pull request `#38503`_ from jinm/issue_38472_jinm * 0cd9df299f Hash type fallback for file management * **PR** `#38457`_: (`bshelton229`_) Stops git.latest checking for local changes in a bare repo @ *2016-12-30 14:28:47 UTC* * ed2ba4bd1b Merge pull request `#38457`_ from bshelton229/git-latest-head-bug * 558e7a771a Stops git.latest checking for local changes in a bare repo * **PR** `#38385`_: (`dragon788`_) Use unambigous long names with double dashes @ *2016-12-29 17:10:48 UTC* * 36e21b22cb Merge pull request `#38385`_ from dragon788/2016.3-double-dash * 86c4b56f47 Newline for lint compat * 9d9b686057 Address review comments, consistency of quotes * df9bd5e7f9 Use unambigous long names with double dashes * **ISSUE** `#38209`_: (`limited`_) Accepting a minion causes tornado to exit (refs: `#38474`_) * **PR** `#38474`_: (`cachedout`_) Allow an existing ioloop to be passed to salt-key @ *2016-12-29 16:28:51 UTC* * 59f2560d88 Merge pull request `#38474`_ from cachedout/key_loop * de504538e1 Allow an existing ioloop to be passed to salt-key * **ISSUE** `#38438`_: (`jf`_) file.line with mode=delete breaks on empty file (refs: `#38467`_) * **PR** `#38467`_: (`gtmanfred`_) file.line fail with mode=delete @ *2016-12-28 20:00:33 UTC* * 3d0c752acd Merge pull request `#38467`_ from gtmanfred/2016.3 * 7b7c6b3878 file.line fail with mode=delete * **PR** `#38434`_: (`slinn0`_) Make sysctl.persist fail when failing to set a value into the running kernel @ *2016-12-27 15:37:53 UTC* * 940025d5c4 Merge pull request `#38434`_ from slinn0/issue_38433_fixes * 22af87a3fc Fixes for https://github.com/saltstack/salt/issues/38433 * **PR** `#38421`_: (`rallytime`_) Update deprecation notices to the correct version * **PR** `#38420`_: (`rallytime`_) Removed various deprecation notices from salt/modules/* files (refs: `#38421`_) * **ISSUE** 
`#38282`_: (`sash-kan`_) file.managed fails when file (which contains utf-characters in the name) exists (refs: `#38415`_) * **PR** `#38415`_: (`terminalmage`_) file.managed: Fix failure when filename contains unicode chars * **PR** `#38419`_: (`Ch3LL`_) fix scsci docs example @ *2016-12-22 18:57:51 UTC* * 2cdb59d055 Merge pull request `#38419`_ from Ch3LL/fix_doc_scsi * 234043b8bb fix scsci docs example * **PR** `#38407`_: (`terminalmage`_) Improve pillar documentation * **ISSUE** `#38372`_: (`fanirama`_) Issue with cron.file. Source: salt://path/to/crontab_file not found (refs: `#38398`_) * **PR** `#38398`_: (`terminalmage`_) Fix call to file.get_managed in cron.file state @ *2016-12-22 16:46:14 UTC* * 423b1fddff Merge pull request `#38398`_ from terminalmage/issue38372 * c80dbaa914 Fix call to file.get_managed in cron.file state * **PR** `#38382`_: (`heewa`_) Fix http.query when result has no text * **PR** `#38390`_: (`meaksh`_) Add "try-restart" to fix autorestarting on SUSE systems @ *2016-12-21 16:06:24 UTC* * b74b5c7d38 Merge pull request `#38390`_ from meaksh/2016.3-fix-try-restart-for-autorestarting-on-SUSE-systems * de6ec05ec0 add try-restart to fix autorestarting on SUSE systems * **PR** `#38221`_: (`UtahDave`_) Fix default returner @ *2016-12-20 20:34:36 UTC* * 2c3a39760a Merge pull request `#38221`_ from UtahDave/fix_default_returner * 385640765b remove a blank line to satisfy linter * 9c248aa14c validate return opt, remove default. 
* 8bb37f9fe7 specify allowed types and default for "returner" * 11863a4bfe add examples of default minion returners * e7c6012655 add support for default returners using `return` * **PR** `#38288`_: (`terminalmage`_) archive.extracted: don't try to cache local sources (2016.3 branch) @ *2016-12-18 13:07:11 UTC* * 09d9cff992 Merge pull request `#38288`_ from terminalmage/archive-extracted-local-source-2016.3 * 845e3d0e75 Update tests to reflect change in cache behavior * 5a08d7c70a archive.extracted: don't try to cache local sources (2016.3 branch) * **PR** `#38312`_: (`cro`_) Backport feature allowing proxy config to live in pillar OR /etc/salt/proxy @ *2016-12-18 12:39:01 UTC* * bf37667f8a Merge pull request `#38312`_ from cro/proxy_config_in_cfg * 2006c4000e Typo * 689d95b10f Backport feature allowing proxy config to live in pillar OR /etc/salt/proxy. * **ISSUE** `#12788`_: (`whiteinge`_) Comb through docs to replace :doc: roles with :ref: (refs: `#38320`_) * **PR** `#38320`_: (`rallytime`_) Cleanup doc internal markup references @ *2016-12-18 12:31:28 UTC* * c83db5a785 Merge pull request `#38320`_ from rallytime/cleanup-doc-refs * 62978cb7a0 Don't check the doc/conf.py file for doc markup refs * 770e732d76 Add a unit test to search for new doc markup refs * 5c42a361a0 Remove ":doc:" references from all doc/topics/installation/* files * 23bce1c929 Remove ":doc:" references from all doc/topics/releases/* files * 4aafa41d22 Remove ":doc:" references from a bunch of doc/* files * 02bfe7912c Remove more ":doc:" references from doc/* files * 6e32267d0c Remove ":doc:" references in salt/* files * **PR** `#38281`_: (`mikejford`_) Add nick to args for create_multi * **ISSUE** `#38290`_: (`dragon788`_) Need to use machine automation friendly output (refs: `#38313`_) * **PR** `#38313`_: (`dragon788`_) 2016.3 chocolatey fix @ *2016-12-16 17:20:39 UTC* * 235682b1e6 Merge pull request `#38313`_ from dragon788/2016.3-chocolatey-fix * 1f5fc17551 Use machine readable output for 
list * cdbd2fbe3c Added limit-output to eliminate false packages * **ISSUE** `#38174`_: (`NickDubelman`_) [syndic] Why can't a syndic node signal when all of its minions have returned? (refs: `#38279`_) * **ISSUE** `#32400`_: (`rallytime`_) Document Default Config Values (refs: `#38279`_) * **PR** `#38279`_: (`rallytime`_) Add docs for syndic_wait setting @ *2016-12-15 18:30:31 UTC* * 9e78ddc80e Merge pull request `#38279`_ from rallytime/fix-38174 * 4a62d01577 Add docs for syndic_wait setting * **PR** `#38248`_: (`meaksh`_) Successfully exit of salt-api child processes when SIGTERM is received @ *2016-12-15 09:16:27 UTC* * fc9e1dff35 Merge pull request `#38248`_ from meaksh/salt-api-successfully-close-child-processes * ee6eae9855 Successfully exit of salt-api child processes when SIGTERM. * **PR** `#38254`_: (`terminalmage`_) Also check if pillarenv is in opts @ *2016-12-15 09:10:24 UTC* * 3c718ed35e Merge pull request `#38254`_ from terminalmage/check-pillarenv * fa9ad311c6 Also check if pillarenv is in opts * **PR** `#38256`_: (`rallytime`_) [2016.3] Bump latest release version to 2016.11.1 * **PR** `#38198`_: (`vutny`_) Add missing requirements for running unit tests: libcloud and boto3 @ *2016-12-13 14:12:20 UTC* * 004e46afe7 Merge pull request `#38198`_ from vutny/unit-tests-require-libcloud-boto3 * a6098bac1a Remove note about SaltTesting installation, now it is in the requirements * 004bff113e Add missing requirements for running unit tests: libcloud and boto3 * **PR** `#38213`_: (`rallytime`_) Skip test_cert_info tls unit test on pyOpenSSL upstream errors @ *2016-12-13 12:05:01 UTC* * 9d497bc74c Merge pull request `#38213`_ from rallytime/skip-tls-test * bdb807fc7c Skip test_cert_info tls unit test on pyOpenSSL upstream errors * **PR** `#38224`_: (`whiteinge`_) Allow CORS OPTIONS requests to be unauthenticated @ *2016-12-13 12:02:30 UTC* * 203109dd17 Merge pull request `#38224`_ from whiteinge/cors-options-unauthed * de4d3227ab Allow CORS OPTIONS requests 
to be unauthenticated * **PR** `#38223`_: (`whiteinge`_) Add root_dir to salt-api file paths @ *2016-12-13 07:44:19 UTC* * **PR** `#37272`_: (`vutny`_) Get default logging level and log file from default opts dict (refs: `#38223`_) * 721a5feccd Merge pull request `#38223`_ from whiteinge/salt-api-root_dirs * bfbf390c0e Add root_dir to salt-api file paths * **ISSUE** `#38162`_: (`747project`_) git_pillar does not detect changes to remote repository when told to update (refs: `#38191`_) * **PR** `#38191`_: (`terminalmage`_) Clarify the fact that git_pillar.update does not fast-forward @ *2016-12-12 09:45:48 UTC* * 70f7d22ad6 Merge pull request `#38191`_ from terminalmage/issue38162 * 1ae543a98a Clarify the fact that git_pillar.update does not fast-forward * **PR** `#38194`_: (`vutny`_) Document the requirements for running ZeroMQ-based integration tests @ *2016-12-12 09:42:11 UTC* * 28171cbfc5 Merge pull request `#38194`_ from vutny/integration-test-requirements-doc * e9f419ff64 Document the requirements for running ZeroMQ-based integration tests * **PR** `#38185`_: (`rallytime`_) Back-port `#38181`_ to 2016.3 @ *2016-12-09 22:27:44 UTC* * **PR** `#38181`_: (`rallytime`_) Reset socket default timeout to None (fixes daemons_tests failures) (refs: `#38185`_) * a4ef037ab1 Merge pull request `#38185`_ from rallytime/bp-38181 * 609f814454 Reset socket default timeout to None (fixes daemons_tests failures) * **PR** `#38163`_: (`Ch3LL`_) enabled ec2 cloud tests @ *2016-12-09 18:01:57 UTC* * 65b2ad7b14 Merge pull request `#38163`_ from Ch3LL/enabled_ec2_cloud * be74c45463 enabled ec2 cloud tests * **PR** `#38177`_: (`vutny`_) Correct `cp.get_file_str` docstring and add integration tests @ *2016-12-09 16:55:35 UTC* * b63f74e034 Merge pull request `#38177`_ from vutny/fix-cp-get-file-str * a449980672 Correct `cp.get_file_str` docstring and add integration tests * **PR** `#38153`_: (`vutny`_) Master config includes may contain errors and be safely skipped @ *2016-12-08 17:43:34 
UTC* * 7596313be0 Merge pull request `#38153`_ from vutny/master-includes-error-tolerance * cd0154ee93 Master config includes may contain errors and be safely skipped * **PR** `#38134`_: (`rallytime`_) Skip daemon unit tests when running on Python 2.6 * **ISSUE** `#38091`_: (`tjyang`_) [WARNING ] salt.loaded.int.module.zenoss.__virtual__() is wrongly returning `None`. (refs: `#38102`_) * **PR** `#38102`_: (`rallytime`_) Add False + msg tuple return if requests is missing for zenoss module @ *2016-12-07 13:24:37 UTC* * d3d98fd4eb Merge pull request `#38102`_ from rallytime/fix-38091 * 4f79d5a0d1 Add False + msg tuple return if requests is missing for zenoss module * **ISSUE** `#36707`_: (`do3meli`_) slow FreeBSD sysctl module with test=true (refs: `#36794`_) * **PR** `#38104`_: (`rallytime`_) Back-port `#36794`_ to 2016.3 @ *2016-12-07 13:23:48 UTC* * **PR** `#36794`_: (`do3meli`_) FreeBSD sysctl module now handels config_file parameter in show method (refs: `#38104`_) * 8c8cbc2734 Merge pull request `#38104`_ from rallytime/bp-36794 * c906c8a0d5 Pylint fixes * da3ebf83e6 FreeBSD sysctl module now handels config_file parameter in show method * **ISSUE** `#35342`_: (`morganwillcock`_) win_pkg: refresh_db doesn't remove cached items which have been renamed or removed (refs: `#38083`_) * **PR** `#38083`_: (`twangboy`_) Only delete .sls files from winrepo-ng [DO NOT MERGE FORWARD] @ *2016-12-06 14:13:35 UTC* * fbc87769b9 Merge pull request `#38083`_ from twangboy/fix_refresh_db * 978af6d83c Remove only .sls files from the cached winrepo-ng * **PR** `#38059`_: (`rallytime`_) Call exec_test for the Syndic daemon in tests.unit.daemons_test.py @ *2016-12-04 04:18:41 UTC* * **PR** `#38057`_: (`rallytime`_) [2016.11] Merge forward from 2016.3 to 2016.11 (refs: `#38059`_) * **PR** `#38034`_: (`cachedout`_) Modify daemons test to use multiprocessing (refs: `#38059`_) * 9dcfdeef6b Merge pull request `#38059`_ from rallytime/daemons-test-fix * eb372b27d8 Add missing "not" 
statement: The last syndic test should assertFalse() * 4e10f8e018 Call exec_test for the Syndic daemon in tests.unit.daemons_test.py * **ISSUE** `#37939`_: (`Talkless`_) file.comment always report changes in test=True mode (refs: `#38039`_) * **PR** `#38039`_: (`rallytime`_) Check to see if a line is already commented before moving on @ *2016-12-02 20:08:35 UTC* * 9cd42b9b3f Merge pull request `#38039`_ from rallytime/fix-37939 * 1da7aacfbe Update unit tests to account for additional file.search call * 8a685b1820 Check to see if a line is already commented before moving on * f2c045520d Write an integration test demonstrating the issue * **ISSUE** `#38037`_: (`dmurphy18`_) pkg.latest and yumpkg.latest_version return incorrect package versions 2016.3 and 2016.11 (refs: `#38045`_) * **PR** `#38045`_: (`terminalmage`_) yumpkg.py: don't include non-upgrade versions found by "yum list available" @ *2016-12-02 20:07:38 UTC* * a34a763984 Merge pull request `#38045`_ from terminalmage/issue38037 * 65289503d9 Simplify logic for matching desired pkg arch with actual pkg arch * 3babbcda94 yumpkg.py: don't include non-upgrade versions found by "yum list available" * **PR** `#38034`_: (`cachedout`_) Modify daemons test to use multiprocessing (refs: `#38059`_) * **PR** `#37995`_: (`rallytime`_) [2016.3] Merge forward from 2015.8 to 2016.3 @ *2016-11-30 20:12:55 UTC* * 6942d5d95b Merge pull request `#37995`_ from rallytime/merge-2016.3 * b44e17921c Merge branch '2015.8' into '2016.3' * 7a7e36728f Merge pull request `#37978`_ from terminalmage/ext_pillar_first-docs * 61ed9a8657 Add clarifying language to ext_pillar_first docs * **PR** `#38002`_: (`laleocen`_) fix broken yaml code block * **ISSUE** `#35088`_: (`Modulus`_) salt/cloud/ec2.py encoding problems. 
(refs: `#37912`_) * **PR** `#37912`_: (`attiasr`_) fix encoding problem aws responses @ *2016-11-30 18:10:30 UTC* * 3dd45fbedf Merge pull request `#37912`_ from attiasr/fix_aws_response_encoding * ba4ec4e7f1 use Requests result encoding to encode the text * abe4eb3b98 fix encoding problem aws responses * **PR** `#37950`_: (`vutny`_) Set default Salt Master address for a Syndic (like for a Minion) @ *2016-11-30 18:09:04 UTC* * 69a74a4d2d Merge pull request `#37950`_ from vutny/fix-starting-up-syndic * 7d9bc9abce syndic_master: correct default value, documentation and example config * 92a7c7ed1b Set default Salt Master address for a Syndic (like for a Minion) * **PR** `#37964`_: (`terminalmage`_) Add clarification on expr_form usage and future deprecation * **ISSUE** `#37867`_: (`tobiasBora`_) Bug into lsb_release that crash salt (refs: `#37962`_) * **PR** `#37962`_: (`cachedout`_) Catch possible exception from lsb_release * **ISSUE** `#37945`_: (`gstachowiak`_) Missing exception handling in salt.master.Maintenance. Process never completes. 
(refs: `#37961`_) * **PR** `#37961`_: (`cachedout`_) Handle empty tokens safely * **PR** `#37272`_: (`vutny`_) Get default logging level and log file from default opts dict (refs: `#38223`_) @ *2016-11-28 23:04:20 UTC* * ea46639ce7 Merge pull request `#37272`_ from vutny/fix-getting-default-logging-opts * e5ce52388a Fix description in the Salt Syndic usage info * 518a3dd7ee Add unit tests for Salt parsers processing logging options * 83d6a44254 Add `ssh_log_file` option to master config and documentation * c8a0915460 Fix configuration example and documentation for `syndic_log_file` option * e64dd3ed6b Correct default attributes for various parser classes * 82a2e216b3 Fix default usage string for Salt command line programs * 45dffa292f Fix readding and updating logfile and pidfile config options for Salt API * f47253c21b Fix reading and applying Salt Cloud default configuration * fad5bec936 Work with a copy of default opts dictionaries * b7c24811e5 Fix `log_level_logfile` config value type * 1bd76a1d96 Fix setting temporary log level if CLI option omitted * 121848cc77 Fix obtaining `log_granular_levels` config setting * 44cf07fec2 Make CLI options take precedence for setting up logfile_logger * 61afaf1792 Fix setting option attributes when processing `log_level` and `log_file` * 3c60e2388e Fix processing of `log_level_logfile` config setting * 55a0af5bbd Use attribute functions for getting/setting options and config values * c25f2d091e Fix getting Salt API default logfile option * f2422373c1 Remove processing of unused and undocumented `cli_*_log_*` config options * 2065e8311c Get default logging level and file from default opts dict * **PR** `#37925`_: (`kontrolld`_) Fix missing ipv6 options centos network @ *2016-11-28 22:38:43 UTC* * f2f957da6c Merge pull request `#37925`_ from kontrolld/add-ipv6-centos-network * ac2b477412 Adding IPv6 functionality for CentOS /etc/sysconfig/network * **ISSUE** `#37059`_: (`basepi`_) Beacon fileserver operations cause scheduled 
jobs with fileserver operations to hang (refs: `#37899`_) * **PR** `#37899`_: (`DmitryKuzmenko`_) Clear functions context in schedule tasks for ZeroMQ. @ *2016-11-28 22:23:45 UTC* * c07ad11279 Merge pull request `#37899`_ from DSRCorporation/bugs/37059_schedule_task_hang * 9497748546 Clear functions context in schedule tasks for ZeroMQ. * **ISSUE** `#37737`_: (`b-harper`_) python client api CloudClient multiple calls needed (refs: `#37928`_) * **PR** `#37928`_: (`techhat`_) Don't modify self.opts directly @ *2016-11-28 21:07:40 UTC* * a55519db40 Merge pull request `#37928`_ from techhat/issue37737 * a09a60e89b Don't modify self.opts directly * **PR** `#37929`_: (`gtmanfred`_) add list_nodes_min to nova driver @ *2016-11-28 21:05:40 UTC* * 9d17f1ce90 Merge pull request `#37929`_ from gtmanfred/2016.3 * c7d2c73503 add list_nodes_min to nova driver * **PR** `#37926`_: (`kontrolld`_) Fixes no IPv6 functionality in /etc/sysconfig/network @ *2016-11-28 20:40:00 UTC* * 3bb743b59f Merge pull request `#37926`_ from kontrolld/fix-ipv6-centos-network * 3ed42e5b44 updated * 3b3bc4f239 Fixes no IPv6 functionality in /etc/sysconfig/network * **PR** `#37921`_: (`rallytime`_) [2016.3] Merge forward from 2015.8 to 2016.3 @ *2016-11-28 19:54:40 UTC* * 271170a9f3 Merge pull request `#37921`_ from rallytime/merge-2016.3 * 523a67c422 Merge branch '2015.8' into '2016.3' * 4cdc6cf5ec Update earlier release channels' docs with Carbon release notes (`#37914`_) * d31491a7fe [2015.8] Update version numbers in doc config for 2016.11.0 release (`#37918`_) * **PR** `#37924`_: (`cachedout`_) Update test for new gem ver @ *2016-11-28 18:17:53 UTC* * 6cd6429ac0 Merge pull request `#37924`_ from cachedout/fix_gem_states * 894cca3427 Update test for new gem ver * **PR** `#37916`_: (`rallytime`_) [2016.3] Update version numbers in doc config for 2016.11.0 release @ *2016-11-28 17:09:08 UTC* * c35ba1f390 Merge pull request `#37916`_ from rallytime/doc-update-2016.3 * bd40592289 [2016.3] Update version 
numbers in doc config for 2016.11.0 release * **ISSUE** `#37287`_: (`aaronm-cloudtek`_) salt.states.ddns.present: 'NS' record type always returns as changed (refs: `#37785`_) * **PR** `#37785`_: (`aaronm-cloudtek`_) respect trailing dot in ddns name parameter @ *2016-11-28 14:02:10 UTC* * e13a2488c8 Merge pull request `#37785`_ from Cloudtek/ddns-respect-trailing-dot * 262e3b3697 respect trailing dot in ddns name parameter * **ISSUE** `#37870`_: (`fj40crawler`_) salt.states.augeas.change returns None when test=True (refs: `#37895`_) * **PR** `#37895`_: (`fj40crawler`_) Change return value for salt/states/augeas.py to be True instead of N… @ *2016-11-28 13:49:27 UTC* * c03b389422 Merge pull request `#37895`_ from fj40crawler/fix-augeas-return-for-test * ddc238df36 Fixed augeas_test.py to match True v.s. None for test_change_in_test_mode * ef75c459c0 Merge branch '2016.3' of github.com:saltstack/salt into fix-augeas-return-for-test * b0fe0cd256 Change return value for salt/states/augeas.py to be True instead of None for cases where salt is run with test=True. 
Fixes `#37870`_ * **PR** `#37907`_: (`Talkless`_) Fix server trust in test run of svn.latest @ *2016-11-28 13:47:39 UTC* * fdbc31e8d8 Merge pull request `#37907`_ from Talkless/patch-2 * 072a319490 Fix server trust in test run of svn.latest * **PR** `#37896`_: (`toanju`_) rh networking: add missing values @ *2016-11-27 10:30:35 UTC* * f39fdf443f Merge pull request `#37896`_ from toanju/2016.3 * c95304188e rh networking: add missing values * **PR** `#37886`_: (`bdrung`_) Fix various spelling mistakes @ *2016-11-25 02:59:36 UTC* * ea935c5a91 Merge pull request `#37886`_ from bdrung/fix-typos * 9a51ba5c5b Fix various spelling mistakes * **ISSUE** `#37732`_: (`dhaines`_) list_semod() (from modules/selinux.py) incompatible with policycoreutils-2.5 (RHEL 7.3) (refs: `#37736`_) * **PR** `#37736`_: (`dhaines`_) handle semodule version >=2.4 (`#37732`_) and fix typo @ *2016-11-24 01:44:20 UTC* * 371b0a86d9 Merge pull request `#37736`_ from dhaines/issue-37732 * 7ef590a505 Update selinux.py * 516a67e6a3 fix indexing error * 4e49c1e991 fix typo * b16f2d8400 handle semodule version >=2.4 (`#37732`_) and fix typo * **PR** `#37797`_: (`clan`_) check count of columns after split @ *2016-11-24 01:28:59 UTC* * 87aeb66fbf Merge pull request `#37797`_ from clan/extfs * acf0f960ef check count of columns after split * **PR** `#37762`_: (`twangboy`_) Add pre_versions to chocolatey.installed @ *2016-11-24 01:27:29 UTC* * f7c7109152 Merge pull request `#37762`_ from twangboy/fix_chocolatey_state * 9696b6dfa5 Use keyword args instead of relying on ordering * 398eaa074d Add pre_versions to the available arguments * **PR** `#37866`_: (`meaksh`_) Backport `#37149`_ `#36938`_ and `#36784`_ to 2016.3 @ *2016-11-23 21:54:17 UTC* * **PR** `#37857`_: (`meaksh`_) Backport `#37149`_ and `#36938`_ to 2015.8 (refs: `#37866`_) * **PR** `#37856`_: (`meaksh`_) Backport `#36784`_ to 2015.8 (refs: `#37866`_) * **PR** `#37149`_: (`dincamihai`_) Fix pkg.latest_version when latest already installed (refs: 
`#37857`_, `#37866`_) * **PR** `#36938`_: (`wanparo`_) acl.delfacl: fix position of -X option to setfacl (refs: `#37857`_, `#37866`_) * **PR** `#36784`_: (`moio`_) OS grains for SLES Expanded Support (refs: `#37856`_, `#37866`_) * 56baa92d55 Merge pull request `#37866`_ from meaksh/2016.3-bp-37149-36938-36784 * 9d8d578109 Fix pkg.latest_version when latest already installed * ffca0d491c - acl.delfacl: fix position of -X option to setfacl * 3dfed6b841 Adjust linux_acl unit test argument ordering * f185ecdde1 core.py: quote style fixed * 8404d13424 Setting up OS grains for SLES Expanded Support (SUSE's Red Hat compatible platform) * **ISSUE** `#32829`_: (`tyhunt99`_) Dockerng appears to not be using docker registries pillar data (refs: `#36893`_) * **PR** `#37863`_: (`rallytime`_) Back-port `#36893`_ to 2016.3 @ *2016-11-23 17:09:09 UTC* * **PR** `#36893`_: (`tyhunt99`_) add option to force a reauth for a docker registry (refs: `#37863`_) * d0cc7f0d56 Merge pull request `#37863`_ from rallytime/bp-36893 * 4c70534991 Add versionadded to reauth option in dockerng module * 5ca2c388c2 added documentation for the new reuth option in docker registry configuration * 5b0c11ab47 add option to force a reauth for a docker registry * **PR** `#37847`_: (`laleocen`_) add multiline encryption documentation to nacl * **ISSUE** `#37787`_: (`elyulka`_) user.present state fails to change loginclass on FreeBSD (refs: `#37827`_) * **PR** `#37827`_: (`silenius`_) add missing chloginclass * **PR** `#37826`_: (`rallytime`_) Update branch refs to more relevant branch * **PR** `#37822`_: (`laleocen`_) add documentation for multiline encryption using nacl (refs: `#37826`_) * **ISSUE** `#19269`_: (`markuskramerIgitt`_) Undocumented feature `names:` of `file.directory` (refs: `#37823`_) * **PR** `#37823`_: (`rallytime`_) Add "names" option to file state docs: point users to highstate doc examples * **ISSUE** `#15697`_: (`arthurlogilab`_) keystone.user_present should not re-set the password when 
user exists (refs: `#37821`_) * **PR** `#37821`_: (`rallytime`_) Clarify keystone.user_present password state docs with default behavior * **ISSUE** `#5999`_: (`pille`_) libvirt.keys does not work (refs: `#37820`_) * **PR** `#37820`_: (`rallytime`_) Add some dependency documentation to libvirt docs * **PR** `#37772`_: (`bdrung`_) Support initializing OpenSSL 1.1 @ *2016-11-21 20:28:51 UTC* * 485270f74e Merge pull request `#37772`_ from bdrung/openssl1.1 * 819c9658ed Support initializing OpenSSL 1.1 * **ISSUE** `#37383`_: (`edwardsdanielj`_) Orchestration arguments (kwarg) not being interperted / How I learned to stop worrying about documentation and love experimenting (refs: `#37817`_) * **PR** `#37817`_: (`rallytime`_) Update orchestrate runner file.copy doc example * **ISSUE** `#37653`_: (`gravyboat`_) Salt.cron docs don't wrap @hourly and @daily correctly in quotes for the examples (refs: `#37816`_) * **ISSUE** `#31953`_: (`sjorge`_) Documentation for salt.states.cron is incorrect (refs: `#32157`_) * **PR** `#37816`_: (`rallytime`_) Back-port `#32157`_ to 2016.3 @ *2016-11-21 20:22:27 UTC* * **PR** `#32157`_: (`cachedout`_) Add quotes to cron doc (refs: `#37816`_) * c5d3d8b66a Merge pull request `#37816`_ from rallytime/bp-32157 * d9c297119e Add quotes to cron doc * **PR** `#37812`_: (`rallytime`_) Back-port `#37790`_ to 2016.3 @ *2016-11-21 18:46:40 UTC* * **PR** `#37790`_: (`sofixa`_) Update cloud/proxmox.rst with more options and LXC (refs: `#37812`_) * 97e6b6aabe Merge pull request `#37812`_ from rallytime/bp-37790 * ca3b6e7874 Update proxmox.rst with more options and LXC * **ISSUE** `#37751`_: (`freach`_) Documentation salt.states.dockerng.running: "privileged" property undocumented (refs: `#37789`_) * **PR** `#37811`_: (`rallytime`_) Back-port `#37789`_ to 2016.3 @ *2016-11-21 18:46:21 UTC* * **PR** `#37789`_: (`fedusia`_) issue: 37751 (refs: `#37811`_) * 27703c54bc Merge pull request `#37811`_ from rallytime/bp-37789 * ba3fef48e1 fix comment * a021f76a9b 
issue: 37751 Add documentation for option privileged * **PR** `#37810`_: (`rallytime`_) Back-port `#37775`_ to 2016.3 @ *2016-11-21 18:45:53 UTC* * **PR** `#37775`_: (`calve`_) Document `python` argument in `salt.states.virtualenv_mod` (refs: `#37810`_) * adac9d7c0c Merge pull request `#37810`_ from rallytime/bp-37775 * 2bed91437b Document `python` argument in `salt.states.virtualenv_mod` * **ISSUE** `#37742`_: (`blaketmiller`_) Cannot match on nodegroup when checking minions (refs: `#37763`_) * **PR** `#37763`_: (`cachedout`_) Add nodegroup check to ckminions * **ISSUE** `#37725`_: (`secumod`_) salt-call incorrectly parses master hostname:port from minion config (refs: `#37766`_) * **PR** `#37766`_: (`cachedout`_) Fix ip/port issue with salt-call * **ISSUE** `#33709`_: (`msummers42`_) Any/All Salt-SSH invocations in 2016.3.0 Fails with AttributeError: 'module' object has no attribute 'BASE_THORIUM_ROOTS_DIR' (refs: `#37767`_) * **PR** `#37767`_: (`cachedout`_) Add thorium path to syspaths * **PR** `#37760`_: (`hu-dabao`_) Fix couchbase returner and add couple of more features @ *2016-11-18 00:28:23 UTC* * bff949f4e9 Merge pull request `#37760`_ from hu-dabao/fix_cb_returner * de372f277e 1. returner no need to check whether the jid exists for external job cache setup 2. add full_ret to return doc so that the document will be informative 3. make ttl as a config attribute because salt-minion does not have keep_jobs attribute 4. add password into config attribute 5. 
update the documents accordingly * **ISSUE** `#36629`_: (`yhekma`_) The pillar run module does not honor saltenv (refs: `#37738`_) * **PR** `#37738`_: (`terminalmage`_) Allow pillar.get to retrieve fresh pillar data when saltenv passed @ *2016-11-17 23:13:04 UTC* * 1f976ac212 Merge pull request `#37738`_ from terminalmage/issue36629 * da46678c51 Allow pillar.get to retrieve fresh pillar data when saltenv passed * **PR** `#37745`_: (`cro`_) Switch default filter tag for ONE resources from user only to all resources * **ISSUE** `#37498`_: (`githubcdr`_) service.restart salt-minion fails on Ubuntu 14.04.5 LTS (refs: `#37748`_, `#38587`_) * **PR** `#37748`_: (`silenius`_) check for SERVICE_DIR in __virtual__ in salt.modules.daemontools * **ISSUE** `#37734`_: (`Ch3LL`_) Joyent Cloud Size Issue (refs: `#37735`_) * **PR** `#37735`_: (`Ch3LL`_) change size and image of joyent profile @ *2016-11-16 21:07:52 UTC* * fa7883115e Merge pull request `#37735`_ from Ch3LL/fix_joyent_profile * 9ef41dcdfc change size and image of joyent profile * **PR** `#37731`_: (`rallytime`_) [2016.3] Merge forward from 2015.8 to 2016.3 @ *2016-11-16 17:13:02 UTC* * 98e25c68aa Merge pull request `#37731`_ from rallytime/merge-2016.3 * ec1389711f Merge branch '2015.8' into '2016.3' * f417dbbe99 Merge pull request `#37718`_ from terminalmage/docs * 23b8b2a3f4 Fix incorrectly-formatted RST * **PR** `#37724`_: (`cachedout`_) Warn on AES test for systems with > 1 core * **PR** `#37721`_: (`terminalmage`_) Fix for pillar setting 'environment' key in __gen_opts() @ *2016-11-16 16:04:53 UTC* * 35655d521f Merge pull request `#37721`_ from terminalmage/zd909 * acdd5513da Update git_pillar docs to reflect info from bugfix * 433737d2dc Fix for pillar setting 'environment' key in __gen_opts() * **PR** `#37719`_: (`terminalmage`_) Fix incorrectly-formatted RST (2016.3 branch) @ *2016-11-16 08:20:53 UTC* * 99cda7c003 Merge pull request `#37719`_ from terminalmage/docs-2016.3 * f163b4c724 Fix 
incorrectly-formatted RST * **PR** `#37694`_: (`cachedout`_) Catch differences in git URLs in npm state @ *2016-11-16 01:56:18 UTC* * 8dea695c7c Merge pull request `#37694`_ from cachedout/npm_git * 0e3bc2366a Catch differences in git URLs in npm state * **ISSUE** `#37665`_: (`kluoto`_) boto_elb state fails as key is overwritten by the code (refs: `#37705`_) * **PR** `#37705`_: (`rallytime`_) Don't overwrite the "key" variable passed in to _listeners_present func @ *2016-11-15 21:26:37 UTC* * 329448ccd7 Merge pull request `#37705`_ from rallytime/fix-37665 * 3b7e9c5e3b Don't overwrite the "key" variable passed in to _listeners_present func * **PR** `#37707`_: (`Ch3LL`_) add timeout increase on azure tests @ *2016-11-15 21:24:25 UTC* * **PR** `#37239`_: (`Ch3LL`_) Fix cloud tests timeout (refs: `#37707`_) * ac9a316b50 Merge pull request `#37707`_ from Ch3LL/fix_timeout_azure * 363122c675 add timeout increase on azure tests * **PR** `#37704`_: (`twangboy`_) Fix test disabled 2016.3 [DO NOT MERGE FORWARD] @ *2016-11-15 16:48:52 UTC* * 1ece265354 Merge pull request `#37704`_ from twangboy/fix_test_disabled_2016.3 * a0429cf839 Use nfsd instead of apsd for test_disabled * **PR** `#37690`_: (`twangboy`_) Update pyzmq to 15.3.0 for 2016.3 [DO NOT MERGE FORWARD] @ *2016-11-15 03:10:36 UTC* * 44f05acbff Merge pull request `#37690`_ from twangboy/update_pyzmq_2016.3 * cf55342150 Update pyzmq to version 15.3.0 * **PR** `#37680`_: (`rallytime`_) Back-port `#32965`_ to 2016.3 @ *2016-11-15 02:56:46 UTC* * **PR** `#32965`_: (`kevinquinnyo`_) Fix 'present' option when used without 'key_type' (refs: `#37680`_) * a743d8b5e6 Merge pull request `#37680`_ from rallytime/bp-32965 * 1865b13645 Fix 'present' option when used without 'key_type' * **ISSUE** `#35964`_: (`edgan`_) salt-ssh doesn't set the return code to non-zero on highstate rendering error (refs: `#35965`_) * **PR** `#37681`_: (`rallytime`_) Back-port `#35965`_ to 2016.3 @ *2016-11-14 21:19:22 UTC* * **PR** `#35965`_: 
(`edgan`_) Set the return code to 1 on salt-ssh highstate errors (refs: `#37681`_) * 1c2d6ff293 Merge pull request `#37681`_ from rallytime/bp-35965 * 700f3fa57f Set the return code to 1 on salt-ssh highstate errors * **PR** `#37668`_: (`rallytime`_) [2016.3] Merge forward from 2015.8 to 2016.3 @ *2016-11-14 15:43:25 UTC* * 1b456b55dc Merge pull request `#37668`_ from rallytime/merge-2016.3 * ef684c6b02 Merge branch '2015.8' into '2016.3' * a01b66556f Add docs for rotate_aes_key (`#37641`_) * **ISSUE** `#37492`_: (`JensRantil`_) Failing `salt -b 1 minion state.highstate` has wrong exit code (refs: `#37625`_) * **PR** `#37625`_: (`cachedout`_) Return with proper retcodes in batch mode @ *2016-11-12 20:29:09 UTC* * 305e51d1c0 Merge pull request `#37625`_ from cachedout/issue_37492 * b6031524e5 Return with proper retcodes in batch mode * **ISSUE** `#34547`_: (`sebw`_) salt-cloud deployment fails when deploy: True (refs: `#37607`_) * **PR** `#37639`_: (`rallytime`_) Back-port `#37607`_ to 2016.3 @ *2016-11-11 20:29:20 UTC* * **PR** `#37607`_: (`techhat`_) Try the connection again, in case it's been reset (refs: `#37639`_) * **PR** `#35673`_: (`cro`_) Proxies don't handle reusing the SmartConnect instances very well. 
D… (refs: `#37607`_) * **PR** `#34059`_: (`alexbleotu`_) Vmware common gh (refs: `#37607`_) * 7510cd4da9 Merge pull request `#37639`_ from rallytime/bp-37607 * 9914c93bc4 Pylint: Remove kwargs that are not in the 2016.3 branch * d941e9354d Disable pylint warning * 940ee49a0b Lint fix * 69893f0c38 Try the connection again, in case it's been reset * **ISSUE** `saltstack/salt#37118`_: (`gtmanfred`_) group in file.find module unable to be a list (refs: `#37349`_) * **ISSUE** `#37118`_: (`gtmanfred`_) group in file.find module unable to be a list (refs: `#37349`_) * **PR** `#37638`_: (`rallytime`_) Back-port `#37349`_ to 2016.3 @ *2016-11-11 20:29:01 UTC* * **PR** `#37349`_: (`haeac`_) Pull request for Bug `#37118`_ (refs: `#37638`_) * 24ca96010d Merge pull request `#37638`_ from rallytime/bp-37349 * ba2105bc39 Fix for Bug `#37118`_, the wrong parameter was being used to convert the group name to group id. * **ISSUE** `#37643`_: (`Ch3LL`_) digital ocean list_keypairs limits to 20 keys (refs: `#37644`_) * **PR** `#37644`_: (`Ch3LL`_) digital ocean list_keypairs: increase limit for ssh keys parsed @ *2016-11-11 20:28:46 UTC* * e1e8b81d16 Merge pull request `#37644`_ from Ch3LL/fix_37643 * c02961a2f5 list_keypairs: increase limit for ssh keys parsed * **ISSUE** `#37541`_: (`yhekma`_) salt-minion does not clean up temp files for templates (refs: `#37540`_, `#37640`_) * **PR** `#37640`_: (`rallytime`_) Add known issue `#37541`_ to 2016.3.4 release notes @ *2016-11-11 20:28:12 UTC* * a97c2ad34b Merge pull request `#37640`_ from rallytime/update-release-notes * 6d6de12aff Grammatical fix * 24d7f20e16 Add known issue `#37541`_ to 2016.3.4 release notes * **PR** `#37642`_: (`cro`_) Forward-port change from 2015.8 adding release note for rotate_aes_key @ *2016-11-11 20:27:07 UTC* * fab3eaa237 Merge pull request `#37642`_ from cro/rotate_aes_doc * 1ca5b958c6 Forward-port change from 2015.8 adding release note for rotate_aes_key * **ISSUE** `#37628`_: (`TronPaul`_) [git 2016.3] 
Refreshing of an s3 file server results in an exception (refs: `#37629`_) * **PR** `#37629`_: (`TronPaul`_) fix __opts__ and provider being None in salt.utils.aws:get_location @ *2016-11-11 09:49:47 UTC* * 4c07b3534a Merge pull request `#37629`_ from TronPaul/fix-s3fs-opts * a452cded20 fix __opts__ and provider being None issue * **PR** `#37481`_: (`thatch45`_) Raet internal client reference fix @ *2016-11-11 04:39:41 UTC* * 200d9fcb6e Merge pull request `#37481`_ from thatch45/raet_client * 50d911160b Attempted fix, needs user verification * **PR** `#37611`_: (`jeanpralo`_) Fix cmd batch raw @ *2016-11-11 02:53:58 UTC* * b14faf1c68 Merge pull request `#37611`_ from jeanpralo/fix-cmd-batch-raw * 4f16840ef1 add integration test for salt.client.LocalClient.cmd_batch * ead47e4bba update ret dict to avoid hanging * 0a2f153b6e fix dict key for raw support to avoid exception * **PR** `#37614`_: (`gtmanfred`_) remove redundant code @ *2016-11-11 02:49:13 UTC* * 35c8333d04 Merge pull request `#37614`_ from gtmanfred/2016.3 * 71c2df89a9 remove redundent code * **PR** `#37627`_: (`cachedout`_) Exempt pip.iteritems from test_valid_docs test @ *2016-11-11 02:48:37 UTC* * 4fab707bdd Merge pull request `#37627`_ from cachedout/pr-36706 * 94df2f8e6f Exempt pip.iteritems from test_valid_docs test * **ISSUE** `#36644`_: (`b1naryth1ef`_) env_vars not properly validated/casted to strings w/ virtualenv.manage/pip.install (refs: `#36706`_) * **PR** `#36706`_: (`siccrusher`_) Add basic sanity checks for env_vars in pip.install function @ *2016-11-11 02:47:16 UTC* * ee74f3116e Merge pull request `#36706`_ from siccrusher/fix_env_var_validation * fb27f8b69e Revert change * 79f3e83f8d Use fully-qualified path for six * 0ca1222833 Update pip.py * b15de371c1 * Ensure src is python3 compatible * 0976a2d1ae * Before passing on the env_vars dictionary ensure all values are strings. Fixes `#36644`_ * **ISSUE** `#37491`_: (`JensRantil`_) "Failed to authenticate! ..." 
error should exit non-zero (refs: `#37626`_) * **PR** `#37626`_: (`cachedout`_) Exit with proper retcode on hard client failures @ *2016-11-11 02:38:47 UTC* * 902a97575e Merge pull request `#37626`_ from cachedout/issue_37491 * bab9a729b1 Exit with proper retcode on hard client failures * **PR** `#37617`_: (`terminalmage`_) Clarify docs for git_pillar dynamic env feature @ *2016-11-11 01:52:52 UTC* * 845f835177 Merge pull request `#37617`_ from terminalmage/git_pillar-docs * 8cdf5dbb51 Clarify docs for git_pillar dynamic env feature * **PR** `#36627`_: (`sjmh`_) Skip rest of loop on failed func match @ *2016-11-10 23:47:12 UTC* * 3079d78332 Merge pull request `#36627`_ from sjmh/fix/auth_skip_nomatch * b3baaf30d0 Skip rest of loop on failed func match * **PR** `#37600`_: (`mcalmer`_) change TIMEZONE on SUSE systems (bsc#1008933) @ *2016-11-10 21:54:04 UTC* * a71e7c77b3 Merge pull request `#37600`_ from mcalmer/fix-timezone-on-SUSE * 3530b542f0 change TIMEZONE on SUSE systems (bsc#1008933) * **ISSUE** `#37238`_: (`cmclaughlin`_) Restarting master causes minion to hang (refs: `#37438`_, `#37602`_) * **ISSUE** `#37018`_: (`tsaridas`_) get events from python (refs: `#37438`_, `#37602`_) * **PR** `#37602`_: (`DmitryKuzmenko`_) Handle master restart in appropriate places using `salt.event` listener. @ *2016-11-10 21:53:20 UTC* * **PR** `#37438`_: (`DmitryKuzmenko`_) Fix for `#37238`_ salt hang on master restart (refs: `#37602`_) * 39b75878cf Merge pull request `#37602`_ from DSRCorporation/bugs/37238_salt_hang_on_master_restart * d3d987b19c Handle master restart in appropriate places using `salt.event` listener. 
* **PR** `#37608`_: (`gtmanfred`_) allow multiline returns from docker for mac @ *2016-11-10 21:48:59 UTC* * 019e1a721b Merge pull request `#37608`_ from gtmanfred/2016.3 * 74aee1e372 allow multiline returns from docker for mac * **ISSUE** `#37592`_: (`craigafinch`_) State git.latest does not work with SSH (refs: `#37604`_) * **ISSUE** `#37551`_: (`viict`_) git.latest "Not a valid commit name" (refs: `#37604`_, `#37571`_) * **PR** `#37604`_: (`terminalmage`_) Documentation improvements and corrections * **PR** `#37579`_: (`pass-by-value`_) Use existing VM's VDD size if not specified in the cloud profile * **ISSUE** `#37541`_: (`yhekma`_) salt-minion does not clean up temp files for templates (refs: `#37540`_, `#37640`_) * **PR** `#37540`_: (`yhekma`_) Added prefix to tempfile for template @ *2016-11-10 00:37:18 UTC* * fdd13b4145 Merge pull request `#37540`_ from yhekma/2016.3 * 93a59f8034 Added prefix to tempfile for template * **ISSUE** `#37084`_: (`aaronm-cloudtek`_) x509.certificate_managed does not work with m2crypto >=0.25 (refs: `#37578`_) * **PR** `#37578`_: (`clinta`_) Update for m2crypto changes removing lhash * **PR** `#37584`_: (`clinta`_) Fix eauth example for limiting args * **ISSUE** `#37551`_: (`viict`_) git.latest "Not a valid commit name" (refs: `#37604`_, `#37571`_) * **PR** `#37571`_: (`terminalmage`_) Add a test to ensure we don't check for fast-forward before fetching * **ISSUE** `#33645`_: (`ketzacoatl`_) saltutil.sync_all does not sync custom pillar modules to masterless minions (refs: `#33833`_) * **ISSUE** `#25297`_: (`Akilesh1597`_) perform 'refresh_pillar' as a part of 'sync_all' (refs: `#25361`_, `#37521`_) * **PR** `#37553`_: (`rallytime`_) Back-port `#37521`_ to 2016.3 @ *2016-11-08 23:11:07 UTC* * **PR** `#37521`_: (`genuss`_) refresh_pillar() should be called always with refresh=True during saltutil.sync_all (refs: `#37553`_) * **PR** `#33833`_: (`terminalmage`_) Support syncing pillar modules to masterless minions (refs: `#37521`_) 
* **PR** `#25361`_: (`tedski`_) perform `refresh_pillar` as part of `sync_all` when `refresh=True` (refs: `#37521`_) * b01c247ea9 Merge pull request `#37553`_ from rallytime/bp-37521 * 30f92b05f4 refresh_pillar() should be called always * **PR** `saltstack/salt#37549`_: (`Mrten`_) sqlite is not found in 2015.8 (refs: `#37565`_) * **PR** `#37565`_: (`rallytime`_) Back-port `#37549`_ to 2016.3 @ *2016-11-08 23:10:25 UTC* * **PR** `#37549`_: (`Mrten`_) sqlite is not found in 2015.8 (refs: `#37565`_) * 694df30d40 Merge pull request `#37565`_ from rallytime/bp-37549 * c92a90b8e5 Update sqlite3.py * fb76557a2a sqlite is not found in 2015.8 * **ISSUE** `#37511`_: (`jdelic`_) service.dead now only operates if the service file exists (refs: `#37562`_) * **PR** `#37562`_: (`terminalmage`_) Fix regression in service.dead state * **ISSUE** `#37554`_: (`sjmh`_) salt-api doesn't dynamically re-read nodegroups configuration (refs: `#37560`_) * **PR** `#37560`_: (`whiteinge`_) Skip config type checking for sdb values * **PR** `#37556`_: (`rallytime`_) Don't pass the vpc id to boto.vpc.create_internet_gateway func * **PR** `#37543`_: (`multani`_) Documentation rendering fixes * **ISSUE** `saltstack/salt#31081`_: (`JensRantil`_) salt.modules.file.line documentation unclarities (refs: `#37457`_) * **PR** `#37457`_: (`rallytime`_) Fixup file.line docs to be more clear and consistent @ *2016-11-08 00:29:20 UTC* * 96b8b9a849 Merge pull request `#37457`_ from rallytime/fix-31081 * 25821bb8db Clarify which modes use "before", "after", and "indent" options * 8b2d2b9e7b Clarify file.line state docs as well * b2615892eb Move note about using mode=insert with location options to mode section * db0b0cefb8 Fixup file.line docs to be more clear and consistent * **ISSUE** `#35799`_: (`davegiles`_) dsc.apply_config hangs (no error) on empty directory on target (refs: `#37526`_) * **PR** `#37526`_: (`twangboy`_) Remove loop from dsc.apply_config @ *2016-11-08 00:23:11 UTC* * 7de790ffed Merge pull 
request `#37526`_ from twangboy/fix_35799 * fc4260911c Remove unnecessary format * c934a2bfa7 Remove the loop from apply_config * **PR** `saltstack/salt#37515`_: (`rallytime`_) [carbon] Merge forward from 2016.3 to carbon (refs: `#37534`_) * **PR** `#37534`_: (`rallytime`_) Back-port fix needed from `#37515`_ @ *2016-11-08 00:14:46 UTC* * **PR** `#37515`_: (`rallytime`_) [carbon] Merge forward from 2016.3 to carbon (refs: `#37534`_) * 94811df2ea Merge pull request `#37534`_ from rallytime/bp-merge-foward-fix * d1b2af1d69 Add missing source_hash_name args to a couple funcs * **PR** `#37533`_: (`whiteinge`_) Return a 504 response instead of 500 for Salt timeouts @ *2016-11-08 00:14:15 UTC* * 17adbb0c9f Merge pull request `#37533`_ from whiteinge/salt-api-504-timeouts * 63226aeda6 Return a 504 response instead of 500 for Salt timeouts * **ISSUE** `saltstack/salt#36679`_: (`lorengordon`_) Command 'Import-Module ServerManager' failed with return code: 1 (refs: `saltstack/salt#36736`_, `#36736`_) * **PR** `saltstack/salt#36736`_: (`m03`_) Fix issue 36679 win_servermanager error (refs: `#37529`_) * **PR** `#37529`_: (`lorengordon`_) Backport: PR 36736 to 2016.3 @ *2016-11-08 00:04:10 UTC* * **PR** `#36736`_: (`m03`_) Fix issue 36679 win_servermanager error * a9f03eee6f Merge pull request `#37529`_ from lorengordon/bp-36736 * 21c2664b6a Fix issue 36679 win_servermanager failure * **ISSUE** `#37444`_: (`Tanoti`_) Returning False from __virtual__ in a returner does not return expected error (refs: `saltstack/salt#37502`_, `#37519`_, `#37502`_) * **PR** `saltstack/salt#37502`_: (`cachedout`_) Log proper message on returners that cannot be loaded (refs: `#37519`_) * **PR** `#37519`_: (`rallytime`_) Update returner __virtual__() return messages for loader @ *2016-11-07 23:06:23 UTC* * 19475aada6 Merge pull request `#37519`_ from rallytime/returner-load-errors * fb261a31f3 Update returner __virtual__() return messages for loader * **ISSUE** `#35016`_: (`pingangit`_) 
TypeError: save_minions() got an unexpected keyword argument 'syndic_id' (refs: `#37527`_) * **PR** `#37527`_: (`rallytime`_) Add syndic_id=None kwarg to save_minions funcs in returners @ *2016-11-07 23:04:03 UTC* * fefdfab850 Merge pull request `#37527`_ from rallytime/fix-35016 * 2944b244aa Add syndic_id=None kwarg to save_minions funcs in returners * **PR** `#37530`_: (`gtmanfred`_) fix Lithium to 2015.5.0 * **PR** `#37514`_: (`rallytime`_) [2016.3] Merge forward from 2015.8 to 2016.3 @ *2016-11-07 16:51:06 UTC* * 743164844d Merge pull request `#37514`_ from rallytime/merge-2016.3 * 41166aede4 Merge branch '2015.8' into '2016.3' * c505a059ef [2015.8] Doc version updated to 2016.3.4 (`#37482`_) * **ISSUE** `#36713`_: (`Tanoti`_) ExtraData: unpack(b) received extra data after upgrading to 2016.3.3 (refs: `#37503`_) * **PR** `#37503`_: (`cachedout`_) Catch loader error on returners without save_load @ *2016-11-07 09:33:57 UTC* * 2d924d0820 Merge pull request `#37503`_ from cachedout/issue_36713 * 5f7f971b2c Catch loader error on returners without save_load * **ISSUE** `#37448`_: (`alisson276`_) In 'salt/key' events there are acts that never happen (refs: `#37499`_) * **PR** `#37499`_: (`cachedout`_) Clarify docs on salt-key events @ *2016-11-07 09:33:20 UTC* * d95bf59f97 Merge pull request `#37499`_ from cachedout/key_docs_clarify * 2758e74785 Clarify docs on salt-key events * **PR** `#37500`_: (`cachedout`_) Remove unused flag @ *2016-11-07 09:33:04 UTC* * 1dd1408ae6 Merge pull request `#37500`_ from cachedout/remove_include_errors * 6c705b11e0 Remove unused flag * **ISSUE** `#37444`_: (`Tanoti`_) Returning False from __virtual__ in a returner does not return expected error (refs: `saltstack/salt#37502`_, `#37519`_, `#37502`_) * **PR** `#37502`_: (`cachedout`_) Log proper message on returners that cannot be loaded @ *2016-11-07 09:32:45 UTC* * 4b6f1ab1c4 Merge pull request `#37502`_ from cachedout/issue_37444 * 4c5ab057ce Remove debugging * 17d01e4f4c Log 
proper message on returners that cannot be loaded * **ISSUE** `#37389`_: (`d101nelson`_) Some core grains are inaccurate or incomplete for Solaris (refs: `#37472`_) * **PR** `#37494`_: (`sjorge`_) Forgot to update os_family map in `#37472`_ @ *2016-11-06 22:18:54 UTC* * **PR** `#37472`_: (`sjorge`_) 2016.3 solaris grains improvements (refs: `#37494`_) * 2422dafd52 Merge pull request `#37494`_ from sjorge/2016.3-osfam_map * 96ba545492 Forgot to update os_family map in `#37472`_ * **PR** `#37496`_: (`mcalmer`_) fix status handling in sysv init scripts @ *2016-11-06 22:18:00 UTC* * 41bd8e3f52 Merge pull request `#37496`_ from mcalmer/fix-status-handling-in-sysv-init-scripts * 1fb2c4dfcf fix status handling in sysv init scripts * **PR** `#37497`_: (`terminalmage`_) Update 2016.3.5 release notes with source_hash_name explanation @ *2016-11-06 22:17:40 UTC* * e741a773a5 Merge pull request `#37497`_ from terminalmage/release_notes * c08038d9ea Update 2016.3.5 release notes with source_hash_name explanation * **PR** `#37486`_: (`twangboy`_) Add requirement for PowerShell 3 on Windows @ *2016-11-06 06:01:07 UTC* * f4426c2233 Merge pull request `#37486`_ from twangboy/fix_win_docs * 9e0631a1ae Add docs denoting the requirement for at least PowerShell 3 * **PR** `#37493`_: (`cachedout`_) Add sdb support to minion and master configs @ *2016-11-06 06:00:18 UTC* * a1f355a569 Merge pull request `#37493`_ from cachedout/minion_master_sdb * 9761a462c2 Add sdb support to minion and master configs * **ISSUE** `#31135`_: (`jeffreyctang`_) file.line mode=replace breaks on empty file. 
(refs: `#37452`_) * **PR** `#37452`_: (`rallytime`_) file.line with mode=replace on an empty file should return False, not stacktrace @ *2016-11-06 01:55:11 UTC* * be93710fee Merge pull request `#37452`_ from rallytime/fix-31135 * c792f76d2f Bump log level from debug to warning on empty file * 5f181cf00d file.line with mode=replace on an empty file should return False * 94a00c66eb Write a unit test demonstrating stack trace in `#31135`_ * **ISSUE** `#37001`_: (`phil123456`_) URGENT : archive.extracted does not work anymore (refs: `#37081`_, `saltstack/salt#37081`_) * **ISSUE** `#29010`_: (`The-Loeki`_) file.managed download failing checksum testing for Ubuntu initrd w/source_hash (refs: `#37469`_) * **PR** `saltstack/salt#37081`_: (`terminalmage`_) Fix archive.extracted remote source_hash verification (refs: `#37469`_) * **PR** `#37469`_: (`terminalmage`_) Rewrite file.extract_hash to improve its matching ability @ *2016-11-06 01:50:01 UTC* * **PR** `#37081`_: (`terminalmage`_) Fix archive.extracted remote source_hash verification (refs: `#37469`_) * 129b0387e6 Merge pull request `#37469`_ from terminalmage/issue29010 * a3f38e5a9f Update file.extract_hash unit tests * b26b528f79 Add the source_hash_name param to file.managed states * 52fe72d402 Rewrite file.extract_hash * **ISSUE** `#37389`_: (`d101nelson`_) Some core grains are inaccurate or incomplete for Solaris (refs: `#37472`_) * **PR** `#37472`_: (`sjorge`_) 2016.3 solaris grains improvements (refs: `#37494`_) @ *2016-11-06 01:46:10 UTC* * 9426b9d5c4 Merge pull request `#37472`_ from sjorge/2016.3-solaris-grains * 2958f5ce52 detect and properly handle OmniOS * 37c3a7f5ab handle Oracle Solaris better * 69706d32be parse minorrelease if it has a / in it * d1cf4a0e56 improve regex for parsing /etc/release using files from Solaris 8 SPARC and Solaris 10 * 88eddef765 some more cleanup for smartos * d3ff39f09c improve smartos os version grains * **PR** `#37478`_: (`rallytime`_) [2016.3] Merge forward from 2015.8 to 
2016.3 @ *2016-11-04 20:30:08 UTC* * 4ba63aba48 Merge pull request `#37478`_ from rallytime/merge-2016.3 * 3483a445f2 Merge branch '2015.8' into '2016.3' * 35888c2e30 Merge pull request `#37408`_ from terminalmage/issue37286 * 4e4a05731e Strip slashes from gitfs mountpoints * b6c57c6c8d Merge pull request `#37418`_ from terminalmage/issue36849 * 740bc54239 Do not use compression in tornado httpclient requests * 7fba8aaa7e Merge pull request `#37441`_ from rallytime/bp-37428 * 6fe3ef49de Fix incorrect reference of __utils__ in salt.utils * **PR** `#37485`_: (`rallytime`_) Get release notes started for 2016.3.5 * **PR** `#37483`_: (`rallytime`_) [2016.3] Doc version updated to 2016.3.4 * **ISSUE** `#37123`_: (`nevins-b`_) file.recurse state doesn't support pulling from other environments (refs: `#37121`_) * **PR** `#37121`_: (`nevins-b`_) allow the file.recurse state to support saltenv @ *2016-11-04 05:59:28 UTC* * 580eca709b Merge pull request `#37121`_ from nevins-b/2016.3 * 99d2c360ed making messaging in tests match new return * bc4b0e7cda adding test for saltenv in file.recurse source url * 3315b67075 fixing saltenv if not set in url * a9683cbbd8 allow the file.recurse state to support saltenv (salt://example/dir?saltenv=dev) * **PR** `#37426`_: (`jfindlay`_) Wait for macOS to change system settings @ *2016-11-04 04:35:52 UTC* * **PR** `#37351`_: (`jfindlay`_) modules.mac_power: give macOS time to change setting (refs: `#37426`_) * 766b1437c2 Merge pull request `#37426`_ from jfindlay/mac_sleep * 43a8e199bf modules.mac_power: wait for system to make change * feabca6e0b modules.mac_system: wait for system to make change * 0213eb9a07 utils.mac_utils: add confirm_updated * **ISSUE** `#37238`_: (`cmclaughlin`_) Restarting master causes minion to hang (refs: `#37438`_, `#37602`_) * **ISSUE** `#37018`_: (`tsaridas`_) get events from python (refs: `#37438`_, `#37602`_) * **PR** `#37438`_: (`DmitryKuzmenko`_) Fix for `#37238`_ salt hang on master restart (refs: 
`#37602`_) @ *2016-11-04 04:10:51 UTC* * 9eab5c8f71 Merge pull request `#37438`_ from DSRCorporation/bugs/37238_salt_hang_on_master_restart * f253d3ce4a Auto reconnect `salt` to master if the connection was lost. * **PR** `saltstack/salt#31207`_: (`thusoy`_) Remove error logging of missing boto libraries (refs: `#37440`_) * **PR** `#37440`_: (`rallytime`_) Back-port `#31207`_ to 2016.3 @ *2016-11-04 04:09:33 UTC* * **PR** `#31207`_: (`thusoy`_) Remove error logging of missing boto libraries (refs: `#37440`_) * 9aa7073f70 Merge pull request `#37440`_ from rallytime/bp-31207 * c71ae61271 Remove error logging of missing boto libraries * **PR** `#37442`_: (`twangboy`_) Create paths.d directory @ *2016-11-04 04:07:19 UTC* * edbfadca21 Merge pull request `#37442`_ from twangboy/fix_osx_postinstall * 8091a3065e Create paths.d directory * **PR** `#37445`_: (`twangboy`_) Check for Server os before checking [DO NOT MERGE FORWARD] @ *2016-11-04 04:04:49 UTC* * afb1b3cee5 Merge pull request `#37445`_ from twangboy/fix_import_error_2016.3 * c0d5ebdd8a Check for Server os before checking * **PR** `#37446`_: (`twangboy`_) Detect VC++ for Python on Win32 @ *2016-11-04 04:04:02 UTC* * 7a9f95ab3b Merge pull request `#37446`_ from twangboy/fix_build_32 * 2de69f48f8 Detect VC for Python correctly on 32bit Windows * **ISSUE** `saltstack/salt#36961`_: (`nullify005`_) boto_secgroup assumes a string when checking ip_protocol validity when not tcp|udp|all|-1 (refs: `#37447`_) * **PR** `#37447`_: (`rallytime`_) Cast ip_protocol rule as a str() in boto_secgroup.present @ *2016-11-04 04:03:45 UTC* * 651e0f728f Merge pull request `#37447`_ from rallytime/fix-36961 * 6b930ac7aa Cast ip_protocol rule as a str() in boto_secgroup.present * **ISSUE** `#36446`_: (`whiteinge`_) Custom salt-api config problem (refs: `#37455`_) * **PR** `saltstack/salt#36386`_: (`xiaoanyunfei`_) fix salt-api's default opts were covered by salt-master `#35734`_ (refs: `#37455`_) * **PR** `#37455`_: (`techhat`_) Make api 
opts respect correct root_dir @ *2016-11-04 03:25:40 UTC* * **PR** `#35734`_: (`xiaoanyunfei`_) fix salt-api's default opts were covered by salt-master (refs: `saltstack/salt#36386`_) * a51d944c7c Merge pull request `#37455`_ from techhat/issue36446 * 7eff90d61d Make api opts respect correct root_dir * **PR** `#37459`_: (`twangboy`_) Fix error message when ConvertTo-Json not supported [DO NOT MERGE FORWARD] @ *2016-11-04 03:22:31 UTC* * 3591bf0f58 Merge pull request `#37459`_ from twangboy/fix_dsc_json_msg_2016.3 * 949b70913d Use cmd.run_all instead of cmd.shell * **PR** `#37430`_: (`meaksh`_) Including resolution parameters in the Zypper debug-solver call during a dry-run dist-upgrade (2016.3) @ *2016-11-03 14:35:46 UTC* * **PR** `#37353`_: (`meaksh`_) Including resolution parameters in the Zypper debug-solver call during a dry-run dist-upgrade (refs: `#37430`_) * 80a99c4cc5 Merge pull request `#37430`_ from meaksh/zypper-dist-upgrade-debug-solver-fix-2016.3 * ffc596f215 Including resolver params for Zypper debug-solver * **ISSUE** `#37388`_: (`tyhunt99`_) [2016.3.4] Refreshing of an s3 file server results in an exception. 
(refs: `#37428`_) * **PR** `#37428`_: (`cachedout`_) Fix incorrect reference of __utils__ in salt.utils (refs: `#37441`_) * **PR** `#37419`_: (`rallytime`_) [2016.3] Merge forward from 2015.8 to 2016.3 @ *2016-11-02 21:40:04 UTC* * 7864f9b79d Merge pull request `#37419`_ from rallytime/merge-2016.3 * bce47c9175 Merge branch '2015.8' into '2016.3' * 7b1d3b5562 Merge pull request `#37392`_ from rallytime/bp-33190 * 4063bae5de catch None cases for comments in jboss7 state module * **PR** `#37416`_: (`terminalmage`_) Fix regression in output for Ctrl-c'ed CLI jobs * **PR** `#37414`_: (`pass-by-value`_) Add unit tests for cloning from snapshot * **PR** `#37350`_: (`pass-by-value`_) Add handling for full and linked clone (refs: `#37414`_) * **PR** `saltstack/salt#37401`_: (`cachedout`_) Bootstrap delay option for salt-cloud (refs: `#37404`_) * **PR** `#37404`_: (`cachedout`_) Revert "Bootstrap delay option for salt-cloud" @ *2016-11-02 09:48:53 UTC* * ecd794a233 Merge pull request `#37404`_ from saltstack/revert-37401-bootstrap_delay * e864de8f03 Revert "Bootstrap delay option for salt-cloud" * **PR** `#37401`_: (`cachedout`_) Bootstrap delay option for salt-cloud @ *2016-11-02 09:02:13 UTC* * 2eb44fbd11 Merge pull request `#37401`_ from cachedout/bootstrap_delay * 6e42b0e157 Bootstrap delay option for salt-cloud * **PR** `#37350`_: (`pass-by-value`_) Add handling for full and linked clone (refs: `#37414`_) @ *2016-11-02 08:02:29 UTC* * 9446e48da0 Merge pull request `#37350`_ from pass-by-value/full_and_linked_clone_v1 * d8b1c9c777 Add handling for full and linked clone and commit disk mode additions * **ISSUE** `#34841`_: (`Ch3LL`_) Wrong return when using `user.chgroups` on windows (refs: `#37386`_) * **PR** `#37386`_: (`rallytime`_) Fix win_useradd.chgroups return when cmd.run_all retcode != 0 @ *2016-11-02 06:34:12 UTC* * c7f4d7f76a Merge pull request `#37386`_ from rallytime/fix-34841 * c70492a1fe Fix win_useradd.chgroups return when cmd.run_all retcode != 0 * 
**ISSUE** `#34263`_: (`vernondcole`_) Use of dnsmasq.set_config injects unintentional text into the configuration file. (refs: `#37390`_) * **PR** `#37390`_: (`rallytime`_) Don't insert __pub* keys into dnsmasq config file with set_config function @ *2016-11-02 06:31:53 UTC* * 34b6c6459a Merge pull request `#37390`_ from rallytime/fix-34263 * e082ff538b Fix failing test now that we're raising a CommandExecutionError * c6a3476abb Filter out the __pub keys passed via \*\*kwargs for dnsmasq.set_config * fd380c79b9 Add test case to reproduce dnsmasq.set_config failure in `#34263`_ * **ISSUE** `#35163`_: (`SolarisYan`_) salt file.mkdir (refs: `#35287`_, `#35189`_) * **PR** `#37391`_: (`rallytime`_) Back-port `#35287`_ to 2016.3 @ *2016-11-02 06:18:26 UTC* * **PR** `#35287`_: (`dere`_) 2016.3 (refs: `#37391`_) * **PR** `#35189`_: (`dere`_) return value for file.mkdir instead of None (refs: `#35287`_) * 798b2acbe3 Merge pull request `#37391`_ from rallytime/bp-35287 * 0e1ebea5a4 Simplify return value to "True". 
* 13022c5cc4 return value for mkdir instead of None * **ISSUE** `#37264`_: (`junster1`_) Parsing __grains__ with json.dumps in a module is returning an empty dict in 2016.3.3 (refs: `#37279`_) * **PR** `#37279`_: (`gtmanfred`_) initialize super class of NamespacedDictWrapper @ *2016-11-01 15:12:49 UTC* * 1a4833b3a1 Merge pull request `#37279`_ from gtmanfred/2016.3 * 597f346d57 initialize super class of NamespacedDictWrapper * **PR** `#37351`_: (`jfindlay`_) modules.mac_power: give macOS time to change setting (refs: `#37426`_) @ *2016-10-31 19:15:40 UTC* * 351175931c Merge pull request `#37351`_ from jfindlay/mac_set * 0c58056d84 modules.mac_power: give macOS time to change setting * **PR** `#37340`_: (`cachedout`_) SIGILL -> SIGKILL in process test @ *2016-10-31 08:50:10 UTC* * 25c987e33a Merge pull request `#37340`_ from cachedout/ill_kill_3 * a6b7417fe9 SIGILL -> SIGKILL in process test * **ISSUE** `#35480`_: (`jelenak`_) 200 processes of salt-master (2016.3.2) (refs: `#37306`_) * **PR** `#37306`_: (`DmitryKuzmenko`_) Don't use os.wait() on subprocesses managed by `multiprocessing`. @ *2016-10-31 06:55:30 UTC* * 7f1654894d Merge pull request `#37306`_ from DSRCorporation/bugs/35480_master_shutdown_no_process_error * b6937ebaa8 Don't use os.wait() on subprocesses managed by `multiprocessing`. 
* **ISSUE** `#34998`_: (`exowaucka`_) placementgroup parameter for salt-cloud is undocumented (refs: `#37314`_) * **PR** `#37314`_: (`rallytime`_) Document the existence of placementgroup option in ec2 driver @ *2016-10-31 06:42:33 UTC* * bf8ba97d54 Merge pull request `#37314`_ from rallytime/fix-34998 * 39459ed30b Document the existence of placementgroup option in ec2 driver * **ISSUE** `#36148`_: (`alex-zel`_) Eauth error with openLDAP groups (refs: `#37219`_) * **PR** `#37219`_: (`alex-zel`_) Fix freeipa ldap groups @ *2016-10-28 04:33:37 UTC* * e0baf4b193 Merge pull request `#37219`_ from alex-zel/fix-freeipa-ldap-groups * b5b2e7e097 Remove trailing whitespaces * 32f906b020 Add support for FreeIPA .. _`#10`: https://github.com/saltstack/salt/issues/10 .. _`#12788`: https://github.com/saltstack/salt/issues/12788 .. _`#15697`: https://github.com/saltstack/salt/issues/15697 .. _`#19269`: https://github.com/saltstack/salt/issues/19269 .. _`#19`: https://github.com/saltstack/salt/issues/19 .. _`#20`: https://github.com/saltstack/salt/issues/20 .. _`#25297`: https://github.com/saltstack/salt/issues/25297 .. _`#25361`: https://github.com/saltstack/salt/pull/25361 .. _`#27355`: https://github.com/saltstack/salt/issues/27355 .. _`#29010`: https://github.com/saltstack/salt/issues/29010 .. _`#29294`: https://github.com/saltstack/salt/pull/29294 .. _`#30454`: https://github.com/saltstack/salt/issues/30454 .. _`#30481`: https://github.com/saltstack/salt/pull/30481 .. _`#31135`: https://github.com/saltstack/salt/issues/31135 .. _`#31207`: https://github.com/saltstack/salt/pull/31207 .. _`#31953`: https://github.com/saltstack/salt/issues/31953 .. _`#32157`: https://github.com/saltstack/salt/pull/32157 .. _`#32400`: https://github.com/saltstack/salt/issues/32400 .. _`#32829`: https://github.com/saltstack/salt/issues/32829 .. _`#32965`: https://github.com/saltstack/salt/pull/32965 .. _`#33601`: https://github.com/saltstack/salt/pull/33601 .. 
_`#33645`: https://github.com/saltstack/salt/issues/33645 .. _`#33709`: https://github.com/saltstack/salt/issues/33709 .. _`#33833`: https://github.com/saltstack/salt/pull/33833 .. _`#34059`: https://github.com/saltstack/salt/pull/34059 .. _`#34263`: https://github.com/saltstack/salt/issues/34263 .. _`#34504`: https://github.com/saltstack/salt/issues/34504 .. _`#34547`: https://github.com/saltstack/salt/issues/34547 .. _`#34600`: https://github.com/saltstack/salt/issues/34600 .. _`#34841`: https://github.com/saltstack/salt/issues/34841 .. _`#34998`: https://github.com/saltstack/salt/issues/34998 .. _`#35016`: https://github.com/saltstack/salt/issues/35016 .. _`#35088`: https://github.com/saltstack/salt/issues/35088 .. _`#35163`: https://github.com/saltstack/salt/issues/35163 .. _`#35189`: https://github.com/saltstack/salt/pull/35189 .. _`#35287`: https://github.com/saltstack/salt/pull/35287 .. _`#35342`: https://github.com/saltstack/salt/issues/35342 .. _`#35390`: https://github.com/saltstack/salt/pull/35390 .. _`#35480`: https://github.com/saltstack/salt/issues/35480 .. _`#35673`: https://github.com/saltstack/salt/pull/35673 .. _`#35734`: https://github.com/saltstack/salt/pull/35734 .. _`#35799`: https://github.com/saltstack/salt/issues/35799 .. _`#35964`: https://github.com/saltstack/salt/issues/35964 .. _`#35965`: https://github.com/saltstack/salt/pull/35965 .. _`#36148`: https://github.com/saltstack/salt/issues/36148 .. _`#36446`: https://github.com/saltstack/salt/issues/36446 .. _`#36548`: https://github.com/saltstack/salt/issues/36548 .. _`#36598`: https://github.com/saltstack/salt/issues/36598 .. _`#36627`: https://github.com/saltstack/salt/pull/36627 .. _`#36629`: https://github.com/saltstack/salt/issues/36629 .. _`#36644`: https://github.com/saltstack/salt/issues/36644 .. _`#36706`: https://github.com/saltstack/salt/pull/36706 .. _`#36707`: https://github.com/saltstack/salt/issues/36707 .. _`#36713`: https://github.com/saltstack/salt/issues/36713 .. 
_`#36736`: https://github.com/saltstack/salt/pull/36736 .. _`#36784`: https://github.com/saltstack/salt/pull/36784 .. _`#36794`: https://github.com/saltstack/salt/pull/36794 .. _`#36893`: https://github.com/saltstack/salt/pull/36893 .. _`#36938`: https://github.com/saltstack/salt/pull/36938 .. _`#37001`: https://github.com/saltstack/salt/issues/37001 .. _`#37018`: https://github.com/saltstack/salt/issues/37018 .. _`#37059`: https://github.com/saltstack/salt/issues/37059 .. _`#37081`: https://github.com/saltstack/salt/pull/37081 .. _`#37084`: https://github.com/saltstack/salt/issues/37084 .. _`#37118`: https://github.com/saltstack/salt/issues/37118 .. _`#37121`: https://github.com/saltstack/salt/pull/37121 .. _`#37123`: https://github.com/saltstack/salt/issues/37123 .. _`#37149`: https://github.com/saltstack/salt/pull/37149 .. _`#37219`: https://github.com/saltstack/salt/pull/37219 .. _`#37238`: https://github.com/saltstack/salt/issues/37238 .. _`#37239`: https://github.com/saltstack/salt/pull/37239 .. _`#37264`: https://github.com/saltstack/salt/issues/37264 .. _`#37272`: https://github.com/saltstack/salt/pull/37272 .. _`#37279`: https://github.com/saltstack/salt/pull/37279 .. _`#37287`: https://github.com/saltstack/salt/issues/37287 .. _`#37306`: https://github.com/saltstack/salt/pull/37306 .. _`#37314`: https://github.com/saltstack/salt/pull/37314 .. _`#37340`: https://github.com/saltstack/salt/pull/37340 .. _`#37349`: https://github.com/saltstack/salt/pull/37349 .. _`#37350`: https://github.com/saltstack/salt/pull/37350 .. _`#37351`: https://github.com/saltstack/salt/pull/37351 .. _`#37353`: https://github.com/saltstack/salt/pull/37353 .. _`#37355`: https://github.com/saltstack/salt/issues/37355 .. _`#37358`: https://github.com/saltstack/salt/pull/37358 .. _`#37383`: https://github.com/saltstack/salt/issues/37383 .. _`#37386`: https://github.com/saltstack/salt/pull/37386 .. _`#37388`: https://github.com/saltstack/salt/issues/37388 .. 
_`#37389`: https://github.com/saltstack/salt/issues/37389 .. _`#37390`: https://github.com/saltstack/salt/pull/37390 .. _`#37391`: https://github.com/saltstack/salt/pull/37391 .. _`#37392`: https://github.com/saltstack/salt/pull/37392 .. _`#37401`: https://github.com/saltstack/salt/pull/37401 .. _`#37404`: https://github.com/saltstack/salt/pull/37404 .. _`#37408`: https://github.com/saltstack/salt/pull/37408 .. _`#37414`: https://github.com/saltstack/salt/pull/37414 .. _`#37416`: https://github.com/saltstack/salt/pull/37416 .. _`#37418`: https://github.com/saltstack/salt/pull/37418 .. _`#37419`: https://github.com/saltstack/salt/pull/37419 .. _`#37426`: https://github.com/saltstack/salt/pull/37426 .. _`#37428`: https://github.com/saltstack/salt/pull/37428 .. _`#37430`: https://github.com/saltstack/salt/pull/37430 .. _`#37438`: https://github.com/saltstack/salt/pull/37438 .. _`#37440`: https://github.com/saltstack/salt/pull/37440 .. _`#37441`: https://github.com/saltstack/salt/pull/37441 .. _`#37442`: https://github.com/saltstack/salt/pull/37442 .. _`#37444`: https://github.com/saltstack/salt/issues/37444 .. _`#37445`: https://github.com/saltstack/salt/pull/37445 .. _`#37446`: https://github.com/saltstack/salt/pull/37446 .. _`#37447`: https://github.com/saltstack/salt/pull/37447 .. _`#37448`: https://github.com/saltstack/salt/issues/37448 .. _`#37452`: https://github.com/saltstack/salt/pull/37452 .. _`#37455`: https://github.com/saltstack/salt/pull/37455 .. _`#37457`: https://github.com/saltstack/salt/pull/37457 .. _`#37459`: https://github.com/saltstack/salt/pull/37459 .. _`#37469`: https://github.com/saltstack/salt/pull/37469 .. _`#37472`: https://github.com/saltstack/salt/pull/37472 .. _`#37478`: https://github.com/saltstack/salt/pull/37478 .. _`#37481`: https://github.com/saltstack/salt/pull/37481 .. _`#37482`: https://github.com/saltstack/salt/pull/37482 .. _`#37483`: https://github.com/saltstack/salt/pull/37483 .. 
_`#37485`: https://github.com/saltstack/salt/pull/37485 .. _`#37486`: https://github.com/saltstack/salt/pull/37486 .. _`#37491`: https://github.com/saltstack/salt/issues/37491 .. _`#37492`: https://github.com/saltstack/salt/issues/37492 .. _`#37493`: https://github.com/saltstack/salt/pull/37493 .. _`#37494`: https://github.com/saltstack/salt/pull/37494 .. _`#37496`: https://github.com/saltstack/salt/pull/37496 .. _`#37497`: https://github.com/saltstack/salt/pull/37497 .. _`#37498`: https://github.com/saltstack/salt/issues/37498 .. _`#37499`: https://github.com/saltstack/salt/pull/37499 .. _`#37500`: https://github.com/saltstack/salt/pull/37500 .. _`#37502`: https://github.com/saltstack/salt/pull/37502 .. _`#37503`: https://github.com/saltstack/salt/pull/37503 .. _`#37511`: https://github.com/saltstack/salt/issues/37511 .. _`#37514`: https://github.com/saltstack/salt/pull/37514 .. _`#37515`: https://github.com/saltstack/salt/pull/37515 .. _`#37519`: https://github.com/saltstack/salt/pull/37519 .. _`#37521`: https://github.com/saltstack/salt/pull/37521 .. _`#37526`: https://github.com/saltstack/salt/pull/37526 .. _`#37527`: https://github.com/saltstack/salt/pull/37527 .. _`#37529`: https://github.com/saltstack/salt/pull/37529 .. _`#37530`: https://github.com/saltstack/salt/pull/37530 .. _`#37533`: https://github.com/saltstack/salt/pull/37533 .. _`#37534`: https://github.com/saltstack/salt/pull/37534 .. _`#37540`: https://github.com/saltstack/salt/pull/37540 .. _`#37541`: https://github.com/saltstack/salt/issues/37541 .. _`#37543`: https://github.com/saltstack/salt/pull/37543 .. _`#37549`: https://github.com/saltstack/salt/pull/37549 .. _`#37551`: https://github.com/saltstack/salt/issues/37551 .. _`#37553`: https://github.com/saltstack/salt/pull/37553 .. _`#37554`: https://github.com/saltstack/salt/issues/37554 .. _`#37556`: https://github.com/saltstack/salt/pull/37556 .. _`#37560`: https://github.com/saltstack/salt/pull/37560 .. 
_`#37562`: https://github.com/saltstack/salt/pull/37562 .. _`#37565`: https://github.com/saltstack/salt/pull/37565 .. _`#37571`: https://github.com/saltstack/salt/pull/37571 .. _`#37578`: https://github.com/saltstack/salt/pull/37578 .. _`#37579`: https://github.com/saltstack/salt/pull/37579 .. _`#37584`: https://github.com/saltstack/salt/pull/37584 .. _`#37592`: https://github.com/saltstack/salt/issues/37592 .. _`#37600`: https://github.com/saltstack/salt/pull/37600 .. _`#37602`: https://github.com/saltstack/salt/pull/37602 .. _`#37604`: https://github.com/saltstack/salt/pull/37604 .. _`#37607`: https://github.com/saltstack/salt/pull/37607 .. _`#37608`: https://github.com/saltstack/salt/pull/37608 .. _`#37611`: https://github.com/saltstack/salt/pull/37611 .. _`#37614`: https://github.com/saltstack/salt/pull/37614 .. _`#37617`: https://github.com/saltstack/salt/pull/37617 .. _`#37625`: https://github.com/saltstack/salt/pull/37625 .. _`#37626`: https://github.com/saltstack/salt/pull/37626 .. _`#37627`: https://github.com/saltstack/salt/pull/37627 .. _`#37628`: https://github.com/saltstack/salt/issues/37628 .. _`#37629`: https://github.com/saltstack/salt/pull/37629 .. _`#37638`: https://github.com/saltstack/salt/pull/37638 .. _`#37639`: https://github.com/saltstack/salt/pull/37639 .. _`#37640`: https://github.com/saltstack/salt/pull/37640 .. _`#37641`: https://github.com/saltstack/salt/pull/37641 .. _`#37642`: https://github.com/saltstack/salt/pull/37642 .. _`#37643`: https://github.com/saltstack/salt/issues/37643 .. _`#37644`: https://github.com/saltstack/salt/pull/37644 .. _`#37653`: https://github.com/saltstack/salt/issues/37653 .. _`#37665`: https://github.com/saltstack/salt/issues/37665 .. _`#37668`: https://github.com/saltstack/salt/pull/37668 .. _`#37680`: https://github.com/saltstack/salt/pull/37680 .. _`#37681`: https://github.com/saltstack/salt/pull/37681 .. _`#37684`: https://github.com/saltstack/salt/issues/37684 .. 
_`#37690`: https://github.com/saltstack/salt/pull/37690 .. _`#37694`: https://github.com/saltstack/salt/pull/37694 .. _`#37704`: https://github.com/saltstack/salt/pull/37704 .. _`#37705`: https://github.com/saltstack/salt/pull/37705 .. _`#37707`: https://github.com/saltstack/salt/pull/37707 .. _`#37718`: https://github.com/saltstack/salt/pull/37718 .. _`#37719`: https://github.com/saltstack/salt/pull/37719 .. _`#37721`: https://github.com/saltstack/salt/pull/37721 .. _`#37724`: https://github.com/saltstack/salt/pull/37724 .. _`#37725`: https://github.com/saltstack/salt/issues/37725 .. _`#37731`: https://github.com/saltstack/salt/pull/37731 .. _`#37732`: https://github.com/saltstack/salt/issues/37732 .. _`#37734`: https://github.com/saltstack/salt/issues/37734 .. _`#37735`: https://github.com/saltstack/salt/pull/37735 .. _`#37736`: https://github.com/saltstack/salt/pull/37736 .. _`#37737`: https://github.com/saltstack/salt/issues/37737 .. _`#37738`: https://github.com/saltstack/salt/pull/37738 .. _`#37742`: https://github.com/saltstack/salt/issues/37742 .. _`#37745`: https://github.com/saltstack/salt/pull/37745 .. _`#37748`: https://github.com/saltstack/salt/pull/37748 .. _`#37751`: https://github.com/saltstack/salt/issues/37751 .. _`#37760`: https://github.com/saltstack/salt/pull/37760 .. _`#37762`: https://github.com/saltstack/salt/pull/37762 .. _`#37763`: https://github.com/saltstack/salt/pull/37763 .. _`#37766`: https://github.com/saltstack/salt/pull/37766 .. _`#37767`: https://github.com/saltstack/salt/pull/37767 .. _`#37772`: https://github.com/saltstack/salt/pull/37772 .. _`#37775`: https://github.com/saltstack/salt/pull/37775 .. _`#37785`: https://github.com/saltstack/salt/pull/37785 .. _`#37787`: https://github.com/saltstack/salt/issues/37787 .. _`#37789`: https://github.com/saltstack/salt/pull/37789 .. _`#37790`: https://github.com/saltstack/salt/pull/37790 .. _`#37797`: https://github.com/saltstack/salt/pull/37797 .. 
_`#37810`: https://github.com/saltstack/salt/pull/37810 .. _`#37811`: https://github.com/saltstack/salt/pull/37811 .. _`#37812`: https://github.com/saltstack/salt/pull/37812 .. _`#37816`: https://github.com/saltstack/salt/pull/37816 .. _`#37817`: https://github.com/saltstack/salt/pull/37817 .. _`#37820`: https://github.com/saltstack/salt/pull/37820 .. _`#37821`: https://github.com/saltstack/salt/pull/37821 .. _`#37822`: https://github.com/saltstack/salt/pull/37822 .. _`#37823`: https://github.com/saltstack/salt/pull/37823 .. _`#37826`: https://github.com/saltstack/salt/pull/37826 .. _`#37827`: https://github.com/saltstack/salt/pull/37827 .. _`#37847`: https://github.com/saltstack/salt/pull/37847 .. _`#37856`: https://github.com/saltstack/salt/pull/37856 .. _`#37857`: https://github.com/saltstack/salt/pull/37857 .. _`#37863`: https://github.com/saltstack/salt/pull/37863 .. _`#37866`: https://github.com/saltstack/salt/pull/37866 .. _`#37867`: https://github.com/saltstack/salt/issues/37867 .. _`#37870`: https://github.com/saltstack/salt/issues/37870 .. _`#37886`: https://github.com/saltstack/salt/pull/37886 .. _`#37895`: https://github.com/saltstack/salt/pull/37895 .. _`#37896`: https://github.com/saltstack/salt/pull/37896 .. _`#37899`: https://github.com/saltstack/salt/pull/37899 .. _`#37907`: https://github.com/saltstack/salt/pull/37907 .. _`#37912`: https://github.com/saltstack/salt/pull/37912 .. _`#37914`: https://github.com/saltstack/salt/pull/37914 .. _`#37916`: https://github.com/saltstack/salt/pull/37916 .. _`#37918`: https://github.com/saltstack/salt/pull/37918 .. _`#37921`: https://github.com/saltstack/salt/pull/37921 .. _`#37924`: https://github.com/saltstack/salt/pull/37924 .. _`#37925`: https://github.com/saltstack/salt/pull/37925 .. _`#37926`: https://github.com/saltstack/salt/pull/37926 .. _`#37928`: https://github.com/saltstack/salt/pull/37928 .. _`#37929`: https://github.com/saltstack/salt/pull/37929 .. 
_`#37939`: https://github.com/saltstack/salt/issues/37939 .. _`#37945`: https://github.com/saltstack/salt/issues/37945 .. _`#37950`: https://github.com/saltstack/salt/pull/37950 .. _`#37961`: https://github.com/saltstack/salt/pull/37961 .. _`#37962`: https://github.com/saltstack/salt/pull/37962 .. _`#37964`: https://github.com/saltstack/salt/pull/37964 .. _`#37978`: https://github.com/saltstack/salt/pull/37978 .. _`#37995`: https://github.com/saltstack/salt/pull/37995 .. _`#38002`: https://github.com/saltstack/salt/pull/38002 .. _`#38034`: https://github.com/saltstack/salt/pull/38034 .. _`#38037`: https://github.com/saltstack/salt/issues/38037 .. _`#38039`: https://github.com/saltstack/salt/pull/38039 .. _`#38045`: https://github.com/saltstack/salt/pull/38045 .. _`#38057`: https://github.com/saltstack/salt/pull/38057 .. _`#38059`: https://github.com/saltstack/salt/pull/38059 .. _`#38083`: https://github.com/saltstack/salt/pull/38083 .. _`#38087`: https://github.com/saltstack/salt/issues/38087 .. _`#38091`: https://github.com/saltstack/salt/issues/38091 .. _`#38102`: https://github.com/saltstack/salt/pull/38102 .. _`#38104`: https://github.com/saltstack/salt/pull/38104 .. _`#38134`: https://github.com/saltstack/salt/pull/38134 .. _`#38153`: https://github.com/saltstack/salt/pull/38153 .. _`#38162`: https://github.com/saltstack/salt/issues/38162 .. _`#38163`: https://github.com/saltstack/salt/pull/38163 .. _`#38174`: https://github.com/saltstack/salt/issues/38174 .. _`#38177`: https://github.com/saltstack/salt/pull/38177 .. _`#38181`: https://github.com/saltstack/salt/pull/38181 .. _`#38185`: https://github.com/saltstack/salt/pull/38185 .. _`#38187`: https://github.com/saltstack/salt/issues/38187 .. _`#38191`: https://github.com/saltstack/salt/pull/38191 .. _`#38194`: https://github.com/saltstack/salt/pull/38194 .. _`#38198`: https://github.com/saltstack/salt/pull/38198 .. _`#38209`: https://github.com/saltstack/salt/issues/38209 .. 
_`#38213`: https://github.com/saltstack/salt/pull/38213 .. _`#38221`: https://github.com/saltstack/salt/pull/38221 .. _`#38223`: https://github.com/saltstack/salt/pull/38223 .. _`#38224`: https://github.com/saltstack/salt/pull/38224 .. _`#38248`: https://github.com/saltstack/salt/pull/38248 .. _`#38254`: https://github.com/saltstack/salt/pull/38254 .. _`#38256`: https://github.com/saltstack/salt/pull/38256 .. _`#38279`: https://github.com/saltstack/salt/pull/38279 .. _`#38281`: https://github.com/saltstack/salt/pull/38281 .. _`#38282`: https://github.com/saltstack/salt/issues/38282 .. _`#38288`: https://github.com/saltstack/salt/pull/38288 .. _`#38290`: https://github.com/saltstack/salt/issues/38290 .. _`#38312`: https://github.com/saltstack/salt/pull/38312 .. _`#38313`: https://github.com/saltstack/salt/pull/38313 .. _`#38320`: https://github.com/saltstack/salt/pull/38320 .. _`#38353`: https://github.com/saltstack/salt/issues/38353 .. _`#38372`: https://github.com/saltstack/salt/issues/38372 .. _`#38382`: https://github.com/saltstack/salt/pull/38382 .. _`#38385`: https://github.com/saltstack/salt/pull/38385 .. _`#38388`: https://github.com/saltstack/salt/issues/38388 .. _`#38390`: https://github.com/saltstack/salt/pull/38390 .. _`#38398`: https://github.com/saltstack/salt/pull/38398 .. _`#38407`: https://github.com/saltstack/salt/pull/38407 .. _`#38415`: https://github.com/saltstack/salt/pull/38415 .. _`#38419`: https://github.com/saltstack/salt/pull/38419 .. _`#38420`: https://github.com/saltstack/salt/pull/38420 .. _`#38421`: https://github.com/saltstack/salt/pull/38421 .. _`#38434`: https://github.com/saltstack/salt/pull/38434 .. _`#38438`: https://github.com/saltstack/salt/issues/38438 .. _`#38449`: https://github.com/saltstack/salt/issues/38449 .. _`#38457`: https://github.com/saltstack/salt/pull/38457 .. _`#38467`: https://github.com/saltstack/salt/pull/38467 .. _`#38472`: https://github.com/saltstack/salt/issues/38472 .. 
_`#38474`: https://github.com/saltstack/salt/pull/38474 .. _`#38479`: https://github.com/saltstack/salt/issues/38479 .. _`#38487`: https://github.com/saltstack/salt/pull/38487 .. _`#38491`: https://github.com/saltstack/salt/pull/38491 .. _`#38503`: https://github.com/saltstack/salt/pull/38503 .. _`#38524`: https://github.com/saltstack/salt/issues/38524 .. _`#38527`: https://github.com/saltstack/salt/pull/38527 .. _`#38531`: https://github.com/saltstack/salt/pull/38531 .. _`#38536`: https://github.com/saltstack/salt/pull/38536 .. _`#38541`: https://github.com/saltstack/salt/pull/38541 .. _`#38542`: https://github.com/saltstack/salt/pull/38542 .. _`#38554`: https://github.com/saltstack/salt/pull/38554 .. _`#38558`: https://github.com/saltstack/salt/issues/38558 .. _`#38560`: https://github.com/saltstack/salt/pull/38560 .. _`#38562`: https://github.com/saltstack/salt/pull/38562 .. _`#38567`: https://github.com/saltstack/salt/pull/38567 .. _`#38570`: https://github.com/saltstack/salt/pull/38570 .. _`#38572`: https://github.com/saltstack/salt/issues/38572 .. _`#38579`: https://github.com/saltstack/salt/pull/38579 .. _`#38585`: https://github.com/saltstack/salt/pull/38585 .. _`#38587`: https://github.com/saltstack/salt/pull/38587 .. _`#38589`: https://github.com/saltstack/salt/pull/38589 .. _`#38598`: https://github.com/saltstack/salt/pull/38598 .. _`#38600`: https://github.com/saltstack/salt/pull/38600 .. _`#38601`: https://github.com/saltstack/salt/pull/38601 .. _`#38602`: https://github.com/saltstack/salt/pull/38602 .. _`#38604`: https://github.com/saltstack/salt/issues/38604 .. _`#38618`: https://github.com/saltstack/salt/pull/38618 .. _`#38622`: https://github.com/saltstack/salt/issues/38622 .. _`#38626`: https://github.com/saltstack/salt/pull/38626 .. _`#38629`: https://github.com/saltstack/salt/issues/38629 .. _`#38647`: https://github.com/saltstack/salt/pull/38647 .. _`#38648`: https://github.com/saltstack/salt/issues/38648 .. 
_`#38649`: https://github.com/saltstack/salt/pull/38649 .. _`#38650`: https://github.com/saltstack/salt/pull/38650 .. _`#38657`: https://github.com/saltstack/salt/pull/38657 .. _`#38668`: https://github.com/saltstack/salt/pull/38668 .. _`#38669`: https://github.com/saltstack/salt/pull/38669 .. _`#38674`: https://github.com/saltstack/salt/issues/38674 .. _`#38693`: https://github.com/saltstack/salt/pull/38693 .. _`#38707`: https://github.com/saltstack/salt/pull/38707 .. _`#38720`: https://github.com/saltstack/salt/pull/38720 .. _`#38723`: https://github.com/saltstack/salt/pull/38723 .. _`#38731`: https://github.com/saltstack/salt/pull/38731 .. _`#38735`: https://github.com/saltstack/salt/pull/38735 .. _`#38739`: https://github.com/saltstack/salt/pull/38739 .. _`#38743`: https://github.com/saltstack/salt/pull/38743 .. _`#38749`: https://github.com/saltstack/salt/pull/38749 .. _`#38774`: https://github.com/saltstack/salt/pull/38774 .. _`#38789`: https://github.com/saltstack/salt/pull/38789 .. _`#38790`: https://github.com/saltstack/salt/pull/38790 .. _`#38792`: https://github.com/saltstack/salt/pull/38792 .. _`#38796`: https://github.com/saltstack/salt/pull/38796 .. _`#38808`: https://github.com/saltstack/salt/pull/38808 .. _`#38809`: https://github.com/saltstack/salt/pull/38809 .. _`#38812`: https://github.com/saltstack/salt/pull/38812 .. _`#38813`: https://github.com/saltstack/salt/pull/38813 .. _`#38833`: https://github.com/saltstack/salt/pull/38833 .. _`#5999`: https://github.com/saltstack/salt/issues/5999 .. _`747project`: https://github.com/747project .. _`Akilesh1597`: https://github.com/Akilesh1597 .. _`Arabus`: https://github.com/Arabus .. _`AvinashDeluxeVR`: https://github.com/AvinashDeluxeVR .. _`COLABORATI`: https://github.com/COLABORATI .. _`Ch3LL`: https://github.com/Ch3LL .. _`DmitryKuzmenko`: https://github.com/DmitryKuzmenko .. _`Firewire2002`: https://github.com/Firewire2002 .. _`JensRantil`: https://github.com/JensRantil .. 
_`Modulus`: https://github.com/Modulus .. _`Mrten`: https://github.com/Mrten .. _`NickDubelman`: https://github.com/NickDubelman .. _`SolarisYan`: https://github.com/SolarisYan .. _`Talkless`: https://github.com/Talkless .. _`Tanoti`: https://github.com/Tanoti .. _`The-Loeki`: https://github.com/The-Loeki .. _`TronPaul`: https://github.com/TronPaul .. _`UtahDave`: https://github.com/UtahDave .. _`aaronm-cloudtek`: https://github.com/aaronm-cloudtek .. _`abonillasuse`: https://github.com/abonillasuse .. _`alex-zel`: https://github.com/alex-zel .. _`alexandr-orlov`: https://github.com/alexandr-orlov .. _`alexbleotu`: https://github.com/alexbleotu .. _`alisson276`: https://github.com/alisson276 .. _`arthurlogilab`: https://github.com/arthurlogilab .. _`attiasr`: https://github.com/attiasr .. _`b-harper`: https://github.com/b-harper .. _`b1naryth1ef`: https://github.com/b1naryth1ef .. _`basepi`: https://github.com/basepi .. _`bdrung`: https://github.com/bdrung .. _`blaketmiller`: https://github.com/blaketmiller .. _`bshelton229`: https://github.com/bshelton229 .. _`cachedout`: https://github.com/cachedout .. _`calve`: https://github.com/calve .. _`clan`: https://github.com/clan .. _`clinta`: https://github.com/clinta .. _`cmclaughlin`: https://github.com/cmclaughlin .. _`craigafinch`: https://github.com/craigafinch .. _`cro`: https://github.com/cro .. _`curiositycasualty`: https://github.com/curiositycasualty .. _`d101nelson`: https://github.com/d101nelson .. _`davegiles`: https://github.com/davegiles .. _`davidpsv17`: https://github.com/davidpsv17 .. _`dere`: https://github.com/dere .. _`dereckson`: https://github.com/dereckson .. _`dhaines`: https://github.com/dhaines .. _`dincamihai`: https://github.com/dincamihai .. _`dmurphy18`: https://github.com/dmurphy18 .. _`do3meli`: https://github.com/do3meli .. _`dragon788`: https://github.com/dragon788 .. _`edgan`: https://github.com/edgan .. _`edwardsdanielj`: https://github.com/edwardsdanielj .. 
_`elyulka`: https://github.com/elyulka .. _`ericuldall`: https://github.com/ericuldall .. _`exowaucka`: https://github.com/exowaucka .. _`fanirama`: https://github.com/fanirama .. _`favoretti`: https://github.com/favoretti .. _`fedusia`: https://github.com/fedusia .. _`fj40crawler`: https://github.com/fj40crawler .. _`freach`: https://github.com/freach .. _`genuss`: https://github.com/genuss .. _`githubcdr`: https://github.com/githubcdr .. _`gravyboat`: https://github.com/gravyboat .. _`gstachowiak`: https://github.com/gstachowiak .. _`gtmanfred`: https://github.com/gtmanfred .. _`haeac`: https://github.com/haeac .. _`heewa`: https://github.com/heewa .. _`hu-dabao`: https://github.com/hu-dabao .. _`ikkaro`: https://github.com/ikkaro .. _`jackywu`: https://github.com/jackywu .. _`jdelic`: https://github.com/jdelic .. _`jeanpralo`: https://github.com/jeanpralo .. _`jeffreyctang`: https://github.com/jeffreyctang .. _`jelenak`: https://github.com/jelenak .. _`jerob`: https://github.com/jerob .. _`jf`: https://github.com/jf .. _`jfindlay`: https://github.com/jfindlay .. _`jinm`: https://github.com/jinm .. _`johje349`: https://github.com/johje349 .. _`jsandas`: https://github.com/jsandas .. _`junster1`: https://github.com/junster1 .. _`ketzacoatl`: https://github.com/ketzacoatl .. _`kevinquinnyo`: https://github.com/kevinquinnyo .. _`kluoto`: https://github.com/kluoto .. _`kontrolld`: https://github.com/kontrolld .. _`laleocen`: https://github.com/laleocen .. _`limited`: https://github.com/limited .. _`lorengordon`: https://github.com/lorengordon .. _`m03`: https://github.com/m03 .. _`markuskramerIgitt`: https://github.com/markuskramerIgitt .. _`mcalmer`: https://github.com/mcalmer .. _`mchugh19`: https://github.com/mchugh19 .. _`meaksh`: https://github.com/meaksh .. _`mikejford`: https://github.com/mikejford .. _`moio`: https://github.com/moio .. _`morganwillcock`: https://github.com/morganwillcock .. _`msummers42`: https://github.com/msummers42 .. 
_`multani`: https://github.com/multani .. _`nevins-b`: https://github.com/nevins-b .. _`nullify005`: https://github.com/nullify005 .. _`pass-by-value`: https://github.com/pass-by-value .. _`phil123456`: https://github.com/phil123456 .. _`pille`: https://github.com/pille .. _`pingangit`: https://github.com/pingangit .. _`rallytime`: https://github.com/rallytime .. _`rbjorklin`: https://github.com/rbjorklin .. _`saltstack/salt#31081`: https://github.com/saltstack/salt/issues/31081 .. _`saltstack/salt#31207`: https://github.com/saltstack/salt/pull/31207 .. _`saltstack/salt#36386`: https://github.com/saltstack/salt/pull/36386 .. _`saltstack/salt#36679`: https://github.com/saltstack/salt/issues/36679 .. _`saltstack/salt#36736`: https://github.com/saltstack/salt/pull/36736 .. _`saltstack/salt#36961`: https://github.com/saltstack/salt/issues/36961 .. _`saltstack/salt#37081`: https://github.com/saltstack/salt/pull/37081 .. _`saltstack/salt#37118`: https://github.com/saltstack/salt/issues/37118 .. _`saltstack/salt#37358`: https://github.com/saltstack/salt/pull/37358 .. _`saltstack/salt#37401`: https://github.com/saltstack/salt/pull/37401 .. _`saltstack/salt#37502`: https://github.com/saltstack/salt/pull/37502 .. _`saltstack/salt#37515`: https://github.com/saltstack/salt/pull/37515 .. _`saltstack/salt#37549`: https://github.com/saltstack/salt/pull/37549 .. _`saltstack/salt#38707`: https://github.com/saltstack/salt/pull/38707 .. _`sash-kan`: https://github.com/sash-kan .. _`sebw`: https://github.com/sebw .. _`secumod`: https://github.com/secumod .. _`siccrusher`: https://github.com/siccrusher .. _`silenius`: https://github.com/silenius .. _`sjmh`: https://github.com/sjmh .. _`sjorge`: https://github.com/sjorge .. _`skizunov`: https://github.com/skizunov .. _`slinn0`: https://github.com/slinn0 .. _`sofixa`: https://github.com/sofixa .. _`swalladge`: https://github.com/swalladge .. _`techhat`: https://github.com/techhat .. _`tedski`: https://github.com/tedski .. 
_`terminalmage`: https://github.com/terminalmage .. _`thatch45`: https://github.com/thatch45 .. _`thusoy`: https://github.com/thusoy .. _`tjyang`: https://github.com/tjyang .. _`toanju`: https://github.com/toanju .. _`tobiasBora`: https://github.com/tobiasBora .. _`tobithiel`: https://github.com/tobithiel .. _`tsaridas`: https://github.com/tsaridas .. _`twangboy`: https://github.com/twangboy .. _`tyeapple`: https://github.com/tyeapple .. _`tyhunt99`: https://github.com/tyhunt99 .. _`vernondcole`: https://github.com/vernondcole .. _`viict`: https://github.com/viict .. _`vutny`: https://github.com/vutny .. _`wanparo`: https://github.com/wanparo .. _`whiteinge`: https://github.com/whiteinge .. _`xiaoanyunfei`: https://github.com/xiaoanyunfei .. _`yhekma`: https://github.com/yhekma .. _`zwo-bot`: https://github.com/zwo-bot
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/releases/2016.3.5.rst
0.824533
0.675711
2016.3.5.rst
pypi
======================== Salt 0.9.8 Release Notes ======================== :release: 2012-03-21 Salt 0.9.8 is a big step forward, with many additions and enhancements, as well as a number of precursors to advanced future developments. This version of Salt adds much more power to the command line, making the old hard timeout issues a thing of the past and adds keyword argument support. These additions are also available in the salt client API, making the available API tools much more powerful. The new pillar system allows for data to be stored on the master and assigned to minions in a granular way similar to the state system. It also allows flexibility for users who want to keep data out of their state tree similar to 'external lookup' functionality in other tools. A new way to extend requisites was added, the "requisite in" statement. This makes adding requires or watch statements to external state decs much easier. Additions to requisites making them much more powerful have been added as well as improved error checking for sls files in the state system. A new provider system has been added to allow for redirecting what modules run in the background for individual states. Support for openSUSE has been added and support for Solaris has begun serious development. Windows support has been significantly enhanced as well. The matcher and target systems have received a great deal of attention. The default behavior of grain matching has changed slightly to reflect the rest of salt and the compound matcher system has been refined. A number of impressive features with keyword arguments have been added to both the CLI and to the state system. This makes states much more powerful and flexible while maintaining the simple configuration everyone loves. The new batch size capability allows for executions to be rolled through a group of targeted minions a percentage or specific number at a time. 
This was added to prevent the "thundering herd" problem when targeting large numbers of minions for things like service restarts or file downloads. Upgrade Considerations ====================== Upgrade Issues -------------- There was a previously missed oversight which could cause a newer minion to crash an older master. That oversight has been resolved so the version incompatibility issue will no longer occur. When upgrading to 0.9.8 make sure to upgrade the master first, followed by the minions. Debian/Ubuntu Packages ---------------------- The original Debian/Ubuntu packages were called salt and included all salt applications. New packages in the ppa are split by function. If an old salt package is installed then it should be manually removed and the new split packages need to be freshly installed. On the master: .. code-block:: sh # apt-get purge salt # apt-get install salt-{master,minion} On the minions: .. code-block:: sh # apt-get purge salt # apt-get install salt-minion And on any Syndics: .. code-block:: sh # apt-get install salt-syndic The official Salt PPA for Ubuntu is located at: https://launchpad.net/~saltstack/+archive/ubuntu/salt Major Features ============== Pillar ------ :ref:`Pillar <pillar>` offers an interface to declare variable data on the master that is then assigned to the minions. The pillar data is made available to all modules, states, sls files etc. It is compiled on the master and is declared using the existing renderer system. This means that learning pillar should be fairly trivial to those already familiar with salt states. CLI Additions ------------- The ``salt`` command has received a serious overhaul and is more powerful than ever. Data is returned to the terminal as it is received, and the salt command will now wait for all running minions to return data before stopping. 
This makes adding very large *--timeout* arguments completely unnecessary and gets rid of long running operations returning empty ``{}`` when the timeout is exceeded. When calling salt via sudo, the user originally running salt is saved to the log for auditing purposes. This makes it easy to see who ran what by just looking through the minion logs. The *salt-key* command gained the *-D* and *--delete-all* arguments for removing all keys. Be careful with this one! Running States Without a Master ------------------------------- The ability to run states without a salt-master has been added in 0.9.8. This feature allows for the unmodified salt state tree to be read locally from a minion. The result is that the UNMODIFIED state tree has just become portable, allowing minions to have a local copy of states or to manage states without a master entirely. This is accomplished via the new file client interface in Salt that allows for the ``salt://`` URI to be redirected to custom interfaces. This means that there are now two interfaces for the salt file server, calling the master or looking in a local, minion defined ``file_roots``. This new feature can be used by modifying the minion config to point to a local ``file_roots`` and setting the ``file_client`` option to ``local``. Keyword Arguments and States ---------------------------- State modules now accept the ``**kwargs`` argument. This results in all data in a sls file assigned to a state being made available to the state function. This passes data in a transparent way back to the modules executing the logic. In particular, this allows adding arguments to the ``pkg.install`` module that enable more advanced and granular controls with respect to what the state is capable of. An example of this along with the new debconf module for installing ldap client packages on Debian: ..
code-block:: yaml ldap-client-packages: pkg: - debconf: salt://debconf/ldap-client.ans - installed - names: - nslcd - libpam-ldapd - libnss-ldapd Keyword Arguments and the CLI ----------------------------- In the past it was required that all arguments be passed in the proper order to the *salt* and *salt-call* commands. As of 0.9.8, keyword arguments can be passed in the form of ``kwarg=argument``. .. code-block:: sh # salt -G 'type:dev' git.clone \ repository=https://github.com/saltstack/salt.git cwd=/tmp/salt user=jeff Matcher Refinements and Changes ------------------------------- A number of fixes and changes have been applied to the Matcher system. The most noteworthy is the change in the grain matcher. The grain matcher used to use a regular expression to match the passed data to a grain, but now defaults to a shell glob like the majority of match interfaces in Salt. A new option is available that still uses the old style regex matching to grain data called ``grain-pcre``. To use regex matching in compound matches use the letter *P*. For example, this would match any ArchLinux or Fedora minions: .. code-block:: sh # salt --grain-pcre 'os:(Arch|Fed).*' test.ping And the associated compound matcher suitable for ``top.sls`` is *P*: .. code-block:: sh P@os:(Arch|Fed).* **NOTE**: Changing the grains matcher from pcre to glob is backwards incompatible. Support has been added for matching minions with Yahoo's range library. This is handled by passing range syntax with *-R* or *--range* arguments to salt. More information at: https://github.com/ytoolshed/range/wiki/%22yamlfile%22-module-file-spec Requisite "in" -------------- A new means of updating requisite statements has been added to make adding watchers and requires to external states easier. Before 0.9.8 the only way to extend the states that were watched by a state outside of the sls was to use an extend statement: ..
code-block:: yaml include: - http extend: apache: service: - watch: - pkg: tomcat tomcat: pkg: - installed But the new ``Requisite in`` statement allows for easier extends for requisites: .. code-block:: yaml include: - http tomcat: pkg: - installed - watch_in: - service: apache Requisite in is part of the extend system, so still remember to always include the sls that is being extended! Providers --------- Salt predetermines what modules should be mapped to what uses based on the properties of a system. These determinations are generally made for modules that provide things like package and service management. The apt module maps to pkg on Debian and the yum module maps to pkg on Fedora for instance. Sometimes in states, it may be necessary for a non-default module to be used for the desired functionality. For instance, an Arch Linux system may have been set up with systemd support. Instead of using the default service module detected for Arch Linux, the systemd module can be used: .. code-block:: yaml http: service: - running - enable: True - provider: systemd Default providers can also be defined in the minion config file: .. code-block:: yaml providers: service: systemd When default providers are passed in the minion config, then those providers will be applied to all functionality in Salt, this means that the functions called by the minion will use these modules, as well as states. Requisite Glob Matching ----------------------- Requisites can now be defined with glob expansion. This means that if there are many requisites, they can be defined on a single line. To watch all files in a directory: .. code-block:: yaml http: service: - running - enable: True - watch: - file: /etc/http/conf.d/* This example will watch all defined files that match the glob ``/etc/http/conf.d/*`` Batch Size ---------- The new batch size option allows commands to be executed while maintaining that only so many hosts are executing the command at one time. 
This option can take a percentage or a finite number: .. code-block:: bash salt '*' -b 10 test.ping salt -G 'os:RedHat' --batch-size 25% apache.signal restart This will only run test.ping on 10 of the targeted minions at a time and then restart apache on 25% of the minions matching ``os:RedHat`` at a time and work through them all until the task is complete. This makes jobs like rolling web server restarts behind a load balancer or doing maintenance on BSD firewalls using carp much easier with salt. Module Updates -------------- This is a list of notable, but non-exhaustive updates with new and existing modules. Windows support has seen a flurry of support this release cycle. We've gained all new :mod:`file <salt.modules.win_file>`, :mod:`network <salt.modules.win_network>`, and :mod:`shadow <salt.modules.win_shadow>` modules. Please note that these are still a work in progress. For our ruby users, new :mod:`rvm <salt.modules.rvm>` and :mod:`gem <salt.modules.gem>` modules have been added along with the :mod:`associated <salt.states.rvm>` :mod:`states <salt.states.gem>`. The :mod:`virt <salt.modules.virt>` module gained basic Xen support. The :mod:`yum <salt.modules.yumpkg>` module gained Scientific Linux support. The :mod:`pkg <salt.modules.aptpkg>` module on Debian, Ubuntu, and derivatives forces apt to run in a non-interactive mode. This prevents issues when package installation waits for confirmation. A :mod:`pkg <salt.modules.zypper>` module for OpenSUSE's zypper was added. The :mod:`service <salt.modules.upstart>` module on Ubuntu natively supports upstart. A new :mod:`debconf <salt.modules.debconfmod>` module was contributed by our community for more advanced control over deb package deployments on Debian based distributions. The :mod:`mysql.user <salt.states.mysql_user>` state and :mod:`mysql <salt.modules.mysql>` module gained a *password_hash* argument.
The :mod:`cmd <salt.modules.cmdmod>` module and state gained a *shell* keyword argument for specifying a shell other than ``/bin/sh`` on Linux / Unix systems. New :mod:`git <salt.modules.git>` and :mod:`mercurial <salt.modules.hg>` modules have been added for fans of distributed version control. In Progress Development ======================= Master Side State Compiling --------------------------- While we feel strongly that the advantages gained with minion side state compiling are very critical, it does prevent certain features that may be desired. 0.9.8 has support for initial master side state compiling, but many more components still need to be developed, it is hoped that these can be finished for 0.9.9. The goal is that states can be compiled on both the master and the minion allowing for compilation to be split between master and minion. Why will this be great? It will allow storing sensitive data on the master and sending it to some minions without all minions having access to it. This will be good for handling ssl certificates on front-end web servers for instance. Solaris Support --------------- Salt 0.9.8 sees the introduction of basic Solaris support. The daemon runs well, but grains and more of the modules need updating and testing. Windows Support --------------- Salt states on windows are now much more viable thanks to contributions from our community! States for file, service, local user, and local group management are more fully fleshed out along with network and disk modules. Windows users can also now manage registry entries using the new "reg" module.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/releases/0.9.8.rst
0.715325
0.705252
0.9.8.rst
pypi
======================== Salt 0.9.5 Release Notes ======================== :release: 2012-01-15 Salt 0.9.5 is one of the largest steps forward in the development of Salt. 0.9.5 comes with many milestones, this release has seen the community of developers grow out to an international team of 46 code contributors and has many feature additions, feature enhancements, bug fixes and speed improvements. .. warning:: Be sure to :ref:`read the upgrade instructions <v0.9.5-msgpack>` about the switch to msgpack before upgrading! Community ========= Nothing has proven to have more value to the development of Salt than the outstanding community that has been growing at such a great pace around Salt. This has proven not only that Salt has great value, but also the expandability of Salt is as exponential as I originally intended. 0.9.5 has received over 600 additional commits since 0.9.4 with a swath of new committers. The following individuals have contributed to the development of 0.9.5: * Aaron Bull Schaefer * Antti Kaihola * Bas Tichelaar * Brad Barden * Brian Wagner * Byron Clark * Chris Scheller * Christer Edwards * Clint Savage * Corey Quinn * David Boucha * Eivind Uggedal * Eric Poelke * Evan Borgstrom * Jed Glazner * Jeff Schroeder * Jeffrey C. Ollie * Jonas Buckner * Kent Tenney * Martin Schnabel * Maxim Burgerhout * Mitch Anderson * Nathaniel Whiteinge * Seth House * Thomas S Hatch * Thomas Schreiber * Tor Hveem * lzyeval * syphernl This makes 21 new developers since 0.9.4 was released! To keep up with the growing community follow Salt on Black Duck Open Hub (https://www.openhub.net/p/salt), to join the Salt development community, fork Salt on GitHub, and get coding (https://github.com/saltstack/salt)! Major Features ============== .. _v0.9.5-msgpack: SPEED! Pickle to msgpack ------------------------ For a few months now we have been talking about moving away from Python pickles for network serialization, but a preferred serialization format had not yet been found.
After an extensive performance testing period involving everything from JSON to protocol buffers, a clear winner emerged. Message Pack (https://msgpack.org/) proved to not only be the fastest and most compact, but also the most "salt like". Message Pack is simple, and the code involved is very small. The msgpack library for Python has been added directly to Salt. This move introduces a few changes to Salt. First off, Salt is no longer a "noarch" package, since the msgpack lib is written in C. Salt 0.9.5 will also have compatibility issues with 0.9.4 with the default configuration. We have gone through great lengths to avoid backwards compatibility issues with Salt, but changing the serialization medium was going to create issues regardless. Salt 0.9.5 is somewhat backwards compatible with earlier minions. A 0.9.5 master can command older minions, but only if the :conf_master:`serial` config value in the master is set to ``pickle``. This will tell the master to publish messages in pickle format and will allow the master to receive messages in both msgpack and pickle formats. Therefore **the suggested methods for upgrading** are either to just upgrade everything at once, or: 1. Upgrade the master to 0.9.5 2. Set :conf_master:`serial` to ``pickle`` in the master config 3. Upgrade the minions 4. Remove the ``serial`` option from the master config Since pickles can be used as a security exploit the ability for a master to accept pickles from minions at all will be removed in a future release. C Bindings for YAML -------------------- All of the YAML rendering is now done with the YAML C bindings. This speeds up all of the sls files when running states. Experimental Windows Support ---------------------------- David Boucha has worked tirelessly to bring initial support to Salt for Microsoft Windows operating systems. Right now the Salt Minion can run as a native Windows service and accept commands. 
In the weeks and months to come Windows will receive the full treatment and will have support for Salt States and more robust support for managing Windows systems. This is a big step forward for Salt to move entirely outside of the Unix world, and proves Salt is a viable cross platform solution. Big Thanks to Dave for his contribution here! Dynamic Module Distribution --------------------------- Many Salt users have expressed the desire to have Salt distribute in-house modules, states, renderers, returners, and grains. This support has been added in a number of ways: Modules via States ``````````````````` Now when salt modules are deployed to a minion via the state system as a file, then the modules will be automatically loaded into the active running minion - no restart required - and into the active running state. So custom state modules can be deployed and used in the same state run. Modules via Module Environment Directories ``````````````````````````````````````````` Under the file_roots each environment can now have directories that are used to deploy large groups of modules. These directories sync modules at the beginning of a state run on the minion, or can be manually synced via the Salt module :mod:`salt.modules.saltutil.sync_all`. The directories are named: * ``_modules`` * ``_states`` * ``_grains`` * ``_renderers`` * ``_returners`` The modules are pushed to their respective scopes on the minions. Module Reloading ---------------- Modules can now be reloaded without restarting the minion, this is done by calling the :mod:`salt.modules.sys.reload_modules` function. But wait, there's more! Now when a salt module of any type is added via states the modules will be automatically reloaded, allowing for modules to be laid down with states and then immediately used. Finally, all modules are reloaded when modules are dynamically distributed from the salt master. 
Enable / Disable Added to Service --------------------------------- A great deal of demand has existed for adding the capability to set services to be started at boot in the service module. This feature also comes with an overhaul of the service modules and initial systemd support. This means that the :mod:`service state <salt.states.service.running>` can now accept ``- enable: True`` to make sure a service is enabled at boot, and ``- enable: False`` to make sure it is disabled. Compound Target --------------- A new target type has been added to the lineup, the compound target. In previous versions the desired minions could only be targeted via a single specific target type, but now many target specifications can be declared. These targets can also be separated by and/or operators, so certain properties can be used to omit a node: .. code-block:: bash salt -C 'webserv* and G@os:Debian or E@db.*' test.ping will match all minions with ids starting with webserv via a glob and minions matching the ``os:Debian`` grain. Or minions that match the ``db.*`` regular expression. Node Groups ----------- Often the convenience of having a predefined group of minions to execute targets on is desired. This can be accomplished with the new nodegroups feature. Nodegroups allow for predefined compound targets to be declared in the master configuration file: .. code-block:: yaml nodegroups: group1: 'L@foo.domain.com,bar.domain.com,baz.domain.com and bl*.domain.com' group2: 'G@os:Debian and foo.domain.com' And then used via the ``-N`` option: .. code-block:: bash salt -N group1 test.ping Minion Side Data Store ----------------------- The data module introduces the initial approach into storing persistent data on the minions, specific to the minions. This allows for data to be stored on minions that can be accessed from the master or from the minion. The Minion datastore is young, and will eventually provide an interface similar to a more mature key/value pair server. 
Major Grains Improvement ------------------------- The Salt grains have been overhauled to include a massive amount of extra data. This includes hardware data, os data and salt specific data. Salt -Q is Useful Now --------------------- In the past the salt query system, which would display the data from recent executions would be displayed in pure Python, and it was unreadable. 0.9.5 has added the outputter system to the ``-Q`` option, thus enabling the salt query system to return readable output. Packaging Updates ================= Huge strides have been made in packaging Salt for distributions. These additions are thanks to our wonderful community where the work to set up packages has proceeded tirelessly. FreeBSD ------- Salt on FreeBSD? There is a port for that: https://svnweb.freebsd.org/ports/head/sysutils/py-salt/ This port was developed and added by Christer Edwards. This also marks the first time Salt has been included in an upstream packaging system! Fedora and Red Hat Enterprise ------------------------------ Salt packages have been prepared for inclusion in the Fedora Project and in EPEL for Red Hat Enterprise 5 and 6. These packages are the result of the efforts made by Clint Savage (herlo). Debian/Ubuntu ------------- A team of many contributors have assisted in developing packages for Debian and Ubuntu. Salt is still actively seeking inclusion in upstream Debian and Ubuntu and the package data that has been prepared is being pushed through the needed channels for inclusion. These packages have been prepared with the help of: * Corey * Aaron Toponce * and more More to Come ------------ We are actively seeking inclusion in more distributions. Primarily getting Salt into Gentoo, SUSE, OpenBSD, and preparing Solaris support are all turning into higher priorities. Refinement ========== Salt continues to be refined into a faster, more stable and more usable application. 0.9.5 comes with more debug logging, more bug fixes and more complete support.
More Testing, More BugFixes --------------------------- 0.9.5 comes with more bugfixes due to more testing than any previous release. The growing community and the introduction a dedicated QA environment have unearthed many issues that were hiding under the covers. This has further refined and cleaned the state interface, taking care of things from minor visual issues to repairing misleading data. Custom Exceptions ----------------- A custom exception module has been added to throw salt specific exceptions. This allows Salt to give much more granular error information. New Modules ----------- :mod:`data <salt.modules.data>` ``````````````````````````````` The new data module manages a persistent datastore on the minion. Big thanks to bastichelaar for his help refining this module :mod:`freebsdkmod <salt.modules.freebsdkmod>` ````````````````````````````````````````````` FreeBSD kernel modules can now be managed in the same way Salt handles Linux kernel modules. This module was contributed thanks to the efforts of Christer Edwards :mod:`gentoo_service <salt.modules.gentoo_service>` ``````````````````````````````````````````````````` Support has been added for managing services in Gentoo. Now Gentoo services can be started, stopped, restarted, enabled, disabled, and viewed. :mod:`pip <salt.modules.pip>` ````````````````````````````` The pip module introduces management for pip installed applications. Thanks goes to whitinge for the addition of the pip module :mod:`rh_service <salt.modules.rh_service>` ``````````````````````````````````````````` The rh_service module enables Red Hat and Fedora specific service management. Now Red Hat like systems come with extensive management of the classic init system used by Red Hat :mod:`saltutil <salt.modules.saltutil>` ``````````````````````````````````````` The saltutil module has been added as a place to hold functions used in the maintenance and management of salt itself. Saltutil is used to salt the salt minion. 
The saltutil module is presently used only to sync extension modules from the master server. :mod:`systemd <salt.modules.systemd>` ````````````````````````````````````` Systemd support has been added to Salt, now systems using this next generation init system are supported on systems running systemd. :mod:`virtualenv <salt.modules.virtualenv>` ``````````````````````````````````````````` The virtualenv module has been added to allow salt to create virtual Python environments. Thanks goes to whitinge for the addition of the virtualenv module :mod:`win_disk <salt.modules.win_disk>` ``````````````````````````````````````` Support for gathering disk information on Microsoft Windows minions The windows modules come courtesy of Utah_Dave :mod:`win_service <salt.modules.win_service>` ````````````````````````````````````````````` The win_service module adds service support to Salt for Microsoft Windows services :mod:`win_useradd <salt.modules.win_useradd>` ````````````````````````````````````````````` Salt can now manage local users on Microsoft Windows Systems :mod:`yumpkg5 <salt.modules.yumpkg5>` ````````````````````````````````````` The yumpkg module introduced in 0.9.4 uses the yum API to interact with the yum package manager. Unfortunately, on Red Hat 5 systems salt does not have access to the yum API because the yum API is running under Python 2.4 and Salt needs to run under Python 2.6. The yumpkg5 module bypasses this issue by shelling out to yum on systems where the yum API is not available. New States ----------- :mod:`mysql_database <salt.states.mysql_database>` `````````````````````````````````````````````````` The new mysql_database state adds the ability to systems running a mysql server to manage the existence of mysql databases. The mysql states are thanks to syphernl :mod:`mysql_user <salt.states.mysql_user>` `````````````````````````````````````````` The mysql_user state enables mysql user management.
:mod:`virtualenv <salt.states.virtualenv>` `````````````````````````````````````````` The virtualenv state can manage the state of Python virtual environments. Thanks to Whitinge for the virtualenv state New Returners ------------- :mod:`cassandra_returner <salt.returners.cassandra_return>` ``````````````````````````````````````````````````````````` A returner allowing Salt to send data to a cassandra server. Thanks to Byron Clark for contributing this returner
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/releases/0.9.5.rst
0.829216
0.770896
0.9.5.rst
pypi
========================= Salt 0.11.0 Release Notes ========================= :release: 2012-12-14 Salt 0.11.0 is here, with some highly sought after and exciting features. These features include the new overstate system, the reactor system, a new state run scope component called __context__, the beginning of the search system (still needs a great deal of work), multiple package states, the MySQL returner and a better system to arbitrarily reference outputters. It is also noteworthy that we are changing how we mark release numbers. For the life of the project we have been pushing every release with features and fixes as point releases. We will now be releasing point releases for only bug fixes on a more regular basis and major feature releases on a slightly less regular basis. This means that the next release will be a bugfix only release with a version number of 0.11.1. The next feature release will be named 0.12.0 and will mark the end of life for the 0.11 series. Major Features ============== OverState --------- The overstate system is a simple way to manage rolling state executions across many minions. The overstate allows for a state to depend on the successful completion of another state. Reactor System -------------- The new reactor system allows for a reactive logic engine to be created which can respond to events within a salted environment. The reactor system uses sls files to match events fired on the master with actions, enabling Salt to react to problems in an infrastructure. Your load-balanced group of webservers is under extra load? Spin up a new VM and add it to the group. Your fileserver is filling up? Send a notification to your sysadmin on call. The possibilities are endless! Module Context -------------- A new component has been added to the module loader system. The module context is a data structure that can hold objects for a given scope within the module. 
This allows for components that are initialized to be stored in a persistent context which can greatly speed up ongoing connections. Right now the best example can be found in the `cp` execution module. Multiple Package Management --------------------------- A long desired feature has been added to package management. By definition Salt States have always installed packages one at a time. On most platforms this is not the fastest way to install packages. Erik Johnson, aka terminalmage, has modified the package modules for many providers and added new capabilities to install groups of packages. These package groups can be defined as a list of packages available in repository servers: .. code-block:: yaml python_pkgs: pkg.installed: - pkgs: - python-mako - whoosh - python-git or specify based on the location of specific packages: .. code-block:: yaml python_pkgs: pkg.installed: - sources: - python-mako: http://some-rpms.org/python-mako.rpm - whoosh: salt://whoosh/whoosh.rpm - python-git: ftp://companyserver.net/python-git.rpm Search System ------------- The bones to the search system have been added. This is a very basic interface that allows for search backends to be added as search modules. The first supported search module is the whoosh search backend. Right now only the basic paths for the search system are in place, making this very experimental. Further development will involve improving the search routines and index routines for whoosh and other search backends. The search system has been made to allow for searching through all of the state and pillar files, configuration files and all return data from minion executions. Notable Changes =============== All previous versions of Salt have shared many directories between the master and minion. The default locations for keys, cached data and sockets has been shared by master and minion. This has created serious problems with running a master and a minion on the same systems. 
0.11.0 changes the defaults to be separate directories. Salt will also attempt to migrate all of the old key data into the correct new directories, but if it is not successful it may need to be done manually. If your keys exhibit issues after updating make sure that they have been moved from ``/etc/salt/pki`` to ``/etc/salt/pki/{master,minion}``. The old setup will look like this: .. code-block:: text /etc/salt/pki |-- master.pem |-- master.pub |-- minions | `-- ragnarok.saltstack.net |-- minions_pre |-- minion.pem |-- minion.pub |-- minion_master.pub |-- minions_pre `-- minions_rejected With the accepted minion keys in ``/etc/salt/pki/minions``, the new setup places the accepted minion keys in ``/etc/salt/pki/master/minions``. .. code-block:: text /etc/salt/pki |-- master | |-- master.pem | |-- master.pub | |-- minions | | `-- ragnarok.saltstack.net | |-- minions_pre | `-- minions_rejected |-- minion | |-- minion.pem | |-- minion.pub | `-- minion_master.pub
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/releases/0.11.0.rst
0.705988
0.656183
0.11.0.rst
pypi
========================= Salt 0.15.0 Release Notes ========================= :release: 2013-05-03 The many new features of Salt 0.15.0 have arrived! Salt 0.15.0 comes with many smaller features and a few larger ones. These features range from better debugging tools to the new Salt Mine system. Major Features ============== The Salt Mine ------------- First there was the peer system, allowing for commands to be executed from a minion to other minions to gather data live. Then there was the external job cache for storing and accessing long term data. Now the middle ground is being filled in with the Salt Mine. The Salt Mine is a system used to execute functions on a regular basis on minions and then store only the most recent data from the functions on the master, then the data is looked up via targets. The mine caches data that is public to all minions, so when a minion posts data to the mine all other minions can see it. IPV6 Support ------------ 0.13.0 saw the addition of initial IPV6 support but errors were encountered and it needed to be stripped out. This time the code covers more cases and must be explicitly enabled. But the support is much more extensive than before. Copy Files From Minions to the Master ------------------------------------- Minions have long been able to copy files down from the master file server, but until now files could not be easily copied from the minion up to the master. A new function called ``cp.push`` can push files from the minions up to the master server. The uploaded files are then cached on the master in the master cachedir for each minion. Better Template Debugging ------------------------- Template errors have long been a burden when writing states and pillar. 0.15.0 will now send the compiled template data to the debug log, this makes tracking down the intermittent stage templates much easier. So running state.sls or state.highstate with `-l debug` will now print out the rendered templates in the debug information. 
State Event Firing ------------------ The state system is now more closely tied to the master's event bus. Now when a state fails the failure will be fired on the master event bus so that the reactor can respond to it. Major Syndic Updates -------------------- The Syndic system has been basically re-written. Now it runs in a completely asynchronous way and functions primarily as an event broker. This means that the events fired on the syndic are now pushed up to the higher level master instead of the old method used which waited for the client libraries to return. This makes the syndic much more accurate and powerful, it also means that all events fired on the syndic master make it up the pipe as well making a reactor on the higher level master able to react to minions further downstream. Peer System Updates ------------------- The Peer System has been updated to run using the client libraries instead of firing directly over the publish bus. This makes the peer system much more consistent and reliable. Minion Key Revocation --------------------- In the past when a minion was decommissioned the key needed to be manually deleted on the master, but now a function on the minion can be used to revoke the calling minion's key: .. code-block:: bash $ salt-call saltutil.revoke_auth Function Return Codes --------------------- Functions can now be assigned numeric return codes to determine if the function executed successfully. While not all functions have been given return codes, many have and it is an ongoing effort to fill out all functions that might return a non-zero return code. Functions in Overstate ---------------------- The overstate system was originally created to just manage the execution of states, but with the addition of return codes to functions, requisite logic can now be used with respect to the overstate. This means that an overstate stage can now run single functions instead of just state executions. 
Pillar Error Reporting ---------------------- Previously if errors surfaced in pillar, then the pillar would consist of only an empty dict. Now all data that was successfully rendered stays in pillar and the render error is also made available. If errors are found in the pillar, states will refuse to run. Using Cached State Data ----------------------- Sometimes states are executed purely to maintain a specific state rather than to update states with new configs. This is grounds for the new cached state system. By adding `cache=True` to a state call the state will not be generated fresh from the master but the last state data to be generated will be used. If no previous state data is available then fresh data will be generated. Monitoring States ----------------- The new monitoring states system has been started. This is very young but allows for states to be used to configure monitoring routines. So far only one monitoring state is available, the ``disk.status`` state. As more capabilities are added to Salt UI the monitoring capabilities of Salt will continue to be expanded.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/releases/0.15.0.rst
0.720958
0.736874
0.15.0.rst
pypi
======================== Salt 0.9.7 Release Notes ======================== :release: 2012-02-15 Salt 0.9.7 is here! The latest iteration of Salt brings more features and many fixes. This release is a great refinement over 0.9.6, adding many conveniences under the hood, as well as some features that make working with Salt much better. A few highlights include the new Job system, refinements to the requisite system in states, the ``mod_init`` interface for states, external node classification, search path to managed files in the file state, and refinements and additions to dynamic module loading. 0.9.7 also introduces the long developed (and oft changed) unit test framework and the initial unit tests. Major Features ============== Salt Jobs Interface ------------------- The new jobs interface makes the management of running executions much cleaner and more transparent. Building on the existing execution framework the jobs system allows clear introspection into the active running state of the running Salt interface. The Jobs interface is centered in the new minion side proc system. The minions now store msgpack serialized files under ``/var/cache/salt/proc``. These files keep track of the active state of processes on the minion. Functions in the saltutil Module ```````````````````````````````` A number of functions have been added to the saltutil module to manage and view the jobs: ``running`` - Returns the data of all running jobs that are found in the proc directory. ``find_job`` - Returns specific data about a certain job based on job id. ``signal_job`` - Allows for a given jid to be sent a signal. ``term_job`` - Sends a termination signal (``SIGTERM, 15``) to the process controlling the specified job. ``kill_job`` - Sends a kill signal (``SIGKILL, 9``) to the process controlling the specified job. The jobs Runner --------------- A convenience runner front end and reporting system has been added as well.
The jobs runner contains functions to make viewing data easier and cleaner. The jobs runner contains a number of functions... active `````` The active function runs ``saltutil.running`` on all minions and formats the return data about all running jobs in a much more usable and compact format. The active function will also compare jobs that have returned and jobs that are still running, making it easier to see what systems have completed a job and what systems are still being waited on. lookup_jid `````````` When jobs are executed the return data is sent back to the master and cached. By default it is cached for 24 hours, but this can be configured via the ``keep_jobs`` option in the master configuration. Using the ``lookup_jid`` runner will display the same return data that the initial job invocation with the salt command would display. list_jobs ````````` Before finding a historic job, it may be required to find the job id. ``list_jobs`` will parse the cached execution data and display all of the job data for jobs that have already, or partially returned. External Node Classification ---------------------------- Salt can now use external node classifiers like Cobbler's ``cobbler-ext-nodes``. Salt uses specific data from the external node classifier. In particular the classes value denotes which sls modules to run, and the environment value sets to another environment. An external node classification can be set in the master configuration file via the ``external_nodes`` option: https://salt.readthedocs.io/en/latest/ref/configuration/master.html#external-nodes External nodes are loaded in addition to the top files. If it is intended to only use external nodes, do not deploy any top files. State Mod Init System --------------------- An issue arose with the pkg state. Every time a package was run Salt would need to refresh the package database. This made systems with slower package metadata refresh speeds much slower to work with.
To alleviate this issue the ``mod_init`` interface has been added to salt states. The ``mod_init`` interface is a function that can be added to a state file. This function is called with the first state called. In the case of the pkg state, the ``mod_init`` function sets up a tag which makes the package database only refresh on the first attempt to install a package. In a nutshell, the ``mod_init`` interface allows a state to run any command that only needs to be run once, or can be used to set up an environment for working with the state. Source File Search Path ----------------------- The file state continues to be refined, adding speed and capabilities. This release adds the ability to pass a list to the source option. This list is then iterated over until the source file is found, and the first found file is used. The new syntax looks like this: .. code-block:: yaml /etc/httpd/conf/httpd.conf: file: - managed - source: - salt://httpd/httpd.conf - http://myserver/httpd.conf: md5=8c1fe119e6f1fd96bc06614473509bf1 The source option can take sources in the list from the salt file server as well as an arbitrary web source. If using an arbitrary web source the checksum needs to be passed as well for file verification. Refinements to the Requisite System ----------------------------------- A few discrepancies were still lingering in the requisite system, in particular, it was not possible to have a ``require`` and a ``watch`` requisite declared in the same state declaration. This issue has been alleviated, as well as making the requisite system run more quickly. Initial Unit Testing Framework ------------------------------ Because of the module system, and the need to test real scenarios, the development of a viable unit testing system has been difficult, but unit testing has finally arrived. Only a small amount of unit testing coverage has been developed, much more coverage will be in place soon. 
A huge thanks goes out to those who have helped with unit testing, and the contributions that have been made to get us where we are. Without these contributions unit tests would still be in the dark. Compound Targets Expanded ------------------------- Originally only support for ``and`` and ``or`` were available in the compound target. 0.9.7 adds the capability to negate compound targets with ``not``. Nodegroups in the Top File -------------------------- Previously the nodegroups defined in the master configuration file could not be used to match nodes for states. The nodegroups support has been expanded and the nodegroups defined in the master configuration can now be used to match minions in the top file.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/releases/0.9.7.rst
0.729038
0.907885
0.9.7.rst
pypi
.. _release-0-17-0: ========================= Salt 0.17.0 Release Notes ========================= :release: 2013-09-26 The 0.17.0 release is a very exciting release of Salt, this brings to Salt some very powerful new features and advances. The advances range from the state system to the test suite, covering new transport capabilities and making states easier and more powerful, to extending Salt Virt and much more! The 0.17.0 release will also be the last release of Salt to follow the old 0.XX.X numbering system, the next release of Salt will change the numbering to be date based following this format: <Year>.<Month>.<Minor> So if the release happens in November of 2013 the number will be 13.11.0, the first bugfix release will be 13.11.1 and so forth. Major Features ============== Halite ------ The new Halite web GUI is now available on PyPI. A great deal of work has been put into Halite to make it fully event driven and amazingly fast. The Halite UI can be started from within the Salt Master (after being installed from PyPI), or standalone, and does not require an external database to run. It is very lightweight! This initial release of Halite is primarily the framework for the UI and the communication systems, making it easy to extend and build the UI up. It presently supports watching the event bus and firing commands over Salt. At this time, Halite is not available as a package, but installation documentation is available at: https://docs.saltproject.io/topics/tutorials/halite.html Halite is, like the rest of Salt, Open Source! Much more will be coming in the future of Halite! Salt SSH -------- The new ``salt-ssh`` command has been added to Salt. This system allows for remote execution and states to be run over ssh. The benefit here being, that salt can run relying only on the ssh agent, rather than requiring a minion to be deployed. 
The ``salt-ssh`` system runs states in a way compatible with standard Salt,
Salt Thin --------- Salt Thin is an exciting new component of Salt, this is the ability to execute Salt routines without any transport mechanisms installed, it is a pure python subset of Salt. Salt Thin does not have any networking capability, but can be dropped into any system with Python installed and then ``salt-call`` can be called directly. The Salt Thin system, is used by the ``salt-ssh`` command, but can still be used to just drop salt somewhere for easy use. Event Namespacing ----------------- Events have been updated to be much more flexible. The tags in events have all been namespaced allowing easier tracking of event names. Mercurial Fileserver Backend ---------------------------- The popular git fileserver backend has been joined by the mercurial fileserver backend, allowing the state tree to be managed entirely via mercurial. External Logging Handlers ------------------------- The external logging handler system allows for Salt to directly hook into any external logging system. Currently supported are sentry and logstash. Jenkins Testing --------------- The testing systems in Salt have been greatly enhanced, tests for salt are now executed, via jenkins.saltstack.com, across many supported platforms. Jenkins calls out to salt-cloud to create virtual machines on Rackspace, then the minion on the virtual machine checks into the master running on Jenkins where a state run is executed that sets up the minion to run tests and executes the test suite. This now automates the sequence of running platform tests and allows for continuous destructive tests to be run. Salt Testing Project -------------------- The testing libraries for salt have been moved out of the main salt code base and into a standalone codebase. This has been done to ease the use of the testing systems being used in salt based projects other than Salt itself. 
StormPath External Authentication --------------------------------- The external auth system now supports the fantastic Stormpath cloud based authentication system. LXC Support ----------- Extensive additions have been added to Salt for LXC support. This included the backend libs for managing LXC containers. Addition into the salt-virt system is still in the works. macOS User/Group Support ------------------------ Salt is now able to manage users and groups on Minions running macOS. However, at this time user passwords cannot be managed. Django ORM External Pillar -------------------------- Pillar data can now be derived from Django managed databases. Fixes from RC to release ------------------------ - Multiple documentation fixes - Add multiple source files + templating for :mod:`file.append <salt.states.file.append>` (:issue:`6905`) - Support sysctl configuration files in systemd>=207 (:issue:`7351`) - Add :mod:`file.search <salt.modules.file.search>` and :mod:`file.replace <salt.modules.file.replace>` - Fix cross-calling execution functions in provider overrides - Fix locale override for postgres (:issue:`4543`) - Fix Raspbian identification for service/pkg support (:issue:`7371`) - Fix :mod:`cp.push <salt.modules.cp.push>` file corruption (:issue:`6495`) - Fix ALT Linux password hash specification (:issue:`3474`) - Multiple salt-ssh-related fixes and improvements
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/releases/0.17.0.rst
0.745954
0.713478
0.17.0.rst
pypi
.. _release-3005: ============================================= Salt 3005 release notes - Codename Phosphorus ============================================= Python 3.5 and 3.6 deprecation ------------------------------ This will be the last release we will support Python versions 3.5 and 3.6. In Salt release 3006, we will only support Python versions 3.7 and higher. Going forward, our policy will be to align with Python's supported versions. OS support end of life ---------------------- Debian and Raspbian 9 are now EOL, therefore we will no longer be building packages for these platforms. Raspberry Pi ------------ We will no longer build the Raspberry Pi packages after the 3005 release but will provide open sources project links in an updated announcement later. Please see the announcement for more details: https://saltproject.io/salt-project-announces-the-open-sourcing-of-several-saltstack-native-minions/ New packages available ---------------------- With the release of Salt 3005, we are pleased to announce the new onedir packages using pyinstaller are now out of beta and ready for production. These new packages make the installation process easier. Onedir packages install Salt with one directory that includes all the executables Salt needs to run effectively, including the version of Python and the required dependencies that Salt needs. These packages make it easier to use Salt out of the box without installing Python first. Going forward, any new OS platforms supported by the Salt Project from version 3005 can only be installed using onedir packages. For this release, this includes Redhat 9, Ubuntu 22.04, and Photon OS 3. The Salt Project will phase out the old ("classic") Salt package builds for currently supported operating systems by 3006. See `Upgrade to onedir <https://docs.saltproject.io/salt/install-guide/en/latest/topics/upgrade-to-onedir.html>`_ for more information. 
On the day of the Phosphorus release, the onedir packages will be available on https://repo.saltproject.io for each platform. The instructions for installing onedir packages and the classic packages will be available on the new `Salt Install Guide <https://docs.saltproject.io/salt/install-guide/en/latest/>`_. If you want to test out the packages today, you can install them from https://repo.saltproject.io/salt-dev/py3/ using the correct directory for your platform. If you find any issues with the packages, please open an issue on this repo: https://gitlab.com/saltstack/open/salt-pkg Classic, non-onedir packaging support ------------------------------------- The classic, non-onedir packaging system previously used for Salt will also be provided for platforms supported in previous Salt versions. The classic packaging will only be available for the 3005 release. The 3006 release and all releases going forward will only provide the onedir packages. Platform package support ------------------------ +--------------+---------------------+------------------------------+ | OS | New onedir packages | Classic, non-onedir packages | +==============+=====================+==============================+ | RHEL 7 | yes | yes | +--------------+---------------------+------------------------------+ | RHEL 8 | yes | yes | +--------------+---------------------+------------------------------+ | RHEL 9 | yes | no | +--------------+---------------------+------------------------------+ | Ubuntu 18.04 | yes | yes | +--------------+---------------------+------------------------------+ | Ubuntu 20.04 | yes | yes | +--------------+---------------------+------------------------------+ | Ubuntu 22.04 | yes | no | +--------------+---------------------+------------------------------+ | Debian 10 | yes | yes | +--------------+---------------------+------------------------------+ | Debian 11 | yes | yes | +--------------+---------------------+------------------------------+ | Raspbian 10 | no | yes 
| +--------------+---------------------+------------------------------+ | Raspbian 11 | no | yes | +--------------+---------------------+------------------------------+ | Fedora 35 | yes | yes | +--------------+---------------------+------------------------------+ | Fedora 36 | yes | yes | +--------------+---------------------+------------------------------+ | MacOS | yes | yes | +--------------+---------------------+------------------------------+ | Windows | yes | yes | +--------------+---------------------+------------------------------+ Repo paths ---------- +----------+-----------------------------------------------+-----------------------------------------+ | OS | Onedir path | Classic, Non-onedir path | +==========+===============================================+=========================================+ | RHEL | https://repo.saltproject.io/salt/py3/redhat/ | https://repo.saltproject.io/py3/redhat/ | +----------+-----------------------------------------------+-----------------------------------------+ | Ubuntu | https://repo.saltproject.io/salt/py3/ubuntu/ | https://repo.saltproject.io/py3/ubuntu/ | +----------+-----------------------------------------------+-----------------------------------------+ | Debian | https://repo.saltproject.io/salt/py3/debian/ | https://repo.saltproject.io/py3/debian/ | +----------+-----------------------------------------------+-----------------------------------------+ | Raspbian | Not available | https://repo.saltproject.io/py3/debian/ | +----------+-----------------------------------------------+-----------------------------------------+ | Fedora | Hosted on Fedora Repos | Hosted on Fedora Repos | +----------+-----------------------------------------------+-----------------------------------------+ | MacOS | https://repo.saltproject.io/salt/py3/osx/ | https://repo.saltproject.io/osx/ | +----------+-----------------------------------------------+-----------------------------------------+ | Windows | 
https://repo.saltproject.io/salt/py3/windows/ | https://repo.saltproject.io/windows/ | +----------+-----------------------------------------------+-----------------------------------------+ Note that the onedir paths above will not be available until the day of the Phosphorus release. How do I migrate to the onedir packages? ---------------------------------------- The migration path from the classic, non-onedir packages to the onedir packages will include: * Repo File: You need to update your repo file to point to the new repo paths for your platform. After the repo file is updated, upgrade your Salt packages. * Pip packages: You need to ensure any 3rd party pip packages are installed in the correct onedir path. This can be accomplished in two ways: * ``salt-pip install <package name>`` * Using the ``pip.installed`` Salt state. To install python packages into the system python environment, users must now provide the ``pip_bin`` or ``bin_env`` to the pip state module. For example: .. code-block:: yaml lib-foo: pip.installed: - pip_bin: /usr/bin/pip3 lib-bar: pip.installed: - bin_env: /usr/bin/python3 Known issues ------------ - To make use of Salt 3005 or later on a Salt master connected to SaltStack Config, you must use SaltStack Config version 8.9.0 or later. The root cause of the issue is a breaking change to ``AsyncClient._proc_function()``` in Salt, which is the function that the raas-master uses to run ``salt-run`` commands. As this is a private API, there's no expectation that the API should remain backward-compatible. It is recommended to upgrade SaltStack Config before upgrading your Salt masters. However, if a Salt master is upgraded to version 3005 before upgrading SaltStack Config, the upgrade can still be completed. After upgrading SaltStack Config, including the SSC plugin on each Salt master, restart the Salt masters. - Salt does not currently support napalm 4. 
Users will need to install napalm 3.x to ensure they do not run into issue #62468 Removed ------- - Deprecating and removing salt-unity. (#56055) - Removed support for macos mojave (#61130) - Removed `salt.utils.MultiprocessingProcess` and `salt.utils.SignalHandlingMultiprocessingProcess`. Please use `salt.utils.Process` and `salt.utils.SignalHandlingProcess` instead. (#61573) - Remove the grains.get_or_set_hash function. Please reference pillar and SDB documentation for secure ways to manage sensitive information. Grains are an insecure way to store secrets. (#61691) - Removed the `telnet_port`, `serial_type` and `console` parameters in salt/modules/virt.py. Use the `serials` and `consoles` parameters instead. Use the `serials` parameter with a value like ``{{{{'type': 'tcp', 'protocol': 'telnet', 'port': {}}}}}`` instead and a similar `consoles` parameter. (#61693) - Remove remove_lock in zypperpkg.py in favor of unhold. Remove add_lock in zypperpkg.py in favor of hold. (#61694) - Removed support for old-style Windows Group Policy names Recommended policy names will be displayed in comments (#61696) - Remove the feature flag feature.enable_slsvars_fixes and enable the fixes for `sls_path`, `tpl_file`, and `tpldir` by default. Enabling this behavior by default will fix the following: - tpldir: If your directory name and your SLS file name are the same tpldir used to return a ., now it returns the correct directory name. - slspath,slsdotpath,slscolonpath,sls_path: If an init.sls file is accessed by its explicit name path.to.init instead of path.to, init shows up as a directory for in various sls context parameters, now it will only show as a file. - tplfile: When using tplfile in a SLS file in the root directory of file roots it returns empty. Now it returns the filename. (#61697) - Remove SaltMessageServer.shutdown in favor of close. Remove LoadBalancerWorker.stop in favor of close. (#61698) - Removed the PyObjC dependency. 
This addresses problems with building a one dir build for macOS. It became problematic because depending on the macOS version, it pulls different dependencies, and we would either have to build a macos onedir for each macOS supported release, or ship a crippled onedir(because it would be tied to the macOS version where the onedir was built). Since it's currently not being used, it's removed. (#62432) Deprecated ---------- - In etcd_util, the recursive kwarg in the read and delete methods has been deprecated in favor of recurse for both client versions. In etcd_util, the index kwarg in the watch method has been deprecated in favor of start_revision for both client versions. In etcd_util, the waitIndex kwarg in the read method has been deprecated in favor of start_revision for both client versions. The etcd API v2 implementation has been deprecated in favor of etcd API v3. (#60325) - Deprecated transport kwarg inside salt.utils.event.get_event (#61275) - Deprecated netmiko_conn and pyeapi_conn in napalm_mod.py as these function should not be called from the CLI (#61566) - Deprecate all Azure cloud modules (#62183) - Deprecated ``defaults`` and ``preserve_context`` for ``salt.utils.functools.namespaced_function``. Additionally, the behavior when ``preserve_namespace=True`` was passed is now the default in order not to require duplicating imports on the modules that are namespacing functions. (#62272) - Added a pyinstaller hook that traverses the python used on the tiamat package to add all possible modules as hidden imports. (#62362) - Fix use of random shuffle and sample functions as Jinja filters (#62372) - All of the requirements provided in the requirements files are now included. The job of evaluating platform markers is not Salt's it's pip's. 
(#62392) - Update all platforms to use pycparser 2.21 or greater for Py 3.9 or higher, fixes fips fault with openssl v3.x (#62400) - Due to changes in the Netmiko library for the exception paths, need to check the version of Netmiko python library and then import the exceptions from different locations depending on the result. (#62405) - Deprecated the cassandra module in favor of the cassandra_cql module/returner. (#62327) Changed ------- - alternatives: Do not access /var/lib/dpkg/alternatives directly (#58745) - Enhance logging when there are errors at loading beacons (#60402) - Updated mysql cache module to also store updated timestamp, making it consistent with default cache module. Users of mysql cache should ensure database size before updating, as ALTER TABLE will add the timestamp column. (#61081) - Changed linux_shadow to test success of commands using cmd.retcode instead of cmd.run (#61932) - `zabbix.user_get` returns full user info with groups and medias `zabbix.user_addmedia` returns error for Zabbix 4.0+ due to `user.addmedia` method removal `zabbix.user_deletemedia` returns error for Zabbix 4.0+ due to `user.deletemedia` method removal (#62012) - "Sign before ending the testrun in x509.create_certificate" (#62100) Fixed ----- - Fix salt-ssh using sudo with a password (#8882) - Fix SSH password regex to not search for content after password:. (#25721) - Addressing a few issues when having keep_symlinks set to True with file.recurse. Also allow symlinks that are outside the salt fileserver root to be discoverable as symlinks when fileserver_followsymlinks is set to False. (#29562) - serialize to JSON only non string objects. (#35215) - Fix archive.extracted doesn't set user/group ownership correctly (#38605) - Make sys.argspec work on functions with annotations (#48735) - Fixed pdbedit.list_users with Samba 4.8 (#49648) - Fixes a scenario where ipv6 is enabled but the master is configured as an ipv4 IP address. 
(#49835) - Ensure that NOTIFY_SOCKET is not passed to child processes created with cmdmod unless it's set explicitly for such call. (#50851) - remove escaping of dbname in mysql.alter_db function. (#51559) - Fix runit module failing to find service if it is not symlinked. (#52759) - Changed manage.versions to report minions offline if minion call fails. (#53513) - Fixed events stream from /events endpoint not halting when auth token has expired. (#53742) - Fixed user.present which was breaking when updating workphone,homephone, fullname and "other" fields in case int was passed instead of string (#53961) - Fix error in webutil state module when attempting to grep a file that does not exist. (#53977) - Fixed ability to modify the "Audit: Force audit policy subcategory settings..." policy (#54301) - Fix timeout handling in netapi/saltnado. (#55394) - Fixing REST auth so that we actually support using ACLs from the REST server like we said in the documentation. (#55654) - Salt now correctly handles macOS after Py3.8 where python defaults to spawn instead of fork. (#55847) - Factor out sum and sorting of permissions into separate functions. Additionally, the same logic was applied to the rest_cherrypy netapi (#56495) - Display packages that are marked NoRemove in pkg.list_pkgs for Windows platforms (#56864) - Attempt to fix 56957 by detecting the broken recusion and stopping it. (#56957) - Fixed bytes vs. text issue when using sqlite for sdb backend. (#57133) - Ensure test is added to opts when using the state module with salt-ssh. (#57144) - Fixed RuntimeError OrderedDict mutated in network.managed for Debian systems. (#57721) - Improved the multiprocessing classes to better handle spawning platforms (#57742) - Config options are enforced according to config type (#57873) - fixed 57992 fix multi item kv v2 items read. (#57992) - Fixed thread leak during FQDN lookup when DNS entries had malformed PTR records, or other similar issues. 
(#58141) - Remove unnecessary dot in template that cause the bridge interface to fail on debian. Fixes #58195 (#58195) - update salt.module.schedule to check the job_args and job_kwargs for valid formatting. (#58329) - Allowe use of `roster` in salt.function state when using the SSH client. (#58662) - Detect new and legacy styles of calling module.run and support them both. (#58763) - Clean repo uri before checking if it's present, avoiding ghost change. (#58807) - Fix error "'__opts__' is not defined" when using the boto v2 modules (#58934) - hgfs: fix bytes vs str issues within hgfs. (#58963) - Fixes salt-ssh error when targetting IPs or hostnames directly. (#59033) - Allow for multiple configuration entries with keyword strict_config=False on yum-based systems (#59090) - Fixed error when running legacy code in winrepo.update_git_repos (#59101) - Clarify the persist argument in the scheduler module. Adding code in the list function to indicate if the schedule job is saved or not. (#59102) - Swap ret["retcode"] for ret.get("retcode") in the event that there is no retcode, eg. when a function is not passed with a module. (#59331) - Fix race condition when caching vault tokens (#59361) - The ssh module now accepts all ssh public key types as of openssh server version 8.7. (#59429) - Set default transport and port settings for Napalm NXOS, if not set. (#59448) - Use __salt_system_encoding__ when retrieving keystore certificate SHA1 str (#59503) - Fix error being thrown on empty flags list given to file.replace (#59554) - Update url for ez_setup.py script in virtualenv_mod.py (#59604) - Changed yumpkg module to normalize versions to strings when they were ambiguously floats (example version=3005.0). (#59705) - Fix pillar_roots.write on subdirectories broken after CVE-2021-25282 patch. (#59935) - Improved performance of zfs.filesystem_present and zfs.volume_present. When applying these states, only query specified ZFS properties rather than all properties. 
(#59970) - Fixed highstate outputter not displaying with salt.function in orchestration when module returns a dictionary. (#60029) - Update docs where python-dateutil is required for schedule. (#60070) - Send un-parsed username to LookupAccountName function (#60076) - Fix ability to set propagation on a folder to "this_folder_only" (#60103) - Fix name attribute access error in spm. (#60106) - Fix zeromq stream.send exception message (#60228) - Exit gracefully on ctrl+c. (#60242) - Corrected import statement for redis_cache in cluster mode. (#60272) - loader: Fix loading grains with annotations (#60285) - fix docker_network.present when com.docker.network.bridge.name is being used as the unixes can not have a bridge of the same name (#60316) - Fix exception in yumpkg.remove for not installed package on calling pkg.remove or pkg.removed (#60356) - Batch runs now return proper retcodes in a tuple of the form (result, retcode) (#60361) - Fixed issue with ansible roster __virtual__ when ansible is not installed. (#60370) - Fixed error being thrown when None was passed as src/defaults or dest to defaults.update and defaults.merge (#60431) - Allow for additional options for xmit hash policy in mode 4 NIC bonding on Redhat (#60583) - Properly detect VMware grains on Windows Server 2019+ (#60593) - Allow for minion failure to respond to job sent in batch mode (#60724) - The mac assistive execution module no longer shells out to change the database. (#60819) - Fix regression in win_timezone.get_zone which failed to resolve specific timezones that begin or end with d/s/t/o/f/_ characters (#60829) - The TCP transport resets it's unpacker on stream disconnects (#60831) - Moving the call to the validate function earlier to ensure that beacons are in the right format before we attempt to do anything to the configuration. Adding a generic validation to ensure the beacon configuration is in the wrong format when a validation function does not exist. 
(#60838) - Update the mac installer welcome and conclusion page, add docs for the salt-config tool (#60858) - Fixed external node classifier not callable due to wrong parameter (#60872) - Adjust Debian/Ubuntu package use of name 'ifenslave-2.6' to 'ifenslave' (#60876) - Clear and update the Pillar Cache when running saltutil.refresh_pillar. This only affects users that have `pillar_cache` set to True. If you do not want to clear the cache you can pass the kwarg `clean_cache=False` to `saltutil.refresh_pillar`. (#60897) - Handle the situation when apt repo lines have or do not have trailing slashes properly. (#60907) - Fixed Python 2 syntax for Python 3, allow for view objects returned by dictionary keys() function (#60909) - Fix REST CherryPY append the default permissions every request (#60955) - Do not consider "skipped" targets as failed for "ansible.playbooks" states (#60983) - Fix behavior for internal "_netlink_tool_remote_on" to filter results based on requested end (#61017) - schedule.job_status module: Convert datetime objects into formatted strings (#61043) - virt: don't crash if console doesn't have service or type attribute (#61054) - Fixed conflict between importlib_metada from Salt and importlib.metadata from Python 3.10 (#61062) - sys.argspec now works with pillar.get, vault.read_secret, and vault.list_secrets (#61084) - Set virtual grain on FreeBSD EC2 instances (#61094) - Fixed v3004 windows minion failing to open log file at C:\ProgramData\Salt Project\Salt\var\log\salt\minion (#61113) - Correct returned result to False when an error exception occurs for pip.installed (#61117) - fixed extend being too strict and wanting the system_type to exist when it is only needed for requisites. (#61121) - Fixed bug where deserialization in script engine would throw an error after all output was read. (#61124) - Adding missing import for salt.utils.beacons into beacons that were updated to use it. 
(#61135) - added exception catch to salt.utils.vt.terminal.isalive(). (#61160) - Re-factor transport to make them more plug-able (#61161) - Remove max zeromq pinned version due to issues on FreeBSD (#61163) - Fixing deltaproxy code to handle the situation where the control proxy is configured to control a proxy minion whose pillar data could not be loaded. (#61172) - Prevent get_tops from performing a Set operation on a List (#61176) - Make "state.highstate" to acts on concurrent flag. Simplify "transactional_update" module to not use SSH wrapper and allow more flexible execution (#61188) - Fix a failure with salt.utils.vault.make_request when namespace is not defined in the connection. (#61191) - Fix race condition in `salt.utils.verify.verify_env` and ignore directories starting with dot (#61192) - LGPO: Search for policies in a case-sensitive manner first, then fall back to non case-sensitive names (#61198) - Fixed state includes in dynamic environments (#61200) - Minimize the number of network connections minions to the master (#61247) - Fix salt-call event.event with pillar or grains (#61252) - Fixed failing dcs.compile_config where a successful compile errored with `AttributeError: 'list' object has no attribute 'get'`. (#61261) - Make the salt.utils.win_dacl.get_name() function include the "NT Security" prefix for Virtual Accounts. Virtual Accounts can only be added with the fully qualified name. (#61271) - Fixed tracebacks and print helpful error message when proxy_return = True but no platform or primary_ip set in NetBox pillar. (#61277) - Ensure opts is included in pack for minion_mods and config loads opts from the named_context. (#61297) - Added prefix length info for IPv6 addresses in Windows (#61316) - Handle MariaDB 10.5+ SLAVE MONITOR grant (#61331) - Fix secondary ip addresses being added to ip4_interfaces and ip6_interfaces at the same time (#61370) - Do not block the deltaproxy startup. 
Wrap the call to the individual proxy initialization functions in a try...except, catching the exception, logging an error and moving onto the next proxy minion. (#61377) - show_instance of hetzner cloud provider should enforce an action like the other ones (#61392) - Fix Hetzner Cloud config loading mechanism (#61399) - Sets correctly the lvm grain even when lvm's command execution outputs a WARNING (#61412) - Use net instead of sc in salt cloud when restarting the salt service (#61413) - Fix use_etag support in fileclient by removing case sensitivity of expected header (#61440) - Expand environment variables in the root_dir registry key (#61445) - Use salt.utils.path.readlink everywhere instead of os.readlink (#61458) - Fix state_aggregate minion option not respected (#61478) - Fixed wua.installed and wua.uptodate to return all changes, failures, and supersedences (#61479) - When running with test=True and there are no changes, don't show that there are changes. (#61483) - Fix issue with certutil when there's a space in the path to the certificate (#61494) - Fix cmdmod not respecting config for saltenv (#61507) - Convert Py 2'isms to Python 3, and add tests for set_filesystems on AIX (#61509) - Fix tracebacks caused by missing block device type and wrong mode used for gzip.open while calling inspector.export (#61530) - win_wua: Titles no longer limited to 40 characters (#61533) - Fixed error when using network module on RHEL 8 due to the name of the service changing from "network" to "NetworkManager". (#61538) - Allow symlink to be created even if source is missing on Windows (#61544) - Print jinja error context on `UndefinedError`. Previously `jinja2.exceptions.UndefinedError` resulted in a `SaltRenderError` without source file context, unlike all of the other Jinja exceptions handled in `salt/utils/templates.py`. 
(#61553) - Fix uptime on AIX systems when less than 24 hours (#61557) - Fix issue with state.show_state_usage when a saltenv is not referenced in any topfile (#61614) - Making the retry state system feature available when parallel is set to True. (#61630) - modules/aptpkg.SourceEntry: fix parsing lines with arbitrary comments in case HAS_APT=False (#61632) - Fix file.comment incorrectly reports changes in test mode (#61662) - Fix improper master caching of file listing in multiple dynamic environments (#61738) - When configured beacons are empty write an empty beacon configuration file. (#61741) - Fix file.replace updating mtime with no changes (#61743) - Fixed etcd_return being out of sync with the underlying etcd_util. (#61756) - Fixing items, values, and keys functions in the data module. (#61812) - Ensure that `salt://` URIs never contain backslashes, converting them to forward slashes instead. A specific situation to handle is caching files on Windows minions, where Jinja relative imports introduce a backslash into the path. (#61829) - Do not raise a UnicodeDecodeError when pillar cache cannot decode binary data. (#61836) - Don't rely on ``importlib.metadata``, even on Py3.10, use ``importlib_metadata`` instead. (#61839) - Fix the reporting of errors for file.directory in test mode (#61846) - Update Markup and contextfunction imports for jinja versions >=3.1. (#61848) - Update states.chef for version 16.x and 17.x Chef Infra Client output. (#61891) - Fixed some whitespace and ``pathlib.Path`` issues when not using the sytem ``aptsources`` package. (#61936) - fixed error when using backslash literal in file.replace (#61944) - Fix an issue where under spawning platforms, one could exhaust the available multiprocessing semaphores. 
(#61945) - Fix salt-cloud sync_after_install functionality (#61946) - Ensure that `common_prefix` matching only occurs if a directory name is identified (in the `archive.list` execution module function, which affects the `archive.extracted` state). (#61968) - When states are running in parallel, ensure that the total run time produced by the highstate outputter takes that into account. (#61999) - Temporary logging is now shutdown when logging has been configured. (#62005) - modules/lxd.FilesManager: fix memory leak through pylxd.modules.container.Container.FilesManager (#62006) - utils/jinja.SaltCacheLoader: fix leaking SaltCacheLoader through atexit.register (#62007) - Fixed errors on calling `zabbix_user.admin_password_present` state, due to changed error message in Zabbix 6.0 Fixed `zabbix.host_update` not mapping group ids list to list of dicts in format `[{"groupid": groupid}, ...]` Fixed `zabbix.user_update` not mapping usergroup id list to list of dicts in format `[{"usrgrpid": usrgrpid}, ...]` (#62012) - utils/yamlloader and yamlloader_old: fix leaking DuplicateKeyWarning through a warnings module (#62021) - Fix cache checking for Jinja templates (#62042) - Fixed salt.states.file.managed() for follow_symlinks=True and test=True (#62066) - Stop trigering the `GLIBC race condition <https://sourceware.org/bugzilla/show_bug.cgi?id=19329>`_ when parallelizing the resolution of the fqnds. (#62071) - Fix useradd functions hard-coded relative command name (#62087) - Fix #62092: Catch zmq.error.ZMQError to set HWM for zmq >= 3. Run ``git show 0be0941`` for more info. (#62092) - Allow emitatstartup to work when delay option is setup. 
(#62095) - Fix broken relative jinja includes in local mode bug introduced in #62043 (#62117) - Fix broken file.comment functionality introduced in #62045 (#62121) - Fixed an incompatibility preventing salt-cloud from deploying VMs on Proxmox VE 7 (#62154) - Fix sysctl functions hard-coded relative command name (#62164) - All of Salt's loaders now accept ``loaded_base_name`` as a keyword argument, allowing different namespacing the loaded modules. (#62186) - Only functions defined on the modules being loaded will be added to the lazy loader, functions imported from other modules, unless they are properly namespaced, are not included. (#62190) - Fixes issue in postgresql privileges detection: privileges on views were never retrieved and always recreated. (#57690) - Fix service.enabled error for unavailable service in test mode (#62258) - Fix variable reuse causing requisite_in problems (#62264) - Adding -G option to pkgdd cmd_prefix list when current_zone_only is True. (#62206) - Don't expect ``lsof`` to be installed when trying check which minions are connected. (#62303) - Fixed urlparse typo in rpmbuild_pkgbuild.py (#62442) - Fixing changes dict in pkg state to be consistent when installing and test=True. (#60995) - Use fire_event_async when expecting a coroutine (#62453) - Fixes import error under windows. (#62459) - account for revision number in formulas to account for difference between bottle and formula (#62466) - Fixed stacktrace on Windows when running pkg.list_pkgs (#62479) - Update sanitizing masking for Salt SSH to include additional password like strings. (#62483) - Fixes an issue where the minion could not connect to a master after 2 failed attempts (#62489) Added ----- - Added ability to request VPC peering connections in different AWS regions (boto_vpc). 
(#50394) - Added event return capability to Splunk returner (#50815) - Added allow downgrades support to apt upgrade (#52977) - added new grain for metadata to handle googles metadata differences (#53223) - Added win_shortcut execution and state module that does not prepend the current working directory to paths. Use shortcut.create and shortcut.present instead of file.shortcut. (#53706) - Add __env__ substitution inside file and pillar root paths (#55747) - Added support cpu hot add/remove, memory hot add, and nested virtualization to VMware salt-cloud driver. (#56144) - Add a consul state module with acl_present and acl_absent functions. (#58101) - Added restconf module/states/proxy code for network device automation (#59006) - Adds the ability to get version information from a file on Windows systems (#59702) - Add aptkey=False kwarg option to the aptpkg.py module and pkgrepo state. Apt-key is on the path to be deprecated. This will allow users to not use apt-key to manage the repo keys. It will set aptkey=False automatically if it does not detect apt-key exists on the machine. (#59785) - Added "Instant Clone" feature in the existing VMware Cloud module (#60004) - Added support for etcd API v3 (#60325) - Added `pkg.held` and `pkg.unheld` state functions for Zypper, YUM/DNF and APT. Improved `zypperpkg.hold` and `zypperpkg.unhold` functions. 
(#60432) - Added suse_ip module allowing to manage network interfaces on SUSE based Linux systems (#60702) - Support querying for JSON data in SQL external pillar (#60905) - Added support for yum and dnf on AIX (#60912) - Added percent success/failure of state runs in highstate summary output via new state_output_pct option (#60990) - Add support for retrieve IP-address from qemu agent by Salt-cloud on Proxmox (#61146) - Added new shortcut execution and state module to better handle UNC shortcuts and to test more thoroughly (#61170) - added yamllint utils module and yaml execution modules (#61182) - Add "--no-return-event" option to salt-call to prevent sending return event back to master. (#61188) - Add Etag support for file.managed web sources (#61270) - Adding the ability to add, delete, purge, and modify Salt scheduler jobs when the Salt minion is not running. (#61324) - Added a force option to file.symlink to overwrite an existing symlink with the same name (#61326) - `gpg_decrypt_must_succeed` config to prevent gpg renderer from failing silently (#61418) - Do not load a private copy of `__grains__` and `__salt__` for the sentry log handler if it is disabled. (#61484) - Add Jinja filters for itertools functions, flatten, and a state template workflow (#61502) - Add feature to allow roll-up of duplicate IDs with different names in highstate output (#61549) - Allow cp functions to derive saltenv from config if not explicitly set (#61562) - Multiprocessing logging no longer uses multiprocessing queues which penalized performance. Instead, each new process configures the terminal and file logging, and also any external logging handlers configured. 
(#61629) - Add a function to the freezer module for comparison of packages and repos in two frozen states (#61682) - Add grains_refresh_pre_exec option to allow grains to be refreshed before any operation (#61708) - Add possibility to pass extra parameters to salt-ssh pre flight script with `ssh_pre_flight_args` (#61715) - Add Etag support for archive.extracted web sources (#61763) - Add regex exclusions, full path matching, symlink following, and mtime/ctime comparison to file.tidied (#61823) - Add better handling for unit abbreviations and large values to salt.utils.stringutils.human_to_bytes (#61831) - Provide PyInstaller hooks that provide some runtime adjustments when Salt is running from a onedir (PyInstaller) bundled package. (#61864) - Add configurable onedir pip pypath location (#61937) - Add CNAME record support to the dig exec module (#61991) - Added support for changed user object in Zabbix 5.4+ Added compatibility with Zabbix 4.0+ for `zabbix.user_getmedia` method Added support for setting medias in `zabbix.user_update` for Zabbix 3.4+ (#62012) - Add ignore_missing parameter to file.comment state (#62044) - General improvements on the "ansiblegate" module: * Add "ansible.targets" method to gather Ansible inventory * Add "ansible.discover_playbooks" method to help collecting playbooks * Fix crash when running Ansible playbooks if ansible-playbook CLI output is not the expected JSON. * Fix issues when processing inventory and there are groups with no members. * Allow new types of targets for Ansible roster (#60056) - Add sample and shuffle functions from random (#62225) - Add "<tiamat> python" subcommand to allow execution or arbitrary scripts via bundled Python runtime (#62381)
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/releases/3005.rst
0.746046
0.785884
3005.rst
pypi
.. _beacons: ======= Beacons ======= Beacons let you use the Salt event system to monitor non-Salt processes. The beacon system allows the minion to hook into a variety of system processes and continually monitor these processes. When monitored activity occurs in a system process, an event is sent on the Salt event bus that can be used to trigger a :ref:`reactor <reactor>`. Salt beacons can currently monitor and send Salt events for many system activities, including: - file system changes - system load - service status - shell activity, such as user login - network and disk usage See :ref:`beacon modules <all-salt.beacons>` for a current list. .. note:: Salt beacons are an event generation mechanism. Beacons leverage the Salt :ref:`reactor <reactor>` system to make changes when beacon events occur. Configuring Beacons =================== Salt beacons do not require any changes to the system components that are being monitored, everything is configured using Salt. Beacons are typically enabled by placing a ``beacons:`` top level block in ``/etc/salt/minion`` or any file in ``/etc/salt/minion.d/`` such as ``/etc/salt/minion.d/beacons.conf`` or add it to pillars for that minion: .. code-block:: yaml beacons: inotify: - files: /etc/important_file: {} /opt: {} The beacon system, like many others in Salt, can also be configured via the minion pillar, grains, or local config file. .. note:: The `inotify` beacon only works on OSes that have `inotify` kernel support. Currently this excludes FreeBSD, macOS, and Windows. All beacon configuration is done using list based configuration. .. versionadded:: Neon Multiple copies of a particular Salt beacon can be configured by including the ``beacon_module`` parameter in the beacon configuration. .. 
code-block:: yaml beacons: watch_important_file: - files: /etc/important_file: {} - beacon_module: inotify watch_another_file: - files: /etc/another_file: {} - beacon_module: inotify Beacon Monitoring Interval -------------------------- Beacons monitor on a 1-second interval by default. To set a different interval, provide an ``interval`` argument to a beacon. The following beacons run on 5- and 10-second intervals: .. code-block:: yaml beacons: inotify: - files: /etc/important_file: {} /opt: {} - interval: 5 - disable_during_state_run: True load: - averages: 1m: - 0.0 - 2.0 5m: - 0.0 - 1.5 15m: - 0.1 - 1.0 - interval: 10 .. _avoid-beacon-event-loops: Avoiding Event Loops -------------------- It is important to carefully consider the possibility of creating a loop between a reactor and a beacon. For example, one might set up a beacon which monitors whether a file is read which in turn fires a reactor to run a state which in turn reads the file and re-fires the beacon. To avoid these types of scenarios, the ``disable_during_state_run`` argument may be set. If a state run is in progress, the beacon will not be run on its regular interval until the minion detects that the state run has completed, at which point the normal beacon interval will resume. .. code-block:: yaml beacons: inotify: - files: /etc/important_file: {} - disable_during_state_run: True .. _beacon-example: .. note:: For beacon writers: If you need extra stuff to happen, like closing file handles for the ``disable_during_state_run`` to actually work, you can add a `close()` function to the beacon to run those extra things. See the `inotify` beacon. Beacon Example ============== This example demonstrates configuring the :py:mod:`~salt.beacons.inotify` beacon to monitor a file for changes, and then restores the file to its original contents if a change was made. .. note:: The inotify beacon requires Pyinotify on the minion, install it using ``salt myminion pkg.install python-inotify``. 
Create Watched File ------------------- Create the file named ``/etc/important_file`` and add some simple content: .. code-block:: yaml important_config: True Add Beacon Configs to Minion ---------------------------- On the Salt minion, add the following configuration to ``/etc/salt/minion.d/beacons.conf``: .. code-block:: yaml beacons: inotify: - files: /etc/important_file: mask: - modify - disable_during_state_run: True Save the configuration file and restart the minion service. The beacon is now set up to notify salt upon modifications made to the file. .. note:: The ``disable_during_state_run: True`` parameter :ref:`prevents <avoid-beacon-event-loops>` the inotify beacon from generating reactor events due to salt itself modifying the file. .. _beacon-event-bus: View Events on the Master ------------------------- On your Salt master, start the event runner using the following command: .. code-block:: bash salt-run state.event pretty=true This runner displays events as they are received by the master on the Salt event bus. To test the beacon you set up in the previous section, make and save a modification to ``/etc/important_file``. You'll see an event similar to the following on the event bus: .. code-block:: json { "_stamp": "2015-09-09T15:59:37.972753", "data": { "change": "IN_IGNORED", "id": "larry", "path": "/etc/important_file" }, "tag": "salt/beacon/larry/inotify//etc/important_file" } This indicates that the event is being captured and sent correctly. Now you can create a reactor to take action when this event occurs. Create a Reactor ---------------- This reactor reverts the file named ``/etc/important_file`` to the contents provided by salt each time it is modified. Reactor SLS ``````````` On your Salt master, create a file named ``/srv/reactor/revert.sls``. .. note:: If the ``/srv/reactor`` directory doesn't exist, create it. .. code-block:: bash mkdir -p /srv/reactor Add the following to ``/srv/reactor/revert.sls``: .. 
code-block:: yaml revert-file: local.state.apply: - tgt: {{ data['data']['id'] }} - arg: - maintain_important_file .. note:: In addition to :ref:`setting <avoid-beacon-event-loops>` ``disable_during_state_run: True`` for an inotify beacon whose reaction is to modify the watched file, it is important to ensure the state applied is also :term:`idempotent <Idempotent>`. .. note:: The expression ``{{ data['data']['id'] }}`` :ref:`is correct <beacons-and-reactors>` as it matches the event structure :ref:`shown above <beacon-event-bus>`. State SLS ````````` Create the state sls file referenced by the reactor sls file. This state file will be located at ``/srv/salt/maintain_important_file.sls``. .. code-block:: yaml important_file: file.managed: - name: /etc/important_file - contents: | important_config: True Master Config ````````````` Configure the master to map the inotify beacon event to the ``revert`` reaction in ``/etc/salt/master.d/reactor.conf``: .. code-block:: yaml reactor: - salt/beacon/*/inotify//etc/important_file: - /srv/reactor/revert.sls .. note:: You can have only one top level ``reactor`` section, so if one already exists, add this code to the existing section. See :ref:`here <reactor-sls>` to learn more about reactor SLS syntax. Start the Salt Master in Debug Mode ----------------------------------- To help with troubleshooting, start the Salt master in debug mode: .. code-block:: bash service salt-master stop salt-master -l debug When debug logging is enabled, event and reactor data are displayed so you can discover syntax and other issues. Trigger the Reactor ------------------- On your minion, make and save another change to ``/etc/important_file``. On the Salt master, you'll see debug messages that indicate the event was received and the ``state.apply`` job was sent. When you inspect the file on the minion, you'll see that the file contents have been restored to ``important_config: True``. 
All beacons are configured using a similar process of enabling the beacon, writing a reactor SLS (and state SLS if needed), and mapping a beacon event to the reactor SLS. .. _writing-beacons: Writing Beacon Plugins ====================== Beacon plugins use the standard Salt loader system, meaning that many of the constructs from other plugin systems holds true, such as the ``__virtual__`` function. The important function in the Beacon Plugin is the ``beacon`` function. When the beacon is configured to run, this function will be executed repeatedly by the minion. The ``beacon`` function therefore cannot block and should be as lightweight as possible. The ``beacon`` also must return a list of dicts, each dict in the list will be translated into an event on the master. Beacons may also choose to implement a ``validate`` function which takes the beacon configuration as an argument and ensures that it is valid prior to continuing. This function is called automatically by the Salt loader when a beacon is loaded. Please see the :py:mod:`~salt.beacons.inotify` beacon as an example. The `beacon` Function --------------------- The beacons system will look for a function named `beacon` in the module. If this function is not present then the beacon will not be fired. This function is called on a regular basis and defaults to being called on every iteration of the minion, which can be tens to hundreds of times a second. This means that the `beacon` function cannot block and should not be CPU or IO intensive. The beacon function will be passed in the configuration for the executed beacon. This makes it easy to establish a flexible configuration for each called beacon. This is also the preferred way to ingest the beacon's configuration as it allows for the configuration to be dynamically updated while the minion is running by configuring the beacon in the minion's pillar. 
The Beacon Return ----------------- The information returned from the beacon is expected to follow a predefined structure. The returned value needs to be a list of dictionaries (standard python dictionaries are preferred, no ordered dicts are needed). The dictionaries represent individual events to be fired on the minion and master event buses. Each dict is a single event. The dict can contain any arbitrary keys but the 'tag' key will be extracted and added to the tag of the fired event. The return data structure would look something like this: .. code-block:: python [{"changes": ["/foo/bar"], "tag": "foo"}, {"changes": ["/foo/baz"], "tag": "bar"}] Calling Execution Modules ------------------------- Execution modules are still the preferred location for all work and system interaction to happen in Salt. For this reason the `__salt__` variable is available inside the beacon. Please be careful when calling functions in `__salt__`, while this is the preferred means of executing complicated routines in Salt not all of the execution modules have been written with beacons in mind. Watch out for execution modules that may be CPU intense or IO bound. Please feel free to add new execution modules and functions to back specific beacons. Distributing Custom Beacons --------------------------- Custom beacons can be distributed to minions via the standard methods, see :ref:`Modular Systems <modular-systems>`.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/beacons/index.rst
0.608361
0.694526
index.rst
pypi
.. _pillar: ================================= Storing Static Data in the Pillar ================================= Pillar is an interface for Salt designed to offer global values that can be distributed to minions. Pillar data is managed in a similar way as the Salt State Tree. Pillar was added to Salt in version 0.9.8 .. note:: Storing sensitive data Pillar data is compiled on the master. Additionally, pillar data for a given minion is only accessible by the minion for which it is targeted in the pillar configuration. This makes pillar useful for storing sensitive data specific to a particular minion. Declaring the Master Pillar =========================== The Salt Master server maintains a :conf_master:`pillar_roots` setup that matches the structure of the :conf_master:`file_roots` used in the Salt file server. Like :conf_master:`file_roots`, the :conf_master:`pillar_roots` option maps environments to directories. The pillar data is then mapped to minions based on matchers in a top file which is laid out in the same way as the state top file. Salt pillars can use the same matcher types as the standard :ref:`top file <states-top>`. conf_master:`pillar_roots` is configured just like :conf_master:`file_roots`. For example: .. code-block:: yaml pillar_roots: base: - /srv/pillar This example configuration declares that the base environment will be located in the ``/srv/pillar`` directory. It must not be in a subdirectory of the state tree. The top file used matches the name of the top file used for States, and has the same structure: ``/srv/pillar/top.sls`` .. code-block:: yaml base: '*': - packages In the above top file, it is declared that in the ``base`` environment, the glob matching all minions will have the pillar data found in the ``packages`` pillar available to it. Assuming the ``pillar_roots`` value of ``/srv/pillar`` taken from above, the ``packages`` pillar would be located at ``/srv/pillar/packages.sls``. 
Any number of matchers can be added to the base environment. For example, here is an expanded version of the Pillar top file stated above: /srv/pillar/top.sls: .. code-block:: yaml base: '*': - packages 'web*': - vim In this expanded top file, minions that match ``web*`` will have access to the ``/srv/pillar/packages.sls`` file, as well as the ``/srv/pillar/vim.sls`` file. Another example shows how to use other standard top matching types to deliver specific salt pillar data to minions with different properties. Here is an example using the ``grains`` matcher to target pillars to minions by their ``os`` grain: .. code-block:: yaml dev: 'os:Debian': - match: grain - servers Pillar definitions can also take a keyword argument ``ignore_missing``. When the value of ``ignore_missing`` is ``True``, all errors for missing pillar files are ignored. The default value for ``ignore_missing`` is ``False``. Here is an example using the ``ignore_missing`` keyword parameter to ignore errors for missing pillar files: .. code-block:: yaml base: '*': - servers - systems - ignore_missing: True Assuming that the pillar ``servers`` exists in the fileserver backend and the pillar ``systems`` doesn't, all pillar data from ``servers`` pillar is delivered to minions and no error for the missing pillar ``systems`` is noted under the key ``_errors`` in the pillar data delivered to minions. Should the ``ignore_missing`` keyword parameter have the value ``False``, an error for the missing pillar ``systems`` would produce the value ``Specified SLS 'servers' in environment 'base' is not available on the salt master`` under the key ``_errors`` in the pillar data delivered to minions. ``/srv/pillar/packages.sls`` .. code-block:: jinja {% if grains['os'] == 'RedHat' %} apache: httpd git: git {% elif grains['os'] == 'Debian' %} apache: apache2 git: git-core {% endif %} company: Foo Industries .. important:: See :ref:`Is Targeting using Grain Data Secure? 
<faq-grain-security>` for important security information. The above pillar sets two key/value pairs. If a minion is running RedHat, then the ``apache`` key is set to ``httpd`` and the ``git`` key is set to the value of ``git``. If the minion is running Debian, those values are changed to ``apache2`` and ``git-core`` respectively. All minions that have this pillar targeting to them via a top file will have the key of ``company`` with a value of ``Foo Industries``. Consequently this data can be used from within modules, renderers, State SLS files, and more via the shared pillar dictionary: .. code-block:: jinja apache: pkg.installed: - name: {{ pillar['apache'] }} .. code-block:: jinja git: pkg.installed: - name: {{ pillar['git'] }} Finally, the above states can utilize the values provided to them via Pillar. All pillar values targeted to a minion are available via the 'pillar' dictionary. As seen in the above example, Jinja substitution can then be utilized to access the keys and values in the Pillar dictionary. Note that you cannot just list key/value-information in ``top.sls``. Instead, target a minion to a pillar file and then list the keys and values in the pillar. Here is an example top file that illustrates this point: .. code-block:: yaml base: '*': - common_pillar And the actual pillar file at '/srv/pillar/common_pillar.sls': .. code-block:: yaml foo: bar boo: baz .. note:: When working with multiple pillar environments, assuming that each pillar environment has its own top file, the jinja placeholder ``{{ saltenv }}`` can be used in place of the environment name: .. code-block:: jinja {{ saltenv }}: '*': - common_pillar Yes, this is ``{{ saltenv }}``, and not ``{{ pillarenv }}``. The reason for this is because the Pillar top files are parsed using some of the same code which parses top files when :ref:`running states <running-highstate>`, so the pillar environment takes the place of ``{{ saltenv }}`` in the jinja context. 
Dynamic Pillar Environments =========================== If environment ``__env__`` is specified in :conf_master:`pillar_roots`, all environments that are not explicitly specified in :conf_master:`pillar_roots` will map to the directories from ``__env__``. This allows one to use dynamic git branch based environments for state/pillar files with the same file-based pillar applying to all environments. For example: .. code-block:: yaml pillar_roots: __env__: - /srv/pillar ext_pillar: - git: - __env__ https://example.com/git-pillar.git .. versionadded:: 2017.7.5,2018.3.1 Taking it one step further, ``__env__`` can also be used in the ``pillar_root`` filesystem path. It will be replaced with the actual ``pillarenv`` and searched for Pillar data to provide to the minion. Note this substitution ONLY occurs for the ``__env__`` environment. For instance, this configuration: .. code-block:: yaml pillar_roots: __env__: - /srv/__env__/pillar is equivalent to this static configuration: .. code-block:: yaml pillar_roots: dev: - /srv/dev/pillar test: - /srv/test/pillar prod: - /srv/prod/pillar .. versionadded:: 3005 Pillar Namespace Flattening =========================== The separate pillar SLS files all merge down into a single dictionary of key-value pairs. When the same key is defined in multiple SLS files, this can result in unexpected behavior if care is not taken to how the pillar SLS files are laid out. For example, given a ``top.sls`` containing the following: .. code-block:: yaml base: '*': - packages - services with ``packages.sls`` containing: .. code-block:: yaml bind: bind9 and ``services.sls`` containing: .. code-block:: yaml bind: named Then a request for the ``bind`` pillar key will only return ``named``. The ``bind9`` value will be lost, because ``services.sls`` was evaluated later. .. note:: Pillar files are applied in the order they are listed in the top file. Therefore conflicting keys will be overwritten in a 'last one wins' manner! 
For example, in the above scenario conflicting key values in ``services`` will overwrite those in ``packages`` because it's at the bottom of the list. It can be better to structure your pillar files with more hierarchy. For example the ``package.sls`` file could be configured like so: .. code-block:: yaml packages: bind: bind9 This would make the ``packages`` pillar key a nested dictionary containing a ``bind`` key. Pillar Dictionary Merging ========================= If the same pillar key is defined in multiple pillar SLS files, and the keys in both files refer to nested dictionaries, then the content from these dictionaries will be recursively merged. For example, keeping the ``top.sls`` the same, assume the following modifications to the pillar SLS files: ``packages.sls``: .. code-block:: yaml bind: package-name: bind9 version: 9.9.5 ``services.sls``: .. code-block:: yaml bind: port: 53 listen-on: any The resulting pillar dictionary will be: .. code-block:: bash $ salt-call pillar.get bind local: ---------- listen-on: any package-name: bind9 port: 53 version: 9.9.5 Since both pillar SLS files contained a ``bind`` key which contained a nested dictionary, the pillar dictionary's ``bind`` key contains the combined contents of both SLS files' ``bind`` keys. .. _pillar-include: Including Other Pillars ======================= .. versionadded:: 0.16.0 Pillar SLS files may include other pillar files, similar to State files. Two syntaxes are available for this purpose. The simple form simply includes the additional pillar as if it were part of the same file: .. code-block:: yaml include: - users The full include form allows two additional options -- passing default values to the templating engine for the included pillar file as well as an optional key under which to nest the results of the included pillar: .. 
code-block:: yaml include: - users: defaults: sudo: ['bob', 'paul'] key: users With this form, the included file (users.sls) will be nested within the 'users' key of the compiled pillar. Additionally, the 'sudo' value will be available as a template variable to users.sls. .. _pillar-in-memory: In-Memory Pillar Data vs. On-Demand Pillar Data =============================================== Since compiling pillar data is computationally expensive, the minion will maintain a copy of the pillar data in memory to avoid needing to ask the master to recompile and send it a copy of the pillar data each time pillar data is requested. This in-memory pillar data is what is returned by the :py:func:`pillar.item <salt.modules.pillar.item>`, :py:func:`pillar.get <salt.modules.pillar.get>`, and :py:func:`pillar.raw <salt.modules.pillar.raw>` functions. Also, for those writing custom execution modules, or contributing to Salt's existing execution modules, the in-memory pillar data is available as the ``__pillar__`` dunder dictionary. The in-memory pillar data is generated on minion start, and can be refreshed using the :py:func:`saltutil.refresh_pillar <salt.modules.saltutil.refresh_pillar>` function: .. code-block:: bash salt '*' saltutil.refresh_pillar This function triggers the minion to asynchronously refresh the in-memory pillar data and will always return ``None``. In contrast to in-memory pillar data, certain actions trigger pillar data to be compiled to ensure that the most up-to-date pillar data is available. These actions include: - Running states - Running :py:func:`pillar.items <salt.modules.pillar.items>` Performing these actions will *not* refresh the in-memory pillar data. 
So, if pillar data is modified, and then states are run, the states will see the updated pillar data, but :py:func:`pillar.item <salt.modules.pillar.item>`, :py:func:`pillar.get <salt.modules.pillar.get>`, and :py:func:`pillar.raw <salt.modules.pillar.raw>` will not see this data unless refreshed using :py:func:`saltutil.refresh_pillar <salt.modules.saltutil.refresh_pillar>`. If you are using the Pillar Cache and have set :conf_master:`pillar_cache` to `True`, the pillar cache can be updated either when you run :py:func:`saltutil.refresh_pillar <salt.modules.saltutil.refresh_pillar>`, or using the pillar runner function :py:func:`pillar.clear_pillar_cache <salt.runners.pillar.clear_pillar_cache>`: .. code-block:: bash salt-run pillar.clear_pillar_cache 'minion' The pillar will not be updated when running :py:func:`pillar.items <salt.modules.pillar.items>` or a state for example. If you are using a Salt version before 3003, you would need to manually delete the cache file, located in Salt's master cache. For example, on linux the file would be in this directory: /var/cache/salt/master/pillar_cache/ .. _pillar-environments: How Pillar Environments Are Handled =================================== When multiple pillar environments are used, the default behavior is for the pillar data from all environments to be merged together. The pillar dictionary will therefore contain keys from all configured environments. The :conf_minion:`pillarenv` minion config option can be used to force the minion to only consider pillar configuration from a single environment. This can be useful in cases where one needs to run states with alternate pillar data, either in a testing/QA environment or to test changes to the pillar data before pushing them live. For example, assume that the following is set in the minion config file: .. code-block:: yaml pillarenv: base This would cause that minion to ignore all other pillar environments besides ``base`` when compiling the in-memory pillar data. 
Then, when running states, the ``pillarenv`` CLI argument can be used to override the minion's :conf_minion:`pillarenv` config value: .. code-block:: bash salt '*' state.apply mystates pillarenv=testing The above command will run the states with pillar data sourced exclusively from the ``testing`` environment, without modifying the in-memory pillar data. .. note:: When running states, the ``pillarenv`` CLI option does not require a :conf_minion:`pillarenv` option to be set in the minion config file. When :conf_minion:`pillarenv` is left unset, as mentioned above all configured environments will be combined. Running states with ``pillarenv=testing`` in this case would still restrict the states' pillar data to just that of the ``testing`` pillar environment. Starting in the 2017.7.0 release, it is possible to pin the pillarenv to the effective saltenv, using the :conf_minion:`pillarenv_from_saltenv` minion config option. When this is set to ``True``, if a specific saltenv is specified when running states, the ``pillarenv`` will be the same. This essentially makes the following two commands equivalent: .. code-block:: bash salt '*' state.apply mystates saltenv=dev salt '*' state.apply mystates saltenv=dev pillarenv=dev However, if a pillarenv is specified, it will override this behavior. So, the following command will use the ``qa`` pillar environment but source the SLS files from the ``dev`` saltenv: .. code-block:: bash salt '*' state.apply mystates saltenv=dev pillarenv=qa So, if a ``pillarenv`` is set in the minion config file, :conf_minion:`pillarenv_from_saltenv` will be ignored, and passing a ``pillarenv`` on the CLI will temporarily override :conf_minion:`pillarenv_from_saltenv`. Viewing Pillar Data =================== To view pillar data, use the :mod:`pillar <salt.modules.pillar>` execution module. This module includes several functions, each of them with their own use. 
These functions include: - :py:func:`pillar.item <salt.modules.pillar.item>` - Retrieves the value of one or more keys from the :ref:`in-memory pillar data <pillar-in-memory>`. - :py:func:`pillar.items <salt.modules.pillar.items>` - Compiles a fresh pillar dictionary and returns it, leaving the :ref:`in-memory pillar data <pillar-in-memory>` untouched. If pillar keys are passed to this function however, this function acts like :py:func:`pillar.item <salt.modules.pillar.item>` and returns their values from the :ref:`in-memory pillar data <pillar-in-memory>`. - :py:func:`pillar.raw <salt.modules.pillar.raw>` - Like :py:func:`pillar.items <salt.modules.pillar.items>`, it returns the entire pillar dictionary, but from the :ref:`in-memory pillar data <pillar-in-memory>` instead of compiling fresh pillar data. - :py:func:`pillar.get <salt.modules.pillar.get>` - Described in detail below. The :py:func:`pillar.get <salt.modules.pillar.get>` Function ============================================================ .. versionadded:: 0.14.0 The :mod:`pillar.get <salt.modules.pillar.get>` function works much in the same way as the ``get`` method in a Python dict, but with an enhancement: nested dictionaries can be traversed using a colon as a delimiter. If a structure like this is in pillar: .. code-block:: yaml foo: bar: baz: qux Extracting it from the raw pillar in an sls formula or file template is done this way: .. code-block:: jinja {{ pillar['foo']['bar']['baz'] }} Now, with the new :mod:`pillar.get <salt.modules.pillar.get>` function the data can be safely gathered and a default can be set, allowing the template to fall back if the value is not available: .. code-block:: jinja {{ salt['pillar.get']('foo:bar:baz', 'qux') }} This makes handling nested structures much easier. .. note:: ``pillar.get()`` vs ``salt['pillar.get']()`` It should be noted that within templating, the ``pillar`` variable is just a dictionary. 
This means that calling ``pillar.get()`` inside of a template will just use the default dictionary ``.get()`` function which does not include the extra ``:`` delimiter functionality. It must be called using the above syntax (``salt['pillar.get']('foo:bar:baz', 'qux')``) to get the salt function, instead of the default dictionary behavior. Setting Pillar Data at the Command Line ======================================= Pillar data can be set at the command line like the following example: .. code-block:: bash salt '*' state.apply pillar='{"cheese": "spam"}' This will add a pillar key of ``cheese`` with its value set to ``spam``. .. note:: Be aware that when sending sensitive data via pillar on the command-line that the publication containing that data will be received by all minions and will not be restricted to the targeted minions. This may represent a security concern in some cases. .. _pillar-encryption: Pillar Encryption ================= Salt's renderer system can be used to decrypt pillar data. This allows for pillar items to be stored in an encrypted state, and decrypted during pillar compilation. Encrypted Pillar SLS -------------------- .. versionadded:: 2017.7.0 Consider the following pillar SLS file: .. 
code-block:: yaml secrets: vault: foo: | -----BEGIN PGP MESSAGE----- hQEMAw2B674HRhwSAQgAhTrN8NizwUv/VunVrqa4/X8t6EUulrnhKcSeb8sZS4th W1Qz3K2NjL4lkUHCQHKZVx/VoZY7zsddBIFvvoGGfj8+2wjkEDwFmFjGE4DEsS74 ZLRFIFJC1iB/O0AiQ+oU745skQkU6OEKxqavmKMrKo3rvJ8ZCXDC470+i2/Hqrp7 +KWGmaDOO422JaSKRm5D9bQZr9oX7KqnrPG9I1+UbJyQSJdsdtquPWmeIpamEVHb VMDNQRjSezZ1yKC4kCWm3YQbBF76qTHzG1VlLF5qOzuGI9VkyvlMaLfMibriqY73 zBbPzf6Bkp2+Y9qyzuveYMmwS4sEOuZL/PetqisWe9JGAWD/O+slQ2KRu9hNww06 KMDPJRdyj5bRuBVE4hHkkP23KrYr7SuhW2vpe7O/MvWEJ9uDNegpMLhTWruGngJh iFndxegN9w== =bAuo -----END PGP MESSAGE----- bar: this was unencrypted already baz: | -----BEGIN PGP MESSAGE----- hQEMAw2B674HRhwSAQf+Ne+IfsP2IcPDrUWct8sTJrga47jQvlPCmO+7zJjOVcqz gLjUKvMajrbI/jorBWxyAbF+5E7WdG9WHHVnuoywsyTB9rbmzuPqYCJCe+ZVyqWf 9qgJ+oUjcvYIFmH3h7H68ldqbxaAUkAOQbTRHdr253wwaTIC91ZeX0SCj64HfTg7 Izwk383CRWonEktXJpientApQFSUWNeLUWagEr/YPNFA3vzpPF5/Ia9X8/z/6oO2 q+D5W5mVsns3i2HHbg2A8Y+pm4TWnH6mTSh/gdxPqssi9qIrzGQ6H1tEoFFOEq1V kJBe0izlfudqMq62XswzuRB4CYT5Iqw1c97T+1RqENJCASG0Wz8AGhinTdlU5iQl JkLKqBxcBz4L70LYWyHhYwYROJWjHgKAywX5T67ftq0wi8APuZl9olnOkwSK+wrY 1OZi =7epf -----END PGP MESSAGE----- qux: - foo - bar - | -----BEGIN PGP MESSAGE----- hQEMAw2B674HRhwSAQgAg1YCmokrweoOI1c9HO0BLamWBaFPTMblOaTo0WJLZoTS ksbQ3OJAMkrkn3BnnM/djJc5C7vNs86ZfSJ+pvE8Sp1Rhtuxh25EKMqGOn/SBedI gR6N5vGUNiIpG5Tf3DuYAMNFDUqw8uY0MyDJI+ZW3o3xrMUABzTH0ew+Piz85FDA YrVgwZfqyL+9OQuu6T66jOIdwQNRX2NPFZqvon8liZUPus5VzD8E5cAL9OPxQ3sF f7/zE91YIXUTimrv3L7eCgU1dSxKhhfvA2bEUi+AskMWFXFuETYVrIhFJAKnkFmE uZx+O9R9hADW3hM5hWHKH9/CRtb0/cC84I9oCWIQPdI+AaPtICxtsD2N8Q98hhhd 4M7I0sLZhV+4ZJqzpUsOnSpaGyfh1Zy/1d3ijJi99/l+uVHuvmMllsNmgR+ZTj0= =LrCQ -----END PGP MESSAGE----- When the pillar data is compiled, the results will be decrypted: .. code-block:: bash # salt myminion pillar.items myminion: ---------- secrets: ---------- vault: ---------- bar: this was unencrypted already baz: rosebud foo: supersecret qux: - foo - bar - baz Salt must be told what portions of the pillar data to decrypt. 
This is done using the :conf_master:`decrypt_pillar` config option: .. code-block:: yaml decrypt_pillar: - 'secrets:vault': gpg The notation used to specify the pillar item(s) to be decrypted is the same as the one used in the :py:func:`pillar.get <salt.modules.pillar.get>` function. If a different delimiter is needed, it can be specified using the :conf_master:`decrypt_pillar_delimiter` config option: .. code-block:: yaml decrypt_pillar: - 'secrets|vault': gpg decrypt_pillar_delimiter: '|' The name of the renderer used to decrypt a given pillar item can be omitted, and if so it will fall back to the value specified by the :conf_master:`decrypt_pillar_default` config option, which defaults to ``gpg``. So, the first example above could be rewritten as: .. code-block:: yaml decrypt_pillar: - 'secrets:vault' Encrypted Pillar Data on the CLI -------------------------------- .. versionadded:: 2016.3.0 The following functions support passing pillar data on the CLI via the ``pillar`` argument: - :py:func:`pillar.items <salt.modules.pillar.items>` - :py:func:`state.apply <salt.modules.state.apply_>` - :py:func:`state.highstate <salt.modules.state.highstate>` - :py:func:`state.sls <salt.modules.state.sls>` Triggering decryption of this CLI pillar data can be done in one of two ways: 1. Using the ``pillar_enc`` argument: .. 
code-block:: bash # salt myminion pillar.items pillar_enc=gpg pillar='{foo: "-----BEGIN PGP MESSAGE-----\n\nhQEMAw2B674HRhwSAQf+OvPqEdDoA2fk15I5dYUTDoj1yf/pVolAma6iU4v8Zixn\nRDgWsaAnFz99FEiFACsAGDEFdZaVOxG80T0Lj+PnW4pVy0OXmXHnY2KjV9zx8FLS\nQxfvmhRR4t23WSFybozfMm0lsN8r1vfBBjbK+A72l0oxN78d1rybJ6PWNZiXi+aC\nmqIeunIbAKQ21w/OvZHhxH7cnIiGQIHc7N9nQH7ibyoKQzQMSZeilSMGr2abAHun\nmLzscr4wKMb+81Z0/fdBfP6g3bLWMJga3hSzSldU9ovu7KR8rDJI1qOlENj3Wm8C\nwTpDOB33kWIKMqiAjY3JFtb5MCHrafyggwQL7cX1+tI+AbSO6kZpbcDfzetb77LZ\nxc5NWnnGK4pGoqq4MAmZshw98RpecSHKMosto2gtiuWCuo9Zn5cV/FbjZ9CTWrQ=\n=0hO/\n-----END PGP MESSAGE-----"}' The newlines in this example are specified using a literal ``\n``. Newlines can be replaced with a literal ``\n`` using ``sed``: .. code-block:: bash $ echo -n bar | gpg --armor --trust-model always --encrypt -r user@domain.tld | sed ':a;N;$!ba;s/\n/\\n/g' .. note:: Using ``pillar_enc`` will perform the decryption minion-side, so for this to work it will be necessary to set up the keyring in ``/etc/salt/gpgkeys`` on the minion just as one would typically do on the master. The easiest way to do this is to first export the keys from the master: .. code-block:: bash # gpg --homedir /etc/salt/gpgkeys --export-secret-key -a user@domain.tld >/tmp/keypair.gpg Then, copy the file to the minion, set up the keyring, and import: .. code-block:: bash # mkdir -p /etc/salt/gpgkeys # chmod 0700 /etc/salt/gpgkeys # gpg --homedir /etc/salt/gpgkeys --list-keys # gpg --homedir /etc/salt/gpgkeys --import --allow-secret-key-import keypair.gpg The ``--list-keys`` command is run to create a keyring in the newly-created directory. Pillar data which is decrypted minion-side will still be securely transferred to the master, since the data sent between minion and master is encrypted with the master's public key. 2. Use the :conf_master:`decrypt_pillar` option. 
This is less flexible in that the pillar key passed on the CLI must be pre-configured on the master, but it doesn't require a keyring to be setup on the minion. One other caveat to this method is that pillar decryption on the master happens at the end of pillar compilation, so if the encrypted pillar data being passed on the CLI needs to be referenced by pillar or ext_pillar *during pillar compilation*, it *must* be decrypted minion-side. Adding New Renderers for Decryption ----------------------------------- Those looking to add new renderers for decryption should look at the :mod:`gpg <salt.renderers.gpg>` renderer for an example of how to do so. The function that performs the decryption should be recursive and be able to traverse a mutable type such as a dictionary, and modify the values in-place. Once the renderer has been written, :conf_master:`decrypt_pillar_renderers` should be modified so that Salt allows it to be used for decryption. If the renderer is being submitted upstream to the Salt project, the renderer should be added in `salt/renderers/`_. Additionally, the following should be done: - Both occurrences of :conf_master:`decrypt_pillar_renderers` in `salt/config/__init__.py`_ should be updated to include the name of the new renderer so that it is included in the default value for this config option. - The documentation for the :conf_master:`decrypt_pillar_renderers` config option in the `master config file`_ and `minion config file`_ should be updated to show the correct new default value. - The commented example for the :conf_master:`decrypt_pillar_renderers` config option in the `master config template`_ should be updated to show the correct new default value. .. _`salt/renderers/`: https://github.com/saltstack/salt/tree/|repo_primary_branch|/salt/renderers/ .. _`salt/config/__init__.py`: https://github.com/saltstack/salt/tree/|repo_primary_branch|/salt/config/__init__.py .. 
_`master config file`: https://github.com/saltstack/salt/tree/|repo_primary_branch|/doc/ref/configuration/master.rst .. _`minion config file`: https://github.com/saltstack/salt/tree/|repo_primary_branch|/doc/ref/configuration/minion.rst .. _`master config template`: https://github.com/saltstack/salt/tree/|repo_primary_branch|/conf/master Binary Data in the Pillar ========================= Salt has partial support for binary pillar data. .. note:: There are some situations (such as salt-ssh) where only text (ASCII or Unicode) is allowed. The simplest way to embed binary data in your pillar is to make use of YAML's built-in binary data type, which requires base64 encoded data. .. code-block:: yaml salt_pic: !!binary iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAMAAAC67D+PAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAA Then you can use it as a ``contents_pillar`` in a state: .. code-block:: yaml /tmp/salt.png: file.managed: - contents_pillar: salt_pic It is also possible to add ASCII-armored encrypted data to pillars, as mentioned in the Pillar Encryption section. Master Config in Pillar ======================= For convenience the data stored in the master configuration file can be made available in all minions' pillars. This makes global configuration of services and systems very easy but may not be desired if sensitive data is stored in the master configuration. This option is disabled by default. To enable the master config to be added to the pillar, set :conf_minion:`pillar_opts` to ``True`` in the minion config file: .. code-block:: yaml pillar_opts: True Minion Config in Pillar ======================= Minion configuration options can be set on pillars. Any option that you want to modify, should be in the first level of the pillars, in the same way you set the options in the config file. For example, to configure the MySQL root password to be used by the MySQL Salt execution module, set the following pillar variable: .. 
code-block:: yaml mysql.pass: hardtoguesspassword Master Provided Pillar Error ============================ By default if there is an error rendering a pillar, the detailed error is hidden and replaced with: .. code-block:: bash Rendering SLS 'my.sls' failed. Please see master log for details. The error is protected because it could contain templating data which would give that minion information it shouldn't know, like a password! To have the master provide the detailed error that could potentially carry protected data, set ``pillar_safe_render_error`` to ``False``: .. code-block:: yaml pillar_safe_render_error: False .. toctree:: ../tutorials/pillar
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/pillar/index.rst
0.892545
0.750324
index.rst
pypi
.. _external-job-cache: ========================================= Storing Job Results in an External System ========================================= After a job executes, job results are returned to the Salt Master by each Salt Minion. These results are stored in the :ref:`Default Job Cache <default_job_cache>`. In addition to the Default Job Cache, Salt provides two additional mechanisms to send job results to other systems (databases, local syslog, and others): * External Job Cache * Master Job Cache The major difference between these two mechanisms is from where results are returned (from the Salt Master or Salt Minion). Configuring either of these options will also cause the :py:mod:`Jobs Runner functions <salt.runners.jobs>` to automatically query the remote stores for information. External Job Cache - Minion-Side Returner ----------------------------------------- When an External Job Cache is configured, data is returned to the Default Job Cache on the Salt Master like usual, and then results are also sent to an External Job Cache using a Salt returner module running on the Salt Minion. .. image:: /_static/external-job-cache.png :align: center * Advantages: Data is stored without placing additional load on the Salt Master. * Disadvantages: Each Salt Minion connects to the external job cache, which can result in a large number of connections. Also requires additional configuration to get returner module settings on all Salt Minions. Master Job Cache - Master-Side Returner --------------------------------------- .. versionadded:: 2014.7.0 Instead of configuring an External Job Cache on each Salt Minion, you can configure the Master Job Cache to send job results from the Salt Master instead. In this configuration, Salt Minions send data to the Default Job Cache as usual, and then the Salt Master sends the data to the external system using a Salt returner module running on the Salt Master. .. 
image:: /_static/master-job-cache.png :align: center * Advantages: A single connection is required to the external system. This is preferred for databases and similar systems. * Disadvantages: Places additional load on your Salt Master. Configure an External or Master Job Cache ----------------------------------------- Step 1: Understand Salt Returners ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Before you configure a job cache, it is essential to understand Salt returner modules ("returners"). Returners are pluggable Salt Modules that take the data returned by jobs, and then perform any necessary steps to send the data to an external system. For example, a returner might establish a connection, authenticate, and then format and transfer data. The Salt Returner system provides the core functionality used by the External and Master Job Cache systems, and the same returners are used by both systems. Salt currently provides many different returners that let you connect to a wide variety of systems. A complete list is available at :ref:`all Salt returners <all-salt.returners>`. Each returner is configured differently, so make sure you read and follow the instructions linked from that page. For example, the MySQL returner requires: * A database created using provided schema (structure is available at :mod:`MySQL returner <salt.returners.mysql>`) * A user created with privileges to the database * Optional SSL configuration A simpler returner, such as Slack or HipChat, requires: * An API key/version * The target channel/room * The username that should be used to send the message Step 2: Configure the Returner ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ After you understand the configuration and have the external system ready, the configuration requirements must be declared. External Job Cache """""""""""""""""" The returner configuration settings can be declared in the Salt Minion configuration file, the Minion's pillar data, or the Minion's grains. 
If ``external_job_cache`` configuration settings are specified in more than one place, the options are retrieved in the following order. The first configuration location that is found is the one that will be used. - Minion configuration file - Minion's grains - Minion's pillar data Master Job Cache """""""""""""""" The returner configuration settings for the Master Job Cache should be declared in the Salt Master's configuration file. Configuration File Examples """"""""""""""""""""""""""" MySQL requires: .. code-block:: yaml mysql.host: 'salt' mysql.user: 'salt' mysql.pass: 'salt' mysql.db: 'salt' mysql.port: 3306 Slack requires: .. code-block:: yaml slack.channel: 'channel' slack.api_key: 'key' slack.from_name: 'name' After you have configured the returner and added settings to the configuration file, you can enable the External or Master Job Cache. Step 3: Enable the External or Master Job Cache ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Configuration is a single line that specifies an already-configured returner to use to send all job data to an external system. External Job Cache """""""""""""""""" To enable a returner as the External Job Cache (Minion-side), add the following line to the Salt Master configuration file: .. code-block:: yaml ext_job_cache: <returner> For example: .. code-block:: yaml ext_job_cache: mysql .. note:: When configuring an External Job Cache (Minion-side), the returner settings are added to the Minion configuration file, but the External Job Cache setting is configured in the Master configuration file. Master Job Cache """""""""""""""" To enable a returner as a Master Job Cache (Master-side), add the following line to the Salt Master configuration file: .. code-block:: yaml master_job_cache: <returner> For example: .. code-block:: yaml master_job_cache: mysql Verify that the returner configuration settings are in the Master configuration file, and be sure to restart the salt-master service after you make configuration changes. 
(``service salt-master restart``).
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/jobs/external_cache.rst
0.886445
0.696894
external_cache.rst
pypi
.. _spm-development: ===================== SPM Development Guide ===================== This document discusses developing additional code for SPM. SPM-Specific Loader Modules =========================== SPM was designed to behave like traditional package managers, which apply files to the filesystem and store package metadata in a local database. However, because modern infrastructures often extend beyond those use cases, certain parts of SPM have been broken out into their own set of modules. Each function that accepts arguments has a set of required and optional arguments. Take note that SPM will pass all arguments in, and therefore each function must accept each of those arguments. However, arguments that are marked as required are crucial to SPM's core functionality, while arguments that are marked as optional are provided as a benefit to the module, if it needs to use them. .. _spm-development-pkgdb: Package Database ---------------- By default, the package database is stored using the ``sqlite3`` module. This module was chosen because support for SQLite3 is built into Python itself. Modules for managing the package database are stored in the ``salt/spm/pkgdb/`` directory. A number of functions must exist to support database management. init() `````` Get a database connection, and initialize the package database if necessary. This function accepts no arguments. If a database is used which supports a connection object, then that connection object is returned. For instance, the ``sqlite3`` module returns a ``connect()`` object from the ``sqlite3`` library: .. code-block:: python def myfunc(): conn = sqlite3.connect(__opts__["spm_db"], isolation_level=None) ... return conn SPM itself will not use this connection object; it will be passed in as-is to the other functions in the module. Therefore, when you set up this object, make sure to do so in a way that is easily usable throughout the module. info() `````` Return information for a package. 
This generally consists of the information that is stored in the ``FORMULA`` file in the package. The arguments that are passed in, in order, are ``package`` (required) and ``conn`` (optional). ``package`` is the name of the package, as specified in the ``FORMULA``. ``conn`` is the connection object returned from ``init()``. list_files() ```````````` Return a list of files for an installed package. Only the filename should be returned, and no other information. The arguments that are passed in, in order, are ``package`` (required) and ``conn`` (optional). ``package`` is the name of the package, as specified in the ``FORMULA``. ``conn`` is the connection object returned from ``init()``. register_pkg() `````````````` Register a package in the package database. Nothing is expected to be returned from this function. The arguments that are passed in, in order, are ``name`` (required), ``formula_def`` (required), and ``conn`` (optional). ``name`` is the name of the package, as specified in the ``FORMULA``. ``formula_def`` is the contents of the ``FORMULA`` file, as a ``dict``. ``conn`` is the connection object returned from ``init()``. register_file() ``````````````` Register a file in the package database. Nothing is expected to be returned from this function. The arguments that are passed in are ``name`` (required), ``member`` (required), ``path`` (required), ``digest`` (optional), and ``conn`` (optional). ``name`` is the name of the package. ``member`` is a ``tarfile`` object for the package file. It is included, because it contains most of the information for the file. ``path`` is the location of the file on the local filesystem. ``digest`` is the SHA1 checksum of the file. ``conn`` is the connection object returned from ``init()``. unregister_pkg() ```````````````` Unregister a package from the package database. This usually only involves removing the package's record from the database. Nothing is expected to be returned from this function. 
The arguments that are passed in, in order, are ``name`` (required) and ``conn`` (optional). ``name`` is the name of the package, as specified in the ``FORMULA``. ``conn`` is the connection object returned from ``init()``. unregister_file() ````````````````` Unregister a file from the package database. This usually only involves removing the file's record from the database. Nothing is expected to be returned from this function. The arguments that are passed in, in order, are ``name`` (required), ``pkg`` (optional) and ``conn`` (optional). ``name`` is the path of the file, as it was installed on the filesystem. ``pkg`` is the name of the package that the file belongs to. ``conn`` is the connection object returned from ``init()``. db_exists() ``````````` Check to see whether the package database already exists. This is the path to the package database file. This function will return ``True`` or ``False``. The only argument that is expected is ``db_``, which is the package database file. .. _spm-development-pkgfiles: Package Files ------------- By default, package files are installed using the ``local`` module. This module applies files to the local filesystem, on the machine that the package is installed on. Modules for managing the package files are stored in the ``salt/spm/pkgfiles/`` directory. A number of functions must exist to support file management. init() `````` Initialize the installation location for the package files. Normally these will be directory paths, but other external destinations such as databases can be used. For this reason, this function will return a connection object, which can be a database object. However, in the default ``local`` module, this object is a dict containing the paths. This object will be passed into all other functions. Three directories are used for the destinations: ``formula_path``, ``pillar_path``, and ``reactor_path``. ``formula_path`` is the location of most of the files that will be installed. 
The default is specific to the operating system, but is normally ``/srv/salt/``. ``pillar_path`` is the location that the ``pillar.example`` file will be installed to. The default is specific to the operating system, but is normally ``/srv/pillar/``. ``reactor_path`` is the location that reactor files will be installed to. The default is specific to the operating system, but is normally ``/srv/reactor/``. check_existing() ```````````````` Check the filesystem for existing files. All files for the package will be checked, and if any are existing, then this function will normally state that SPM will refuse to install the package. This function returns a list of the files that exist on the system. The arguments that are passed into this function are, in order: ``package`` (required), ``pkg_files`` (required), ``formula_def`` (required), and ``conn`` (optional). ``package`` is the name of the package that is to be installed. ``pkg_files`` is a list of the files to be checked. ``formula_def`` is a copy of the information that is stored in the ``FORMULA`` file. ``conn`` is the file connection object. install_file() `````````````` Install a single file to the destination (normally on the filesystem). This function returns the final location that the file was installed to. The arguments that are passed into this function are, in order, ``package`` (required), ``formula_tar`` (required), ``member`` (required), ``formula_def`` (required), and ``conn`` (optional). ``package`` is the name of the package that is to be installed. ``formula_tar`` is the tarfile object for the package. This is passed in so that the function can call ``formula_tar.extract()`` for the file. ``member`` is the tarfile object which represents the individual file. This may be modified as necessary, before being passed into ``formula_tar.extract()``. ``formula_def`` is a copy of the information from the ``FORMULA`` file. 
``conn`` is the file connection object. remove_file() ````````````` Remove a single file from file system. Normally this will be little more than an ``os.remove()``. Nothing is expected to be returned from this function. The arguments that are passed into this function are, in order, ``path`` (required) and ``conn`` (optional). ``path`` is the absolute path to the file to be removed. ``conn`` is the file connection object. hash_file() ``````````` Returns the hexdigest hash value of a file. The arguments that are passed into this function are, in order, ``path`` (required), ``hashobj`` (required), and ``conn`` (optional). ``path`` is the absolute path to the file. ``hashobj`` is a reference to ``hashlib.sha1()``, which is used to pull the ``hexdigest()`` for the file. ``conn`` is the file connection object. This function will not generally be more complex than: .. code-block:: python def hash_file(path, hashobj, conn=None): with salt.utils.files.fopen(path, "r") as f: hashobj.update(f.read()) return hashobj.hexdigest() path_exists() ````````````` Check to see whether the file already exists on the filesystem. Returns ``True`` or ``False``. This function expects a ``path`` argument, which is the absolute path to the file to be checked. path_isdir() ```````````` Check to see whether the path specified is a directory. Returns ``True`` or ``False``. This function expects a ``path`` argument, which is the absolute path to be checked.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/spm/dev.rst
0.833833
0.855248
dev.rst
pypi
.. _spm-formula: ============ FORMULA File ============ In addition to the formula itself, a ``FORMULA`` file must exist which describes the package. An example of this file is: .. code-block:: yaml name: apache os: RedHat, Debian, Ubuntu, SUSE, FreeBSD os_family: RedHat, Debian, Suse, FreeBSD version: 201506 release: 2 summary: Formula for installing Apache description: Formula for installing Apache Required Fields ``````````````` This file must contain at least the following fields: name ~~~~ The name of the package, as it will appear in the package filename, in the repository metadata, and the package database. Even if the source formula has ``-formula`` in its name, this name should probably not include that. For instance, when packaging the ``apache-formula``, the name should be set to ``apache``. os ~~ The value of the ``os`` grain that this formula supports. This is used to help users know which operating systems can support this package. os_family ~~~~~~~~~ The value of the ``os_family`` grain that this formula supports. This is used to help users know which operating system families can support this package. version ~~~~~~~ The version of the package. While it is up to the organization that manages this package, it is suggested that this version is specified in a ``YYYYMM`` format. For instance, if this version was released in June 2015, the package version should be ``201506``. If multiple releases are made in a month, the ``release`` field should be used. minimum_version ~~~~~~~~~~~~~~~ Minimum recommended version of Salt to use this formula. Not currently enforced. release ~~~~~~~ This field refers primarily to a release of a version, but also to multiple versions within a month. In general, if a version has been made public, and immediate updates need to be made to it, this field should also be updated. summary ~~~~~~~ A one-line description of the package. 
description ~~~~~~~~~~~ A more detailed description of the package which can contain more than one line. Optional Fields ``````````````` The following fields may also be present. top_level_dir ~~~~~~~~~~~~~ This field is optional, but highly recommended. If it is not specified, the package name will be used. Formula repositories typically do not store ``.sls`` files in the root of the repository; instead they are stored in a subdirectory. For instance, an ``apache-formula`` repository would contain a directory called ``apache``, which would contain an ``init.sls``, plus a number of other related files. In this instance, the ``top_level_dir`` should be set to ``apache``. Files outside the ``top_level_dir``, such as ``README.rst``, ``FORMULA``, and ``LICENSE`` will not be installed. The exceptions to this rule are files that are already treated specially, such as ``pillar.example`` and ``_modules/``. dependencies ~~~~~~~~~~~~ A comma-separated list of packages that must be installed along with this package. When this package is installed, SPM will attempt to discover and install these packages as well. If it is unable to, then it will refuse to install this package. This is useful for creating packages which tie together other packages. For instance, a package called wordpress-mariadb-apache would depend upon wordpress, mariadb, and apache. optional ~~~~~~~~ A comma-separated list of packages which are related to this package, but are neither required nor necessarily recommended. This list is displayed in an informational message when the package is installed to SPM. recommended ~~~~~~~~~~~ A comma-separated list of optional packages that are recommended to be installed with the package. This list is displayed in an informational message when the package is installed to SPM. files ~~~~~ A files section can be added, to specify a list of files to add to the SPM. Such a section might look like: .. 
code-block:: yaml files: - _pillar - FORMULA - _runners - d|mymodule/index.rst - r|README.rst When ``files`` are specified, then only those files will be added to the SPM, regardless of what other files exist in the directory. They will also be added in the order specified, which is useful if you have a need to lay down files in a specific order. As can be seen in the example above, you may also tag files as being a specific type. This is done by pre-pending a filename with its type, followed by a pipe (``|``) character. The above example contains a document file and a readme. The available file types are: * ``c``: config file * ``d``: documentation file * ``g``: ghost file (i.e. the file contents are not included in the package payload) * ``l``: license file * ``r``: readme file * ``s``: SLS file * ``m``: Salt module The first 5 of these types (``c``, ``d``, ``g``, ``l``, ``r``) will be placed in ``/usr/share/salt/spm/`` by default. This can be changed by setting an ``spm_share_dir`` value in your ``/etc/salt/spm`` configuration file. The last two types (``s`` and ``m``) are currently ignored, but they are reserved for future use. Pre and Post States ------------------- It is possible to run Salt states before and after installing a package by using pre and post states. The following sections may be declared in a ``FORMULA``: * ``pre_local_state`` * ``pre_tgt_state`` * ``post_local_state`` * ``post_tgt_state`` Sections with ``pre`` in their name are evaluated before a package is installed and sections with ``post`` are evaluated after a package is installed. ``local`` states are evaluated before ``tgt`` states. Each of these sections needs to be evaluated as text, rather than as YAML. Consider the following block: .. code-block:: yaml pre_local_state: > echo test > /tmp/spmtest: cmd: - run Note that this declaration uses ``>`` after ``pre_local_state``. This is a YAML marker that marks the next multi-line block as text, including newlines. 
It is important to use this marker whenever declaring ``pre`` or ``post`` states, so that the text following it can be evaluated properly. local States ~~~~~~~~~~~~ ``local`` states are evaluated locally; this is analogous to issuing a state run using a ``salt-call --local`` command. These commands will be issued on the local machine running the ``spm`` command, whether that machine is a master or a minion. ``local`` states do not require any special arguments, but they must still use the ``>`` marker to denote that the state is evaluated as text, not a data structure. .. code-block:: yaml pre_local_state: > echo test > /tmp/spmtest: cmd: - run tgt States ~~~~~~~~~~ ``tgt`` states are issued against a remote target. This is analogous to issuing a state using the ``salt`` command. As such it requires that the machine that the ``spm`` command is running on is a master. Because ``tgt`` states require that a target be specified, their code blocks are a little different. Consider the following state: .. code-block:: yaml pre_tgt_state: tgt: '*' data: > echo test > /tmp/spmtest: cmd: - run With ``tgt`` states, the state data is placed under a ``data`` section, inside the ``*_tgt_state`` code block. The target is of course specified as a ``tgt`` and you may also optionally specify a ``tgt_type`` (the default is ``glob``). You still need to use the ``>`` marker, but this time it follows the ``data`` line, rather than the ``*_tgt_state`` line. Templating States ~~~~~~~~~~~~~~~~~ The reason that state data must be evaluated as text rather than a data structure is because that state data is first processed through the rendering engine, as it would be with a standard state run. This means that you can use Jinja or any other supported renderer inside of Salt. All formula variables are available to the renderer, so you can reference ``FORMULA`` data inside your state if you need to: .. 
code-block:: yaml pre_tgt_state: tgt: '*' data: > echo {{ name }} > /tmp/spmtest: cmd: - run You may also declare your own variables inside the ``FORMULA``. If SPM doesn't recognize them then it will ignore them, so there are no restrictions on variable names, outside of avoiding reserved words. By default the renderer is set to ``jinja|yaml``. You may change this by changing the ``renderer`` setting in the ``FORMULA`` itself. Building a Package ------------------ Once a ``FORMULA`` file has been created, it is placed into the root of the formula that is to be turned into a package. The ``spm build`` command is used to turn that formula into a package: .. code-block:: bash spm build /path/to/saltstack-formulas/apache-formula The resulting file will be placed in the build directory. By default this directory is located at ``/srv/spm/``. Loader Modules ============== When an execution module is placed in ``<file_roots>/_modules/`` on the master, it will automatically be synced to minions, the next time a sync operation takes place. Other modules are also propagated this way: state modules can be placed in ``_states/``, and so on. When SPM detects a file in a package which resides in one of these directories, that directory will be placed in ``<file_roots>`` instead of in the formula directory with the rest of the files. Removing Packages ================= Packages may be removed once they are installed using the ``spm remove`` command. .. code-block:: bash spm remove apache If files have been modified, they will not be removed. Empty directories will also be removed. Technical Information ===================== Packages are built using BZ2-compressed tarballs. By default, the package database is stored using the ``sqlite3`` driver (see Loader Modules below). Support for these are built into Python, and so no external dependencies are needed. All other files belonging to SPM use YAML, for portability and ease of use and maintainability. 
SPM-Specific Loader Modules =========================== SPM was designed to behave like traditional package managers, which apply files to the filesystem and store package metadata in a local database. However, because modern infrastructures often extend beyond those use cases, certain parts of SPM have been broken out into their own set of modules. Package Database ---------------- By default, the package database is stored using the ``sqlite3`` module. This module was chosen because support for SQLite3 is built into Python itself. Please see the SPM Development Guide for information on creating new modules for package database management. Package Files ------------- By default, package files are installed using the ``local`` module. This module applies files to the local filesystem, on the machine that the package is installed on. Please see the :ref:`SPM Development Guide <spm-development>` for information on creating new modules for package file management. Types of Packages ================= SPM supports different types of formula packages. The function of each package is denoted by its name. For instance, packages which end in ``-formula`` are considered to be Salt States (the most common type of formula). Packages which end in ``-conf`` contain configuration which is to be placed in the ``/etc/salt/`` directory. Packages which do not contain one of these names are treated as if they have a ``-formula`` name. formula ------- By default, most files from this type of package live in the ``/srv/spm/salt/`` directory. The exception is the ``pillar.example`` file, which will be renamed to ``<package_name>.sls`` and placed in the pillar directory (``/srv/spm/pillar/`` by default). reactor ------- By default, files from this type of package live in the ``/srv/spm/reactor/`` directory. conf ---- The files in this type of package are configuration files for Salt, which normally live in the ``/etc/salt/`` directory. 
Configuration files for packages other than Salt can and should be handled with a Salt State (using a ``formula`` type of package).
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/spm/spm_formula.rst
0.759761
0.802285
spm_formula.rst
pypi
.. _pull_requests: Pull Requests ============= Salt is a large software project with many developers working together. We encourage all Salt users to contribute new features, bug fixes and documentation fixes. For those who haven't contributed to a large software project before we encourage you to consider the following questions when preparing a pull request. This isn't an exhaustive list and these aren't necessarily hard and fast rules, but these are things we consider when reviewing a pull request. * Does this change work on all platforms? In cases where it does not, is an appropriate and easy-to-understand reason presented to the user? Is it documented as-such? Have we thought about all the possible ways this code might be used and accounted as best we can for them? * Will this code work on all versions of Python we support? Will it work on future versions? * Are Python reserved keywords used? Are variables named in a way that will make it easy for the next person to understand what's going on? * Does this code present a security risk in any way? What is the worst possible thing that an attacker could do with this code? If dangerous cases are possible, is it appropriate to document them? If so, has this been done? Would this change pass muster with a professional security audit? Is it obvious to a person using this code what the risks are? * Is it readable? Does it conform to our `style guide`_? Is the code documented such that the next person who comes along will be able to read and understand it? Most especially, are edge-cases documented to avoid regressions? Will it be immediately evident to the next person who comes along why this change was made? .. _`style guide`: https://docs.saltproject.io/en/latest/topics/development/conventions/style.html * If appropriate, has the person who wrote the code which is being modified been notified and included in the process? * What are the performance implications of this change? 
Is there a more efficient way to structure the logic and if so, does making the change balance itself against readability in a sensible way? Do the performance characteristics of the code change based on the way it is being invoked (i.e., through an API or various command-line tools.) Will it be easy to profile this change if it might be a problem? * Are caveats considered and documented in the change? * Will the code scale? More critically, will it scale in *both* directions? Salt runs in data-centers and on Raspberry Pi installations in the Sahara. It needs to work on big servers and tiny devices. * Is appropriate documentation written both in public-facing docs and in-line? How will the user know how to use this? What will they do if it doesn't work as expected? Is this something a new user will understand? Can a user know all they need to about this functionality by reading the public docs? * Is this a change in behavior? If so, is it in the appropriate branch? Are deprecation warnings necessary? Have those changes been fully documented? Have we fully thought through what implications a change in behavior might have? * How has the code been tested? If appropriate are there automated tests which cover this? Is it likely to regress? If so, how has the potential of that regression been mitigated? What is the plan for ensuring that this code works going forward? * If it's asynchronous code, what is the potential for a race condition? * Is this code an original work? If it's borrowed from another project or found online are the appropriate licensing/attribution considerations handled? * Is the reason for the change fully explained in the PR? If not for review, this is necessary so that somebody in the future can go back and figure out why it was necessary. * Is the intended behavior of the change clear? How will that behavior be known to future contributors and to users? * Does this code handle errors in a reasonable way? 
Have we gone back through the stack as much as possible to make sure that an error cannot be raised that we do not account for? Are errors tested for as well as proper functionality? * If the code relies on external libraries, do we properly handle old versions of them? Do we require a specific version and if so is this version check implemented? Is the library available on the same platforms that module in question claims to support? If the code was written and tested against a particular library, have we documented that fact? * Can this code freeze/hang/crash a running daemon? Can it stall a state run? Are there infinite loops? Are appropriate timeouts implemented? * Is the function interface well documented? If argument types can not be inferred by introspection, are they documented? * Are resources such as file-handles cleaned-up after they are used? * Is it possible that a reference-cycle exists between objects that will leak memory? * Has the code been linted and does it pass all tests? * Does the change fully address the problem or is it limited to a small surface area? By this, I mean that it should be clear that the submitter has looked for other cases in the function or module where the given case might also be addressed. If additional changes are necessary are they documented in the code as a FIXME or the PR and in Github as an issue to be tracked? * Will the code throw errors/warnings/stacktraces to the console during normal operation? * Has all the debugging been removed? * Does the code log any sensitive data? Does it show sensitive data in process lists? Does it store sensitive data to disk and if so, does it do so in a secure manner? Are there potential race conditions in between writing the data to disk and setting the appropriate permissions? * Is it clear from the solution that the problem is well-understood? How can somebody who has never seen the problem feel confident that this proposed change is the best one? 
* What's hard-coded that might not need to be? Are we making sensible decisions for the user and allowing them to tune and change things where appropriate? * Are utility functions used where appropriate? Does this change re-implement something we already have code for? * Is the right thing being fixed? There are cases where it's appropriate to fix a test and cases where it's appropriate to fix the code that's under test. Which is best for the user? Is this change a shortcut or a solution that will be solid in the months and years to come? * How will this code react to changes elsewhere in the code base? What is it coupled to and have we fully thought through how best to present a coherent interface to consumers of a given function or method? * Does this PR try to fix too many bugs/problems at once? * Should this be split into multiple PRs to make them easier to test and reason about? Pull Request Requirements ========================= The following outlines what is required before a pull request can be merged into the salt project. For each of these requirements, an exception can be made that requires 3 approvals before merge. The exceptions are detailed more below. All PR requirements ------------------- * Approval Required: approval review from core team member OR 1 approval review from captain of working group * Cannot merge your own PR until 1 reviewer approves from defined list above that is not the author. * All Tests Pass Bug Fix PR requirements ----------------------- * Test Coverage: regression test written to cover bug fix. Contributors only need to write test coverage for their specific changes. * Point to the issue the PR is resolving. If there is not an issue one will need to be created. Feature PR requirements ----------------------- * Test Coverage: tests written to cover new feature. Contributors only need to write test coverage for their specific changes. * Release Notes: Add note in release notes of new feature for relative release. * Add .. 
versionadded:: <release> to module's documentation. If you are not certain which release your fix will be included in you can include TBD and the PR reviewer will let you know the correct name of the release you need to update to the versionadded. Exceptions to all requirements ------------------------------ As previously stated, all of the above requirements can be bypassed with 3 approvals. PR's that do not require tests include: * documentation * cosmetic changes (for example changing from log.debug to log.trace) * fixing tests * pylint * changes outside of the salt directory
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/development/pull_requests.rst
0.604632
0.740937
pull_requests.rst
pypi
================================ Using the Salt Modules for Cloud ================================ In addition to the ``salt-cloud`` command, Salt Cloud can be called from Salt, in a variety of different ways. Most users will be interested in either the execution module or the state module, but it is also possible to call Salt Cloud as a runner. Because the actual work will be performed on a remote minion, the normal Salt Cloud configuration must exist on any target minion that needs to execute a Salt Cloud command. Because Salt Cloud now supports breaking out configuration into individual files, the configuration is easily managed using Salt's own ``file.managed`` state function. For example, the following directories allow this configuration to be managed easily: .. code-block:: yaml /etc/salt/cloud.providers.d/ /etc/salt/cloud.profiles.d/ Minion Keys ----------- Keep in mind that when creating minions, Salt Cloud will create public and private minion keys, upload them to the minion, and place the public key on the machine that created the minion. It will *not* attempt to place any public minion keys on the master, unless the minion which was used to create the instance is also the Salt Master. This is because granting arbitrary minions access to modify keys on the master is a serious security risk, and must be avoided. Execution Module ---------------- The ``cloud`` module is available to use from the command line. At the moment, almost every standard Salt Cloud feature is available to use. The following commands are available: list_images ~~~~~~~~~~~ This command is designed to show images that are available to be used to create an instance using Salt Cloud. In general they are used in the creation of profiles, but may also be used to create an instance directly (see below). Listing images requires a provider to be configured, and specified: .. 
code-block:: bash salt myminion cloud.list_images my-cloud-provider list_sizes ~~~~~~~~~~ This command is designed to show sizes that are available to be used to create an instance using Salt Cloud. In general they are used in the creation of profiles, but may also be used to create an instance directly (see below). This command is not available for all cloud providers; see the provider-specific documentation for details. Listing sizes requires a provider to be configured, and specified: .. code-block:: bash salt myminion cloud.list_sizes my-cloud-provider list_locations ~~~~~~~~~~~~~~ This command is designed to show locations that are available to be used to create an instance using Salt Cloud. In general they are used in the creation of profiles, but may also be used to create an instance directly (see below). This command is not available for all cloud providers; see the provider-specific documentation for details. Listing locations requires a provider to be configured, and specified: .. code-block:: bash salt myminion cloud.list_locations my-cloud-provider query ~~~~~ This command is used to query all configured cloud providers, and display all instances associated with those accounts. By default, it will run a standard query, returning the following fields: ``id`` The name or ID of the instance, as used by the cloud provider. ``image`` The disk image that was used to create this instance. ``private_ips`` Any private IP addresses currently assigned to this instance. ``public_ips`` Any public IP addresses currently assigned to this instance. ``size`` The size of the instance; can refer to RAM, CPU(s), disk space, etc., depending on the cloud provider. ``state`` The running state of the instance; for example, ``running``, ``stopped``, ``pending``, etc. This state is dependent upon the provider. This command may also be used to perform a full query or a select query, as described below. The following usages are available: .. 
code-block:: bash salt myminion cloud.query salt myminion cloud.query list_nodes salt myminion cloud.query list_nodes_full full_query ~~~~~~~~~~ This command behaves like the ``query`` command, but lists all information concerning each instance as provided by the cloud provider, in addition to the fields returned by the ``query`` command. .. code-block:: bash salt myminion cloud.full_query select_query ~~~~~~~~~~~~ This command behaves like the ``query`` command, but only returned select fields as defined in the ``/etc/salt/cloud`` configuration file. A sample configuration for this section of the file might look like: .. code-block:: yaml query.selection: - id - key_name This configuration would only return the ``id`` and ``key_name`` fields, for those cloud providers that support those two fields. This would be called using the following command: .. code-block:: bash salt myminion cloud.select_query profile ~~~~~~~ This command is used to create an instance using a profile that is configured on the target minion. Please note that the profile must be configured before this command can be used with it. .. code-block:: bash salt myminion cloud.profile ec2-centos64-x64 my-new-instance Please note that the execution module does *not* run in parallel mode. Using multiple minions to create instances can effectively perform parallel instance creation. create ~~~~~~ This command is similar to the ``profile`` command, in that it is used to create a new instance. However, it does not require a profile to be pre-configured. Instead, all of the options that are normally configured in a profile are passed directly to Salt Cloud to create the instance: .. code-block:: bash salt myminion cloud.create my-ec2-config my-new-instance \ image=ami-1624987f size='t1.micro' ssh_username=ec2-user \ securitygroup=default delvol_on_destroy=True Please note that the execution module does *not* run in parallel mode. 
Using multiple minions to create instances can effectively perform parallel instance creation. destroy ~~~~~~~ This command is used to destroy an instance or instances. This command will search all configured providers and remove any instance(s) which matches the name(s) passed in here. The results of this command are *non-reversible* and should be used with caution. .. code-block:: bash salt myminion cloud.destroy myinstance salt myminion cloud.destroy myinstance1,myinstance2 action ~~~~~~ This command implements both the ``action`` and the ``function`` commands used in the standard ``salt-cloud`` command. If one of the standard ``action`` commands is used, an instance name must be provided. If one of the standard ``function`` commands is used, a provider configuration must be named. .. code-block:: bash salt myminion cloud.action start instance=myinstance salt myminion cloud.action show_image provider=my-ec2-config \ image=ami-1624987f The actions available are largely dependent upon the module for the specific cloud provider. The following actions are available for all cloud providers: ``list_nodes`` This is a direct call to the ``query`` function as described above, but is only performed against a single cloud provider. A provider configuration must be included. ``list_nodes_full`` This is a direct call to the ``full_query`` function as described above, but is only performed against a single cloud provider. A provider configuration must be included. ``list_nodes_select`` This is a direct call to the ``select_query`` function as described above, but is only performed against a single cloud provider. A provider configuration must be included. ``show_instance`` This is a thin wrapper around ``list_nodes``, which returns the full information about a single instance. An instance name must be provided. State Module ------------ A subset of the execution module is available through the ``cloud`` state module. 
Not all functions are currently included, because there is currently insufficient code for them to perform statefully. For example, a command to create an instance may be issued with a series of options, but those options cannot currently be statefully managed. Additional states to manage these options will be released at a later time. cloud.present ~~~~~~~~~~~~~ This state will ensure that an instance is present inside a particular cloud provider. Any option that is normally specified in the ``cloud.create`` execution module and function may be declared here, but only the actual presence of the instance will be managed statefully. .. code-block:: yaml my-instance-name: cloud.present: - cloud_provider: my-ec2-config - image: ami-1624987f - size: 't1.micro' - ssh_username: ec2-user - securitygroup: default - delvol_on_destroy: True cloud.profile ~~~~~~~~~~~~~ This state will ensure that an instance is present inside a particular cloud provider. This function calls the ``cloud.profile`` execution module and function, but as with ``cloud.present``, only the actual presence of the instance will be managed statefully. .. code-block:: yaml my-instance-name: cloud.profile: - profile: ec2-centos64-x64 cloud.absent ~~~~~~~~~~~~ This state will ensure that an instance (identified by name) does not exist in any of the cloud providers configured on the target minion. Please note that this state is *non-reversible* and may be considered especially destructive when issued as a cloud state. .. code-block:: yaml my-instance-name: cloud.absent Runner Module ------------- The ``cloud`` runner module is executed on the master, and performs actions using the configuration and Salt modules on the master itself. This means that any public minion keys will also be properly accepted by the master. Using the functions in the runner module is no different than using those in the execution module, outside of the behavior described in the above paragraph. 
The following functions are available inside the runner: - list_images - list_sizes - list_locations - query - full_query - select_query - profile - destroy - action Outside of the standard usage of ``salt-run`` itself, commands are executed as usual: .. code-block:: bash salt-run cloud.profile ec2-centos64-x86_64 my-instance-name CloudClient ----------- The execution, state, and runner modules ultimately all use the CloudClient library that ships with Salt. To use the CloudClient library locally (either on the master or a minion), create a client object and issue a command against it: .. code-block:: python import salt.cloud import pprint client = salt.cloud.CloudClient("/etc/salt/cloud") nodes = client.query() pprint.pprint(nodes) Reactor ------- Examples of using the reactor with Salt Cloud are available in the :formula_url:`ec2-autoscale-reactor <ec2-autoscale-reactor>` and :formula_url:`salt-cloud-reactor <salt-cloud-reactor>` formulas.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/cloud/salt.rst
0.903784
0.686817
salt.rst
pypi
.. _salt-cloud-feature-matrix: ============== Feature Matrix ============== A number of features are available in most cloud hosts, but not all are available everywhere. This may be because the feature isn't supported by the cloud host itself, or it may only be that the feature has not yet been added to Salt Cloud. In a handful of cases, it is because the feature does not make sense for a particular cloud provider (Saltify, for instance). This matrix shows which features are available in which cloud hosts, as far as Salt Cloud is concerned. This is not a comprehensive list of all features available in all cloud hosts, and should not be used to make business decisions concerning choosing a cloud host. In most cases, adding support for a feature to Salt Cloud requires only a little effort. Legacy Drivers ============== Both AWS and Rackspace are listed as "Legacy". This is because those drivers have been replaced by other drivers, which are generally the preferred method for working with those hosts. The EC2 driver should be used instead of the AWS driver, when possible. The OpenStack driver should be used instead of the Rackspace driver, unless the user is dealing with instances in "the old cloud" in Rackspace. Note for Developers =================== When adding new features to a particular cloud host, please make sure to add the feature to this table. Additionally, if you notice a feature that is not properly listed here, pull requests to fix them are appreciated. Standard Features ================= These are features that are available for almost every cloud host. .. 
container:: scrollable +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+ | |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Vagrant|Softlayer|Softlayer|Aliyun|Tencent| | |(Legacy)| |Ocean | | | | | | |(Legacy) | | | |Hardware | |Cloud | +=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=======+=========+=========+======+=======+ |Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |Yes | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+ |Full Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |Yes | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+ |Selective Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |Yes | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+ |List Sizes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[2] |[2] |Yes |Yes |Yes |Yes | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+ |List Images |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+ |List Locations |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[2] |[2] |Yes |Yes |Yes |Yes | 
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+ |create |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |[1] |Yes |Yes |Yes |Yes | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+ |destroy |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |Yes | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+ [1] Yes, if salt-api is enabled. [2] Always returns `{}`. Actions ======= These are features that are performed on a specific instance, and require an instance name to be passed in. For example: .. code-block:: bash # salt-cloud -a attach_volume ami.example.com .. container:: scrollable +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |Actions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify&|Softlayer|Softlayer|Aliyun|Tencent| | |(Legacy)| |Ocean | | | | | | |(Legacy) | Vagrant| |Hardware | |Cloud | +=======================+========+==========+=======+===+======+======+======+=========+=========+=========+========+=========+=========+======+=======+ |attach_volume | | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |create_attach_volumes |Yes | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |del_tags |Yes | | |Yes| | | | | | | | | | | | 
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |delvol_on_destroy | | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |detach_volume | | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |disable_term_protect |Yes | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |enable_term_protect |Yes | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |get_tags |Yes | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |keepvol_on_destroy | | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |list_keypairs | | |Yes | | | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |rename |Yes | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |set_tags |Yes | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ 
|show_delvol_on_destroy | | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |show_instance | | |Yes |Yes| | |Yes | |Yes | | |Yes |Yes |Yes |Yes | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |show_term_protect | | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |start |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |Yes | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |stop |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |Yes | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |take_action | | | | | |Yes | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ Functions ========= These are features that are performed against a specific cloud provider, and require the name of the provider to be passed in. For example: .. code-block:: bash # salt-cloud -f list_images my_digitalocean .. 
container:: scrollable +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |Functions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify&|Softlayer|Softlayer|Aliyun|Tencent| | |(Legacy)| |Ocean | | | | | | |(Legacy) | Vagrant| |Hardware | |Cloud | +=======================+========+==========+=======+===+======+======+======+=========+=========+=========+========+=========+=========+======+=======+ |block_device_mappings |Yes | | | | | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |create_keypair | | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |create_volume | | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |delete_key | | | | | |Yes | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |delete_keypair | | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |delete_volume | | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |get_image | | |Yes | | |Yes | | |Yes | | | | |Yes | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |get_ip | |Yes | | | | | | | | 
| | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |get_key | |Yes | | | | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |get_keyid | | |Yes | | | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |get_keypair | |Yes | | | | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |get_networkid | |Yes | | | | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |get_node | | | | | |Yes | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |get_password | |Yes | | | | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |get_size | | |Yes | | |Yes | | | | | | | |Yes | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |get_spot_config | | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |get_subnetid | | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |iam_profile |Yes | | 
|Yes| | | | | | | | | |Yes | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |import_key | | | | | |Yes | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |key_list | | | | | |Yes | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |keyname |Yes | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |list_availability_zones| | | |Yes| | | | | | | | | |Yes |Yes | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |list_custom_images | | | | | | | | | | | |Yes | | |Yes | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |list_keys | | | | | |Yes | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |list_nodes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |list_nodes_full |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |list_nodes_select |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes | 
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |list_vlans | | | | | | | | | | | |Yes |Yes | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |rackconnect | | | | | | | |Yes | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |reboot | | | |Yes| |Yes | | | | |[1] | | |Yes |Yes | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |reformat_node | | | | | |Yes | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |securitygroup |Yes | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |securitygroupid | | | |Yes| | | | | | | | | |Yes | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |show_image | | | |Yes| | | | |Yes | | | | |Yes |Yes | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |show_key | | | | | |Yes | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ |show_keypair | | |Yes |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ 
|show_volume | | | |Yes| | | | | | | | | | | | +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+ [1] Yes, if salt-api is enabled.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/cloud/features.rst
0.782953
0.668322
features.rst
pypi
.. _grains: ====== Grains ====== Salt comes with an interface to derive information about the underlying system. This is called the grains interface, because it presents salt with grains of information. Grains are collected for the operating system, domain name, IP address, kernel, OS type, memory, and many other system properties. The grains interface is made available to Salt modules and components so that the right salt minion commands are automatically available on the right systems. Grain data is relatively static, though if system information changes (for example, if network settings are changed), or if a new value is assigned to a custom grain, grain data is refreshed. .. note:: Grains resolve to lowercase letters. For example, ``FOO``, and ``foo`` target the same grain. Listing Grains ============== Available grains can be listed by using the 'grains.ls' module: .. code-block:: bash salt '*' grains.ls Grains data can be listed by using the 'grains.items' module: .. code-block:: bash salt '*' grains.items .. _static-custom-grains: Using grains in a state ======================= To use a grain in a state you can access it via `{{ grains['key'] }}`. Grains in the Minion Config =========================== Grains can also be statically assigned within the minion configuration file. Just add the option :conf_minion:`grains` and pass options to it: .. code-block:: yaml grains: roles: - webserver - memcache deployment: datacenter4 cabinet: 13 cab_u: 14-15 Then status data specific to your servers can be retrieved via Salt, or used inside of the State system for matching. It also makes it possible to target based on specific data about your deployment, as in the example above. Grains in /etc/salt/grains ========================== If you do not want to place your custom static grains in the minion config file, you can also put them in ``/etc/salt/grains`` on the minion. 
They are configured in the same way as in the above example, only without a top-level ``grains:`` key: .. code-block:: yaml roles: - webserver - memcache deployment: datacenter4 cabinet: 13 cab_u: 14-15 .. note:: Grains in ``/etc/salt/grains`` are ignored if you specify the same grains in the minion config. .. note:: Grains are static, and since they are not often changed, they will need a grains refresh when they are updated. You can do this by calling: ``salt minion saltutil.refresh_modules`` .. note:: You can equally configure static grains for Proxy Minions. As multiple Proxy Minion processes can run on the same machine, you need to index the files using the Minion ID, under ``/etc/salt/proxy.d/<minion ID>/grains``. For example, the grains for the Proxy Minion ``router1`` can be defined under ``/etc/salt/proxy.d/router1/grains``, while the grains for the Proxy Minion ``switch7`` can be put in ``/etc/salt/proxy.d/switch7/grains``. Matching Grains in the Top File =============================== With correctly configured grains on the Minion, the :term:`top file <Top File>` used in Pillar or during Highstate can be made very efficient. For example, consider the following configuration: .. code-block:: yaml 'roles:webserver': - match: grain - state0 'roles:memcache': - match: grain - state1 - state2 For this example to work, you would need to have defined the grain ``roles`` for the minions you wish to match. .. _writing-grains: Writing Grains ============== .. include:: ../../_incl/grains_passwords.rst The grains are derived by executing all of the "public" functions (i.e. those which do not begin with an underscore) found in the modules located in the Salt's core grains code, followed by those in any custom grains modules. The functions in a grains module must return a :ref:`Python dictionary <python:typesmapping>`, where the dictionary keys are the names of grains, and each key's value is that value for that grain. 
Custom grains modules should be placed in a subdirectory named ``_grains`` located under the :conf_master:`file_roots` specified by the master config file. The default path would be ``/srv/salt/_grains``. Custom grains modules will be distributed to the minions when :mod:`state.highstate <salt.modules.state.highstate>` is run, or by executing the :mod:`saltutil.sync_grains <salt.modules.saltutil.sync_grains>` or :mod:`saltutil.sync_all <salt.modules.saltutil.sync_all>` functions. Grains modules are easy to write, and (as noted above) only need to return a dictionary. For example: .. code-block:: python def yourfunction(): # initialize a grains dictionary grains = {} # Some code for logic that sets grains like grains["yourcustomgrain"] = True grains["anothergrain"] = "somevalue" return grains The name of the function does not matter and will not factor into the grains data at all; only the keys/values returned become part of the grains. When to Use a Custom Grain -------------------------- Before adding new grains, consider what the data is and remember that grains should (for the most part) be static data. If the data is something that is likely to change, consider using :ref:`Pillar <pillar>` or an execution module instead. If it's a simple set of key/value pairs, pillar is a good match. If compiling the information requires that system commands be run, then putting this information in an execution module is likely a better idea. Good candidates for grains are data that is useful for targeting minions in the :ref:`top file <states-top>` or the Salt CLI. The name and data structure of the grain should be designed to support many platforms, operating systems or applications. Also, keep in mind that Jinja templating in Salt supports referencing pillar data as well as invoking functions from execution modules, so there's no need to place information in grains to make it available to Jinja templates. For example: .. code-block:: text ... ... 
{{ salt['module.function_name']('argument_1', 'argument_2') }} {{ pillar['my_pillar_key'] }} ... ... .. warning:: Custom grains will not be available in the top file until after the first :ref:`highstate <running-highstate>`. To make custom grains available on a minion's first highstate, it is recommended to use :ref:`this example <minion-start-reactor>` to ensure that the custom grains are synced when the minion starts. Loading Custom Grains --------------------- If you have multiple functions specifying grains that are called from a ``main`` function, be sure to prepend grain function names with an underscore. This prevents Salt from including the loaded grains from the grain functions in the final grain data structure. For example, consider this custom grain file: .. code-block:: python #!/usr/bin/env python def _my_custom_grain(): my_grain = {"foo": "bar", "hello": "world"} return my_grain def main(): # initialize a grains dictionary grains = {} grains["my_grains"] = _my_custom_grain() return grains The output of this example renders like so: .. code-block:: console # salt-call --local grains.items local: ---------- <Snipped for brevity> my_grains: ---------- foo: bar hello: world However, if you don't prepend the ``my_custom_grain`` function with an underscore, the function will be rendered twice by Salt in the items output: once for the ``my_custom_grain`` call itself, and again when it is called in the ``main`` function: .. code-block:: console # salt-call --local grains.items local: ---------- <Snipped for brevity> foo: bar <Snipped for brevity> hello: world <Snipped for brevity> my_grains: ---------- foo: bar hello: world Precedence ========== Core grains can be overridden by custom grains. As there are several ways of defining custom grains, there is an order of precedence which should be kept in mind when defining them. The order of evaluation is as follows: 1. Core grains. 2. Custom grains in ``/etc/salt/grains``. 3. 
Custom grains in ``/etc/salt/minion``. 4. Custom grain modules in ``_grains`` directory, synced to minions. Each successive evaluation overrides the previous ones, so any grains defined by custom grains modules synced to minions that have the same name as a core grain will override that core grain. Similarly, grains from ``/etc/salt/minion`` override both core grains and custom grain modules, and grains in ``_grains`` will override *any* grains of the same name. For custom grains, if the function takes an argument ``grains``, then the previously rendered grains will be passed in. Because the rest of the grains could be rendered in any order, the only grains that can be relied upon to be passed in are ``core`` grains. This was added in the 2019.2.0 release. Examples of Grains ================== The core module in the grains package is where the main grains are loaded by the Salt minion and provides the principal example of how to write grains: :blob:`salt/grains/core.py` Syncing Grains ============== Syncing grains can be done a number of ways. They are automatically synced when :mod:`state.highstate <salt.modules.state.highstate>` is called, or (as noted above) the grains can be manually synced and reloaded by calling the :mod:`saltutil.sync_grains <salt.modules.saltutil.sync_grains>` or :mod:`saltutil.sync_all <salt.modules.saltutil.sync_all>` functions. .. note:: When the :conf_minion:`grains_cache` is set to False, the grains dictionary is built and stored in memory on the minion. Every time the minion restarts or ``saltutil.refresh_grains`` is run, the grain dictionary is rebuilt from scratch.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/grains/index.rst
0.900645
0.697297
index.rst
pypi
.. _syndic: =========== Salt Syndic =========== The most basic or typical Salt topology consists of a single Master node controlling a group of Minion nodes. An intermediate node type, called Syndic, when used offers greater structural flexibility and scalability in the construction of Salt topologies than topologies constructed only out of Master and Minion node types. A Syndic node can be thought of as a special passthrough Minion node. A Syndic node consists of a ``salt-syndic`` daemon and a ``salt-master`` daemon running on the same system. The ``salt-master`` daemon running on the Syndic node controls a group of lower level Minion nodes and the ``salt-syndic`` daemon connects to a higher level Master node, sometimes called a Master of Masters. The ``salt-syndic`` daemon relays publications and events between the Master node and the local ``salt-master`` daemon. This gives the Master node control over the Minion nodes attached to the ``salt-master`` daemon running on the Syndic node. .. warning:: Salt does not officially support Syndic and :ref:`external auth or publisher_acl<acl-eauth>`. It's possible that it might work under certain circumstances, but comprehensive support is lacking. See `issue #62618 on GitHub <https://github.com/saltstack/salt/issues/62618>`_ for more information. Currently Syndic is only expected to work when running Salt as root, though work is scheduled to fix this in Salt 3006 (Sulfur). Configuring the Syndic ====================== To setup a Salt Syndic you need to tell the Syndic node and its Master node about each other. If your Master node is located at ``10.10.0.1``, then your configurations would be: On the Syndic node: .. code-block:: yaml # /etc/salt/master syndic_master: 10.10.0.1 # may be either an IP address or a hostname .. code-block:: yaml # /etc/salt/minion # id is shared by the salt-syndic daemon and a possible salt-minion daemon # on the Syndic node id: my_syndic On the Master node: .. 
code-block:: yaml # /etc/salt/master order_masters: True The :conf_master:`syndic_master` option tells the Syndic node where to find the Master node in the same way that the :conf_minion:`master` option tells a Minion node where to find a Master node. The :conf_minion:`id` option is used by the ``salt-syndic`` daemon to identify with the Master node and if unset will default to the hostname or IP address of the Syndic just as with a Minion. The :conf_master:`order_masters` option configures the Master node to send extra information with its publications that is needed by Syndic nodes connected directly to it. .. note:: Each Syndic must provide its own ``file_roots`` directory. Files will not be automatically transferred from the Master node. Configuring the Syndic with Multimaster ======================================= .. versionadded:: 2015.5.0 Syndic with Multimaster lets you connect a syndic to multiple masters to provide an additional layer of redundancy in a syndic configuration. Higher level masters should first be configured in a multimaster configuration. See :ref:`Multimaster Tutorial <tutorial-multi-master>`. On the syndic, the :conf_master:`syndic_master` option is populated with a list of the higher level masters. Since each syndic is connected to each master, jobs sent from any master are forwarded to minions that are connected to each syndic. If the ``master_id`` value is set in the master config on the higher level masters, job results are returned to the master that originated the request in a best effort fashion. Events/jobs without a ``master_id`` are returned to any available master. Running the Syndic ================== The ``salt-syndic`` daemon is a separate process that needs to be started in addition to the ``salt-master`` daemon running on the Syndic node. Starting the ``salt-syndic`` daemon is the same as starting the other Salt daemons. The Master node in many ways sees the Syndic as an ordinary Minion node. 
In particular, the Master will need to accept the Syndic's Minion key as it would for any other Minion. On the Syndic node: .. code-block:: bash # salt-syndic or # service salt-syndic start On the Master node: .. code-block:: bash # salt-key -a my_syndic The Master node will now be able to control the Minion nodes connected to the Syndic. Only the Syndic key will be listed in the Master node's key registry but this also means that key activity between the Syndic's Minions and the Syndic does not encumber the Master node. In this way, the Syndic's key on the Master node can be thought of as a placeholder for the keys of all the Minion and Syndic nodes beneath it, giving the Master node a clear, high level structural view on the Salt cluster. On the Master node: .. code-block:: bash # salt-key -L Accepted Keys: my_syndic Denied Keys: Unaccepted Keys: Rejected Keys: # salt '*' test.version minion_1: 2018.3.4 minion_2: 2018.3.4 minion_4: 2018.3.4 minion_3: 2018.3.4 Topology ======== A Master node (a node which is itself not a Syndic to another higher level Master node) must run a ``salt-master`` daemon and optionally a ``salt-minion`` daemon. A Syndic node must run ``salt-syndic`` and ``salt-master`` daemons and optionally a ``salt-minion`` daemon. A Minion node must run a ``salt-minion`` daemon. When a ``salt-master`` daemon issues a command, it will be received by the Syndic and Minion nodes directly connected to it. A Minion node will process the command in the way it ordinarily would. On a Syndic node, the ``salt-syndic`` daemon will relay the command to the ``salt-master`` daemon running on the Syndic node, which then propagates the command to the Minions and Syndics connected to it. 
When events and job return data are generated by ``salt-minion`` daemons, they are aggregated by the ``salt-master`` daemon they are connected to, which ``salt-master`` daemon then relays the data back through its ``salt-syndic`` daemon until the data reaches the Master or Syndic node that issued the command. Syndic wait =========== ``syndic_wait`` is a master configuration file setting that specifies the number of seconds the Salt client should wait for additional syndics to check in with their lists of expected minions before giving up. This value defaults to ``5`` seconds. The ``syndic_wait`` setting is necessary because the higher-level master does not have a way of knowing which minions are below the syndics. The higher-level master has its own list of expected minions and the masters below them have their own lists as well, so the Salt client does not know how long to wait for all returns. The ``syndic_wait`` option allows time for all minions to return to the Salt client. .. note:: To reduce the amount of time the CLI waits for Minions to respond, install a Minion on the Syndic or tune the value of the ``syndic_wait`` configuration. While it is possible to run a Syndic without a Minion installed on the same system, it is recommended, for a faster CLI response time, to do so. Without a Minion installed on the Syndic node, the timeout value of ``syndic_wait`` increases significantly - about three-fold. With a Minion installed on the Syndic, the CLI timeout resides at the value defined in ``syndic_wait``. .. note:: If you have a very large infrastructure or many layers of Syndics, you may find that the CLI doesn't wait long enough for the Syndics to return their events. If you think this is the case, you can set the :conf_master:`syndic_wait` value in the Master configs on the Master or Syndic nodes from which commands are executed. The default value is ``5``, and should work for the majority of deployments. 
In order for a Master or Syndic node to return information from Minions that are below their Syndics, the CLI requires a short wait time in order to allow the Syndics to gather responses from their Minions. This value is defined in the :conf_master:`syndic_wait` config option and has a default of five seconds. Syndic config options ===================== These are the options that can be used to configure a Syndic node. Note that other than ``id``, Syndic config options are placed in the Master config on the Syndic node. - :conf_minion:`id`: Syndic id (shared by the ``salt-syndic`` daemon with a potential ``salt-minion`` daemon on the same system) - :conf_master:`syndic_master`: Master node IP address or hostname - :conf_master:`syndic_master_port`: Master node ret_port - :conf_master:`syndic_log_file`: path to the logfile (absolute or not) - :conf_master:`syndic_pidfile`: path to the pidfile (absolute or not) - :conf_master:`syndic_wait`: time in seconds to wait on returns from this syndic Minion Data Cache ================= Beginning with Salt 2016.11.0, the :ref:`Pluggable Minion Data Cache <pluggable-data-cache>` was introduced. The minion data cache contains the Salt Mine data, minion grains, and minion pillar information cached on the Salt Master. By default, Salt uses the ``localfs`` cache module, but other external data stores can be used instead. Using a pluggable minion cache modules allows for the data stored on a Salt Master about Salt Minions to be replicated on other Salt Masters the Minion is connected to. Please see the :ref:`Minion Data Cache <cache>` documentation for more information and configuration examples.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/topology/syndic.rst
0.801042
0.800731
syndic.rst
pypi
.. _tutorial-macos-walk-through: ====================================================================== The macOS (Maverick) Developer Step By Step Guide To Salt Installation ====================================================================== This document provides a step-by-step guide to installing a Salt cluster consisting of one master, and one minion running on a local VM hosted on macOS. .. note:: This guide is aimed at developers who wish to run Salt in a virtual machine. The official (Linux) walkthrough can be found `here <https://docs.saltproject.io/topics/tutorials/walkthrough.html>`_. The 5 Cent Salt Intro ===================== Since you're here you've probably already heard about Salt, so you already know Salt lets you configure and run commands on hordes of servers easily. Here's a brief overview of a Salt cluster: - Salt works by having a "master" server sending commands to one or multiple "minion" servers. The master server is the "command center". It is going to be the place where you store your configuration files, aka: "which server is the db, which is the web server, and what libraries and software they should have installed". The minions receive orders from the master. Minions are the servers actually performing work for your business. - Salt has two types of configuration files: 1. the "salt communication channels" or "meta" or "config" configuration files (not official names): one for the master (usually is /etc/salt/master , **on the master server**), and one for minions (default is /etc/salt/minion or /etc/salt/minion.conf, **on the minion servers**). Those files are used to determine things like the Salt Master IP, port, Salt folder locations, etc.. If these are configured incorrectly, your minions will probably be unable to receive orders from the master, or the master will not know which software a given minion should install. 2. 
the "business" or "service" configuration files (once again, not an official name): these are configuration files, ending with ".sls" extension, that describe which software should run on which server, along with particular configuration properties for the software that is being installed. These files should be created in the /srv/salt folder by default, but their location can be changed using ... /etc/salt/master configuration file! .. note:: This tutorial contains a third important configuration file, not to be confused with the previous two: the virtual machine provisioning configuration file. This in itself is not specifically tied to Salt, but it also contains some Salt configuration. More on that in step 3. Also note that all configuration files are YAML files. So indentation matters. .. note:: Salt also works with "masterless" configuration where a minion is autonomous (in which case salt can be seen as a local configuration tool), or in "multiple master" configuration. See the documentation for more on that. Before Digging In, The Architecture Of The Salt Cluster ------------------------------------------------------- Salt Master *********** The "Salt master" server is going to be the Mac OS machine, directly. Commands will be run from a terminal app, so Salt will need to be installed on the Mac. This is going to be more convenient for toying around with configuration files. Salt Minion *********** We'll only have one "Salt minion" server. It is going to be running on a Virtual Machine running on the Mac, using VirtualBox. It will run an Ubuntu distribution. Step 1 - Configuring The Salt Master On Your Mac ================================================ See the `Salt install guide <https://docs.saltproject.io/salt/install-guide/en/latest/>`_ for macOS installation instructions. Because Salt has a lot of dependencies that are not built in macOS, we will use Homebrew to install Salt. 
Homebrew is a package manager for Mac, it's great, use it (for this tutorial at least!). Some people spend a lot of time installing libs by hand to better understand dependencies, and then realize how useful a package manager is once they're configuring a brand new machine and have to do it all over again. It also lets you *uninstall* things easily. .. note:: Brew is a Ruby program (Ruby is installed by default with your Mac). Brew downloads, compiles, and links software. The linking phase is when compiled software is deployed on your machine. It may conflict with manually installed software, especially in the /usr/local directory. It's ok, remove the manually installed version then refresh the link by typing ``brew link 'packageName'``. Brew has a ``brew doctor`` command that can help you troubleshoot. It's a great command, use it often. Brew requires xcode command line tools. When you run brew the first time it asks you to install them if they're not already on your system. Brew installs software in /usr/local/bin (system bins are in /usr/bin). In order to use those bins you need your $PATH to search there first. Brew tells you if your $PATH needs to be fixed. .. tip:: Use the keyboard shortcut ``cmd + shift + period`` in the "open" macOS dialog box to display hidden files and folders, such as .profile. Install Homebrew ---------------- Install Homebrew here https://brew.sh/ Or just type .. code-block:: bash ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" Now type the following commands in your terminal (you may want to type ``brew doctor`` after each to make sure everything's fine): .. code-block:: bash brew install python brew install swig brew install zmq .. note:: zmq is ZeroMQ. It's a fantastic library used for server to server network communication and is at the core of Salt efficiency. Install Salt ------------ You should now have everything ready to launch this command: .. code-block:: bash pip install salt .. 
note:: There should be no need for ``sudo pip install salt``. Brew installed Python for your user, so you should have all the access. In case you would like to check, type ``which python`` to ensure that it's /usr/local/bin/python, and ``which pip`` which should be /usr/local/bin/pip. Now type ``python`` in a terminal then, ``import salt``. There should be no errors. Now exit the Python terminal using ``exit()``. Create The Master Configuration ------------------------------- If the default /etc/salt/master configuration file was not created, copy-paste it from here: https://docs.saltproject.io/en/latest/ref/configuration/examples.html#configuration-examples-master .. note:: ``/etc/salt/master`` is a file, not a folder. Salt Master configuration changes. The Salt master needs a few customization to be able to run on macOS: .. code-block:: bash sudo launchctl limit maxfiles 4096 8192 In the /etc/salt/master file, change max_open_files to 8192 (or just add the line: ``max_open_files: 8192`` (no quote) if it doesn't already exists). You should now be able to launch the Salt master: .. code-block:: bash sudo salt-master --log-level=all There should be no errors when running the above command. .. note:: This command is supposed to be a daemon, but for toying around, we'll keep it running on a terminal to monitor the activity. Now that the master is set, let's configure a minion on a VM. Step 2 - Configuring The Minion VM ================================== The Salt minion is going to run on a Virtual Machine. There are a lot of software options that let you run virtual machines on a mac, But for this tutorial we're going to use VirtualBox. In addition to virtualBox, we will use Vagrant, which allows you to create the base VM configuration. Vagrant lets you build ready to use VM images, starting from an OS image and customizing it using "provisioners". 
In our case, we'll use it to: * Download the base Ubuntu image * Install salt on that Ubuntu image (Salt is going to be the "provisioner" for the VM). * Launch the VM * SSH into the VM to debug * Stop the VM once you're done. Install VirtualBox ------------------ Go get it here: https://www.virtualbox.org/wiki/Downloads (click on VirtualBox for macOS hosts => x86/amd64) Install Vagrant --------------- Go get it here: https://www.vagrantup.com/downloads.html and choose the latest version (1.3.5 at time of writing), then the .dmg file. Double-click to install it. Make sure the ``vagrant`` command is found when run in the terminal. Type ``vagrant``. It should display a list of commands. Create The Minion VM Folder --------------------------- Create a folder in which you will store your minion's VM. In this tutorial, it's going to be a minion folder in the $home directory. .. code-block:: bash cd $home mkdir minion Initialize Vagrant ------------------ From the minion folder, type .. code-block:: bash vagrant init This command creates a default Vagrantfile configuration file. This configuration file will be used to pass configuration parameters to the Salt provisioner in Step 3. Import Precise64 Ubuntu Box --------------------------- .. code-block:: bash vagrant box add precise64 http://files.vagrantup.com/precise64.box .. note:: This box is added at the global Vagrant level. You only need to do it once as each VM will use this same file. Modify the Vagrantfile ---------------------- Modify ./minion/Vagrantfile to use th precise64 box. Change the ``config.vm.box`` line to: .. code-block:: yaml config.vm.box = "precise64" Uncomment the line creating a host-only IP. This is the ip of your minion (you can change it to something else if that IP is already in use): .. code-block:: yaml config.vm.network :private_network, ip: "192.168.33.10" At this point you should have a VM that can run, although there won't be much in it. Let's check that. 
Checking The VM --------------- From the $home/minion folder type: .. code-block:: bash vagrant up A log showing the VM booting should be present. Once it's done you'll be back to the terminal: .. code-block:: bash ping 192.168.33.10 The VM should respond to your ping request. Now log into the VM in ssh using Vagrant again: .. code-block:: bash vagrant ssh You should see the shell prompt change to something similar to ``vagrant@precise64:~$`` meaning you're inside the VM. From there, enter the following: .. code-block:: bash ping 10.0.2.2 .. note:: That ip is the ip of your VM host (the macOS host). The number is a VirtualBox default and is displayed in the log after the Vagrant ssh command. We'll use that IP to tell the minion where the Salt master is. Once you're done, end the ssh session by typing ``exit``. It's now time to connect the VM to the salt master Step 3 - Connecting Master and Minion ===================================== Creating The Minion Configuration File -------------------------------------- Create the ``/etc/salt/minion`` file. In that file, put the following lines, giving the ID for this minion, and the IP of the master: .. code-block:: yaml master: 10.0.2.2 id: 'minion1' file_client: remote Minions authenticate with the master using keys. Keys are generated automatically if you don't provide one and can accept them later on. However, this requires accepting the minion key every time the minion is destroyed or created (which could be quite often). A better way is to create those keys in advance, feed them to the minion, and authorize them once. Preseed minion keys ------------------- From the minion folder on your Mac run: .. code-block:: bash sudo salt-key --gen-keys=minion1 This should create two files: minion1.pem, and minion1.pub. Since those files have been created using sudo, but will be used by vagrant, you need to change ownership: .. 
code-block:: bash sudo chown youruser:yourgroup minion1.pem sudo chown youruser:yourgroup minion1.pub Then copy the .pub file into the list of accepted minions: .. code-block:: bash sudo cp minion1.pub /etc/salt/pki/master/minions/minion1 Modify Vagrantfile to Use Salt Provisioner ------------------------------------------ Let's now modify the Vagrantfile used to provision the Salt VM. Add the following section in the Vagrantfile (note: it should be at the same indentation level as the other properties): .. code-block:: yaml # salt-vagrant config config.vm.provision :salt do |salt| salt.run_highstate = true salt.minion_config = "/etc/salt/minion" salt.minion_key = "./minion1.pem" salt.minion_pub = "./minion1.pub" end Now destroy the vm and recreate it from the /minion folder: .. code-block:: bash vagrant destroy vagrant up If everything is fine you should see the following message: .. code-block:: bash "Bootstrapping Salt... (this may take a while) Salt successfully configured and installed!" Checking Master-Minion Communication ------------------------------------ To make sure the master and minion are talking to each other, enter the following: .. code-block:: bash sudo salt '*' test.version You should see your minion answering with its salt version. It's now time to do some configuration. Step 4 - Configure Services to Install On the Minion ==================================================== In this step we'll use the Salt master to instruct our minion to install Nginx. Checking the system's original state ------------------------------------ First, make sure that an HTTP server is not installed on our minion. When opening a browser directed at ``http://192.168.33.10/`` You should get an error saying the site cannot be reached. 
Initialize the top.sls file --------------------------- System configuration is done in ``/srv/salt/top.sls`` (and subfiles/folders), and then applied by running the :py:func:`state.apply <salt.modules.state.apply_>` function to have the Salt master order its minions to update their instructions and run the associated commands. First Create an empty file on your Salt master (macOS machine): .. code-block:: bash touch /srv/salt/top.sls When the file is empty, or if no configuration is found for our minion an error is reported: .. code-block:: bash sudo salt 'minion1' state.apply This should return an error stating: **No Top file or external nodes data matches found**. Create The Nginx Configuration ------------------------------ Now is finally the time to enter the real meat of our server's configuration. For this tutorial our minion will be treated as a web server that needs to have Nginx installed. Insert the following lines into ``/srv/salt/top.sls`` (which should current be empty). .. code-block:: yaml base: 'minion1': - bin.nginx Now create ``/srv/salt/bin/nginx.sls`` containing the following: .. code-block:: yaml nginx: pkg.installed: - name: nginx service.running: - enable: True - reload: True Check Minion State ------------------ Finally, run the :py:func:`state.apply <salt.modules.state.apply_>` function again: .. code-block:: bash sudo salt 'minion1' state.apply You should see a log showing that the Nginx package has been installed and the service configured. To prove it, open your browser and navigate to http://192.168.33.10/, you should see the standard Nginx welcome page. Congratulations! Where To Go From Here ===================== A full description of configuration management within Salt (sls files among other things) is available here: https://docs.saltproject.io/en/latest/index.html#configuration-management
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/tutorials/walkthrough_macosx.rst
0.818737
0.660258
walkthrough_macosx.rst
pypi
.. _tutorial-salt-at-scale: =================== Using Salt at scale =================== The focus of this tutorial will be building a Salt infrastructure for handling large numbers of minions. This will include tuning, topology, and best practices. For how to install the Salt Master, see the `Salt install guide <https://docs.saltproject.io/salt/install-guide/en/latest/>`_. .. note:: This tutorial is intended for large installations, although these same settings won't hurt, it may not be worth the complexity to smaller installations. When used with minions, the term 'many' refers to at least a thousand and 'a few' always means 500. For simplicity reasons, this tutorial will default to the standard ports used by Salt. The Master ========== The most common problems on the Salt Master are: 1. too many minions authing at once 2. too many minions re-authing at once 3. too many minions re-connecting at once 4. too many minions returning at once 5. too few resources (CPU/HDD) The first three are all "thundering herd" problems. To mitigate these issues we must configure the minions to back-off appropriately when the Master is under heavy load. The fourth is caused by masters with little hardware resources in combination with a possible bug in ZeroMQ. At least that's what it looks like till today (`Issue 118651 <https://github.com/saltstack/salt/issues/11865>`_, `Issue 5948 <https://github.com/saltstack/salt/issues/5948>`_, `Mail thread <https://groups.google.com/forum/#!searchin/salt-users/lots$20of$20minions/salt-users/WxothArv2Do/t12MigMQDFAJ>`_) To fully understand each problem, it is important to understand, how Salt works. Very briefly, the Salt Master offers two services to the minions. - a job publisher on port 4505 - an open port 4506 to receive the minions returns All minions are always connected to the publisher on port 4505 and only connect to the open return port 4506 if necessary. On an idle Master, there will only be connections on port 4505. 
Too many minions authing ------------------------ When the Minion service is first started up, it will connect to its Master's publisher on port 4505. If too many minions are started at once, this can cause a "thundering herd". This can be avoided by not starting too many minions at once. The connection itself usually isn't the culprit, the more likely cause of master-side issues is the authentication that the Minion must do with the Master. If the Master is too heavily loaded to handle the auth request it will time it out. The Minion will then wait `acceptance_wait_time` to retry. If `acceptance_wait_time_max` is set then the Minion will increase its wait time by the `acceptance_wait_time` each subsequent retry until reaching `acceptance_wait_time_max`. Too many minions re-authing --------------------------- This is most likely to happen in the testing phase of a Salt deployment, when all Minion keys have already been accepted, but the framework is being tested and parameters are frequently changed in the Salt Master's configuration file(s). The Salt Master generates a new AES key to encrypt its publications at certain events such as a Master restart or the removal of a Minion key. If you are encountering this problem of too many minions re-authing against the Master, you will need to recalibrate your setup to reduce the rate of events like a Master restart or Minion key removal (``salt-key -d``). When the Master generates a new AES key, the minions aren't notified of this but will discover it on the next pub job they receive. When the Minion receives such a job it will then re-auth with the Master. Since Salt does minion-side filtering this means that all the minions will re-auth on the next command published on the master-- causing another "thundering herd". This can be avoided by setting the .. code-block:: yaml random_reauth_delay: 60 in the minions configuration file to a higher value and stagger the amount of re-auth attempts. 
Increasing this value will of course increase the time it takes until all minions are reachable via Salt commands. Too many minions re-connecting ------------------------------ By default the zmq socket will re-connect every 100ms which for some larger installations may be too quick. This will control how quickly the TCP session is re-established, but has no bearing on the auth load. To tune the minions sockets reconnect attempts, there are a few values in the sample configuration file (default values) .. code-block:: yaml recon_default: 1000 recon_max: 5000 recon_randomize: True - recon_default: the default value the socket should use, i.e. 1000. This value is in milliseconds. (1000ms = 1 second) - recon_max: the max value that the socket should use as a delay before trying to reconnect This value is in milliseconds. (5000ms = 5 seconds) - recon_randomize: enables randomization between recon_default and recon_max To tune this values to an existing environment, a few decision have to be made. 1. How long can one wait, before the minions should be online and reachable via Salt? 2. How many reconnects can the Master handle without a syn flood? These questions can not be answered generally. Their answers depend on the hardware and the administrators requirements. Here is an example scenario with the goal, to have all minions reconnect within a 60 second time-frame on a Salt Master service restart. .. code-block:: yaml recon_default: 1000 recon_max: 59000 recon_randomize: True Each Minion will have a randomized reconnect value between 'recon_default' and 'recon_default + recon_max', which in this example means between 1000ms and 60000ms (or between 1 and 60 seconds). The generated random-value will be doubled after each attempt to reconnect (ZeroMQ default behavior). Lets say the generated random value is 11 seconds (or 11000ms). .. 
code-block:: console reconnect 1: wait 11 seconds reconnect 2: wait 22 seconds reconnect 3: wait 33 seconds reconnect 4: wait 44 seconds reconnect 5: wait 55 seconds reconnect 6: wait time is bigger than 60 seconds (recon_default + recon_max) reconnect 7: wait 11 seconds reconnect 8: wait 22 seconds reconnect 9: wait 33 seconds reconnect x: etc. With a thousand minions this will mean .. code-block:: text 1000/60 = ~16 round about 16 connection attempts a second. These values should be altered to values that match your environment. Keep in mind though, that it may grow over time and that more minions might raise the problem again. Too many minions returning at once ---------------------------------- This can also happen during the testing phase, if all minions are addressed at once with .. code-block:: bash $ salt * disk.usage it may cause thousands of minions trying to return their data to the Salt Master open port 4506. Also causing a flood of syn-flood if the Master can't handle that many returns at once. This can be easily avoided with Salt's batch mode: .. code-block:: bash $ salt * disk.usage -b 50 This will only address 50 minions at once while looping through all addressed minions. Too few resources ================= The masters resources always have to match the environment. There is no way to give good advise without knowing the environment the Master is supposed to run in. But here are some general tuning tips for different situations: The Master is CPU bound ----------------------- Salt uses RSA-Key-Pairs on the masters and minions end. Both generate 4096 bit key-pairs on first start. While the key-size for the Master is currently not configurable, the minions keysize can be configured with different key-sizes. For example with a 2048 bit key: .. code-block:: yaml keysize: 2048 With thousands of decryptions, the amount of time that can be saved on the masters end should not be neglected. 
See here for reference: `Pull Request 9235 <https://github.com/saltstack/salt/pull/9235>`_ how much influence the key-size can have. Downsizing the Salt Master's key is not that important, because the minions do not encrypt as many messages as the Master does. In installations with large or with complex pillar files, it is possible for the master to exhibit poor performance as a result of having to render many pillar files at once. This exhibit itself in a number of ways, both as high load on the master and on minions which block on waiting for their pillar to be delivered to them. To reduce pillar rendering times, it is possible to cache pillars on the master. To do this, see the set of master configuration options which are prefixed with `pillar_cache`. If many pillars are encrypted using :mod:`gpg <salt.renderers.gpg>` renderer, it is possible to cache GPG data. To do this, see the set of master configuration options which are prefixed with `gpg_cache`. .. note:: Caching pillars or GPG data on the master may introduce security considerations. Be certain to read caveats outlined in the master configuration file to understand how pillar caching may affect a master's ability to protect sensitive data! The Master is disk IO bound --------------------------- By default, the Master saves every Minion's return for every job in its job-cache. The cache can then be used later, to lookup results for previous jobs. The default directory for this is: .. code-block:: yaml cachedir: /var/cache/salt and then in the ``/proc`` directory. Each job return for every Minion is saved in a single file. Over time this directory can grow quite large, depending on the number of published jobs. The amount of files and directories will scale with the number of jobs published and the retention time defined by .. code-block:: yaml keep_jobs: 24 .. 
code-block:: text 250 jobs/day * 2000 minions returns = 500,000 files a day Use and External Job Cache ~~~~~~~~~~~~~~~~~~~~~~~~~~ An external job cache allows for job storage to be placed on an external system, such as a database. - ext_job_cache: this will have the minions store their return data directly into a returner (not sent through the Master) - master_job_cache (New in `2014.7.0`): this will make the Master store the job data using a returner (instead of the local job cache on disk). If a master has many accepted keys, it may take a long time to publish a job because the master must first determine the matching minions and deliver that information back to the waiting client before the job can be published. To mitigate this, a key cache may be enabled. This will reduce the load on the master to a single file open instead of thousands or tens of thousands. This cache is updated by the maintanence process, however, which means that minions with keys that are accepted may not be targeted by the master for up to sixty seconds by default. To enable the master key cache, set `key_cache: 'sched'` in the master configuration file. Disable The Job Cache ~~~~~~~~~~~~~~~~~~~~~ The job cache is a central component of the Salt Master and many aspects of the Salt Master will not function correctly without a running job cache. Disabling the job cache is **STRONGLY DISCOURAGED** and should not be done unless the master is being used to execute routines that require no history or reliable feedback! The job cache can be disabled: .. code-block:: yaml job_cache: False
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/tutorials/intro_scale.rst
0.890559
0.790732
intro_scale.rst
pypi
.. _pillar-walk-through: ================== Pillar Walkthrough ================== .. note:: This walkthrough assumes that the reader has already completed the initial Salt :ref:`walkthrough <tutorial-salt-walk-through>`. Pillars are tree-like structures of data defined on the Salt Master and passed through to minions. They allow confidential, targeted data to be securely sent only to the relevant minion. .. note:: Grains and Pillar are sometimes confused, just remember that Grains are data about a minion which is stored or generated from the minion. This is why information like the OS and CPU type are found in Grains. Pillar is information about a minion or many minions stored or generated on the Salt Master. Pillar data is useful for: Highly Sensitive Data: Information transferred via pillar is guaranteed to only be presented to the minions that are targeted, making Pillar suitable for managing security information, such as cryptographic keys and passwords. Minion Configuration: Minion modules such as the execution modules, states, and returners can often be configured via data stored in pillar. Variables: Variables which need to be assigned to specific minions or groups of minions can be defined in pillar and then accessed inside sls formulas and template files. Arbitrary Data: Pillar can contain any basic data structure in dictionary format, so a key/value store can be defined making it easy to iterate over a group of values in sls formulas. Pillar is therefore one of the most important systems when using Salt. This walkthrough is designed to get a simple Pillar up and running in a few minutes and then to dive into the capabilities of Pillar and where the data is available. Setting Up Pillar ================= The pillar is already running in Salt by default. To see the minion's pillar data: .. code-block:: bash salt '*' pillar.items .. note:: Prior to version 0.16.2, this function is named ``pillar.data``. 
This function name is still supported for backwards compatibility. By default, the contents of the master configuration file are not loaded into pillar for all minions. This default is stored in the ``pillar_opts`` setting, which defaults to ``False``. The contents of the master configuration file can be made available to minion pillar files. This makes global configuration of services and systems very easy, but note that this may not be desired or appropriate if sensitive data is stored in the master's configuration file. To enable the master configuration file to be available to minion as pillar, set ``pillar_opts: True`` in the master configuration file, and then for appropriate minions also set ``pillar_opts: True`` in the minion(s) configuration file. Similar to the state tree, the pillar is comprised of sls files and has a top file. The default location for the pillar is in /srv/pillar. .. note:: The pillar location can be configured via the ``pillar_roots`` option inside the master configuration file. It must not be in a subdirectory of the state tree or file_roots. If the pillar is under file_roots, any pillar targeting can be bypassed by minions. To start setting up the pillar, the /srv/pillar directory needs to be present: .. code-block:: bash mkdir /srv/pillar Now create a simple top file, following the same format as the top file used for states: ``/srv/pillar/top.sls``: .. code-block:: yaml base: '*': - data This top file associates the data.sls file to all minions. Now the ``/srv/pillar/data.sls`` file needs to be populated: ``/srv/pillar/data.sls``: .. code-block:: yaml info: some data To ensure that the minions have the new pillar data, issue a command to them asking that they fetch their pillars from the master: .. code-block:: bash salt '*' saltutil.refresh_pillar Now that the minions have the new pillar, it can be retrieved: .. code-block:: bash salt '*' pillar.items The key ``info`` should now appear in the returned pillar data. 
More Complex Data ~~~~~~~~~~~~~~~~~ Unlike states, pillar files do not need to define :strong:`formulas`. This example sets up user data with a UID: ``/srv/pillar/users/init.sls``: .. code-block:: yaml users: thatch: 1000 shouse: 1001 utahdave: 1002 redbeard: 1003 .. note:: The same directory lookups that exist in states exist in pillar, so the file ``users/init.sls`` can be referenced with ``users`` in the :term:`top file <Top File>`. The top file will need to be updated to include this sls file: ``/srv/pillar/top.sls``: .. code-block:: yaml base: '*': - data - users Now the data will be available to the minions. To use the pillar data in a state, you can use Jinja: ``/srv/salt/users/init.sls`` .. code-block:: jinja {% for user, uid in pillar.get('users', {}).items() %} {{user}}: user.present: - uid: {{uid}} {% endfor %} This approach allows for users to be safely defined in a pillar and then the user data is applied in an sls file. Parameterizing States With Pillar ================================= Pillar data can be accessed in state files to customise behavior for each minion. All pillar (and grain) data applicable to each minion is substituted into the state files through templating before being run. Typical uses include setting directories appropriate for the minion and skipping states that don't apply. A simple example is to set up a mapping of package names in pillar for separate Linux distributions: ``/srv/pillar/pkg/init.sls``: .. code-block:: jinja pkgs: {% if grains['os_family'] == 'RedHat' %} apache: httpd vim: vim-enhanced {% elif grains['os_family'] == 'Debian' %} apache: apache2 vim: vim {% elif grains['os'] == 'Arch' %} apache: apache vim: vim {% endif %} The new ``pkg`` sls needs to be added to the top file: ``/srv/pillar/top.sls``: .. 
code-block:: yaml base: '*': - data - users - pkg Now the minions will auto map values based on respective operating systems inside of the pillar, so sls files can be safely parameterized: ``/srv/salt/apache/init.sls``: .. code-block:: jinja apache: pkg.installed: - name: {{ pillar['pkgs']['apache'] }} Or, if no pillar is available a default can be set as well: .. note:: The function ``pillar.get`` used in this example was added to Salt in version 0.14.0 ``/srv/salt/apache/init.sls``: .. code-block:: jinja apache: pkg.installed: - name: {{ salt['pillar.get']('pkgs:apache', 'httpd') }} In the above example, if the pillar value ``pillar['pkgs']['apache']`` is not set in the minion's pillar, then the default of ``httpd`` will be used. .. note:: Under the hood, pillar is just a Python dict, so Python dict methods such as ``get`` and ``items`` can be used. Pillar Makes Simple States Grow Easily ====================================== One of the design goals of pillar is to make simple sls formulas easily grow into more flexible formulas without refactoring or complicating the states. A simple formula: ``/srv/salt/edit/vim.sls``: .. code-block:: yaml vim: pkg.installed: [] /etc/vimrc: file.managed: - source: salt://edit/vimrc - mode: 644 - user: root - group: root - require: - pkg: vim Can be easily transformed into a powerful, parameterized formula: ``/srv/salt/edit/vim.sls``: .. code-block:: jinja vim: pkg.installed: - name: {{ pillar['pkgs']['vim'] }} /etc/vimrc: file.managed: - source: {{ pillar['vimrc'] }} - mode: 644 - user: root - group: root - require: - pkg: vim Where the vimrc source location can now be changed via pillar: ``/srv/pillar/edit/vim.sls``: .. code-block:: jinja {% if grains['id'].startswith('dev') %} vimrc: salt://edit/dev_vimrc {% elif grains['id'].startswith('qa') %} vimrc: salt://edit/qa_vimrc {% else %} vimrc: salt://edit/vimrc {% endif %} Ensuring that the right vimrc is sent out to the correct minions. 
The pillar top file must include a reference to the new sls pillar file: ``/srv/pillar/top.sls``: .. code-block:: yaml base: '*': - pkg - edit.vim Setting Pillar Data on the Command Line ======================================= Pillar data can be set on the command line when running :py:func:`state.apply <salt.modules.state.apply_>` like so: .. code-block:: bash salt '*' state.apply pillar='{"foo": "bar"}' salt '*' state.apply my_sls_file pillar='{"hello": "world"}' Nested pillar values can also be set via the command line: .. code-block:: bash salt '*' state.sls my_sls_file pillar='{"foo": {"bar": "baz"}}' Lists can be passed via command line pillar data as follows: .. code-block:: bash salt '*' state.sls my_sls_file pillar='{"some_list": ["foo", "bar", "baz"]}' .. note:: If a key is passed on the command line that already exists on the minion, the key that is passed in will overwrite the entire value of that key, rather than merging only the specified value set via the command line. The example below will swap the value for vim with telnet in the previously specified list, notice the nested pillar dict: .. code-block:: bash salt '*' state.apply edit.vim pillar='{"pkgs": {"vim": "telnet"}}' This will attempt to install telnet on your minions, feel free to uninstall the package or replace telnet value with anything else. .. note:: Be aware that when sending sensitive data via pillar on the command-line that the publication containing that data will be received by all minions and will not be restricted to the targeted minions. This may represent a security concern in some cases. More On Pillar ============== Pillar data is generated on the Salt master and securely distributed to minions. Salt is not restricted to the pillar sls files when defining the pillar but can retrieve data from external sources. This can be useful when information about an infrastructure is stored in a separate location. 
Reference information on pillar and the external pillar interface can be found in the Salt documentation: :ref:`Pillar <pillar>` Minion Config in Pillar ======================= Minion configuration options can be set on pillars. Any option that you want to modify, should be in the first level of the pillars, in the same way you set the options in the config file. For example, to configure the MySQL root password to be used by MySQL Salt execution module: .. code-block:: yaml mysql.pass: hardtoguesspassword This is very convenient when you need some dynamic configuration change that you want to be applied on the fly. For example, there is a chicken and the egg problem if you do this: .. code-block:: yaml mysql-admin-passwd: mysql_user.present: - name: root - password: somepasswd mydb: mysql_db.present The second state will fail, because you changed the root password and the minion didn't notice it. Setting mysql.pass in the pillar, will help to sort out the issue. But always change the root admin password in the first place. This is very helpful for any module that needs credentials to apply state changes: mysql, keystone, etc.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/tutorials/pillar.rst
0.894176
0.663492
pillar.rst
pypi
.. _starting-states: ========================= How Do I Use Salt States? ========================= Simplicity, Simplicity, Simplicity Many of the most powerful and useful engineering solutions are founded on simple principles. Salt States strive to do just that: K.I.S.S. (Keep It Stupidly Simple) The core of the Salt State system is the SLS, or **S**\ a\ **L**\ t **S**\ tate file. The SLS is a representation of the state in which a system should be in, and is set up to contain this data in a simple format. This is often called configuration management. .. note:: This is just the beginning of using states, make sure to read up on pillar :ref:`Pillar <pillar-walk-through>` next. It is All Just Data =================== Before delving into the particulars, it will help to understand that the SLS file is just a data structure under the hood. While understanding that the SLS is just a data structure isn't critical for understanding and making use of Salt States, it should help bolster knowledge of where the real power is. SLS files are therefore, in reality, just dictionaries, lists, strings, and numbers. By using this approach Salt can be much more flexible. As one writes more state files, it becomes clearer exactly what is being written. The result is a system that is easy to understand, yet grows with the needs of the admin or developer. The Top File ============ The example SLS files in the below sections can be assigned to hosts using a file called :strong:`top.sls`. This file is described in-depth :ref:`here <states-top>`. Default Data - YAML =================== By default Salt represents the SLS data in what is one of the simplest serialization formats available - `YAML`_. A typical SLS file will often look like this in YAML: .. note:: These demos use some generic service and package names, different distributions often use different names for packages and services. For instance `apache` should be replaced with `httpd` on a Red Hat system. 
Salt uses the name of the init script, systemd name, upstart name etc. based on what the underlying service management for the platform uses. To get a list of the available service names on a platform execute the service.get_all salt function. Information on how to make states work with multiple distributions is later in the tutorial. .. code-block:: yaml apache: pkg.installed: [] service.running: - require: - pkg: apache This SLS data will ensure that the package named apache is installed, and that the apache service is running. The components can be explained in a simple way. The first line is the ID for a set of data, and it is called the ID Declaration. This ID sets the name of the thing that needs to be manipulated. The second and third lines contain the state module function to be run, in the format ``<state_module>.<function>``. The ``pkg.installed`` state module function ensures that a software package is installed via the system's native package manager. The ``service.running`` state module function ensures that a given system daemon is running. Finally, on line four, is the word ``require``. This is called a Requisite Statement, and it makes sure that the Apache service is only started after a successful installation of the apache package. .. _`YAML`: https://yaml.org/spec/1.1/ Adding Configs and Users ======================== When setting up a service like an Apache web server, many more components may need to be added. The Apache configuration file will most likely be managed, and a user and group may need to be set up. .. 
code-block:: yaml apache: pkg.installed: [] service.running: - watch: - pkg: apache - file: /etc/httpd/conf/httpd.conf - user: apache user.present: - uid: 87 - gid: 87 - home: /var/www/html - shell: /bin/nologin - require: - group: apache group.present: - gid: 87 - require: - pkg: apache /etc/httpd/conf/httpd.conf: file.managed: - source: salt://apache/httpd.conf - user: root - group: root - mode: 644 This SLS data greatly extends the first example, and includes a config file, a user, a group and new requisite statement: ``watch``. Adding more states is easy, since the new user and group states are under the Apache ID, the user and group will be the Apache user and group. The ``require`` statements will make sure that the user will only be made after the group, and that the group will be made only after the Apache package is installed. Next, the ``require`` statement under service was changed to watch, and is now watching 3 states instead of just one. The watch statement does the same thing as require, making sure that the other states run before running the state with a watch, but it adds an extra component. The ``watch`` statement will run the state's watcher function for any changes to the watched states. So if the package was updated, the config file changed, or the user uid modified, then the service state's watcher will be run. The service state's watcher just restarts the service, so in this case, a change in the config file will also trigger a restart of the respective service. Moving Beyond a Single SLS ========================== When setting up Salt States in a scalable manner, more than one SLS will need to be used. The above examples were in a single SLS file, but two or more SLS files can be combined to build out a State Tree. The above example also references a file with a strange source - ``salt://apache/httpd.conf``. That file will need to be available as well. 
The SLS files are laid out in a directory structure on the Salt master; an SLS is just a file and files to download are just files. The Apache example would be laid out in the root of the Salt file server like this: .. code-block:: text apache/init.sls apache/httpd.conf So the httpd.conf is just a file in the apache directory, and is referenced directly. .. include:: ../../_incl/sls_filename_cant_contain_period.rst But when using more than one single SLS file, more components can be added to the toolkit. Consider this SSH example: ``ssh/init.sls:`` .. code-block:: yaml openssh-client: pkg.installed /etc/ssh/ssh_config: file.managed: - user: root - group: root - mode: 644 - source: salt://ssh/ssh_config - require: - pkg: openssh-client ``ssh/server.sls:`` .. code-block:: yaml include: - ssh openssh-server: pkg.installed sshd: service.running: - require: - pkg: openssh-client - pkg: openssh-server - file: /etc/ssh/banner - file: /etc/ssh/sshd_config /etc/ssh/sshd_config: file.managed: - user: root - group: root - mode: 644 - source: salt://ssh/sshd_config - require: - pkg: openssh-server /etc/ssh/banner: file: - managed - user: root - group: root - mode: 644 - source: salt://ssh/banner - require: - pkg: openssh-server .. note:: Notice that we use two similar ways of denoting that a file is managed by Salt. In the `/etc/ssh/sshd_config` state section above, we use the `file.managed` state declaration whereas with the `/etc/ssh/banner` state section, we use the `file` state declaration and add a `managed` attribute to that state declaration. Both ways produce an identical result; the first way -- using `file.managed` -- is merely a shortcut. Now our State Tree looks like this: .. code-block:: text apache/init.sls apache/httpd.conf ssh/init.sls ssh/server.sls ssh/banner ssh/ssh_config ssh/sshd_config This example now introduces the ``include`` statement. 
The include statement includes another SLS file so that components found in it can be required, watched or as will soon be demonstrated - extended. The include statement allows for states to be cross linked. When an SLS has an include statement it is literally extended to include the contents of the included SLS files. Note that some of the SLS files are called init.sls, while others are not. More info on what this means can be found in the :ref:`States Tutorial <sls-file-namespace>`. Extending Included SLS Data =========================== Sometimes SLS data needs to be extended. Perhaps the apache service needs to watch additional resources, or under certain circumstances a different file needs to be placed. In these examples, the first will add a custom banner to ssh and the second will add more watchers to apache to include mod_python. ``ssh/custom-server.sls:`` .. code-block:: yaml include: - ssh.server extend: /etc/ssh/banner: file: - source: salt://ssh/custom-banner ``python/mod_python.sls:`` .. code-block:: yaml include: - apache extend: apache: service: - watch: - pkg: mod_python mod_python: pkg.installed The ``custom-server.sls`` file uses the extend statement to overwrite where the banner is being downloaded from, and therefore changing what file is being used to configure the banner. In the new mod_python SLS the mod_python package is added, but more importantly the apache service was extended to also watch the mod_python package. .. include:: ../../_incl/extend_with_require_watch.rst Understanding the Render System =============================== Since SLS data is simply that (data), it does not need to be represented with YAML. Salt defaults to YAML because it is very straightforward and easy to learn and use. But the SLS files can be rendered from almost any imaginable medium, so long as a renderer module is provided. The default rendering system is the ``jinja|yaml`` renderer. 
The ``jinja|yaml`` renderer will first pass the template through the `Jinja2`_ templating system, and then through the YAML parser. The benefit here is that full programming constructs are available when creating SLS files. Other renderers available are ``yaml_mako`` and ``yaml_wempy`` which each use the `Mako`_ or `Wempy`_ templating system respectively rather than the jinja templating system, and more notably, the pure Python or ``py``, ``pydsl`` & ``pyobjects`` renderers. The ``py`` renderer allows for SLS files to be written in pure Python, allowing for the utmost level of flexibility and power when preparing SLS data; while the :mod:`pydsl<salt.renderers.pydsl>` renderer provides a flexible, domain-specific language for authoring SLS data in Python; and the :mod:`pyobjects<salt.renderers.pyobjects>` renderer gives you a `"Pythonic"`_ interface to building state data. .. _`Jinja2`: https://jinja.palletsprojects.com/en/2.11.x/ .. _`Mako`: https://www.makotemplates.org/ .. _`Wempy`: https://fossil.secution.com/u/gcw/wempy/doc/tip/README.wiki .. _`"Pythonic"`: https://legacy.python.org/dev/peps/pep-0008/ .. note:: The templating engines described above aren't just available in SLS files. They can also be used in :mod:`file.managed <salt.states.file.managed>` states, making file management much more dynamic and flexible. Some examples for using templates in managed files can be found in the documentation for the :mod:`file state <salt.states.file>`, as well as the :ref:`MooseFS example<jinja-example-moosefs>` below. Getting to Know the Default - jinja|yaml ---------------------------------------- The default renderer - ``jinja|yaml``, allows for use of the jinja templating system. A guide to the Jinja templating system can be found here: https://jinja.palletsprojects.com/en/2.11.x/ When working with renderers a few very useful bits of data are passed in. 
In the case of templating engine based renderers, three critical components are available, ``salt``, ``grains``, and ``pillar``. The ``salt`` object allows for any Salt function to be called from within the template, and ``grains`` allows for the Grains to be accessed from within the template. A few examples: ``apache/init.sls:`` .. code-block:: jinja apache: pkg.installed: {% if grains['os'] == 'RedHat'%} - name: httpd {% endif %} service.running: {% if grains['os'] == 'RedHat'%} - name: httpd {% endif %} - watch: - pkg: apache - file: /etc/httpd/conf/httpd.conf - user: apache user.present: - uid: 87 - gid: 87 - home: /var/www/html - shell: /bin/nologin - require: - group: apache group.present: - gid: 87 - require: - pkg: apache /etc/httpd/conf/httpd.conf: file.managed: - source: salt://apache/httpd.conf - user: root - group: root - mode: 644 This example is simple. If the ``os`` grain states that the operating system is Red Hat, then the name of the Apache package and service needs to be httpd. .. _jinja-example-moosefs: A more aggressive way to use Jinja can be found here, in a module to set up a MooseFS distributed filesystem chunkserver: ``moosefs/chunk.sls:`` .. 
code-block:: jinja include: - moosefs {% for mnt in salt['cmd.run']('ls /dev/data/moose*').split() %} /mnt/moose{{ mnt[-1] }}: mount.mounted: - device: {{ mnt }} - fstype: xfs - mkmnt: True file.directory: - user: mfs - group: mfs - require: - user: mfs - group: mfs {% endfor %} /etc/mfshdd.cfg: file.managed: - source: salt://moosefs/mfshdd.cfg - user: root - group: root - mode: 644 - template: jinja - require: - pkg: mfs-chunkserver /etc/mfschunkserver.cfg: file.managed: - source: salt://moosefs/mfschunkserver.cfg - user: root - group: root - mode: 644 - template: jinja - require: - pkg: mfs-chunkserver mfs-chunkserver: pkg.installed: [] mfschunkserver: service.running: - require: {% for mnt in salt['cmd.run']('ls /dev/data/moose*') %} - mount: /mnt/moose{{ mnt[-1] }} - file: /mnt/moose{{ mnt[-1] }} {% endfor %} - file: /etc/mfschunkserver.cfg - file: /etc/mfshdd.cfg - file: /var/lib/mfs This example shows much more of the available power of Jinja. Multiple for loops are used to dynamically detect available hard drives and set them up to be mounted, and the ``salt`` object is used multiple times to call shell commands to gather data. Introducing the Python, PyDSL, and the Pyobjects Renderers ---------------------------------------------------------- Sometimes the chosen default renderer might not have enough logical power to accomplish the needed task. When this happens, the Python renderer can be used. Normally a YAML renderer should be used for the majority of SLS files, but an SLS file set to use another renderer can be easily added to the tree. This example shows a very basic Python SLS file: ``python/django.sls:`` .. code-block:: python #!py def run(): """ Install the django package """ return {"include": ["python"], "django": {"pkg": ["installed"]}} This is a very simple example; the first line has an SLS shebang that tells Salt to not use the default renderer, but to use the ``py`` renderer. 
Then the run function is defined, the return value from the run function must be a Salt friendly data structure, or better known as a Salt :ref:`HighState data structure<states-highstate>`. Alternatively, using the :mod:`pydsl<salt.renderers.pydsl>` renderer, the above example can be written more succinctly as: .. code-block:: python #!pydsl include("python", delayed=True) state("django").pkg.installed() The :mod:`pyobjects<salt.renderers.pyobjects>` renderer provides an `"Pythonic"`_ object based approach for building the state data. The above example could be written as: .. code-block:: python #!pyobjects include("python") Pkg.installed("django") These Python examples would look like this if they were written in YAML: .. code-block:: yaml include: - python django: pkg.installed This example clearly illustrates that; one, using the YAML renderer by default is a wise decision and two, unbridled power can be obtained where needed by using a pure Python SLS. Running and Debugging Salt States --------------------------------- Once the rules in an SLS are ready, they should be tested to ensure they work properly. To invoke these rules, simply execute ``salt '*' state.apply`` on the command line. If you get back only hostnames with a ``:`` after, but no return, chances are there is a problem with one or more of the sls files. On the minion, use the ``salt-call`` command to examine the output for errors: .. code-block:: bash salt-call state.apply -l debug This should help troubleshoot the issue. The minion can also be started in the foreground in debug mode by running ``salt-minion -l debug``. Next Reading ============ With an understanding of states, the next recommendation is to become familiar with Salt's pillar interface: :ref:`Pillar Walkthrough <pillar-walk-through>`
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/tutorials/starting_states.rst
0.880065
0.752468
starting_states.rst
pypi
.. _tutorial-multi-master: ===================== Multi Master Tutorial ===================== As of Salt 0.16.0, the ability to connect minions to multiple masters has been made available. The multi-master system allows for redundancy of Salt masters and facilitates multiple points of communication out to minions. When using a multi-master setup, all masters are running hot, and any active master can be used to send commands out to the minions. .. note:: If you need failover capabilities with multiple masters, there is also a MultiMaster-PKI setup available, that uses a different topology `MultiMaster-PKI with Failover Tutorial <https://docs.saltproject.io/en/latest/topics/tutorials/multimaster_pki.html>`_ In 0.16.0, the masters do not share any information, keys need to be accepted on both masters, and shared files need to be shared manually or use tools like the git fileserver backend to ensure that the :conf_master:`file_roots` are kept consistent. Beginning with Salt 2016.11.0, the :ref:`Pluggable Minion Data Cache <pluggable-data-cache>` was introduced. The minion data cache contains the Salt Mine data, minion grains, and minion pillar information cached on the Salt Master. By default, Salt uses the ``localfs`` cache module, but other external data stores can be used instead. Using a pluggable minion cache modules allows for the data stored on a Salt Master about Salt Minions to be replicated on other Salt Masters the Minion is connected to. Please see the :ref:`Minion Data Cache <cache>` documentation for more information and configuration examples. Summary of Steps ---------------- 1. Create a redundant master server 2. Copy primary master key to redundant master 3. Start redundant master 4. Configure minions to connect to redundant master 5. Restart minions 6. Accept keys on redundant master Prepping a Redundant Master --------------------------- The first task is to prepare the redundant master. If the redundant master is already running, stop it. 
There is only one requirement when preparing a redundant master, which is that masters share the same private key. When the first master was created, the master's identifying key pair was generated and placed in the master's ``pki_dir``. The default location of the master's key pair is ``/etc/salt/pki/master/``. Take the private key, ``master.pem``, and copy it to the same location on the redundant master. Do the same for the master's public key, ``master.pub``. Assuming that no minions have yet been connected to the new redundant master, it is safe to delete any existing key in this location and replace it. .. note:: There is no logical limit to the number of redundant masters that can be used. Once the new key is in place, the redundant master can be safely started. Configure Minions ----------------- Since minions need to be master-aware, the new master needs to be added to the minion configurations. Simply update the minion configurations to list all connected masters: .. code-block:: yaml master: - saltmaster1.example.com - saltmaster2.example.com Now the minion can be safely restarted. .. note:: If the ipc_mode for the minion is set to TCP (default in Windows), then each minion in the multi-minion setup (one per master) needs its own tcp_pub_port and tcp_pull_port. If these settings are left as the default 4510/4511, each minion object will receive a port 2 higher than the previous. Thus the first minion will get 4510/4511, the second will get 4512/4513, and so on. If these port decisions are unacceptable, you must configure tcp_pub_port and tcp_pull_port with lists of ports for each master. The length of these lists should match the number of masters, and there should not be overlap in the lists. Now the minions will check into the original master and also check into the new redundant master. Both masters are first-class and have rights to the minions. .. note:: Minions can automatically detect failed masters and attempt to reconnect to them quickly. 
To enable this functionality, set `master_alive_interval` in the minion config and specify a number of seconds to poll the masters for connection status. If this option is not set, minions will still reconnect to failed masters but the first command sent after a master comes back up may be lost while the minion authenticates. Sharing Files Between Masters ----------------------------- Salt does not automatically share files between multiple masters. A number of files should be shared or sharing of these files should be strongly considered. Minion Keys ``````````` Minion keys can be accepted the normal way using :strong:`salt-key` on both masters. Keys accepted, deleted, or rejected on one master will NOT be automatically managed on redundant masters; this needs to be taken care of by running salt-key on both masters or sharing the ``/etc/salt/pki/master/{minions,minions_pre,minions_rejected}`` directories between masters. .. note:: While sharing the :strong:`/etc/salt/pki/master` directory will work, it is strongly discouraged, since allowing access to the :strong:`master.pem` key outside of Salt creates a *SERIOUS* security risk. File_Roots `````````` The :conf_master:`file_roots` contents should be kept consistent between masters. Otherwise state runs will not always be consistent on minions since instructions managed by one master will not agree with other masters. The recommended way to sync these is to use a fileserver backend like gitfs or to keep these files on shared storage. .. important:: If using gitfs/git_pillar with the cachedir shared between masters using `GlusterFS`_, nfs, or another network filesystem, and the masters are running Salt 2015.5.9 or later, it is strongly recommended not to turn off :conf_master:`gitfs_global_lock`/:conf_master:`git_pillar_global_lock` as doing so will cause lock files to be removed if they were created by a different master. .. 
_GlusterFS: http://www.gluster.org/ Pillar_Roots ```````````` Pillar roots should be given the same considerations as :conf_master:`file_roots`. Master Configurations ````````````````````` While reasons may exist to maintain separate master configurations, it is wise to remember that each master maintains independent control over minions. Therefore, access controls should be in sync between masters unless a valid reason otherwise exists to keep them inconsistent. These access control options include but are not limited to: - external_auth - publisher_acl - peer - peer_run
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/tutorials/multimaster.rst
0.72526
0.906073
multimaster.rst
pypi
.. _tutorial-multi-master-pki: ======================================= Multi-Master-PKI Tutorial With Failover ======================================= This tutorial will explain how to run a salt-environment where a single minion can have multiple masters and fail-over between them if its current master fails. The individual steps are - setup the master(s) to sign its auth-replies - setup minion(s) to verify master-public-keys - enable multiple masters on minion(s) - enable master-check on minion(s) Please note that it is advised to have good knowledge of the salt- authentication and communication-process to understand this tutorial. All of the settings described here, go on top of the default authentication/communication process. Motivation ========== The default behaviour of a salt-minion is to connect to a master and accept the master's public key. With each publication, the master sends his public-key for the minion to check and if this public-key ever changes, the minion complains and exits. Practically this means that there can only be a single master at any given time. Would it not be much nicer if the minion could have any number of masters (1:n) and jump to the next master if its current master died because of a network or hardware failure? .. note:: There is also a MultiMaster-Tutorial with a different approach and topology than this one, that might also suit your needs or might even be better suited `Multi-Master Tutorial <https://docs.saltproject.io/en/latest/topics/tutorials/multimaster.html>`_ It is also desirable to add some sort of authenticity-check to the very first public key a minion receives from a master. Currently a minion takes the first master's public key for granted. The Goal ======== Setup the master to sign the public key it sends to the minions and enable the minions to verify this signature for authenticity. 
Prepping the master to sign its public key ========================================== For signing to work, both master and minion must have the signing and/or verification settings enabled. If the master signs the public key but the minion does not verify it, the minion will complain and exit. The same happens, when the master does not sign but the minion tries to verify. The easiest way to have the master sign its public key is to set .. code-block:: yaml master_sign_pubkey: True After restarting the salt-master service, the master will automatically generate a new key-pair .. code-block:: yaml master_sign.pem master_sign.pub A custom name can be set for the signing key-pair by setting .. code-block:: yaml master_sign_key_name: <name_without_suffix> The master will then generate that key-pair upon restart and use it for creating the public keys signature attached to the auth-reply. The computation is done for every auth-request of a minion. If many minions auth very often, it is advised to use :conf_master:`master_pubkey_signature` and :conf_master:`master_use_pubkey_signature` settings described below. If multiple masters are in use and should sign their auth-replies, the signing key-pair master_sign.* has to be copied to each master. Otherwise a minion will fail to verify the masters public key when connecting to a different master than it did initially. That is because the public keys signature was created with a different signing key-pair. Prepping the minion to verify received public keys ================================================== The minion must have the public key (and only that one!) available to be able to verify a signature it receives. That public key (defaults to master_sign.pub) must be copied from the master to the minions pki-directory. .. code-block:: bash /etc/salt/pki/minion/master_sign.pub .. important:: DO NOT COPY THE master_sign.pem FILE. IT MUST STAY ON THE MASTER AND ONLY THERE! 
When that is done, enable the signature checking in the minions configuration .. code-block:: yaml verify_master_pubkey_sign: True and restart the minion. For the first try, the minion should be run in manual debug mode. .. code-block:: bash salt-minion -l debug Upon connecting to the master, the following lines should appear on the output: .. code-block:: text [DEBUG ] Attempting to authenticate with the Salt Master at 172.16.0.10 [DEBUG ] Loaded minion key: /etc/salt/pki/minion/minion.pem [DEBUG ] salt.crypt.verify_signature: Loading public key [DEBUG ] salt.crypt.verify_signature: Verifying signature [DEBUG ] Successfully verified signature of master public key with verification public key master_sign.pub [INFO ] Received signed and verified master pubkey from master 172.16.0.10 [DEBUG ] Decrypting the current master AES key If the signature verification fails, something went wrong and it will look like this .. code-block:: text [DEBUG ] Attempting to authenticate with the Salt Master at 172.16.0.10 [DEBUG ] Loaded minion key: /etc/salt/pki/minion/minion.pem [DEBUG ] salt.crypt.verify_signature: Loading public key [DEBUG ] salt.crypt.verify_signature: Verifying signature [DEBUG ] Failed to verify signature of public key [CRITICAL] The Salt Master server's public key did not authenticate! In a case like this, it should be checked, that the verification pubkey (master_sign.pub) on the minion is the same as the one on the master. Once the verification is successful, the minion can be started in daemon mode again. For the paranoid among us, its also possible to verify the publication whenever it is received from the master. That is, for every single auth-attempt which can be quite frequent. For example just the start of the minion will force the signature to be checked 6 times for various things like auth, mine, :ref:`highstate <running-highstate>`, etc. If that is desired, enable the setting .. 
code-block:: yaml always_verify_signature: True Multiple Masters For A Minion ============================= Configuring multiple masters on a minion is done by specifying two settings: - a list of masters addresses - what type of master is defined .. code-block:: yaml master: - 172.16.0.10 - 172.16.0.11 - 172.16.0.12 .. code-block:: yaml master_type: failover This tells the minion that all the master above are available for it to connect to. When started with this configuration, it will try the master in the order they are defined. To randomize that order, set .. code-block:: yaml master_shuffle: True The master-list will then be shuffled before the first connection attempt. The first master that accepts the minion, is used by the minion. If the master does not yet know the minion, that counts as accepted and the minion stays on that master. For the minion to be able to detect if its still connected to its current master enable the check for it .. code-block:: yaml master_alive_interval: <seconds> If the loss of the connection is detected, the minion will temporarily remove the failed master from the list and try one of the other masters defined (again shuffled if that is enabled). Testing the setup ================= At least two running masters are needed to test the failover setup. Both masters should be running and the minion should be running on the command line in debug mode .. code-block:: bash salt-minion -l debug The minion will connect to the first master from its master list .. 
code-block:: bash [DEBUG ] Attempting to authenticate with the Salt Master at 172.16.0.10 [DEBUG ] Loaded minion key: /etc/salt/pki/minion/minion.pem [DEBUG ] salt.crypt.verify_signature: Loading public key [DEBUG ] salt.crypt.verify_signature: Verifying signature [DEBUG ] Successfully verified signature of master public key with verification public key master_sign.pub [INFO ] Received signed and verified master pubkey from master 172.16.0.10 [DEBUG ] Decrypting the current master AES key A test.version on the master the minion is currently connected to should be run to test connectivity. If successful, that master should be turned off. A firewall-rule denying the minions packets will also do the trick. Depending on the configured conf_minion:`master_alive_interval`, the minion will notice the loss of the connection and log it to its logfile. .. code-block:: bash [INFO ] Connection to master 172.16.0.10 lost [INFO ] Trying to tune in to next master from master-list The minion will then remove the current master from the list and try connecting to the next master .. code-block:: bash [INFO ] Removing possibly failed master 172.16.0.10 from list of masters [WARNING ] Master ip address changed from 172.16.0.10 to 172.16.0.11 [DEBUG ] Attempting to authenticate with the Salt Master at 172.16.0.11 If everything is configured correctly, the new masters public key will be verified successfully .. code-block:: bash [DEBUG ] Loaded minion key: /etc/salt/pki/minion/minion.pem [DEBUG ] salt.crypt.verify_signature: Loading public key [DEBUG ] salt.crypt.verify_signature: Verifying signature [DEBUG ] Successfully verified signature of master public key with verification public key master_sign.pub the authentication with the new master is successful .. 
code-block:: bash [INFO ] Received signed and verified master pubkey from master 172.16.0.11 [DEBUG ] Decrypting the current master AES key [DEBUG ] Loaded minion key: /etc/salt/pki/minion/minion.pem [INFO ] Authentication with master successful! and the minion can be pinged again from its new master. Performance Tuning ================== With the setup described above, the master computes a signature for every auth-request of a minion. With many minions and many auth-requests, that can chew up quite a bit of CPU-Power. To avoid that, the master can use a pre-created signature of its public-key. The signature is saved as a base64 encoded string which the master reads once when starting and attaches only that string to auth-replies. Enabling this also gives paranoid users the possibility, to have the signing key-pair on a different system than the actual salt-master and create the public keys signature there. Probably on a system with more restrictive firewall rules, without internet access, less users, etc. That signature can be created with .. code-block:: bash salt-key --gen-signature This will create a default signature file in the master pki-directory .. code-block:: bash /etc/salt/pki/master/master_pubkey_signature It is a simple text-file with the binary-signature converted to base64. If no signing-pair is present yet, this will auto-create the signing pair and the signature file in one call .. code-block:: bash salt-key --gen-signature --auto-create Telling the master to use the pre-created signature is done with .. code-block:: yaml master_use_pubkey_signature: True That requires the file 'master_pubkey_signature' to be present in the masters pki-directory with the correct signature. If the signature file is named differently, its name can be set with .. code-block:: yaml master_pubkey_signature: <filename> With many masters and many public-keys (default and signing), it is advised to use the salt-masters hostname for the signature-files name. 
Signatures can be easily confused because they do not provide any information about the key the signature was created from. Verifying that everything works is done the same way as above. How the signing and verification works ====================================== The default key-pair of the salt-master is .. code-block:: yaml /etc/salt/pki/master/master.pem /etc/salt/pki/master/master.pub To be able to create a signature of a message (in this case a public-key), another key-pair has to be added to the setup. Its default name is: .. code-block:: yaml master_sign.pem master_sign.pub The combination of the master.* and master_sign.* key-pairs give the possibility of generating signatures. The signature of a given message is unique and can be verified, if the public-key of the signing-key-pair is available to the recipient (the minion). The signature of the masters public-key in master.pub is computed with .. code-block:: yaml master_sign.pem master.pub M2Crypto.EVP.sign_update() This results in a binary signature which is converted to base64 and attached to the auth-reply sent to the minion. With the signing-pairs public-key available to the minion, the attached signature can be verified with .. code-block:: yaml master_sign.pub master.pub M2Crypto's EVP.verify_update(). When running multiple masters, either the signing key-pair has to be present on all of them, or the master_pubkey_signature has to be pre-computed for each master individually (because they all have different public-keys). DO NOT PUT THE SAME master.pub ON ALL MASTERS FOR EASE OF USE.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/tutorials/multimaster_pki.rst
0.740737
0.779154
multimaster_pki.rst
pypi
.. _tutorial-states-part-2: ========================================================= States tutorial, part 2 - More Complex States, Requisites ========================================================= .. note:: This tutorial builds on topics covered in :ref:`part 1 <states-tutorial>`. It is recommended that you begin there. In the :ref:`last part <states-tutorial>` of the Salt States tutorial we covered the basics of installing a package. We will now modify our ``webserver.sls`` file to have requirements, and use even more Salt States. Call multiple States ==================== You can specify multiple :ref:`state-declaration` under an :ref:`id-declaration`. For example, a quick modification to our ``webserver.sls`` to also start Apache if it is not running: .. code-block:: yaml :linenos: :emphasize-lines: 4,5 apache: pkg.installed: [] service.running: - require: - pkg: apache Try stopping Apache before running :py:func:`state.apply <salt.modules.state.apply_>` once again and observe the output. .. note:: For those running RedhatOS derivatives (Centos, AWS), you will want to specify the service name to be httpd. More on state service here, :mod:`service state <salt.states.service>`. With the example above, just add "- name: httpd" above the require line and with the same spacing. Require other states ==================== We now have a working installation of Apache so let's add an HTML file to customize our website. It isn't exactly useful to have a website without a webserver so we don't want Salt to install our HTML file until Apache is installed and running. Include the following at the bottom of your ``webserver/init.sls`` file: .. 
code-block:: yaml :linenos: :emphasize-lines: 7,11 apache: pkg.installed: [] service.running: - require: - pkg: apache /var/www/index.html: # ID declaration file: # state declaration - managed # function - source: salt://webserver/index.html # function arg - require: # requisite declaration - pkg: apache # requisite reference **line 7** is the :ref:`id-declaration`. In this example it is the location we want to install our custom HTML file. (**Note:** the default location that Apache serves may differ from the above on your OS or distro. ``/srv/www`` could also be a likely place to look.) **Line 8** the :ref:`state-declaration`. This example uses the Salt :mod:`file state <salt.states.file>`. **Line 9** is the :ref:`function-declaration`. The :func:`managed function <salt.states.file.managed>` will download a file from the master and install it in the location specified. **Line 10** is a :ref:`function-arg-declaration` which, in this example, passes the ``source`` argument to the :func:`managed function <salt.states.file.managed>`. **Line 11** is a :ref:`requisite-declaration`. **Line 12** is a :ref:`requisite-reference` which refers to a state and an ID. In this example, it is referring to the ``ID declaration`` from our example in :ref:`part 1 <states-tutorial>`. This declaration tells Salt not to install the HTML file until Apache is installed. Next, create the ``index.html`` file and save it in the ``webserver`` directory: .. code-block:: html <!DOCTYPE html> <html> <head><title>Salt rocks</title></head> <body> <h1>This file brought to you by Salt</h1> </body> </html> Last, call :func:`state.apply <salt.modules.state.apply_>` again and the minion will fetch and execute the :ref:`highstate <running-highstate>` as well as our HTML file from the master using Salt's File Server: .. code-block:: bash salt '*' state.apply Verify that Apache is now serving your custom HTML. .. admonition:: ``require`` vs. 
``watch`` There are two :ref:`requisite-declaration`, “require”, and “watch”. Not every state supports “watch”. The :mod:`service state <salt.states.service>` does support “watch” and will restart a service based on the watch condition. For example, if you use Salt to install an Apache virtual host configuration file and want to restart Apache whenever that file is changed you could modify our Apache example from earlier as follows: .. code-block:: yaml :emphasize-lines: 1,2,3,10,11 /etc/httpd/extra/httpd-vhosts.conf: file.managed: - source: salt://webserver/httpd-vhosts.conf apache: pkg.installed: [] service.running: - watch: - file: /etc/httpd/extra/httpd-vhosts.conf - require: - pkg: apache If the pkg and service names differ on your OS or distro of choice you can specify each one separately using a :ref:`name-declaration` which explained in :ref:`Part 3 <tutorial-states-part-3>`. Next steps ========== In :ref:`part 3 <tutorial-states-part-3>` we will discuss how to use includes, extends, and templating to make a more complete State Tree configuration.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/tutorials/states_pt2.rst
0.897412
0.729158
states_pt2.rst
pypi
.. _tutorial-http: HTTP Modules ============ This tutorial demonstrates using the various HTTP modules available in Salt. These modules wrap the Python ``tornado``, ``urllib2``, and ``requests`` libraries, extending them in a manner that is more consistent with Salt workflows. The ``salt.utils.http`` Library ------------------------------- This library forms the core of the HTTP modules. Since it is designed to be used from the minion as an execution module, in addition to the master as a runner, it was abstracted into this multi-use library. This library can also be imported by 3rd-party programs wishing to take advantage of its extended functionality. Core functionality of the execution, state, and runner modules is derived from this library, so common usages between them are described here. Documentation specific to each module is described below. This library can be imported with: .. code-block:: python import salt.utils.http Configuring Libraries ~~~~~~~~~~~~~~~~~~~~~ This library can make use of either ``tornado``, which is required by Salt, ``urllib2``, which ships with Python, or ``requests``, which can be installed separately. By default, ``tornado`` will be used. In order to switch to ``urllib2``, set the following variable: .. code-block:: yaml backend: urllib2 In order to switch to ``requests``, set the following variable: .. code-block:: yaml backend: requests This can be set in the master or minion configuration file, or passed as an option directly to any ``http.query()`` functions. ``salt.utils.http.query()`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ This function forms a basic query, but with some add-ons not present in the ``tornado``, ``urllib2``, and ``requests`` libraries. Not all functionality currently available in these libraries has been added, but can be in future iterations. HTTPS Request Methods ````````````````````` A basic query can be performed by calling this function with no more than a single URL: .. 
code-block:: python salt.utils.http.query("http://example.com") By default the query will be performed with a ``GET`` method. The method can be overridden with the ``method`` argument: .. code-block:: python salt.utils.http.query("http://example.com/delete/url", "DELETE") When using the ``POST`` method (and others, such as ``PUT``), extra data is usually sent as well. This data can be sent directly (would be URL encoded when necessary), or in whatever format is required by the remote server (XML, JSON, plain text, etc). .. code-block:: python salt.utils.http.query( "http://example.com/post/url", method="POST", data=json.dumps(mydict) ) Data Formatting and Templating `````````````````````````````` Bear in mind that the data must be sent pre-formatted; this function will not format it for you. However, a templated file stored on the local system may be passed through, along with variables to populate it with. To pass through only the file (untemplated): .. code-block:: python salt.utils.http.query( "http://example.com/post/url", method="POST", data_file="/srv/salt/somefile.xml" ) To pass through a file that contains jinja + yaml templating (the default): .. code-block:: python salt.utils.http.query( "http://example.com/post/url", method="POST", data_file="/srv/salt/somefile.jinja", data_render=True, template_dict={"key1": "value1", "key2": "value2"}, ) To pass through a file that contains mako templating: .. code-block:: python salt.utils.http.query( "http://example.com/post/url", method="POST", data_file="/srv/salt/somefile.mako", data_render=True, data_renderer="mako", template_dict={"key1": "value1", "key2": "value2"}, ) Because this function uses Salt's own rendering system, any Salt renderer can be used. Because Salt's renderer requires ``__opts__`` to be set, an ``opts`` dictionary should be passed in. If it is not, then the default ``__opts__`` values for the node type (master or minion) will be used. 
Because this library is intended primarily for use by minions, the default node type is ``minion``. However, this can be changed to ``master`` if necessary. .. code-block:: python salt.utils.http.query( "http://example.com/post/url", method="POST", data_file="/srv/salt/somefile.jinja", data_render=True, template_dict={"key1": "value1", "key2": "value2"}, opts=__opts__, ) salt.utils.http.query( "http://example.com/post/url", method="POST", data_file="/srv/salt/somefile.jinja", data_render=True, template_dict={"key1": "value1", "key2": "value2"}, node="master", ) Headers ``````` Headers may also be passed through, either as a ``header_list``, a ``header_dict``, or as a ``header_file``. As with the ``data_file``, the ``header_file`` may also be templated. Take note that because HTTP headers are normally syntactically-correct YAML, they will automatically be imported as a Python dict. .. code-block:: python salt.utils.http.query( "http://example.com/delete/url", method="POST", header_file="/srv/salt/headers.jinja", header_render=True, header_renderer="jinja", template_dict={"key1": "value1", "key2": "value2"}, ) Because much of the data that would be templated between headers and data may be the same, the ``template_dict`` is the same for both. Correcting possible variable name collisions is up to the user. Authentication `````````````` The ``query()`` function supports basic HTTP authentication. A username and password may be passed in as ``username`` and ``password``, respectively. .. code-block:: python salt.utils.http.query("http://example.com", username="larry", password="5700g3543v4r") Cookies and Sessions ```````````````````` Cookies are also supported, using Python's built-in ``cookielib``. However, they are turned off by default. To turn cookies on, set ``cookies`` to True. .. 
code-block:: python salt.utils.http.query("http://example.com", cookies=True) By default cookies are stored in Salt's cache directory, normally ``/var/cache/salt``, as a file called ``cookies.txt``. However, this location may be changed with the ``cookie_jar`` argument: .. code-block:: python salt.utils.http.query( "http://example.com", cookies=True, cookie_jar="/path/to/cookie_jar.txt" ) By default, the format of the cookie jar is LWP (aka, lib-www-perl). This default was chosen because it is a human-readable text file. If desired, the format of the cookie jar can be set to Mozilla: .. code-block:: python salt.utils.http.query( "http://example.com", cookies=True, cookie_jar="/path/to/cookie_jar.txt", cookie_format="mozilla", ) Because Salt commands are normally one-off commands that are piped together, this library cannot normally behave as a normal browser, with session cookies that persist across multiple HTTP requests. However, the session can be persisted in a separate cookie jar. The default filename for this file, inside Salt's cache directory, is ``cookies.session.p``. This can also be changed. .. code-block:: python salt.utils.http.query( "http://example.com", persist_session=True, session_cookie_jar="/path/to/jar.p" ) The format of this file is msgpack, which is consistent with much of the rest of Salt's internal structure. Historically, the extension for this file is ``.p``. There are no current plans to make this configurable. Proxy ````` If the ``tornado`` backend is used (``tornado`` is the default), proxy information configured in ``proxy_host``, ``proxy_port``, ``proxy_username``, ``proxy_password`` and ``no_proxy`` from the ``__opts__`` dictionary will be used. Normally these are set in the minion configuration file. .. code-block:: yaml proxy_host: proxy.my-domain proxy_port: 31337 proxy_username: charon proxy_password: obolus no_proxy: ['127.0.0.1', 'localhost'] .. 
code-block:: python salt.utils.http.query("http://example.com", opts=__opts__, backend="tornado") Return Data ~~~~~~~~~~~ .. note:: Return data encoding If ``decode`` is set to ``True``, ``query()`` will attempt to decode the return data. ``decode_type`` defaults to ``auto``. Set it to a specific encoding, ``xml``, for example, to override autodetection. Because Salt's http library was designed to be used with REST interfaces, ``query()`` will attempt to decode the data received from the remote server when ``decode`` is set to ``True``. First it will check the ``Content-type`` header to try and find references to XML. If it does not find any, it will look for references to JSON. If it does not find any, it will fall back to plain text, which will not be decoded. JSON data is translated into a dict using Python's built-in ``json`` library. XML is translated using ``salt.utils.xml_util``, which will use Python's built-in XML libraries to attempt to convert the XML into a dict. In order to force either JSON or XML decoding, the ``decode_type`` may be set: .. code-block:: python salt.utils.http.query("http://example.com", decode_type="xml") Once translated, the return dict from ``query()`` will include a dict called ``dict``. If the data is not to be translated using one of these methods, decoding may be turned off. .. code-block:: python salt.utils.http.query("http://example.com", decode=False) If decoding is turned on, and references to JSON or XML cannot be found, then this module will default to plain text, and return the undecoded data as ``text`` (even if text is set to ``False``; see below). The ``query()`` function can return the HTTP status code, headers, and/or text as required. However, each must individually be turned on. .. code-block:: python salt.utils.http.query("http://example.com", status=True, headers=True, text=True) The return from these will be found in the return dict as ``status``, ``headers`` and ``text``, respectively. 
Writing Return Data to Files ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is possible to write either the return data or headers to files, as soon as the response is received from the server, but specifying file locations via the ``text_out`` or ``headers_out`` arguments. ``text`` and ``headers`` do not need to be returned to the user in order to do this. .. code-block:: python salt.utils.http.query( "http://example.com", text=False, headers=False, text_out="/path/to/url_download.txt", headers_out="/path/to/headers_download.txt", ) SSL Verification ~~~~~~~~~~~~~~~~ By default, this function will verify SSL certificates. However, for testing or debugging purposes, SSL verification can be turned off. .. code-block:: python salt.utils.http.query("https://example.com", verify_ssl=False) CA Bundles ~~~~~~~~~~ The ``requests`` library has its own method of detecting which CA (certificate authority) bundle file to use. Usually this is implemented by the packager for the specific operating system distribution that you are using. However, ``urllib2`` requires a little more work under the hood. By default, Salt will try to auto-detect the location of this file. However, if it is not in an expected location, or a different path needs to be specified, it may be done so using the ``ca_bundle`` variable. .. code-block:: python salt.utils.http.query("https://example.com", ca_bundle="/path/to/ca_bundle.pem") Updating CA Bundles ``````````````````` The ``update_ca_bundle()`` function can be used to update the bundle file at a specified location. If the target location is not specified, then it will attempt to auto-detect the location of the bundle file. If the URL to download the bundle from does not exist, a bundle will be downloaded from the cURL website. CAUTION: The ``target`` and the ``source`` should always be specified! Failure to specify the ``target`` may result in the file being written to the wrong location on the local system. 
Failure to specify the ``source`` may cause the upstream URL to receive excess unnecessary traffic, and may cause a file to be downloaded which is hazardous or does not meet the needs of the user. .. code-block:: python salt.utils.http.update_ca_bundle( target="/path/to/ca-bundle.crt", source="https://example.com/path/to/ca-bundle.crt", opts=__opts__, ) The ``opts`` parameter should also always be specified. If it is, then the ``target`` and the ``source`` may be specified in the relevant configuration file (master or minion) as ``ca_bundle`` and ``ca_bundle_url``, respectively. .. code-block:: yaml ca_bundle: /path/to/ca-bundle.crt ca_bundle_url: https://example.com/path/to/ca-bundle.crt If Salt is unable to auto-detect the location of the CA bundle, it will raise an error. The ``update_ca_bundle()`` function can also be passed a string or a list of strings which represent files on the local system, which should be appended (in the specified order) to the end of the CA bundle file. This is useful in environments where private certs need to be made available, and are not otherwise reasonable to add to the bundle file. .. code-block:: python salt.utils.http.update_ca_bundle( opts=__opts__, merge_files=[ "/etc/ssl/private_cert_1.pem", "/etc/ssl/private_cert_2.pem", "/etc/ssl/private_cert_3.pem", ], ) Test Mode ~~~~~~~~~ This function may be run in test mode. This mode will perform all work up until the actual HTTP request. By default, instead of performing the request, an empty dict will be returned. Using this function with ``TRACE`` logging turned on will reveal the contents of the headers and POST data to be sent. Rather than returning an empty dict, an alternate ``test_url`` may be passed in. If this is detected, then test mode will replace the ``url`` with the ``test_url``, set ``test`` to ``True`` in the return data, and perform the rest of the requested operations as usual. This allows a custom, non-destructive URL to be used for testing when necessary. 
Execution Module ---------------- The ``http`` execution module is a very thin wrapper around the ``salt.utils.http`` library. The ``opts`` can be passed through as well, but if they are not specified, the minion defaults will be used as necessary. Because passing complete data structures from the command line can be tricky at best and dangerous (in terms of execution injection attacks) at worst, the ``data_file`` and ``header_file`` are likely to see more use here. All methods for the library are available in the execution module, as kwargs. .. code-block:: bash salt myminion http.query http://example.com/restapi method=POST \ username='larry' password='5700g3543v4r' headers=True text=True \ status=True decode_type=xml data_render=True \ header_file=/tmp/headers.txt data_file=/tmp/data.txt \ header_render=True cookies=True persist_session=True Runner Module ------------- Like the execution module, the ``http`` runner module is a very thin wrapper around the ``salt.utils.http`` library. The only significant difference is that because runners execute on the master instead of a minion, a target is not required, and default opts will be derived from the master config, rather than the minion config. All methods for the library are available in the runner module, as kwargs. .. code-block:: bash salt-run http.query http://example.com/restapi method=POST \ username='larry' password='5700g3543v4r' headers=True text=True \ status=True decode_type=xml data_render=True \ header_file=/tmp/headers.txt data_file=/tmp/data.txt \ header_render=True cookies=True persist_session=True State Module ------------ The state module is a wrapper around the runner module, which applies stateful logic to a query. All kwargs as listed above are specified as usual in state files, but two more kwargs are available to apply stateful logic. A required parameter is ``match``, which specifies a pattern to look for in the return text. 
By default, this will perform a string comparison of looking for the value of match in the return text. In Python terms this looks like: .. code-block:: python def myfunc(): if match in html_text: return True If more complex pattern matching is required, a regular expression can be used by specifying a ``match_type``. By default this is set to ``string``, but it can be manually set to ``pcre`` instead. Please note that despite the name, this will use Python's ``re.search()`` rather than ``re.match()``. Therefore, the following states are valid: .. code-block:: yaml http://example.com/restapi: http.query: - match: 'SUCCESS' - username: 'larry' - password: '5700g3543v4r' - data_render: True - header_file: /tmp/headers.txt - data_file: /tmp/data.txt - header_render: True - cookies: True - persist_session: True http://example.com/restapi: http.query: - match_type: pcre - match: '(?i)succe[ss|ed]' - username: 'larry' - password: '5700g3543v4r' - data_render: True - header_file: /tmp/headers.txt - data_file: /tmp/data.txt - header_render: True - cookies: True - persist_session: True In addition to, or instead of a match pattern, the status code for a URL can be checked. This is done using the ``status`` argument: .. code-block:: yaml http://example.com/: http.query: - status: 200 If both are specified, both will be checked, but if only one is ``True`` and the other is ``False``, then ``False`` will be returned. In this case, the comments in the return data will contain information for troubleshooting. Because this is a monitoring state, it will return extra data to code that expects it. This data will always include ``text`` and ``status``. Optionally, ``headers`` and ``dict`` may also be requested by setting the ``headers`` and ``decode`` arguments to True, respectively.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/tutorials/http.rst
0.882174
0.904482
http.rst
pypi
.. _states-tutorial: ===================================== States tutorial, part 1 - Basic Usage ===================================== The purpose of this tutorial is to demonstrate how quickly you can configure a system to be managed by Salt States. For detailed information about the state system please refer to the full :ref:`states reference <state-system-reference>`. This tutorial will walk you through using Salt to configure a minion to run the Apache HTTP server and to ensure the server is running. .. include:: /_incl/requisite_incl.rst Setting up the Salt State Tree ============================== States are stored in text files on the master and transferred to the minions on demand via the master's File Server. The collection of state files make up the ``State Tree``. To start using a central state system in Salt, the Salt File Server must first be set up. Edit the master config file (:conf_master:`file_roots`) and uncomment the following lines: .. code-block:: yaml file_roots: base: - /srv/salt .. note:: If you are deploying on FreeBSD via ports, the ``file_roots`` path defaults to ``/usr/local/etc/salt/states``. Restart the Salt master in order to pick up this change: .. code-block:: bash pkill salt-master salt-master -d Preparing the Top File ====================== On the master, in the directory uncommented in the previous step, (``/srv/salt`` by default), create a new file called :conf_master:`top.sls <state_top>` and add the following: .. code-block:: yaml base: '*': - webserver The :ref:`top file <states-top>` is separated into environments (discussed later). The default environment is ``base``. Under the ``base`` environment a collection of minion matches is defined; for now simply specify all hosts (``*``). .. _targeting-minions: .. admonition:: Targeting minions The expressions can use any of the targeting mechanisms used by Salt — minions can be matched by glob, PCRE regular expression, or by :ref:`grains <targeting-grains>`. For example: .. 
code-block:: yaml base: 'os:Fedora': - match: grain - webserver Create an ``sls`` file ====================== In the same directory as the :ref:`top file <states-top>`, create a file named ``webserver.sls``, containing the following: .. code-block:: yaml apache: # ID declaration pkg: # state declaration - installed # function declaration The first line, called the :ref:`id-declaration`, is an arbitrary identifier. In this case it defines the name of the package to be installed. .. note:: The package name for the Apache httpd web server may differ depending on OS or distro — for example, on Fedora it is ``httpd`` but on Debian/Ubuntu it is ``apache2``. The second line, called the :ref:`state-declaration`, defines which of the Salt States we are using. In this example, we are using the :mod:`pkg state <salt.states.pkg>` to ensure that a given package is installed. The third line, called the :ref:`function-declaration`, defines which function in the :mod:`pkg state <salt.states.pkg>` module to call. .. admonition:: Renderers States ``sls`` files can be written in many formats. Salt requires only a simple data structure and is not concerned with how that data structure is built. Templating languages and `DSLs`_ are a dime-a-dozen and everyone has a favorite. Building the expected data structure is the job of Salt :ref:`renderers` and they are dead-simple to write. In this tutorial we will be using YAML in Jinja2 templates, which is the default format. The default can be changed by editing :conf_master:`renderer` in the master configuration file. .. _`DSLs`: https://en.wikipedia.org/wiki/Domain-specific_language .. _running-highstate: Install the package =================== Next, let's run the state we created. Open a terminal on the master and run: .. code-block:: bash salt '*' state.apply Our master is instructing all targeted minions to run :func:`state.apply <salt.modules.state.apply>`. 
When this function is executed without any SLS targets, a minion will download the :ref:`top file <states-top>` and attempt to match the expressions within it. When the minion does match an expression the modules listed for it will be downloaded, compiled, and executed. .. note:: This action is referred to as a "highstate", and can be run using the :py:func:`state.highstate <salt.modules.state.highstate>` function. However, to make the usage easier to understand ("highstate" is not necessarily an intuitive name), a :py:func:`state.apply <salt.modules.state.apply_>` function was added in version 2015.5.0, which when invoked without any SLS names will trigger a highstate. :py:func:`state.highstate <salt.modules.state.highstate>` still exists and can be used, but the documentation (as can be seen above) has been updated to reference :py:func:`state.apply <salt.modules.state.apply_>`, so keep the following in mind as you read the documentation: - :py:func:`state.apply <salt.modules.state.apply_>` invoked without any SLS names will run :py:func:`state.highstate <salt.modules.state.highstate>` - :py:func:`state.apply <salt.modules.state.apply_>` invoked with SLS names will run :py:func:`state.sls <salt.modules.state.sls>` Once completed, the minion will report back with a summary of all actions taken and all changes made. .. warning:: If you have created :ref:`custom grain modules <writing-grains>`, they will not be available in the top file until after the first :ref:`highstate <running-highstate>`. To make custom grains available on a minion's first :ref:`highstate <running-highstate>`, it is recommended to use :ref:`this example <minion-start-reactor>` to ensure that the custom grains are synced when the minion starts. .. _sls-file-namespace: .. admonition:: SLS File Namespace Note that in the :ref:`example <targeting-minions>` above, the SLS file ``webserver.sls`` was referred to simply as ``webserver``. 
The namespace for SLS files when referenced in :conf_master:`top.sls <state_top>` or an :ref:`include-declaration` follows a few simple rules: 1. The ``.sls`` is discarded (i.e. ``webserver.sls`` becomes ``webserver``). 2. Subdirectories can be used for better organization. a. Each subdirectory under the configured file_roots (default: ``/srv/salt/``) is represented with a dot (following the Python import model) in Salt states and on the command line. ``webserver/dev.sls`` on the filesystem is referred to as ``webserver.dev`` in Salt b. Because slashes are represented as dots, SLS files can not contain dots in the name (other than the dot for the SLS suffix). The SLS file ``webserver_1.0.sls`` can not be matched, and ``webserver_1.0`` would match the directory/file ``webserver_1/0.sls`` 3. A file called ``init.sls`` in a subdirectory is referred to by the path of the directory. So, ``webserver/init.sls`` is referred to as ``webserver``. 4. If both ``webserver.sls`` and ``webserver/init.sls`` happen to exist, ``webserver/init.sls`` will be ignored and ``webserver.sls`` will be the file referred to as ``webserver``. .. admonition:: Troubleshooting Salt If the expected output isn't seen, the following tips can help to narrow down the problem. Turn up logging Salt can be quite chatty when you change the logging setting to ``debug``: .. code-block:: bash salt-minion -l debug Run the minion in the foreground By not starting the minion in daemon mode (:option:`-d <salt-minion -d>`) one can view any output from the minion as it works: .. code-block:: bash salt-minion Increase the default timeout value when running :command:`salt`. For example, to change the default timeout to 60 seconds: .. code-block:: bash salt -t 60 For best results, combine all three: .. code-block:: bash salt-minion -l debug # On the minion salt '*' state.apply -t 60 # On the master Next steps ========== This tutorial focused on getting a simple Salt States configuration working. 
:ref:`Part 2 <tutorial-states-part-2>` will build on this example to cover more advanced ``sls`` syntax and will explore more of the states that ship with Salt.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/tutorials/states_pt1.rst
0.920196
0.736164
states_pt1.rst
pypi
.. _troubleshooting: =============== Troubleshooting =============== The intent of the troubleshooting section is to introduce solutions to a number of common issues encountered by users and the tools that are available to aid in developing States and Salt code. Troubleshooting the Salt Master =============================== If your Salt master is having issues such as minions not returning data, slow execution times, or a variety of other issues, the following links contain details on troubleshooting the most common issues encountered: .. toctree:: :maxdepth: 2 master Troubleshooting the Salt Minion =============================== In the event that your Salt minion is having issues, a variety of solutions and suggestions are available. Please refer to the following links for more information: .. toctree:: :maxdepth: 2 minion Running in the Foreground ========================= A great deal of information is available via the debug logging system, if you are having issues with minions connecting or not starting run the minion and/or master in the foreground: .. code-block:: bash salt-master -l debug salt-minion -l debug Anyone wanting to run Salt daemons via a process supervisor such as `monit`_, `runit`_, or `supervisord`_, should omit the ``-d`` argument to the daemons and run them in the foreground. .. _`monit`: https://mmonit.com/monit/ .. _`runit`: http://smarden.org/runit/ .. _`supervisord`: http://supervisord.org/ What Ports do the Master and Minion Need Open? ============================================== No ports need to be opened up on each minion. For the master, TCP ports 4505 and 4506 need to be open. If you've put both your Salt master and minion in debug mode and don't see an acknowledgment that your minion has connected, it could very well be a firewall. You can check port connectivity from the minion with the nc command: .. 
code-block:: bash nc -v -z salt.master.ip 4505 nc -v -z salt.master.ip 4506 There is also a :ref:`firewall configuration<firewall>` document that might help as well. If you've enabled the right TCP ports on your operating system or Linux distribution's firewall and still aren't seeing connections, check that no additional access control system such as `SELinux`_ or `AppArmor`_ is blocking Salt. .. _`SELinux`: https://en.wikipedia.org/wiki/Security-Enhanced_Linux .. _`AppArmor`: https://gitlab.com/apparmor/apparmor/-/wikis/home .. _using-salt-call: Using salt-call =============== The ``salt-call`` command was originally developed for aiding in the development of new Salt modules. Since then, many applications have been developed for running any Salt module locally on a minion. These range from the original intent of salt-call, development assistance, to gathering more verbose output from calls like :mod:`state.apply <salt.modules.state.apply_>`. When initially creating your state tree, it is generally recommended to invoke :mod:`state.apply <salt.modules.state.apply_>` directly from the minion with ``salt-call``, rather than remotely from the master. This displays far more information about the execution than calling it remotely. For even more verbosity, increase the loglevel using the ``-l`` argument: .. code-block:: bash salt-call -l debug state.apply The main difference between using ``salt`` and using ``salt-call`` is that ``salt-call`` is run from the minion, and it only runs the selected function on that minion. By contrast, ``salt`` is run from the master, and requires you to specify the minions on which to run the command using salt's :ref:`targeting system <targeting>`. Too many open files =================== The salt-master needs at least 2 sockets per host that connects to it, one for the Publisher and one for response port. Thus, large installations may, upon scaling up the number of minions accessing a given master, encounter: .. 
code-block:: console 12:45:29,289 [salt.master ][INFO ] Starting Salt worker process 38 Too many open files sock != -1 (tcp_listener.cpp:335) The solution to this would be to check the number of files allowed to be opened by the user running salt-master (root by default): .. code-block:: bash [root@salt-master ~]# ulimit -n 1024 And modify that value to be at least equal to the number of minions x 2. This setting can be changed in limits.conf as the nofile value(s), and activated upon new a login of the specified user. So, an environment with 1800 minions, would need 1800 x 2 = 3600 as a minimum. Salt Master Stops Responding ============================ There are known bugs with ZeroMQ versions less than 2.1.11 which can cause the Salt master to not respond properly. If you're running a ZeroMQ version greater than or equal to 2.1.9, you can work around the bug by setting the sysctls ``net.core.rmem_max`` and ``net.core.wmem_max`` to 16777216. Next, set the third field in ``net.ipv4.tcp_rmem`` and ``net.ipv4.tcp_wmem`` to at least 16777216. You can do it manually with something like: .. code-block:: bash # echo 16777216 > /proc/sys/net/core/rmem_max # echo 16777216 > /proc/sys/net/core/wmem_max # echo "4096 87380 16777216" > /proc/sys/net/ipv4/tcp_rmem # echo "4096 87380 16777216" > /proc/sys/net/ipv4/tcp_wmem Or with the following Salt state: .. code-block:: yaml :linenos: net.core.rmem_max: sysctl: - present - value: 16777216 net.core.wmem_max: sysctl: - present - value: 16777216 net.ipv4.tcp_rmem: sysctl: - present - value: 4096 87380 16777216 net.ipv4.tcp_wmem: sysctl: - present - value: 4096 87380 16777216 Salt and SELinux ================ Currently there are no SELinux policies for Salt. For the most part Salt runs without issue when SELinux is running in Enforcing mode. This is because when the minion executes as a daemon the type context is changed to ``initrc_t``. 
The problem with SELinux arises when using salt-call or running the minion in the foreground, since the type context stays ``unconfined_t``. This problem is generally manifest in the rpm install scripts when using the pkg module. Until a full SELinux Policy is available for Salt the solution to this issue is to set the execution context of ``salt-call`` and ``salt-minion`` to rpm_exec_t: .. code-block:: bash # CentOS 5 and RHEL 5: chcon -t system_u:system_r:rpm_exec_t:s0 /usr/bin/salt-minion chcon -t system_u:system_r:rpm_exec_t:s0 /usr/bin/salt-call # CentOS 6 and RHEL 6: chcon system_u:object_r:rpm_exec_t:s0 /usr/bin/salt-minion chcon system_u:object_r:rpm_exec_t:s0 /usr/bin/salt-call This works well, because the ``rpm_exec_t`` context has very broad control over other types. Red Hat Enterprise Linux 5 ========================== Salt requires Python 2.6 or 2.7. Red Hat Enterprise Linux 5 and its variants come with Python 2.4 installed by default. When installing on RHEL 5 from the `EPEL repository`_ this is handled for you. But, if you run Salt from git, be advised that its dependencies need to be installed from EPEL and that Salt needs to be run with the ``python26`` executable. .. _`EPEL repository`: https://fedoraproject.org/wiki/EPEL Common YAML Gotchas =================== An extensive list of YAML idiosyncrasies has been compiled: .. toctree:: :maxdepth: 2 yaml_idiosyncrasies Live Python Debug Output ======================== If the minion or master seems to be unresponsive, a SIGUSR1 can be passed to the processes to display where in the code they are running. If encountering a situation like this, this debug information can be invaluable. First make sure the master of minion are running in the foreground: .. code-block:: bash salt-master -l debug salt-minion -l debug Then pass the signal to the master or minion when it seems to be unresponsive: .. 
code-block:: bash killall -SIGUSR1 salt-master killall -SIGUSR1 salt-minion Also under BSD and macOS in addition to SIGUSR1 signal, debug subroutine set up for SIGINFO which has an advantage of being sent by Ctrl+T shortcut. When filing an issue or sending questions to the mailing list for a problem with an unresponsive daemon this information can be invaluable. Salt 0.16.x minions cannot communicate with a 0.17.x master =========================================================== As of release 0.17.1 you can no longer run different versions of Salt on your Master and Minion servers. This is due to a protocol change for security purposes. The Salt team will continue to attempt to ensure versions are as backwards compatible as possible. Debugging the Master and Minion =============================== A list of common :ref:`master<troubleshooting-salt-master>` and :ref:`minion<troubleshooting-minion-salt-call>` troubleshooting steps provide a starting point for resolving issues you may encounter.
/salt-ssh-9000.tar.gz/salt-ssh-9000/doc/topics/troubleshooting/index.rst
0.794584
0.686541
index.rst
pypi
import logging

# Import salt modules
import salt.output

log = logging.getLogger(__name__)

__virtualname__ = "out"
__proxyenabled__ = ["*"]


def __virtual__():
    # No platform requirements; always load under the "out" virtual name.
    return __virtualname__


def out_format(data, out="nested", opts=None, **kwargs):
    """
    Transform a Python object into the string produced by the requested
    outputter.

    data
        The JSON serializable object.

    out: ``nested``
        The name of the output to use to transform the data. Default:
        ``nested``.

    opts
        Dictionary of configuration options. Default: ``__opts__``.

    kwargs
        Arguments to send to the outputter module.

    CLI Example:

    .. code-block:: bash

        salt '*' out.out_format "{'key': 'value'}"
    """
    # Fall back to the minion configuration when no options are supplied.
    return salt.output.out_format(data, out, opts=opts or __opts__, **kwargs)


def string_format(data, out="nested", opts=None, **kwargs):
    """
    Transform a Python object into the outputter-formatted string, with the
    ANSI escape sequences removed.

    data
        The JSON serializable object.

    out: ``nested``
        The name of the output to use to transform the data. Default:
        ``nested``.

    opts
        Dictionary of configuration options. Default: ``__opts__``.

    kwargs
        Arguments to send to the outputter module.

    CLI Example:

    .. code-block:: bash

        salt '*' out.string_format "{'key': 'value'}" out=table
    """
    # Fall back to the minion configuration when no options are supplied.
    return salt.output.string_format(data, out, opts=opts or __opts__, **kwargs)


def html_format(data, out="nested", opts=None, **kwargs):
    """
    Transform a Python object into the outputter-formatted string rendered
    as HTML.

    data
        The JSON serializable object.

    out: ``nested``
        The name of the output to use to transform the data. Default:
        ``nested``.

    opts
        Dictionary of configuration options. Default: ``__opts__``.

    kwargs
        Arguments to send to the outputter module.

    CLI Example:

    .. code-block:: bash

        salt '*' out.html_format "{'key': 'value'}" out=yaml
    """
    # Fall back to the minion configuration when no options are supplied.
    return salt.output.html_format(data, out, opts=opts or __opts__, **kwargs)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/out.py
0.845704
0.201087
out.py
pypi
"""
Execution module for MooseFS, implemented by shelling out to the ``mfs*``
command-line tools and parsing their text output.
"""
import salt.utils.path


def __virtual__():
    """
    Only load if the mfs commands are installed
    """
    if salt.utils.path.which("mfsgetgoal"):
        return "moosefs"
    return (
        False,
        "The moosefs execution module cannot be loaded: the mfsgetgoal binary is not in"
        " the path.",
    )


def dirinfo(path, opts=None):
    """
    Return information on a directory located on the Moose

    path
        Directory on the MooseFS mount to inspect.

    opts
        Optional single-dash flags passed through to ``mfsdirinfo``
        (e.g. ``nh``), appended as ``-<opts>``.

    CLI Example:

    .. code-block:: bash

        salt '*' moosefs.dirinfo /path/to/dir/ [-[n][h|H]]
    """
    cmd = "mfsdirinfo"
    ret = {}
    if opts:
        cmd += " -" + opts
    cmd += " " + path
    out = __salt__["cmd.run_all"](cmd, python_shell=False)
    output = out["stdout"].splitlines()
    for line in output:
        if not line:
            continue
        # Each non-empty line is assumed to be "key: value"; any text after a
        # second colon is dropped. NOTE(review): confirm mfsdirinfo never
        # emits colons inside values.
        comps = line.split(":")
        ret[comps[0].strip()] = comps[1].strip()
    return ret


def fileinfo(path):
    """
    Return information on a file located on the Moose

    path
        File on the MooseFS mount to inspect.

    CLI Example:

    .. code-block:: bash

        salt '*' moosefs.fileinfo /path/to/dir/
    """
    cmd = "mfsfileinfo " + path
    ret = {}
    # Tracks the most recently seen chunk header so that subsequent
    # "copy N: ip:port" lines can be attached to it.
    chunknum = ""
    out = __salt__["cmd.run_all"](cmd, python_shell=False)
    output = out["stdout"].splitlines()
    for line in output:
        if not line:
            continue
        if "/" in line:
            # Chunk header line; assumed shape is roughly
            # "chunk N: LOC / (id:X ver:Y)" -- TODO confirm against
            # actual mfsfileinfo output.
            comps = line.split("/")
            chunknum = comps[0].strip().split(":")
            meta = comps[1].strip().split(" ")
            chunk = chunknum[0].replace("chunk ", "")
            loc = chunknum[1].strip()
            id_ = meta[0].replace("(id:", "")
            ver = meta[1].replace(")", "").replace("ver:", "")
            ret[chunknum[0]] = {
                "chunk": chunk,
                "loc": loc,
                "id": id_,
                "ver": ver,
            }
        if "copy" in line:
            # Replica line; attach it to the chunk header parsed above.
            # NOTE(review): a "copy" line appearing before any chunk header
            # would raise TypeError (chunknum still a string).
            copyinfo = line.strip().split(":")
            ret[chunknum[0]][copyinfo[0]] = {
                "copy": copyinfo[0].replace("copy ", ""),
                "ip": copyinfo[1].strip(),
                "port": copyinfo[2],
            }
    return ret


def mounts():
    """
    Return a list of current MooseFS mounts

    CLI Example:

    .. code-block:: bash

        salt '*' moosefs.mounts
    """
    cmd = "mount"
    ret = {}
    out = __salt__["cmd.run_all"](cmd)
    output = out["stdout"].splitlines()
    for line in output:
        if not line:
            continue
        # Only fuse.mfs entries are MooseFS mounts. The mount(8) line is
        # assumed to look like "master:port/subfolder on /local type fuse.mfs
        # (opts)" -- TODO confirm field positions on all platforms.
        if "fuse.mfs" in line:
            comps = line.split(" ")
            info1 = comps[0].split(":")
            info2 = info1[1].split("/")
            ret[comps[2]] = {
                "remote": {
                    "master": info1[0],
                    "port": info2[0],
                    "subfolder": "/" + info2[1],
                },
                "local": comps[2],
                # Strip the surrounding parentheses and split the
                # comma-separated mount options.
                "options": (comps[5].replace("(", "").replace(")", "").split(",")),
            }
    return ret


def getgoal(path, opts=None):
    """
    Return goal(s) for a file or directory

    path
        File or directory on the MooseFS mount.

    opts
        Optional single-dash flags passed through to ``mfsgetgoal``;
        including ``r`` triggers the recursive (per-goal summary) parse.

    CLI Example:

    .. code-block:: bash

        salt '*' moosefs.getgoal /path/to/file [-[n][h|H]]
        salt '*' moosefs.getgoal /path/to/dir/ [-[n][h|H][r]]
    """
    cmd = "mfsgetgoal"
    ret = {}
    if opts:
        cmd += " -" + opts
    else:
        # Normalise to a string so the "r" membership test below is safe.
        opts = ""
    cmd += " " + path
    out = __salt__["cmd.run_all"](cmd, python_shell=False)
    output = out["stdout"].splitlines()
    if "r" not in opts:
        # Non-recursive output: single "path: goal" line.
        goal = output[0].split(": ")
        ret = {
            "goal": goal[1],
        }
    else:
        # Recursive output: summary lines grouped by goal value; lines
        # echoing the queried path itself are skipped.
        for line in output:
            if not line:
                continue
            if path in line:
                continue
            comps = line.split()
            keytext = comps[0] + " with goal"
            if keytext not in ret:
                ret[keytext] = {}
            # NOTE(review): field positions (comps[3], comps[5]) assumed
            # from mfsgetgoal -r summary format -- confirm.
            ret[keytext][comps[3]] = comps[5]
    return ret
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/moosefs.py
0.532425
0.232473
moosefs.py
pypi
import salt.utils.platform

# Define the module's virtual name
__virtualname__ = "shadow"


def __virtual__():
    """
    Only works on Windows systems
    """
    if not salt.utils.platform.is_windows():
        return (False, "Module win_shadow: module only works on Windows systems.")
    return __virtualname__


def info(name):
    """
    Return information for the specified user

    This is just returns dummy data so that salt states can work.

    :param str name: The name of the user account to show.

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.info root
    """
    # Start from empty-string placeholders for every shadow field; Windows
    # has no real shadow file so most of these stay blank.
    ret = {
        key: ""
        for key in ("name", "passwd", "lstchg", "min", "max", "warn", "inact", "expire")
    }
    ret["name"] = name
    user_data = __salt__["user.info"](name=name)
    if user_data:
        # Fill in the handful of fields the user module can actually report.
        ret.update(
            name=user_data["name"],
            passwd="Unavailable",
            lstchg=user_data["password_changed"],
            expire=user_data["expiration_date"],
        )
    return ret


def set_expire(name, expire):
    """
    Set the expiration date for a user account.

    :param name: The name of the user account to edit.
    :param expire: The date the account will expire.

    :return: True if successful. False if unsuccessful.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.set_expire <username> 2016/7/1
    """
    # Delegate to the user module, which performs the actual account update.
    return __salt__["user.update"](name, expiration_date=expire)


def require_password_change(name):
    """
    Require the user to change their password the next time they log in.

    :param name: The name of the user account to mark expired.

    :return: True if successful. False if unsuccessful.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.require_password_change <username>
    """
    # Marking the password expired forces a change at next logon.
    return __salt__["user.update"](name, expired=True)


def unlock_account(name):
    """
    Unlocks a user account.

    :param name: The name of the user account to unlock.

    :return: True if successful. False if unsuccessful.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.unlock_account <username>
    """
    # Delegate to the user module, which performs the actual account update.
    return __salt__["user.update"](name, unlock_account=True)


def set_password(name, password):
    """
    Set the password for a named user.

    :param str name: The name of the user account
    :param str password: The new password

    :return: True if successful. False if unsuccessful.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.set_password root mysecretpassword
    """
    # Delegate to the user module, which performs the actual account update.
    return __salt__["user.update"](name=name, password=password)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/win_shadow.py
0.772574
0.241109
win_shadow.py
pypi
import salt.utils.files
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError, SaltInvocationError

# pwd is POSIX-only; guarded so the module can at least be imported elsewhere.
try:
    import pwd
except ImportError:
    pass

# Password hashing support is optional; gen_password checks HAS_CRYPT.
try:
    import salt.utils.pycrypto

    HAS_CRYPT = True
except ImportError:
    HAS_CRYPT = False

# Define the module's virtual name
__virtualname__ = "shadow"


def __virtual__():
    # Load only on BSD-family systems (FreeBSD, NetBSD, OpenBSD, ...).
    if "BSD" in __grains__.get("os", ""):
        return __virtualname__
    return (
        False,
        "The bsd_shadow execution module cannot be loaded: "
        "only available on BSD family systems.",
    )


def default_hash():
    """
    Returns the default hash used for unset passwords

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.default_hash
    """
    # FreeBSD uses "*" for locked/unset passwords; the other BSDs use a
    # run of asterisks.
    return "*" if __grains__["os"].lower() == "freebsd" else "*************"


def gen_password(password, crypt_salt=None, algorithm="sha512"):
    """
    Generate hashed password

    .. note::
        When called this function is called directly via remote-execution,
        the password argument may be displayed in the system's process list.
        This may be a security risk on certain systems.

    password
        Plaintext password to be hashed.

    crypt_salt
        Cryptographic salt. If not given, a random 8-character salt will be
        generated.

    algorithm
        The following hash algorithms are supported:

        * md5
        * blowfish (not in mainline glibc, only available in distros that add it)
        * sha256
        * sha512 (default)

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.gen_password 'I_am_password'
        salt '*' shadow.gen_password 'I_am_password' crypt_salt='I_am_salt' algorithm=sha256
    """
    if not HAS_CRYPT:
        raise CommandExecutionError(
            "gen_password is not available on this operating system "
            'because the "crypt" python module is not available.'
        )
    return salt.utils.pycrypto.gen_hash(crypt_salt, password, algorithm)


def info(name):
    """
    Return information for the specified user

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.info someuser
    """
    try:
        data = pwd.getpwnam(name)
        ret = {"name": data.pw_name, "passwd": data.pw_passwd}
    except KeyError:
        # Unknown user: return an empty record rather than raising.
        return {"name": "", "passwd": ""}

    # Validate the name before interpolating it into commands / comparisons.
    # NOTE(review): this runs after the pwd lookup above -- a non-str name
    # would already have failed there.
    if not isinstance(name, str):
        name = str(name)
    if ":" in name:
        raise SaltInvocationError("Invalid username '{}'".format(name))

    if __salt__["cmd.has_exec"]("pw"):
        # FreeBSD: fields 5 and 6 of "pw user show" are change and expire.
        change, expire = __salt__["cmd.run_stdout"](
            ["pw", "user", "show", name], python_shell=False
        ).split(":")[5:7]
    elif __grains__["kernel"] in ("NetBSD", "OpenBSD"):
        try:
            # Read the shadow fields straight out of master.passwd.
            with salt.utils.files.fopen("/etc/master.passwd", "r") as fp_:
                for line in fp_:
                    line = salt.utils.stringutils.to_unicode(line)
                    if line.startswith("{}:".format(name)):
                        key = line.split(":")
                        change, expire = key[5:7]
                        ret["passwd"] = str(key[1])
                        break
        except OSError:
            change = expire = None
        # NOTE(review): if the file opens but no line matches, change/expire
        # are never bound and the int() calls below raise NameError.
    else:
        change = expire = None

    # Only attach change/expire when they parse as integers; int(None)
    # raises TypeError, which is NOT caught here -- NOTE(review): confirm
    # the None path is unreachable in practice.
    try:
        ret["change"] = int(change)
    except ValueError:
        pass
    try:
        ret["expire"] = int(expire)
    except ValueError:
        pass
    return ret


def set_change(name, change):
    """
    Sets the time at which the password expires (in seconds since the UNIX
    epoch). See ``man 8 usermod`` on NetBSD and OpenBSD or ``man 8 pw`` on
    FreeBSD.

    A value of ``0`` sets the password to never expire.

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.set_change username 1419980400
    """
    pre_info = info(name)
    if change == pre_info["change"]:
        # Already at the requested value; nothing to do.
        return True
    if __grains__["kernel"] == "FreeBSD":
        cmd = ["pw", "user", "mod", name, "-f", change]
    else:
        cmd = ["usermod", "-f", change, name]
    __salt__["cmd.run"](cmd, python_shell=False)
    post_info = info(name)
    if post_info["change"] != pre_info["change"]:
        return post_info["change"] == change
    # NOTE(review): falls through and implicitly returns None when the
    # command ran but the value did not change.


def set_expire(name, expire):
    """
    Sets the time at which the account expires (in seconds since the UNIX
    epoch). See ``man 8 usermod`` on NetBSD and OpenBSD or ``man 8 pw`` on
    FreeBSD.

    A value of ``0`` sets the account to never expire.

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.set_expire username 1419980400
    """
    pre_info = info(name)
    if expire == pre_info["expire"]:
        # Already at the requested value; nothing to do.
        return True
    if __grains__["kernel"] == "FreeBSD":
        cmd = ["pw", "user", "mod", name, "-e", expire]
    else:
        cmd = ["usermod", "-e", expire, name]
    __salt__["cmd.run"](cmd, python_shell=False)
    post_info = info(name)
    if post_info["expire"] != pre_info["expire"]:
        return post_info["expire"] == expire
    # NOTE(review): falls through and implicitly returns None when the
    # command ran but the value did not change.


def del_password(name):
    """
    .. versionadded:: 2015.8.2

    Delete the password from name user

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.del_password username
    """
    # FreeBSD-specific: "pw ... -w none" clears the password field.
    cmd = "pw user mod {} -w none".format(name)
    __salt__["cmd.run"](cmd, python_shell=False, output_loglevel="quiet")
    uinfo = info(name)
    # Success means the stored password field is now empty.
    return not uinfo["passwd"]


def set_password(name, password):
    """
    Set the password for a named user. The password must be a properly
    defined hash. The password hash can be generated with this command:

    ``python -c "import crypt; print crypt.crypt('password', ciphersalt)"``

    .. note::
        When constructing the ``ciphersalt`` string, you must escape any
        dollar signs, to avoid them being interpolated by the shell.

    ``'password'`` is, of course, the password for which you want to generate
    a hash.

    ``ciphersalt`` is a combination of a cipher identifier, an optional number
    of rounds, and the cryptographic salt. The arrangement and format of these
    fields depends on the cipher and which flavor of BSD you are using. For
    more information on this, see the manpage for ``crypt(3)``. On NetBSD,
    additional information is available in ``passwd.conf(5)``.

    It is important to make sure that a supported cipher is used.

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.set_password someuser '$1$UYCIxa628.9qXjpQCjM4a..'
    """
    if __grains__.get("os", "") == "FreeBSD":
        # FreeBSD reads the hash from stdin ("-H 0" = fd 0).
        cmd = ["pw", "user", "mod", name, "-H", "0"]
        stdin = password
    else:
        cmd = ["usermod", "-p", password, name]
        stdin = None
    __salt__["cmd.run"](cmd, stdin=stdin, output_loglevel="quiet", python_shell=False)
    # Verify the hash was actually stored.
    return info(name)["passwd"] == password
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/bsd_shadow.py
0.623262
0.1929
bsd_shadow.py
pypi
import logging import os import re import salt.utils.platform from salt.exceptions import CommandExecutionError log = logging.getLogger(__name__) __virtualname__ = "certutil" def __virtual__(): """ Only works on Windows """ if salt.utils.platform.is_windows(): return __virtualname__ return False, "Module win_certutil: module only works on Windows systems." def get_cert_serial(cert_file, saltenv="base"): """ Get the serial number of a certificate file cert_file (str): The certificate file to find the serial for. Can be a local file or a a file on the file server (``salt://``) Returns: str: The serial number of the certificate if found, otherwise None CLI Example: .. code-block:: bash salt '*' certutil.get_cert_serial <certificate name> """ cert_file = __salt__["cp.cache_file"](cert_file, saltenv) # Since we're allowing a path, let's make sure it exists if not os.path.exists(cert_file): msg = "cert_file not found: {}".format(cert_file) raise CommandExecutionError(msg) cmd = 'certutil.exe -silent -verify "{}"'.format(cert_file) out = __salt__["cmd.run"](cmd) # match serial number by paragraph to work with multiple languages matches = re.search(r":\s*(\w*)\r\n\r\n", out) if matches is not None: return matches.groups()[0].strip() else: return None def get_stored_cert_serials(store): """ Get all of the certificate serials in the specified store store (str): The store to get all the certificate serials from Returns: list: A list of serial numbers found, or an empty list if none found CLI Example: .. code-block:: bash salt '*' certutil.get_stored_cert_serials <store> """ cmd = 'certutil.exe -store "{}"'.format(store) out = __salt__["cmd.run"](cmd) # match serial numbers by header position to work with multiple languages matches = re.findall(r"={16}\r\n.*:\s*(\w*)\r\n", out) return matches def add_store(source, store, retcode=False, saltenv="base"): """ Add the cert to the given Certificate Store source (str): The source certificate file. 
This is either the path to a local file or a file from the file server in the form of ``salt://path/to/file`` store (str): The certificate store to add the certificate to retcode (bool): If ``True``, return the retcode instead of stdout. Default is ``False`` saltenv (str): The salt environment to use. This is ignored if the path is local CLI Example: .. code-block:: bash salt '*' certutil.add_store salt://cert.cer TrustedPublisher salt '*' certutil.add_store C:\\path\\to\\local.cer TrustedPublisher """ source = __salt__["cp.cache_file"](source, saltenv) # Since we're allowing a path, let's make sure it exists if not os.path.exists(source): msg = "cert_file not found: {}".format(source) raise CommandExecutionError(msg) cmd = 'certutil.exe -addstore {} "{}"'.format(store, source) if retcode: return __salt__["cmd.retcode"](cmd) else: return __salt__["cmd.run"](cmd) def del_store(source, store, retcode=False, saltenv="base"): """ Delete the cert from the given Certificate Store source (str): The source certificate file. This is either the path to a local file or a file from the file server in the form of ``salt://path/to/file`` store (str): The certificate store to delete the certificate from retcode (bool): If ``True``, return the retcode instead of stdout. Default is ``False`` saltenv (str): The salt environment to use. This is ignored if the path is local CLI Example: .. code-block:: bash salt '*' certutil.del_store salt://cert.cer TrustedPublisher salt '*' certutil.del_store C:\\path\\to\\local.cer TrustedPublisher """ source = __salt__["cp.cache_file"](source, saltenv) # Since we're allowing a path, let's make sure it exists if not os.path.exists(source): msg = "cert_file not found: {}".format(source) raise CommandExecutionError(msg) serial = get_cert_serial(source) cmd = 'certutil.exe -delstore {} "{}"'.format(store, serial) if retcode: return __salt__["cmd.retcode"](cmd) else: return __salt__["cmd.run"](cmd)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/win_certutil.py
0.675444
0.22007
win_certutil.py
pypi
import datetime import logging import os import salt.utils.path from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) LEA = salt.utils.path.which_bin( [ "certbot", "letsencrypt", "certbot-auto", "letsencrypt-auto", "/opt/letsencrypt/letsencrypt-auto", ] ) LE_LIVE = "/etc/letsencrypt/live/" if salt.utils.platform.is_freebsd(): LE_LIVE = "/usr/local" + LE_LIVE def __virtual__(): """ Only work when letsencrypt-auto is installed """ return ( LEA is not None, "The ACME execution module cannot be loaded: letsencrypt-auto not installed.", ) def _cert_file(name, cert_type): """ Return expected path of a Let's Encrypt live cert """ return os.path.join(LE_LIVE, name, "{}.pem".format(cert_type)) def _expires(name): """ Return the expiry date of a cert :rtype: datetime :return: Expiry date """ cert_file = _cert_file(name, "cert") # Use the salt module if available if "tls.cert_info" in __salt__: expiry = __salt__["tls.cert_info"](cert_file).get("not_after", 0) # Cobble it together using the openssl binary else: openssl_cmd = "openssl x509 -in {} -noout -enddate".format(cert_file) # No %e format on my Linux'es here strptime_sux_cmd = 'date --date="$({} | cut -d= -f2)" +%s'.format(openssl_cmd) expiry = float(__salt__["cmd.shell"](strptime_sux_cmd, output_loglevel="quiet")) # expiry = datetime.datetime.strptime(expiry.split('=', 1)[-1], '%b %e %H:%M:%S %Y %Z') return datetime.datetime.fromtimestamp(expiry) def _renew_by(name, window=None): """ Date before a certificate should be renewed :param str name: Name of the certificate :param int window: days before expiry date to renew :rtype: datetime :return: First renewal date """ expiry = _expires(name) if window is not None: expiry = expiry - datetime.timedelta(days=window) return expiry def cert( name, aliases=None, email=None, webroot=None, test_cert=False, renew=None, keysize=None, server=None, owner="root", group="root", mode="0640", certname=None, preferred_challenges=None, tls_sni_01_port=None, 
tls_sni_01_address=None, http_01_port=None, http_01_address=None, dns_plugin=None, dns_plugin_credentials=None, ): """ Obtain/renew a certificate from an ACME CA, probably Let's Encrypt. :param name: Common Name of the certificate (DNS name of certificate) :param aliases: subjectAltNames (Additional DNS names on certificate) :param email: e-mail address for interaction with ACME provider :param webroot: True or a full path to use to use webroot. Otherwise use standalone mode :param test_cert: Request a certificate from the Happy Hacker Fake CA (mutually exclusive with 'server') :param renew: True/'force' to force a renewal, or a window of renewal before expiry in days :param keysize: RSA key bits :param server: API endpoint to talk to :param owner: owner of the private key file :param group: group of the private key file :param mode: mode of the private key file :param certname: Name of the certificate to save :param preferred_challenges: A sorted, comma delimited list of the preferred challenge to use during authorization with the most preferred challenge listed first. :param tls_sni_01_port: Port used during tls-sni-01 challenge. This only affects the port Certbot listens on. A conforming ACME server will still attempt to connect on port 443. :param tls_sni_01_address: The address the server listens to during tls-sni-01 challenge. :param http_01_port: Port used in the http-01 challenge. This only affects the port Certbot listens on. A conforming ACME server will still attempt to connect on port 80. :param https_01_address: The address the server listens to during http-01 challenge. :param dns_plugin: Name of a DNS plugin to use (currently only 'cloudflare' or 'digitalocean') :param dns_plugin_credentials: Path to the credentials file if required by the specified DNS plugin :param dns_plugin_propagate_seconds: Number of seconds to wait for DNS propogations before asking ACME servers to verify the DNS record. 
(default 10) :rtype: dict :return: Dictionary with 'result' True/False/None, 'comment' and certificate's expiry date ('not_after') CLI Example: .. code-block:: bash salt 'gitlab.example.com' acme.cert dev.example.com "[gitlab.example.com]" test_cert=True \ renew=14 webroot=/opt/gitlab/embedded/service/gitlab-rails/public """ cmd = [LEA, "certonly", "--non-interactive", "--agree-tos"] if certname is None: certname = name supported_dns_plugins = ["cloudflare"] cert_file = _cert_file(certname, "cert") if not __salt__["file.file_exists"](cert_file): log.debug("Certificate %s does not exist (yet)", cert_file) renew = False elif needs_renewal(certname, renew): log.debug("Certificate %s will be renewed", cert_file) cmd.append("--renew-by-default") renew = True if server: cmd.append("--server {}".format(server)) if certname: cmd.append("--cert-name {}".format(certname)) if test_cert: if server: return { "result": False, "comment": "Use either server or test_cert, not both", } cmd.append("--test-cert") if webroot: cmd.append("--authenticator webroot") if webroot is not True: cmd.append("--webroot-path {}".format(webroot)) elif dns_plugin in supported_dns_plugins: if dns_plugin == "cloudflare": cmd.append("--dns-cloudflare") cmd.append("--dns-cloudflare-credentials {}".format(dns_plugin_credentials)) else: return { "result": False, "comment": "DNS plugin '{}' is not supported".format(dns_plugin), } else: cmd.append("--authenticator standalone") if email: cmd.append("--email {}".format(email)) if keysize: cmd.append("--rsa-key-size {}".format(keysize)) cmd.append("--domains {}".format(name)) if aliases is not None: for dns in aliases: cmd.append("--domains {}".format(dns)) if preferred_challenges: cmd.append("--preferred-challenges {}".format(preferred_challenges)) if tls_sni_01_port: cmd.append("--tls-sni-01-port {}".format(tls_sni_01_port)) if tls_sni_01_address: cmd.append("--tls-sni-01-address {}".format(tls_sni_01_address)) if http_01_port: cmd.append("--http-01-port 
{}".format(http_01_port)) if http_01_address: cmd.append("--http-01-address {}".format(http_01_address)) res = __salt__["cmd.run_all"](" ".join(cmd)) if res["retcode"] != 0: if "expand" in res["stderr"]: cmd.append("--expand") res = __salt__["cmd.run_all"](" ".join(cmd)) if res["retcode"] != 0: return { "result": False, "comment": "Certificate {} renewal failed with:\n{}".format( name, res["stderr"] ), } else: return { "result": False, "comment": "Certificate {} renewal failed with:\n{}".format( name, res["stderr"] ), } if "no action taken" in res["stdout"]: comment = "Certificate {} unchanged".format(cert_file) result = None elif renew: comment = "Certificate {} renewed".format(certname) result = True else: comment = "Certificate {} obtained".format(certname) result = True ret = { "comment": comment, "not_after": expires(certname), "changes": {}, "result": result, } ret, _ = __salt__["file.check_perms"]( _cert_file(certname, "privkey"), ret, owner, group, mode, follow_symlinks=True ) return ret def certs(): """ Return a list of active certificates CLI Example: .. code-block:: bash salt 'vhost.example.com' acme.certs """ return [ item for item in __salt__["file.readdir"](LE_LIVE)[2:] if os.path.isdir(os.path.join(LE_LIVE, item)) ] def info(name): """ Return information about a certificate :param str name: Name of certificate :rtype: dict :return: Dictionary with information about the certificate. If neither the ``tls`` nor the ``x509`` module can be used to determine the certificate information, the information will be retrieved as one big text block under the key ``text`` using the openssl cli. CLI Example: .. 
code-block:: bash salt 'gitlab.example.com' acme.info dev.example.com """ if not has(name): return {} cert_file = _cert_file(name, "cert") # Use the tls salt module if available if "tls.cert_info" in __salt__: cert_info = __salt__["tls.cert_info"](cert_file) # Strip out the extensions object contents; # these trip over our poor state output # and they serve no real purpose here anyway cert_info["extensions"] = list(cert_info["extensions"]) elif "x509.read_certificate" in __salt__: cert_info = __salt__["x509.read_certificate"](cert_file) else: # Cobble it together using the openssl binary openssl_cmd = "openssl x509 -in {} -noout -text".format(cert_file) cert_info = {"text": __salt__["cmd.run"](openssl_cmd, output_loglevel="quiet")} return cert_info def expires(name): """ The expiry date of a certificate in ISO format :param str name: Name of certificate :rtype: str :return: Expiry date in ISO format. CLI Example: .. code-block:: bash salt 'gitlab.example.com' acme.expires dev.example.com """ return _expires(name).isoformat() def has(name): """ Test if a certificate is in the Let's Encrypt Live directory :param str name: Name of certificate :rtype: bool Code example: .. code-block:: python if __salt__['acme.has']('dev.example.com'): log.info('That is one nice certificate you have there!') """ return __salt__["file.file_exists"](_cert_file(name, "cert")) def renew_by(name, window=None): """ Date in ISO format when a certificate should first be renewed :param str name: Name of certificate :param int window: number of days before expiry when renewal should take place :rtype: str :return: Date of certificate renewal in ISO format. """ return _renew_by(name, window).isoformat() def needs_renewal(name, window=None): """ Check if a certificate needs renewal :param str name: Name of certificate :param bool/str/int window: Window in days to renew earlier or True/force to just return True :rtype: bool :return: Whether or not the certificate needs to be renewed. 
Code example: .. code-block:: python if __salt__['acme.needs_renewal']('dev.example.com'): __salt__['acme.cert']('dev.example.com', **kwargs) else: log.info('Your certificate is still good') """ if window: if str(window).lower() in ("force", "true"): return True if not ( isinstance(window, int) or (hasattr(window, "isdigit") and window.isdigit()) ): raise SaltInvocationError( 'The argument "window", if provided, must be one of the following : ' 'True (boolean), "force" or "Force" (str) or a numerical value in days.' ) window = int(window) return _renew_by(name, window) <= datetime.datetime.today()
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/acme.py
0.569853
0.180035
acme.py
pypi
import logging log = logging.getLogger(__name__) def _analyse_overview_field(content): """ Split the field in drbd-overview """ if "(" in content: # Output like "Connected(2*)" or "UpToDate(2*)" return content.split("(")[0], content.split("(")[0] elif "/" in content: # Output like "Primar/Second" or "UpToDa/UpToDa" return content.split("/")[0], content.split("/")[1] return content, "" def _count_spaces_startswith(line): """ Count the number of spaces before the first character """ if line.split("#")[0].strip() == "": return None spaces = 0 for i in line: if i.isspace(): spaces += 1 else: return spaces def _analyse_status_type(line): """ Figure out the sections in drbdadm status """ spaces = _count_spaces_startswith(line) if spaces is None: return "" switch = { 0: "RESOURCE", 2: {" disk:": "LOCALDISK", " role:": "PEERNODE", " connection:": "PEERNODE"}, 4: {" peer-disk:": "PEERDISK"}, } ret = switch.get(spaces, "UNKNOWN") if isinstance(ret, str): return ret for x in ret: if x in line: return ret[x] return "UNKNOWN" def _add_res(line): """ Analyse the line of local resource of ``drbdadm status`` """ global resource fields = line.strip().split() if resource: ret.append(resource) resource = {} resource["resource name"] = fields[0] resource["local role"] = fields[1].split(":")[1] resource["local volumes"] = [] resource["peer nodes"] = [] def _add_volume(line): """ Analyse the line of volumes of ``drbdadm status`` """ section = _analyse_status_type(line) fields = line.strip().split() volume = {} for field in fields: volume[field.split(":")[0]] = field.split(":")[1] if section == "LOCALDISK": resource["local volumes"].append(volume) else: # 'PEERDISK' lastpnodevolumes.append(volume) def _add_peernode(line): """ Analyse the line of peer nodes of ``drbdadm status`` """ global lastpnodevolumes fields = line.strip().split() peernode = {} peernode["peernode name"] = fields[0] # Could be role or connection: peernode[fields[1].split(":")[0]] = fields[1].split(":")[1] 
peernode["peer volumes"] = [] resource["peer nodes"].append(peernode) lastpnodevolumes = peernode["peer volumes"] def _empty(dummy): """ Action of empty line of ``drbdadm status`` """ def _unknown_parser(line): """ Action of unsupported line of ``drbdadm status`` """ global ret ret = {"Unknown parser": line} def _line_parser(line): """ Call action for different lines """ section = _analyse_status_type(line) fields = line.strip().split() switch = { "": _empty, "RESOURCE": _add_res, "PEERNODE": _add_peernode, "LOCALDISK": _add_volume, "PEERDISK": _add_volume, } func = switch.get(section, _unknown_parser) func(line) def overview(): """ Show status of the DRBD devices, support two nodes only. drbd-overview is removed since drbd-utils-9.6.0, use status instead. CLI Example: .. code-block:: bash salt '*' drbd.overview """ cmd = "drbd-overview" for line in __salt__["cmd.run"](cmd).splitlines(): ret = {} fields = line.strip().split() minnum = fields[0].split(":")[0] device = fields[0].split(":")[1] connstate, _ = _analyse_overview_field(fields[1]) localrole, partnerrole = _analyse_overview_field(fields[2]) localdiskstate, partnerdiskstate = _analyse_overview_field(fields[3]) if localdiskstate.startswith("UpTo"): if partnerdiskstate.startswith("UpTo"): if len(fields) >= 5: mountpoint = fields[4] fs_mounted = fields[5] totalsize = fields[6] usedsize = fields[7] remainsize = fields[8] perc = fields[9] ret = { "minor number": minnum, "device": device, "connection state": connstate, "local role": localrole, "partner role": partnerrole, "local disk state": localdiskstate, "partner disk state": partnerdiskstate, "mountpoint": mountpoint, "fs": fs_mounted, "total size": totalsize, "used": usedsize, "remains": remainsize, "percent": perc, } else: ret = { "minor number": minnum, "device": device, "connection state": connstate, "local role": localrole, "partner role": partnerrole, "local disk state": localdiskstate, "partner disk state": partnerdiskstate, } else: syncbar = fields[4] 
synced = fields[6] syncedbytes = fields[7] sync = synced + syncedbytes ret = { "minor number": minnum, "device": device, "connection state": connstate, "local role": localrole, "partner role": partnerrole, "local disk state": localdiskstate, "partner disk state": partnerdiskstate, "synchronisation: ": syncbar, "synched": sync, } return ret # Global para for func status ret = [] resource = {} lastpnodevolumes = None def status(name="all"): """ Using drbdadm to show status of the DRBD devices, available in the latest drbd9. Support multiple nodes, multiple volumes. :type name: str :param name: Resource name. :return: drbd status of resource. :rtype: list(dict(res)) CLI Example: .. code-block:: bash salt '*' drbd.status salt '*' drbd.status name=<resource name> """ # Initialize for multiple times test cases global ret global resource ret = [] resource = {} cmd = ["drbdadm", "status"] cmd.append(name) # One possible output: (number of resource/node/vol are flexible) # resource role:Secondary # volume:0 disk:Inconsistent # volume:1 disk:Inconsistent # drbd-node1 role:Primary # volume:0 replication:SyncTarget peer-disk:UpToDate done:10.17 # volume:1 replication:SyncTarget peer-disk:UpToDate done:74.08 # drbd-node2 role:Secondary # volume:0 peer-disk:Inconsistent resync-suspended:peer # volume:1 peer-disk:Inconsistent resync-suspended:peer for line in __salt__["cmd.run"](cmd).splitlines(): _line_parser(line) if resource: ret.append(resource) return ret
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/drbd.py
0.619817
0.23725
drbd.py
pypi
import re import salt.utils.platform import salt.utils.win_lgpo_netsh from salt.exceptions import CommandExecutionError # Define the module's virtual name __virtualname__ = "firewall" def __virtual__(): """ Only works on Windows systems """ if not salt.utils.platform.is_windows(): return False, "Module win_firewall: module only available on Windows" return __virtualname__ def get_config(): """ Get the status of all the firewall profiles Returns: dict: A dictionary of all profiles on the system Raises: CommandExecutionError: If the command fails CLI Example: .. code-block:: bash salt '*' firewall.get_config """ profiles = {} curr = None cmd = ["netsh", "advfirewall", "show", "allprofiles"] ret = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True) if ret["retcode"] != 0: raise CommandExecutionError(ret["stdout"]) # There may be some problems with this depending on how `netsh` is localized # It's looking for lines that contain `Profile Settings` or start with # `State` which may be different in different localizations for line in ret["stdout"].splitlines(): if not curr: tmp = re.search("(.*) Profile Settings:", line) if tmp: curr = tmp.group(1) elif line.startswith("State"): profiles[curr] = line.split()[1] == "ON" curr = None return profiles def disable(profile="allprofiles"): """ Disable firewall profile Args: profile (Optional[str]): The name of the profile to disable. Default is ``allprofiles``. Valid options are: - allprofiles - domainprofile - privateprofile - publicprofile Returns: bool: True if successful Raises: CommandExecutionError: If the command fails CLI Example: .. code-block:: bash salt '*' firewall.disable """ cmd = ["netsh", "advfirewall", "set", profile, "state", "off"] ret = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True) if ret["retcode"] != 0: raise CommandExecutionError(ret["stdout"]) return True def enable(profile="allprofiles"): """ .. 
versionadded:: 2015.5.0 Enable firewall profile Args: profile (Optional[str]): The name of the profile to enable. Default is ``allprofiles``. Valid options are: - allprofiles - domainprofile - privateprofile - publicprofile Returns: bool: True if successful Raises: CommandExecutionError: If the command fails CLI Example: .. code-block:: bash salt '*' firewall.enable """ cmd = ["netsh", "advfirewall", "set", profile, "state", "on"] ret = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True) if ret["retcode"] != 0: raise CommandExecutionError(ret["stdout"]) return True def get_rule(name="all"): """ .. versionadded:: 2015.5.0 Display all matching rules as specified by name Args: name (Optional[str]): The full name of the rule. ``all`` will return all rules. Default is ``all`` Returns: dict: A dictionary of all rules or rules that match the name exactly Raises: CommandExecutionError: If the command fails CLI Example: .. code-block:: bash salt '*' firewall.get_rule 'MyAppPort' """ cmd = ["netsh", "advfirewall", "firewall", "show", "rule", "name={}".format(name)] ret = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True) if ret["retcode"] != 0: raise CommandExecutionError(ret["stdout"]) return {name: ret["stdout"]} def add_rule(name, localport, protocol="tcp", action="allow", dir="in", remoteip="any"): """ .. versionadded:: 2015.5.0 Add a new inbound or outbound rule to the firewall policy Args: name (str): The name of the rule. Must be unique and cannot be "all". Required. localport (int): The port the rule applies to. Must be a number between 0 and 65535. Can be a range. Can specify multiple ports separated by commas. Required. protocol (Optional[str]): The protocol. Can be any of the following: - A number between 0 and 255 - icmpv4 - icmpv6 - tcp - udp - any action (Optional[str]): The action the rule performs. Can be any of the following: - allow - block - bypass dir (Optional[str]): The direction. Can be ``in`` or ``out``. 
remoteip (Optional [str]): The remote IP. Can be any of the following: - any - localsubnet - dns - dhcp - wins - defaultgateway - Any valid IPv4 address (192.168.0.12) - Any valid IPv6 address (2002:9b3b:1a31:4:208:74ff:fe39:6c43) - Any valid subnet (192.168.1.0/24) - Any valid range of IP addresses (192.168.0.1-192.168.0.12) - A list of valid IP addresses Can be combinations of the above separated by commas. Returns: bool: True if successful Raises: CommandExecutionError: If the command fails CLI Example: .. code-block:: bash salt '*' firewall.add_rule 'test' '8080' 'tcp' salt '*' firewall.add_rule 'test' '1' 'icmpv4' salt '*' firewall.add_rule 'test_remote_ip' '8000' 'tcp' 'allow' 'in' '192.168.0.1' """ cmd = [ "netsh", "advfirewall", "firewall", "add", "rule", "name={}".format(name), "protocol={}".format(protocol), "dir={}".format(dir), "action={}".format(action), "remoteip={}".format(remoteip), ] if protocol is None or ("icmpv4" not in protocol and "icmpv6" not in protocol): cmd.append("localport={}".format(localport)) ret = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True) if ret["retcode"] != 0: raise CommandExecutionError(ret["stdout"]) return True def delete_rule(name=None, localport=None, protocol=None, dir=None, remoteip=None): """ .. versionadded:: 2015.8.0 Delete an existing firewall rule identified by name and optionally by ports, protocols, direction, and remote IP. Args: name (str): The name of the rule to delete. If the name ``all`` is used you must specify additional parameters. localport (Optional[str]): The port of the rule. If protocol is not specified, protocol will be set to ``tcp`` protocol (Optional[str]): The protocol of the rule. Default is ``tcp`` when ``localport`` is specified dir (Optional[str]): The direction of the rule. remoteip (Optional[str]): The remote IP of the rule. Returns: bool: True if successful Raises: CommandExecutionError: If the command fails CLI Example: .. 
code-block:: bash # Delete incoming tcp port 8080 in the rule named 'test' salt '*' firewall.delete_rule 'test' '8080' 'tcp' 'in' # Delete the incoming tcp port 8000 from 192.168.0.1 in the rule named # 'test_remote_ip' salt '*' firewall.delete_rule 'test_remote_ip' '8000' 'tcp' 'in' '192.168.0.1' # Delete all rules for local port 80: salt '*' firewall.delete_rule all 80 tcp # Delete a rule called 'allow80': salt '*' firewall.delete_rule allow80 """ cmd = ["netsh", "advfirewall", "firewall", "delete", "rule"] if name: cmd.append("name={}".format(name)) if protocol: cmd.append("protocol={}".format(protocol)) if dir: cmd.append("dir={}".format(dir)) if remoteip: cmd.append("remoteip={}".format(remoteip)) if protocol is None or ("icmpv4" not in protocol and "icmpv6" not in protocol): if localport: if not protocol: cmd.append("protocol=tcp") cmd.append("localport={}".format(localport)) ret = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True) if ret["retcode"] != 0: raise CommandExecutionError(ret["stdout"]) return True def rule_exists(name): """ .. versionadded:: 2016.11.6 Checks if a firewall rule exists in the firewall policy Args: name (str): The name of the rule Returns: bool: True if exists, otherwise False CLI Example: .. code-block:: bash # Is there a rule named RemoteDesktop salt '*' firewall.rule_exists RemoteDesktop """ try: get_rule(name) return True except CommandExecutionError: return False def get_settings(profile, section, store="local"): """ Get the firewall property from the specified profile in the specified store as returned by ``netsh advfirewall``. .. versionadded:: 2018.3.4 .. versionadded:: 2019.2.0 Args: profile (str): The firewall profile to query. Valid options are: - domain - public - private section (str): The property to query within the selected profile. 
Valid options are: - firewallpolicy : inbound/outbound behavior - logging : firewall logging settings - settings : firewall properties - state : firewalls state (on | off) store (str): The store to use. This is either the local firewall policy or the policy defined by local group policy. Valid options are: - lgpo - local Default is ``local`` Returns: dict: A dictionary containing the properties for the specified profile Raises: CommandExecutionError: If an error occurs ValueError: If the parameters are incorrect CLI Example: .. code-block:: bash # Get the inbound/outbound firewall settings for connections on the # local domain profile salt * win_firewall.get_settings domain firewallpolicy # Get the inbound/outbound firewall settings for connections on the # domain profile as defined by local group policy salt * win_firewall.get_settings domain firewallpolicy lgpo """ return salt.utils.win_lgpo_netsh.get_settings( profile=profile, section=section, store=store ) def get_all_settings(domain, store="local"): """ Gets all the properties for the specified profile in the specified store .. versionadded:: 2018.3.4 .. versionadded:: 2019.2.0 Args: profile (str): The firewall profile to query. Valid options are: - domain - public - private store (str): The store to use. This is either the local firewall policy or the policy defined by local group policy. Valid options are: - lgpo - local Default is ``local`` Returns: dict: A dictionary containing the specified settings CLI Example: .. code-block:: bash # Get all firewall settings for connections on the domain profile salt * win_firewall.get_all_settings domain # Get all firewall settings for connections on the domain profile as # defined by local group policy salt * win_firewall.get_all_settings domain lgpo """ return salt.utils.win_lgpo_netsh.get_all_settings(profile=domain, store=store) def get_all_profiles(store="local"): """ Gets all properties for all profiles in the specified store .. versionadded:: 2018.3.4 .. 
versionadded:: 2019.2.0 Args: store (str): The store to use. This is either the local firewall policy or the policy defined by local group policy. Valid options are: - lgpo - local Default is ``local`` Returns: dict: A dictionary containing the specified settings for each profile CLI Example: .. code-block:: bash # Get all firewall settings for all profiles salt * firewall.get_all_settings # Get all firewall settings for all profiles as defined by local group # policy salt * firewall.get_all_settings lgpo """ return salt.utils.win_lgpo_netsh.get_all_profiles(store=store) def set_firewall_settings(profile, inbound=None, outbound=None, store="local"): """ Set the firewall inbound/outbound settings for the specified profile and store .. versionadded:: 2018.3.4 .. versionadded:: 2019.2.0 Args: profile (str): The firewall profile to query. Valid options are: - domain - public - private inbound (str): The inbound setting. If ``None`` is passed, the setting will remain unchanged. Valid values are: - blockinbound - blockinboundalways - allowinbound - notconfigured Default is ``None`` outbound (str): The outbound setting. If ``None`` is passed, the setting will remain unchanged. Valid values are: - allowoutbound - blockoutbound - notconfigured Default is ``None`` store (str): The store to use. This is either the local firewall policy or the policy defined by local group policy. Valid options are: - lgpo - local Default is ``local`` Returns: bool: ``True`` if successful Raises: CommandExecutionError: If an error occurs ValueError: If the parameters are incorrect CLI Example: .. 
code-block:: bash # Set the inbound setting for the domain profile to block inbound # connections salt * firewall.set_firewall_settings domain='domain' inbound='blockinbound' # Set the outbound setting for the domain profile to allow outbound # connections salt * firewall.set_firewall_settings domain='domain' outbound='allowoutbound' # Set inbound/outbound settings for the domain profile in the group # policy to block inbound and allow outbound salt * firewall.set_firewall_settings domain='domain' inbound='blockinbound' outbound='allowoutbound' store='lgpo' """ return salt.utils.win_lgpo_netsh.set_firewall_settings( profile=profile, inbound=inbound, outbound=outbound, store=store ) def set_logging_settings(profile, setting, value, store="local"): r""" Configure logging settings for the Windows firewall. .. versionadded:: 2018.3.4 .. versionadded:: 2019.2.0 Args: profile (str): The firewall profile to configure. Valid options are: - domain - public - private setting (str): The logging setting to configure. Valid options are: - allowedconnections - droppedconnections - filename - maxfilesize value (str): The value to apply to the setting. Valid values are dependent upon the setting being configured. Valid options are: allowedconnections: - enable - disable - notconfigured droppedconnections: - enable - disable - notconfigured filename: - Full path and name of the firewall log file - notconfigured maxfilesize: - 1 - 32767 - notconfigured .. note:: ``notconfigured`` can only be used when using the lgpo store store (str): The store to use. This is either the local firewall policy or the policy defined by local group policy. Valid options are: - lgpo - local Default is ``local`` Returns: bool: ``True`` if successful Raises: CommandExecutionError: If an error occurs ValueError: If the parameters are incorrect CLI Example: .. 
code-block:: bash # Log allowed connections and set that in local group policy salt * firewall.set_logging_settings domain allowedconnections enable lgpo # Don't log dropped connections salt * firewall.set_logging_settings profile=private setting=droppedconnections value=disable # Set the location of the log file salt * firewall.set_logging_settings domain filename C:\windows\logs\firewall.log # You can also use environment variables salt * firewall.set_logging_settings domain filename %systemroot%\system32\LogFiles\Firewall\pfirewall.log # Set the max file size of the log to 2048 Kb salt * firewall.set_logging_settings domain maxfilesize 2048 """ return salt.utils.win_lgpo_netsh.set_logging_settings( profile=profile, setting=setting, value=value, store=store ) def set_settings(profile, setting, value, store="local"): """ Configure firewall settings. .. versionadded:: 2018.3.4 .. versionadded:: 2019.2.0 Args: profile (str): The firewall profile to configure. Valid options are: - domain - public - private setting (str): The firewall setting to configure. Valid options are: - localfirewallrules - localconsecrules - inboundusernotification - remotemanagement - unicastresponsetomulticast value (str): The value to apply to the setting. Valid options are - enable - disable - notconfigured .. note:: ``notconfigured`` can only be used when using the lgpo store store (str): The store to use. This is either the local firewall policy or the policy defined by local group policy. Valid options are: - lgpo - local Default is ``local`` Returns: bool: ``True`` if successful Raises: CommandExecutionError: If an error occurs ValueError: If the parameters are incorrect CLI Example: .. 
code-block:: bash # Merge local rules with those distributed through group policy salt * firewall.set_settings domain localfirewallrules enable # Allow remote management of Windows Firewall salt * firewall.set_settings domain remotemanagement enable """ return salt.utils.win_lgpo_netsh.set_settings( profile=profile, setting=setting, value=value, store=store ) def set_state(profile, state, store="local"): """ Configure the firewall state. .. versionadded:: 2018.3.4 .. versionadded:: 2019.2.0 Args: profile (str): The firewall profile to configure. Valid options are: - domain - public - private state (str): The firewall state. Valid options are: - on - off - notconfigured .. note:: ``notconfigured`` can only be used when using the lgpo store store (str): The store to use. This is either the local firewall policy or the policy defined by local group policy. Valid options are: - lgpo - local Default is ``local`` Returns: bool: ``True`` if successful Raises: CommandExecutionError: If an error occurs ValueError: If the parameters are incorrect CLI Example: .. code-block:: bash # Turn the firewall off when the domain profile is active salt * firewall.set_state domain off # Turn the firewall on when the public profile is active and set that in # the local group policy salt * firewall.set_state public on lgpo """ return salt.utils.win_lgpo_netsh.set_state( profile=profile, state=state, store=store )
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/win_firewall.py
0.798344
0.204183
win_firewall.py
pypi
import logging import os import re import salt.utils.path from salt.exceptions import CommandExecutionError, SaltInvocationError # Set up logger log = logging.getLogger(__name__) # Define a function alias in order not to shadow built-in's __func_alias__ = {"list_": "list"} # Define the module's virtual name __virtualname__ = "raid" _VOL_REGEX_PATTERN_MATCH = r"^ARRAY\s+{0}\s+.*$" def __virtual__(): """ mdadm provides raid functions for Linux """ if __grains__["kernel"] != "Linux": return ( False, "The mdadm execution module cannot be loaded: only available on Linux.", ) if not salt.utils.path.which("mdadm"): return ( False, "The mdadm execution module cannot be loaded: the mdadm binary is not in" " the path.", ) return __virtualname__ def list_(): """ List the RAID devices. CLI Example: .. code-block:: bash salt '*' raid.list """ ret = {} for line in __salt__["cmd.run_stdout"]( ["mdadm", "--detail", "--scan"], python_shell=False ).splitlines(): if " " not in line: continue comps = line.split() device = comps[1] ret[device] = {"device": device} for comp in comps[2:]: key = comp.split("=")[0].lower() value = comp.split("=")[1] ret[device][key] = value return ret def detail(device="/dev/md0"): """ Show detail for a specified RAID device CLI Example: .. code-block:: bash salt '*' raid.detail '/dev/md0' """ ret = {} ret["members"] = {} # Lets make sure the device exists before running mdadm if not os.path.exists(device): msg = "Device {0} doesn't exist!" 
raise CommandExecutionError(msg.format(device)) cmd = ["mdadm", "--detail", device] for line in __salt__["cmd.run_stdout"](cmd, python_shell=False).splitlines(): if line.startswith(device): continue if " " not in line: continue if ":" not in line: if "/dev/" in line: comps = line.split() state = comps[4:-1] ret["members"][comps[0]] = { "device": comps[-1], "major": comps[1], "minor": comps[2], "number": comps[0], "raiddevice": comps[3], "state": " ".join(state), } continue comps = line.split(" : ") comps[0] = comps[0].lower() comps[0] = comps[0].strip() comps[0] = comps[0].replace(" ", "_") ret[comps[0]] = comps[1].strip() return ret def destroy(device): """ Destroy a RAID device. WARNING This will zero the superblock of all members of the RAID array.. CLI Example: .. code-block:: bash salt '*' raid.destroy /dev/md0 """ try: details = detail(device) except CommandExecutionError: return False stop_cmd = ["mdadm", "--stop", device] zero_cmd = ["mdadm", "--zero-superblock"] if __salt__["cmd.retcode"](stop_cmd, python_shell=False) == 0: for number in details["members"]: zero_cmd.append(details["members"][number]["device"]) __salt__["cmd.retcode"](zero_cmd, python_shell=False) # Remove entry from config file: if __grains__.get("os_family") == "Debian": cfg_file = "/etc/mdadm/mdadm.conf" else: cfg_file = "/etc/mdadm.conf" try: __salt__["file.replace"](cfg_file, "ARRAY {} .*".format(device), "") except SaltInvocationError: pass if __salt__["raid.list"]().get(device) is None: return True else: return False def stop(): """ Shut down all arrays that can be shut down (i.e. are not currently in use). CLI Example: .. code-block:: bash salt '*' raid.stop """ cmd = "mdadm --stop --scan" if __salt__["cmd.retcode"](cmd): return True return False def create(name, level, devices, metadata="default", test_mode=False, **kwargs): """ Create a RAID device. .. versionchanged:: 2014.7.0 .. warning:: Use with CAUTION, as this function can be very destructive if not used properly! 
CLI Examples: .. code-block:: bash salt '*' raid.create /dev/md0 level=1 chunk=256 devices="['/dev/xvdd', '/dev/xvde']" test_mode=True .. note:: Adding ``test_mode=True`` as an argument will print out the mdadm command that would have been run. name The name of the array to create. level The RAID level to use when creating the raid. devices A list of devices used to build the array. metadata Version of metadata to use when creating the array. kwargs Optional arguments to be passed to mdadm. returns test_mode=True: Prints out the full command. test_mode=False (Default): Executes command on remote the host(s) and Prints out the mdadm output. .. note:: It takes time to create a RAID array. You can check the progress in "resync_status:" field of the results from the following command: .. code-block:: bash salt '*' raid.detail /dev/md0 For more info, read the ``mdadm(8)`` manpage """ opts = [] raid_devices = len(devices) for key in kwargs: if not key.startswith("__"): opts.append("--{}".format(key)) if kwargs[key] is not True: opts.append(str(kwargs[key])) if key == "spare-devices": raid_devices -= int(kwargs[key]) cmd = ( ["mdadm", "-C", name, "-R", "-v", "-l", str(level)] + opts + ["-e", str(metadata), "-n", str(raid_devices)] + devices ) cmd_str = " ".join(cmd) if test_mode is True: return cmd_str elif test_mode is False: return __salt__["cmd.run"](cmd, python_shell=False) def save_config(): """ Save RAID configuration to config file. Same as: mdadm --detail --scan >> /etc/mdadm/mdadm.conf Fixes this issue with Ubuntu REF: http://askubuntu.com/questions/209702/why-is-my-raid-dev-md1-showing-up-as-dev-md126-is-mdadm-conf-being-ignored CLI Example: .. 
code-block:: bash salt '*' raid.save_config """ scan = __salt__["cmd.run"]("mdadm --detail --scan", python_shell=False).splitlines() # Issue with mdadm and ubuntu # REF: http://askubuntu.com/questions/209702/why-is-my-raid-dev-md1-showing-up-as-dev-md126-is-mdadm-conf-being-ignored if __grains__["os"] == "Ubuntu": buggy_ubuntu_tags = ["name", "metadata"] for i, elem in enumerate(scan): for bad_tag in buggy_ubuntu_tags: pattern = r"\s{}=\S+".format(re.escape(bad_tag)) pattern = re.compile(pattern, flags=re.I) scan[i] = re.sub(pattern, "", scan[i]) if __grains__.get("os_family") == "Debian": cfg_file = "/etc/mdadm/mdadm.conf" else: cfg_file = "/etc/mdadm.conf" try: vol_d = {line.split()[1]: line for line in scan} for vol in vol_d: pattern = _VOL_REGEX_PATTERN_MATCH.format(re.escape(vol)) __salt__["file.replace"]( cfg_file, pattern, vol_d[vol], append_if_not_found=True ) except SaltInvocationError: # File is missing __salt__["file.write"](cfg_file, args=scan) if __grains__.get("os_family") == "Debian": return __salt__["cmd.run"]("update-initramfs -u") elif __grains__.get("os_family") == "RedHat": return __salt__["cmd.run"]("dracut --force") def assemble(name, devices, test_mode=False, **kwargs): """ Assemble a RAID device. CLI Examples: .. code-block:: bash salt '*' raid.assemble /dev/md0 ['/dev/xvdd', '/dev/xvde'] .. note:: Adding ``test_mode=True`` as an argument will print out the mdadm command that would have been run. name The name of the array to assemble. devices The list of devices comprising the array to assemble. kwargs Optional arguments to be passed to mdadm. returns test_mode=True: Prints out the full command. test_mode=False (Default): Executes command on the host(s) and prints out the mdadm output. For more info, read the ``mdadm`` manpage. 
""" opts = [] for key in kwargs: if not key.startswith("__"): opts.append("--{}".format(key)) if kwargs[key] is not True: opts.append(kwargs[key]) # Devices may have been written with a blob: if isinstance(devices, str): devices = devices.split(",") cmd = ["mdadm", "-A", name, "-v"] + opts + devices if test_mode is True: return cmd elif test_mode is False: return __salt__["cmd.run"](cmd, python_shell=False) def examine(device, quiet=False): """ Show detail for a specified RAID component device device Device to examine, that is part of the RAID quiet If the device is not part of the RAID, do not show any error CLI Example: .. code-block:: bash salt '*' raid.examine '/dev/sda1' """ res = __salt__["cmd.run_stdout"]( "mdadm -Y -E {}".format(device), python_shell=False, ignore_retcode=quiet ) ret = {} for line in res.splitlines(): name, var = line.partition("=")[::2] ret[name] = var return ret def add(name, device): """ Add new device to RAID array. CLI Example: .. code-block:: bash salt '*' raid.add /dev/md0 /dev/sda1 """ cmd = "mdadm --manage {} --add {}".format(name, device) if __salt__["cmd.retcode"](cmd) == 0: return True return False
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/mdadm_raid.py
0.488283
0.178956
mdadm_raid.py
pypi
import logging import time import salt.proxy.panos import salt.utils.platform from salt.exceptions import CommandExecutionError log = logging.getLogger(__name__) __virtualname__ = "panos" def __virtual__(): """ Will load for the panos proxy minions. """ try: if salt.utils.platform.is_proxy() and __opts__["proxy"]["proxytype"] == "panos": return __virtualname__ except KeyError: pass return ( False, "The panos execution module can only be loaded for panos proxy minions.", ) def _get_job_results(query=None): """ Executes a query that requires a job for completion. This function will wait for the job to complete and return the results. """ if not query: raise CommandExecutionError("Query parameters cannot be empty.") response = __proxy__["panos.call"](query) # If the response contains a job, we will wait for the results if "result" in response and "job" in response["result"]: jid = response["result"]["job"] while get_job(jid)["result"]["job"]["status"] != "FIN": time.sleep(5) return get_job(jid) else: return response def add_config_lock(): """ Prevent other users from changing configuration until the lock is released. CLI Example: .. code-block:: bash salt '*' panos.add_config_lock """ query = { "type": "op", "cmd": "<request><config-lock><add></add></config-lock></request>", } return __proxy__["panos.call"](query) def check_antivirus(): """ Get anti-virus information from PaloAlto Networks server CLI Example: .. code-block:: bash salt '*' panos.check_antivirus """ query = { "type": "op", "cmd": "<request><anti-virus><upgrade><check></check></upgrade></anti-virus></request>", } return __proxy__["panos.call"](query) def check_software(): """ Get software information from PaloAlto Networks server. CLI Example: .. code-block:: bash salt '*' panos.check_software """ query = { "type": "op", "cmd": ( "<request><system><software><check></check></software></system></request>" ), } return __proxy__["panos.call"](query) def clear_commit_tasks(): """ Clear all commit tasks. 
CLI Example: .. code-block:: bash salt '*' panos.clear_commit_tasks """ query = { "type": "op", "cmd": "<request><clear-commit-tasks></clear-commit-tasks></request>", } return __proxy__["panos.call"](query) def commit(): """ Commits the candidate configuration to the running configuration. CLI Example: .. code-block:: bash salt '*' panos.commit """ query = {"type": "commit", "cmd": "<commit></commit>"} return _get_job_results(query) def deactivate_license(key_name=None): """ Deactivates an installed license. Required version 7.0.0 or greater. key_name(str): The file name of the license key installed. CLI Example: .. code-block:: bash salt '*' panos.deactivate_license key_name=License_File_Name.key """ _required_version = "7.0.0" if not __proxy__["panos.is_required_version"](_required_version): return ( False, "The panos device requires version {} or greater for this command.".format( _required_version ), ) if not key_name: return False, "You must specify a key_name." else: query = { "type": "op", "cmd": ( "<request><license><deactivate><key><features><member>{}</member></features>" "</key></deactivate></license></request>".format(key_name) ), } return __proxy__["panos.call"](query) def delete_license(key_name=None): """ Remove license keys on disk. key_name(str): The file name of the license key to be deleted. CLI Example: .. code-block:: bash salt '*' panos.delete_license key_name=License_File_Name.key """ if not key_name: return False, "You must specify a key_name." else: query = { "type": "op", "cmd": "<delete><license><key>{}</key></license></delete>".format(key_name), } return __proxy__["panos.call"](query) def download_antivirus(): """ Download the most recent anti-virus package. CLI Example: .. 
code-block:: bash salt '*' panos.download_antivirus """ query = { "type": "op", "cmd": ( "<request><anti-virus><upgrade><download>" "<latest></latest></download></upgrade></anti-virus></request>" ), } return _get_job_results(query) def download_software_file(filename=None, synch=False): """ Download software packages by filename. Args: filename(str): The filename of the PANOS file to download. synch (bool): If true then the file will synch to the peer unit. CLI Example: .. code-block:: bash salt '*' panos.download_software_file PanOS_5000-8.0.0 salt '*' panos.download_software_file PanOS_5000-8.0.0 True """ if not filename: raise CommandExecutionError("Filename option must not be none.") if not isinstance(synch, bool): raise CommandExecutionError("Synch option must be boolean..") if synch is True: query = { "type": "op", "cmd": ( "<request><system><software><download>" "<file>{}</file></download></software></system></request>".format( filename ) ), } else: query = { "type": "op", "cmd": ( "<request><system><software><download><sync-to-peer>yes</sync-to-peer>" "<file>{}</file></download></software></system></request>".format( filename ) ), } return _get_job_results(query) def download_software_version(version=None, synch=False): """ Download software packages by version number. Args: version(str): The version of the PANOS file to download. synch (bool): If true then the file will synch to the peer unit. CLI Example: .. 
code-block:: bash salt '*' panos.download_software_version 8.0.0 salt '*' panos.download_software_version 8.0.0 True """ if not version: raise CommandExecutionError("Version option must not be none.") if not isinstance(synch, bool): raise CommandExecutionError("Synch option must be boolean..") if synch is True: query = { "type": "op", "cmd": ( "<request><system><software><download>" "<version>{}</version></download></software></system></request>".format( version ) ), } else: query = { "type": "op", "cmd": ( "<request><system><software><download><sync-to-peer>yes</sync-to-peer>" "<version>{}</version></download></software></system></request>".format( version ) ), } return _get_job_results(query) def fetch_license(auth_code=None): """ Get new license(s) using from the Palo Alto Network Server. auth_code The license authorization code. CLI Example: .. code-block:: bash salt '*' panos.fetch_license salt '*' panos.fetch_license auth_code=foobar """ if not auth_code: query = { "type": "op", "cmd": "<request><license><fetch></fetch></license></request>", } else: query = { "type": "op", "cmd": ( "<request><license><fetch><auth-code>{}</auth-code></fetch></license>" "</request>".format(auth_code) ), } return __proxy__["panos.call"](query) def get_address(address=None, vsys="1"): """ Get the candidate configuration for the specified get_address object. This will not return address objects that are marked as pre-defined objects. address(str): The name of the address object. vsys(str): The string representation of the VSYS ID. CLI Example: .. 
code-block:: bash salt '*' panos.get_address myhost salt '*' panos.get_address myhost 3 """ query = { "type": "config", "action": "get", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/" "address/entry[@name='{}']".format(vsys, address) ), } return __proxy__["panos.call"](query) def get_address_group(addressgroup=None, vsys="1"): """ Get the candidate configuration for the specified address group. This will not return address groups that are marked as pre-defined objects. addressgroup(str): The name of the address group. vsys(str): The string representation of the VSYS ID. CLI Example: .. code-block:: bash salt '*' panos.get_address_group foobar salt '*' panos.get_address_group foobar 3 """ query = { "type": "config", "action": "get", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/" "address-group/entry[@name='{}']".format(vsys, addressgroup) ), } return __proxy__["panos.call"](query) def get_admins_active(): """ Show active administrators. CLI Example: .. code-block:: bash salt '*' panos.get_admins_active """ query = {"type": "op", "cmd": "<show><admins></admins></show>"} return __proxy__["panos.call"](query) def get_admins_all(): """ Show all administrators. CLI Example: .. code-block:: bash salt '*' panos.get_admins_all """ query = {"type": "op", "cmd": "<show><admins><all></all></admins></show>"} return __proxy__["panos.call"](query) def get_antivirus_info(): """ Show information about available anti-virus packages. CLI Example: .. code-block:: bash salt '*' panos.get_antivirus_info """ query = { "type": "op", "cmd": "<request><anti-virus><upgrade><info></info></upgrade></anti-virus></request>", } return __proxy__["panos.call"](query) def get_arp(): """ Show ARP information. CLI Example: .. 
code-block:: bash salt '*' panos.get_arp """ query = {"type": "op", "cmd": "<show><arp><entry name = 'all'/></arp></show>"} return __proxy__["panos.call"](query) def get_cli_idle_timeout(): """ Show timeout information for this administrative session. CLI Example: .. code-block:: bash salt '*' panos.get_cli_idle_timeout """ query = { "type": "op", "cmd": "<show><cli><idle-timeout></idle-timeout></cli></show>", } return __proxy__["panos.call"](query) def get_cli_permissions(): """ Show cli administrative permissions. CLI Example: .. code-block:: bash salt '*' panos.get_cli_permissions """ query = {"type": "op", "cmd": "<show><cli><permissions></permissions></cli></show>"} return __proxy__["panos.call"](query) def get_disk_usage(): """ Report filesystem disk space usage. CLI Example: .. code-block:: bash salt '*' panos.get_disk_usage """ query = { "type": "op", "cmd": "<show><system><disk-space></disk-space></system></show>", } return __proxy__["panos.call"](query) def get_dns_server_config(): """ Get the DNS server configuration from the candidate configuration. CLI Example: .. code-block:: bash salt '*' panos.get_dns_server_config """ query = { "type": "config", "action": "get", "xpath": "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/dns-setting/servers", } return __proxy__["panos.call"](query) def get_domain_config(): """ Get the domain name configuration from the candidate configuration. CLI Example: .. code-block:: bash salt '*' panos.get_domain_config """ query = { "type": "config", "action": "get", "xpath": "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/domain", } return __proxy__["panos.call"](query) def get_dos_blocks(): """ Show the DoS block-ip table. CLI Example: .. 
code-block:: bash salt '*' panos.get_dos_blocks """ query = { "type": "op", "cmd": "<show><dos-block-table><all></all></dos-block-table></show>", } return __proxy__["panos.call"](query) def get_fqdn_cache(): """ Print FQDNs used in rules and their IPs. CLI Example: .. code-block:: bash salt '*' panos.get_fqdn_cache """ query = { "type": "op", "cmd": "<request><system><fqdn><show></show></fqdn></system></request>", } return __proxy__["panos.call"](query) def get_ha_config(): """ Get the high availability configuration. CLI Example: .. code-block:: bash salt '*' panos.get_ha_config """ query = { "type": "config", "action": "get", "xpath": "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/high-availability", } return __proxy__["panos.call"](query) def get_ha_link(): """ Show high-availability link-monitoring state. CLI Example: .. code-block:: bash salt '*' panos.get_ha_link """ query = { "type": "op", "cmd": "<show><high-availability><link-monitoring></link-monitoring></high-availability></show>", } return __proxy__["panos.call"](query) def get_ha_path(): """ Show high-availability path-monitoring state. CLI Example: .. code-block:: bash salt '*' panos.get_ha_path """ query = { "type": "op", "cmd": "<show><high-availability><path-monitoring></path-monitoring></high-availability></show>", } return __proxy__["panos.call"](query) def get_ha_state(): """ Show high-availability state information. CLI Example: .. code-block:: bash salt '*' panos.get_ha_state """ query = { "type": "op", "cmd": "<show><high-availability><state></state></high-availability></show>", } return __proxy__["panos.call"](query) def get_ha_transitions(): """ Show high-availability transition statistic information. CLI Example: .. 
code-block:: bash salt '*' panos.get_ha_transitions """ query = { "type": "op", "cmd": "<show><high-availability><transitions></transitions></high-availability></show>", } return __proxy__["panos.call"](query) def get_hostname(): """ Get the hostname of the device. CLI Example: .. code-block:: bash salt '*' panos.get_hostname """ query = { "type": "config", "action": "get", "xpath": "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/hostname", } return __proxy__["panos.call"](query) def get_interface_counters(name="all"): """ Get the counter statistics for interfaces. Args: name (str): The name of the interface to view. By default, all interface statistics are viewed. CLI Example: .. code-block:: bash salt '*' panos.get_interface_counters salt '*' panos.get_interface_counters ethernet1/1 """ query = { "type": "op", "cmd": "<show><counter><interface>{}</interface></counter></show>".format(name), } return __proxy__["panos.call"](query) def get_interfaces(name="all"): """ Show interface information. Args: name (str): The name of the interface to view. By default, all interface statistics are viewed. CLI Example: .. code-block:: bash salt '*' panos.get_interfaces salt '*' panos.get_interfaces ethernet1/1 """ query = { "type": "op", "cmd": "<show><interface>{}</interface></show>".format(name), } return __proxy__["panos.call"](query) def get_job(jid=None): """ List all a single job by ID. jid The ID of the job to retrieve. CLI Example: .. code-block:: bash salt '*' panos.get_job jid=15 """ if not jid: raise CommandExecutionError("ID option must not be none.") query = {"type": "op", "cmd": "<show><jobs><id>{}</id></jobs></show>".format(jid)} return __proxy__["panos.call"](query) def get_jobs(state="all"): """ List all jobs on the device. state The state of the jobs to display. Valid options are all, pending, or processed. Pending jobs are jobs that are currently in a running or waiting state. Processed jobs are jobs that have completed execution. 
CLI Example: .. code-block:: bash salt '*' panos.get_jobs salt '*' panos.get_jobs state=pending """ if state.lower() == "all": query = {"type": "op", "cmd": "<show><jobs><all></all></jobs></show>"} elif state.lower() == "pending": query = {"type": "op", "cmd": "<show><jobs><pending></pending></jobs></show>"} elif state.lower() == "processed": query = { "type": "op", "cmd": "<show><jobs><processed></processed></jobs></show>", } else: raise CommandExecutionError( "The state parameter must be all, pending, or processed." ) return __proxy__["panos.call"](query) def get_lacp(): """ Show LACP state. CLI Example: .. code-block:: bash salt '*' panos.get_lacp """ query = { "type": "op", "cmd": "<show><lacp><aggregate-ethernet>all</aggregate-ethernet></lacp></show>", } return __proxy__["panos.call"](query) def get_license_info(): """ Show information about owned license(s). CLI Example: .. code-block:: bash salt '*' panos.get_license_info """ query = {"type": "op", "cmd": "<request><license><info></info></license></request>"} return __proxy__["panos.call"](query) def get_license_tokens(): """ Show license token files for manual license deactivation. CLI Example: .. code-block:: bash salt '*' panos.get_license_tokens """ query = { "type": "op", "cmd": "<show><license-token-files></license-token-files></show>", } return __proxy__["panos.call"](query) def get_lldp_config(): """ Show lldp config for interfaces. CLI Example: .. code-block:: bash salt '*' panos.get_lldp_config """ query = {"type": "op", "cmd": "<show><lldp><config>all</config></lldp></show>"} return __proxy__["panos.call"](query) def get_lldp_counters(): """ Show lldp counters for interfaces. CLI Example: .. code-block:: bash salt '*' panos.get_lldp_counters """ query = {"type": "op", "cmd": "<show><lldp><counters>all</counters></lldp></show>"} return __proxy__["panos.call"](query) def get_lldp_local(): """ Show lldp local info for interfaces. CLI Example: .. 
code-block:: bash salt '*' panos.get_lldp_local """ query = {"type": "op", "cmd": "<show><lldp><local>all</local></lldp></show>"} return __proxy__["panos.call"](query) def get_lldp_neighbors(): """ Show lldp neighbors info for interfaces. CLI Example: .. code-block:: bash salt '*' panos.get_lldp_neighbors """ query = { "type": "op", "cmd": "<show><lldp><neighbors>all</neighbors></lldp></show>", } return __proxy__["panos.call"](query) def get_local_admins(): """ Show all local administrator accounts. CLI Example: .. code-block:: bash salt '*' panos.get_local_admins """ admin_list = get_users_config() response = [] if "users" not in admin_list["result"]: return response if isinstance(admin_list["result"]["users"]["entry"], list): for entry in admin_list["result"]["users"]["entry"]: response.append(entry["name"]) else: response.append(admin_list["result"]["users"]["entry"]["name"]) return response def get_logdb_quota(): """ Report the logdb quotas. CLI Example: .. code-block:: bash salt '*' panos.get_logdb_quota """ query = { "type": "op", "cmd": "<show><system><logdb-quota></logdb-quota></system></show>", } return __proxy__["panos.call"](query) def get_master_key(): """ Get the master key properties. CLI Example: .. code-block:: bash salt '*' panos.get_master_key """ query = { "type": "op", "cmd": "<show><system><masterkey-properties></masterkey-properties></system></show>", } return __proxy__["panos.call"](query) def get_ntp_config(): """ Get the NTP configuration from the candidate configuration. CLI Example: .. code-block:: bash salt '*' panos.get_ntp_config """ query = { "type": "config", "action": "get", "xpath": "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/ntp-servers", } return __proxy__["panos.call"](query) def get_ntp_servers(): """ Get list of configured NTP servers. CLI Example: .. 
code-block:: bash salt '*' panos.get_ntp_servers """ query = {"type": "op", "cmd": "<show><ntp></ntp></show>"} return __proxy__["panos.call"](query) def get_operational_mode(): """ Show device operational mode setting. CLI Example: .. code-block:: bash salt '*' panos.get_operational_mode """ query = {"type": "op", "cmd": "<show><operational-mode></operational-mode></show>"} return __proxy__["panos.call"](query) def get_panorama_status(): """ Show panorama connection status. CLI Example: .. code-block:: bash salt '*' panos.get_panorama_status """ query = {"type": "op", "cmd": "<show><panorama-status></panorama-status></show>"} return __proxy__["panos.call"](query) def get_permitted_ips(): """ Get the IP addresses that are permitted to establish management connections to the device. CLI Example: .. code-block:: bash salt '*' panos.get_permitted_ips """ query = { "type": "config", "action": "get", "xpath": "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/permitted-ip", } return __proxy__["panos.call"](query) def get_platform(): """ Get the platform model information and limitations. CLI Example: .. code-block:: bash salt '*' panos.get_platform """ query = { "type": "config", "action": "get", "xpath": "/config/devices/entry[@name='localhost.localdomain']/platform", } return __proxy__["panos.call"](query) def get_predefined_application(application=None): """ Get the configuration for the specified pre-defined application object. This will only return pre-defined application objects. application(str): The name of the pre-defined application object. CLI Example: .. code-block:: bash salt '*' panos.get_predefined_application saltstack """ query = { "type": "config", "action": "get", "xpath": "/config/predefined/application/entry[@name='{}']".format(application), } return __proxy__["panos.call"](query) def get_security_rule(rulename=None, vsys="1"): """ Get the candidate configuration for the specified security rule. 
rulename(str): The name of the security rule. vsys(str): The string representation of the VSYS ID. CLI Example: .. code-block:: bash salt '*' panos.get_security_rule rule01 salt '*' panos.get_security_rule rule01 3 """ query = { "type": "config", "action": "get", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/" "rulebase/security/rules/entry[@name='{}']".format(vsys, rulename) ), } return __proxy__["panos.call"](query) def get_service(service=None, vsys="1"): """ Get the candidate configuration for the specified service object. This will not return services that are marked as pre-defined objects. service(str): The name of the service object. vsys(str): The string representation of the VSYS ID. CLI Example: .. code-block:: bash salt '*' panos.get_service tcp-443 salt '*' panos.get_service tcp-443 3 """ query = { "type": "config", "action": "get", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/" "service/entry[@name='{}']".format(vsys, service) ), } return __proxy__["panos.call"](query) def get_service_group(servicegroup=None, vsys="1"): """ Get the candidate configuration for the specified service group. This will not return service groups that are marked as pre-defined objects. servicegroup(str): The name of the service group. vsys(str): The string representation of the VSYS ID. CLI Example: .. code-block:: bash salt '*' panos.get_service_group foobar salt '*' panos.get_service_group foobar 3 """ query = { "type": "config", "action": "get", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/" "service-group/entry[@name='{}']".format(vsys, servicegroup) ), } return __proxy__["panos.call"](query) def get_session_info(): """ Show device session statistics. CLI Example: .. 
code-block:: bash salt '*' panos.get_session_info """ query = {"type": "op", "cmd": "<show><session><info></info></session></show>"} return __proxy__["panos.call"](query) def get_snmp_config(): """ Get the SNMP configuration from the device. CLI Example: .. code-block:: bash salt '*' panos.get_snmp_config """ query = { "type": "config", "action": "get", "xpath": "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/snmp-setting", } return __proxy__["panos.call"](query) def get_software_info(): """ Show information about available software packages. CLI Example: .. code-block:: bash salt '*' panos.get_software_info """ query = { "type": "op", "cmd": "<request><system><software><info></info></software></system></request>", } return __proxy__["panos.call"](query) def get_system_date_time(): """ Get the system date/time. CLI Example: .. code-block:: bash salt '*' panos.get_system_date_time """ query = {"type": "op", "cmd": "<show><clock></clock></show>"} return __proxy__["panos.call"](query) def get_system_files(): """ List important files in the system. CLI Example: .. code-block:: bash salt '*' panos.get_system_files """ query = {"type": "op", "cmd": "<show><system><files></files></system></show>"} return __proxy__["panos.call"](query) def get_system_info(): """ Get the system information. CLI Example: .. code-block:: bash salt '*' panos.get_system_info """ query = {"type": "op", "cmd": "<show><system><info></info></system></show>"} return __proxy__["panos.call"](query) def get_system_services(): """ Show system services. CLI Example: .. code-block:: bash salt '*' panos.get_system_services """ query = {"type": "op", "cmd": "<show><system><services></services></system></show>"} return __proxy__["panos.call"](query) def get_system_state(mask=None): """ Show the system state variables. mask Filters by a subtree or a wildcard. CLI Example: .. 
code-block:: bash salt '*' panos.get_system_state salt '*' panos.get_system_state mask=cfg.ha.config.enabled salt '*' panos.get_system_state mask=cfg.ha.* """ if mask: query = { "type": "op", "cmd": ( "<show><system><state><filter>{}</filter></state></system></show>".format( mask ) ), } else: query = {"type": "op", "cmd": "<show><system><state></state></system></show>"} return __proxy__["panos.call"](query) def get_uncommitted_changes(): """ Retrieve a list of all uncommitted changes on the device. Requires PANOS version 8.0.0 or greater. CLI Example: .. code-block:: bash salt '*' panos.get_uncommitted_changes """ _required_version = "8.0.0" if not __proxy__["panos.is_required_version"](_required_version): return ( False, "The panos device requires version {} or greater for this command.".format( _required_version ), ) query = { "type": "op", "cmd": "<show><config><list><changes></changes></list></config></show>", } return __proxy__["panos.call"](query) def get_users_config(): """ Get the local administrative user account configuration. CLI Example: .. code-block:: bash salt '*' panos.get_users_config """ query = {"type": "config", "action": "get", "xpath": "/config/mgt-config/users"} return __proxy__["panos.call"](query) def get_vlans(): """ Show all VLAN information. CLI Example: .. code-block:: bash salt '*' panos.get_vlans """ query = {"type": "op", "cmd": "<show><vlan>all</vlan></show>"} return __proxy__["panos.call"](query) def get_xpath(xpath=""): """ Retrieve a specified xpath from the candidate configuration. xpath(str): The specified xpath in the candidate configuration. CLI Example: .. code-block:: bash salt '*' panos.get_xpath /config/shared/service """ query = {"type": "config", "action": "get", "xpath": xpath} return __proxy__["panos.call"](query) def get_zone(zone="", vsys="1"): """ Get the candidate configuration for the specified zone. zone(str): The name of the zone. vsys(str): The string representation of the VSYS ID. CLI Example: .. 
code-block:: bash salt '*' panos.get_zone trust salt '*' panos.get_zone trust 2 """ query = { "type": "config", "action": "get", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/" "zone/entry[@name='{}']".format(vsys, zone) ), } return __proxy__["panos.call"](query) def get_zones(vsys="1"): """ Get all the zones in the candidate configuration. vsys(str): The string representation of the VSYS ID. CLI Example: .. code-block:: bash salt '*' panos.get_zones salt '*' panos.get_zones 2 """ query = { "type": "config", "action": "get", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/" "zone".format(vsys) ), } return __proxy__["panos.call"](query) def install_antivirus( version=None, latest=False, synch=False, skip_commit=False, ): """ Install anti-virus packages. Args: version(str): The version of the PANOS file to install. latest(bool): If true, the latest anti-virus file will be installed. The specified version option will be ignored. synch(bool): If true, the anti-virus will synch to the peer unit. skip_commit(bool): If true, the install will skip committing to the device. CLI Example: .. code-block:: bash salt '*' panos.install_antivirus 8.0.0 """ if not version and latest is False: raise CommandExecutionError("Version option must not be none.") if synch is True: s = "yes" else: s = "no" if skip_commit is True: c = "yes" else: c = "no" if latest is True: query = { "type": "op", "cmd": ( "<request><anti-virus><upgrade><install>" "<commit>{}</commit><sync-to-peer>{}</sync-to-peer>" "<version>latest</version></install></upgrade></anti-virus></request>".format( c, s ) ), } else: query = { "type": "op", "cmd": ( "<request><anti-virus><upgrade><install>" "<commit>{}</commit><sync-to-peer>{}</sync-to-peer>" "<version>{}</version></install></upgrade></anti-virus></request>".format( c, s, version ) ), } return _get_job_results(query) def install_license(): """ Install the license key(s). 
CLI Example: .. code-block:: bash salt '*' panos.install_license """ query = { "type": "op", "cmd": "<request><license><install></install></license></request>", } return __proxy__["panos.call"](query) def install_software(version=None): """ Upgrade to a software package by version. Args: version(str): The version of the PANOS file to install. CLI Example: .. code-block:: bash salt '*' panos.install_license 8.0.0 """ if not version: raise CommandExecutionError("Version option must not be none.") query = { "type": "op", "cmd": ( "<request><system><software><install>" "<version>{}</version></install></software></system></request>".format( version ) ), } return _get_job_results(query) def reboot(): """ Reboot a running system. CLI Example: .. code-block:: bash salt '*' panos.reboot """ query = { "type": "op", "cmd": "<request><restart><system></system></restart></request>", } return __proxy__["panos.call"](query) def refresh_fqdn_cache(force=False): """ Force refreshes all FQDNs used in rules. force Forces all fqdn refresh CLI Example: .. code-block:: bash salt '*' panos.refresh_fqdn_cache salt '*' panos.refresh_fqdn_cache force=True """ if not isinstance(force, bool): raise CommandExecutionError("Force option must be boolean.") if force: query = { "type": "op", "cmd": "<request><system><fqdn><refresh><force>yes</force></refresh></fqdn></system></request>", } else: query = { "type": "op", "cmd": ( "<request><system><fqdn><refresh></refresh></fqdn></system></request>" ), } return __proxy__["panos.call"](query) def remove_config_lock(): """ Release config lock previously held. CLI Example: .. code-block:: bash salt '*' panos.remove_config_lock """ query = { "type": "op", "cmd": "<request><config-lock><remove></remove></config-lock></request>", } return __proxy__["panos.call"](query) def resolve_address(address=None, vsys=None): """ Resolve address to ip address. Required version 7.0.0 or greater. address Address name you want to resolve. vsys The vsys name. 
CLI Example: .. code-block:: bash salt '*' panos.resolve_address foo.bar.com salt '*' panos.resolve_address foo.bar.com vsys=2 """ _required_version = "7.0.0" if not __proxy__["panos.is_required_version"](_required_version): return ( False, "The panos device requires version {} or greater for this command.".format( _required_version ), ) if not address: raise CommandExecutionError("FQDN to resolve must be provided as address.") if not vsys: query = { "type": "op", "cmd": "<request><resolve><address>{}</address></resolve></request>".format( address ), } else: query = { "type": "op", "cmd": ( "<request><resolve><vsys>{}</vsys><address>{}</address></resolve>" "</request>".format(vsys, address) ), } return __proxy__["panos.call"](query) def save_device_config(filename=None): """ Save device configuration to a named file. filename The filename to save the configuration to. CLI Example: .. code-block:: bash salt '*' panos.save_device_config foo.xml """ if not filename: raise CommandExecutionError("Filename must not be empty.") query = { "type": "op", "cmd": "<save><config><to>{}</to></config></save>".format(filename), } return __proxy__["panos.call"](query) def save_device_state(): """ Save files needed to restore device to local disk. CLI Example: .. code-block:: bash salt '*' panos.save_device_state """ query = {"type": "op", "cmd": "<save><device-state></device-state></save>"} return __proxy__["panos.call"](query) def set_authentication_profile(profile=None, deploy=False): """ Set the authentication profile of the Palo Alto proxy minion. A commit will be required before this is processed. CLI Example: Args: profile (str): The name of the authentication profile to set. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. 
code-block:: bash salt '*' panos.set_authentication_profile foo salt '*' panos.set_authentication_profile foo deploy=True """ if not profile: raise CommandExecutionError("Profile name option must not be none.") ret = {} query = { "type": "config", "action": "set", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/" "authentication-profile" ), "element": "<authentication-profile>{}</authentication-profile>".format( profile ), } ret.update(__proxy__["panos.call"](query)) if deploy is True: ret.update(commit()) return ret def set_hostname(hostname=None, deploy=False): """ Set the hostname of the Palo Alto proxy minion. A commit will be required before this is processed. CLI Example: Args: hostname (str): The hostname to set deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_hostname newhostname salt '*' panos.set_hostname newhostname deploy=True """ if not hostname: raise CommandExecutionError("Hostname option must not be none.") ret = {} query = { "type": "config", "action": "set", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system" ), "element": "<hostname>{}</hostname>".format(hostname), } ret.update(__proxy__["panos.call"](query)) if deploy is True: ret.update(commit()) return ret def set_management_icmp(enabled=True, deploy=False): """ Enables or disables the ICMP management service on the device. CLI Example: Args: enabled (bool): If true the service will be enabled. If false the service will be disabled. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_management_icmp salt '*' panos.set_management_icmp enabled=False deploy=True """ if enabled is True: value = "no" elif enabled is False: value = "yes" else: raise CommandExecutionError( "Invalid option provided for service enabled option." 
) ret = {} query = { "type": "config", "action": "set", "xpath": "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/service", "element": "<disable-icmp>{}</disable-icmp>".format(value), } ret.update(__proxy__["panos.call"](query)) if deploy is True: ret.update(commit()) return ret def set_management_http(enabled=True, deploy=False): """ Enables or disables the HTTP management service on the device. CLI Example: Args: enabled (bool): If true the service will be enabled. If false the service will be disabled. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_management_http salt '*' panos.set_management_http enabled=False deploy=True """ if enabled is True: value = "no" elif enabled is False: value = "yes" else: raise CommandExecutionError( "Invalid option provided for service enabled option." ) ret = {} query = { "type": "config", "action": "set", "xpath": "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/service", "element": "<disable-http>{}</disable-http>".format(value), } ret.update(__proxy__["panos.call"](query)) if deploy is True: ret.update(commit()) return ret def set_management_https(enabled=True, deploy=False): """ Enables or disables the HTTPS management service on the device. CLI Example: Args: enabled (bool): If true the service will be enabled. If false the service will be disabled. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_management_https salt '*' panos.set_management_https enabled=False deploy=True """ if enabled is True: value = "no" elif enabled is False: value = "yes" else: raise CommandExecutionError( "Invalid option provided for service enabled option." 
) ret = {} query = { "type": "config", "action": "set", "xpath": "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/service", "element": "<disable-https>{}</disable-https>".format(value), } ret.update(__proxy__["panos.call"](query)) if deploy is True: ret.update(commit()) return ret def set_management_ocsp(enabled=True, deploy=False): """ Enables or disables the HTTP OCSP management service on the device. CLI Example: Args: enabled (bool): If true the service will be enabled. If false the service will be disabled. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_management_ocsp salt '*' panos.set_management_ocsp enabled=False deploy=True """ if enabled is True: value = "no" elif enabled is False: value = "yes" else: raise CommandExecutionError( "Invalid option provided for service enabled option." ) ret = {} query = { "type": "config", "action": "set", "xpath": "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/service", "element": "<disable-http-ocsp>{}</disable-http-ocsp>".format(value), } ret.update(__proxy__["panos.call"](query)) if deploy is True: ret.update(commit()) return ret def set_management_snmp(enabled=True, deploy=False): """ Enables or disables the SNMP management service on the device. CLI Example: Args: enabled (bool): If true the service will be enabled. If false the service will be disabled. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_management_snmp salt '*' panos.set_management_snmp enabled=False deploy=True """ if enabled is True: value = "no" elif enabled is False: value = "yes" else: raise CommandExecutionError( "Invalid option provided for service enabled option." 
) ret = {} query = { "type": "config", "action": "set", "xpath": "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/service", "element": "<disable-snmp>{}</disable-snmp>".format(value), } ret.update(__proxy__["panos.call"](query)) if deploy is True: ret.update(commit()) return ret def set_management_ssh(enabled=True, deploy=False): """ Enables or disables the SSH management service on the device. CLI Example: Args: enabled (bool): If true the service will be enabled. If false the service will be disabled. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_management_ssh salt '*' panos.set_management_ssh enabled=False deploy=True """ if enabled is True: value = "no" elif enabled is False: value = "yes" else: raise CommandExecutionError( "Invalid option provided for service enabled option." ) ret = {} query = { "type": "config", "action": "set", "xpath": "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/service", "element": "<disable-ssh>{}</disable-ssh>".format(value), } ret.update(__proxy__["panos.call"](query)) if deploy is True: ret.update(commit()) return ret def set_management_telnet(enabled=True, deploy=False): """ Enables or disables the Telnet management service on the device. CLI Example: Args: enabled (bool): If true the service will be enabled. If false the service will be disabled. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_management_telnet salt '*' panos.set_management_telnet enabled=False deploy=True """ if enabled is True: value = "no" elif enabled is False: value = "yes" else: raise CommandExecutionError( "Invalid option provided for service enabled option." 
) ret = {} query = { "type": "config", "action": "set", "xpath": "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/service", "element": "<disable-telnet>{}</disable-telnet>".format(value), } ret.update(__proxy__["panos.call"](query)) if deploy is True: ret.update(commit()) return ret def set_ntp_authentication( target=None, authentication_type=None, key_id=None, authentication_key=None, algorithm=None, deploy=False, ): """ Set the NTP authentication of the Palo Alto proxy minion. A commit will be required before this is processed. CLI Example: Args: target(str): Determines the target of the authentication. Valid options are primary, secondary, or both. authentication_type(str): The authentication type to be used. Valid options are symmetric, autokey, and none. key_id(int): The NTP authentication key ID. authentication_key(str): The authentication key. algorithm(str): The algorithm type to be used for a symmetric key. Valid options are md5 and sha1. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' ntp.set_authentication target=both authentication_type=autokey salt '*' ntp.set_authentication target=primary authentication_type=none salt '*' ntp.set_authentication target=both authentication_type=symmetric key_id=15 authentication_key=mykey algorithm=md5 salt '*' ntp.set_authentication target=both authentication_type=symmetric key_id=15 authentication_key=mykey algorithm=md5 deploy=True """ ret = {} if target not in ["primary", "secondary", "both"]: raise salt.exceptions.CommandExecutionError( "Target option must be primary, secondary, or both." ) if authentication_type not in ["symmetric", "autokey", "none"]: raise salt.exceptions.CommandExecutionError( "Type option must be symmetric, autokey, or both." 
) if authentication_type == "symmetric" and not authentication_key: raise salt.exceptions.CommandExecutionError( "When using symmetric authentication, authentication_key must be provided." ) if authentication_type == "symmetric" and not key_id: raise salt.exceptions.CommandExecutionError( "When using symmetric authentication, key_id must be provided." ) if authentication_type == "symmetric" and algorithm not in ["md5", "sha1"]: raise salt.exceptions.CommandExecutionError( "When using symmetric authentication, algorithm must be md5 or sha1." ) if authentication_type == "symmetric": if target == "primary" or target == "both": query = { "type": "config", "action": "set", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/ntp-servers/" "primary-ntp-server/authentication-type" ), "element": ( "<symmetric-key><algorithm><{0}><authentication-key>{1}</authentication-key></{0}>" "</algorithm><key-id>{2}</key-id></symmetric-key>".format( algorithm, authentication_key, key_id ) ), } ret.update({"primary_server": __proxy__["panos.call"](query)}) if target == "secondary" or target == "both": query = { "type": "config", "action": "set", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/ntp-servers/" "secondary-ntp-server/authentication-type" ), "element": ( "<symmetric-key><algorithm><{0}><authentication-key>{1}</authentication-key></{0}>" "</algorithm><key-id>{2}</key-id></symmetric-key>".format( algorithm, authentication_key, key_id ) ), } ret.update({"secondary_server": __proxy__["panos.call"](query)}) elif authentication_type == "autokey": if target == "primary" or target == "both": query = { "type": "config", "action": "set", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/ntp-servers/" "primary-ntp-server/authentication-type" ), "element": "<autokey/>", } ret.update({"primary_server": __proxy__["panos.call"](query)}) if target == "secondary" or target == "both": query 
= { "type": "config", "action": "set", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/ntp-servers/" "secondary-ntp-server/authentication-type" ), "element": "<autokey/>", } ret.update({"secondary_server": __proxy__["panos.call"](query)}) elif authentication_type == "none": if target == "primary" or target == "both": query = { "type": "config", "action": "set", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/ntp-servers/" "primary-ntp-server/authentication-type" ), "element": "<none/>", } ret.update({"primary_server": __proxy__["panos.call"](query)}) if target == "secondary" or target == "both": query = { "type": "config", "action": "set", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/ntp-servers/" "secondary-ntp-server/authentication-type" ), "element": "<none/>", } ret.update({"secondary_server": __proxy__["panos.call"](query)}) if deploy is True: ret.update(commit()) return ret def set_ntp_servers(primary_server=None, secondary_server=None, deploy=False): """ Set the NTP servers of the Palo Alto proxy minion. A commit will be required before this is processed. CLI Example: Args: primary_server(str): The primary NTP server IP address or FQDN. secondary_server(str): The secondary NTP server IP address or FQDN. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. 
code-block:: bash salt '*' ntp.set_servers 0.pool.ntp.org 1.pool.ntp.org salt '*' ntp.set_servers primary_server=0.pool.ntp.org secondary_server=1.pool.ntp.org salt '*' ntp.ser_servers 0.pool.ntp.org 1.pool.ntp.org deploy=True """ ret = {} if primary_server: query = { "type": "config", "action": "set", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/ntp-servers/" "primary-ntp-server" ), "element": "<ntp-server-address>{}</ntp-server-address>".format( primary_server ), } ret.update({"primary_server": __proxy__["panos.call"](query)}) if secondary_server: query = { "type": "config", "action": "set", "xpath": ( "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/ntp-servers/" "secondary-ntp-server" ), "element": "<ntp-server-address>{}</ntp-server-address>".format( secondary_server ), } ret.update({"secondary_server": __proxy__["panos.call"](query)}) if deploy is True: ret.update(commit()) return ret def set_permitted_ip(address=None, deploy=False): """ Add an IPv4 address or network to the permitted IP list. CLI Example: Args: address (str): The IPv4 address or network to allow access to add to the Palo Alto device. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_permitted_ip 10.0.0.1 salt '*' panos.set_permitted_ip 10.0.0.0/24 salt '*' panos.set_permitted_ip 10.0.0.1 deploy=True """ if not address: raise CommandExecutionError("Address option must not be empty.") ret = {} query = { "type": "config", "action": "set", "xpath": "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/permitted-ip", "element": "<entry name='{}'></entry>".format(address), } ret.update(__proxy__["panos.call"](query)) if deploy is True: ret.update(commit()) return ret def set_timezone(tz=None, deploy=False): """ Set the timezone of the Palo Alto proxy minion. A commit will be required before this is processed. 
CLI Example: Args: tz (str): The name of the timezone to set. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_timezone UTC salt '*' panos.set_timezone UTC deploy=True """ if not tz: raise CommandExecutionError("Timezone name option must not be none.") ret = {} query = { "type": "config", "action": "set", "xpath": "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/timezone", "element": "<timezone>{}</timezone>".format(tz), } ret.update(__proxy__["panos.call"](query)) if deploy is True: ret.update(commit()) return ret def shutdown(): """ Shutdown a running system. CLI Example: .. code-block:: bash salt '*' panos.shutdown """ query = { "type": "op", "cmd": "<request><shutdown><system></system></shutdown></request>", } return __proxy__["panos.call"](query) def test_fib_route(ip=None, vr="vr1"): """ Perform a route lookup within active route table (fib). ip (str): The destination IP address to test. vr (str): The name of the virtual router to test. CLI Example: .. code-block:: bash salt '*' panos.test_fib_route 4.2.2.2 salt '*' panos.test_fib_route 4.2.2.2 my-vr """ xpath = "<test><routing><fib-lookup>" if ip: xpath += "<ip>{}</ip>".format(ip) if vr: xpath += "<virtual-router>{}</virtual-router>".format(vr) xpath += "</fib-lookup></routing></test>" query = {"type": "op", "cmd": xpath} return __proxy__["panos.call"](query) def test_security_policy( sourcezone=None, destinationzone=None, source=None, destination=None, protocol=None, port=None, application=None, category=None, vsys="1", allrules=False, ): """ Checks which security policy as connection will match on the device. sourcezone (str): The source zone matched against the connection. destinationzone (str): The destination zone matched against the connection. source (str): The source address. This must be a single IP address. destination (str): The destination address. 
This must be a single IP address. protocol (int): The protocol number for the connection. This is the numerical representation of the protocol. port (int): The port number for the connection. application (str): The application that should be matched. category (str): The category that should be matched. vsys (int): The numerical representation of the VSYS ID. allrules (bool): Show all potential match rules until first allow rule. CLI Example: .. code-block:: bash salt '*' panos.test_security_policy sourcezone=trust destinationzone=untrust protocol=6 port=22 salt '*' panos.test_security_policy sourcezone=trust destinationzone=untrust protocol=6 port=22 vsys=2 """ xpath = "<test><security-policy-match>" if sourcezone: xpath += "<from>{}</from>".format(sourcezone) if destinationzone: xpath += "<to>{}</to>".format(destinationzone) if source: xpath += "<source>{}</source>".format(source) if destination: xpath += "<destination>{}</destination>".format(destination) if protocol: xpath += "<protocol>{}</protocol>".format(protocol) if port: xpath += "<destination-port>{}</destination-port>".format(port) if application: xpath += "<application>{}</application>".format(application) if category: xpath += "<category>{}</category>".format(category) if allrules: xpath += "<show-all>yes</show-all>" xpath += "</security-policy-match></test>" query = {"type": "op", "vsys": "vsys{}".format(vsys), "cmd": xpath} return __proxy__["panos.call"](query) def unlock_admin(username=None): """ Unlocks a locked administrator account. username Username of the administrator. CLI Example: .. code-block:: bash salt '*' panos.unlock_admin username=bob """ if not username: raise CommandExecutionError("Username option must not be none.") query = { "type": "op", "cmd": ( "<set><management-server><unlock><admin>{}</admin></unlock></management-server>" "</set>".format(username) ), } return __proxy__["panos.call"](query)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/panos.py
0.692538
0.199776
panos.py
pypi
import copy import salt.utils.data import salt.utils.functools import salt.utils.pkg import salt.utils.versions from salt.exceptions import CommandExecutionError, MinionError # Define the module's virtual name __virtualname__ = "pkgutil" def __virtual__(): """ Set the virtual pkg module if the os is Solaris """ if __grains__["os_family"] == "Solaris": return __virtualname__ return ( False, "The pkgutil execution module cannot be loaded: " "only available on Solaris systems.", ) def refresh_db(): """ Updates the pkgutil repo database (pkgutil -U) CLI Example: .. code-block:: bash salt '*' pkgutil.refresh_db """ # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) return __salt__["cmd.retcode"]("/opt/csw/bin/pkgutil -U") == 0 def upgrade_available(name): """ Check if there is an upgrade available for a certain package CLI Example: .. code-block:: bash salt '*' pkgutil.upgrade_available CSWpython """ version_num = None cmd = "/opt/csw/bin/pkgutil -c --parse --single {}".format(name) out = __salt__["cmd.run_stdout"](cmd) if out: version_num = out.split()[2].strip() if version_num: if version_num == "SAME": return "" else: return version_num return "" def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 """ List all available package upgrades on this system CLI Example: .. code-block:: bash salt '*' pkgutil.list_upgrades """ if salt.utils.data.is_true(refresh): refresh_db() upgrades = {} lines = __salt__["cmd.run_stdout"]("/opt/csw/bin/pkgutil -A --parse").splitlines() for line in lines: comps = line.split("\t") if comps[2] == "SAME": continue if comps[2] == "not installed": continue upgrades[comps[0]] = comps[1] return upgrades def upgrade(refresh=True): """ Upgrade all of the packages to the latest available version. Returns a dict containing the changes:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. 
code-block:: bash salt '*' pkgutil.upgrade """ if salt.utils.data.is_true(refresh): refresh_db() old = list_pkgs() # Install or upgrade the package # If package is already installed cmd = "/opt/csw/bin/pkgutil -yu" __salt__["cmd.run_all"](cmd) __context__.pop("pkg.list_pkgs", None) new = list_pkgs() return salt.utils.data.compare_dicts(old, new) def _list_pkgs_from_context(versions_as_list): """ Use pkg list from __context__ """ if versions_as_list: return __context__["pkg.list_pkgs"] else: ret = copy.deepcopy(__context__["pkg.list_pkgs"]) __salt__["pkg_resource.stringify"](ret) return ret def list_pkgs(versions_as_list=False, **kwargs): """ List the packages currently installed as a dict:: {'<package_name>': '<version>'} CLI Example: .. code-block:: bash salt '*' pkg.list_pkgs salt '*' pkg.list_pkgs versions_as_list=True """ versions_as_list = salt.utils.data.is_true(versions_as_list) # 'removed' not yet implemented or not applicable if salt.utils.data.is_true(kwargs.get("removed")): return {} if "pkg.list_pkgs" in __context__ and kwargs.get("use_context", True): return _list_pkgs_from_context(versions_as_list) ret = {} cmd = "/usr/bin/pkginfo -x" # Package information returned two lines per package. On even-offset # lines, the package name is in the first column. On odd-offset lines, the # package version is in the second column. lines = __salt__["cmd.run"](cmd).splitlines() for index, line in enumerate(lines): if index % 2 == 0: name = line.split()[0].strip() if index % 2 == 1: version_num = line.split()[1].strip() __salt__["pkg_resource.add_pkg"](ret, name, version_num) __salt__["pkg_resource.sort_pkglist"](ret) __context__["pkg.list_pkgs"] = copy.deepcopy(ret) if not versions_as_list: __salt__["pkg_resource.stringify"](ret) return ret def version(*names, **kwargs): """ Returns a version if the package is installed, else returns an empty string CLI Example: .. 
code-block:: bash salt '*' pkgutil.version CSWpython """ return __salt__["pkg_resource.version"](*names, **kwargs) def latest_version(*names, **kwargs): """ Return the latest version of the named package available for upgrade or installation. If more than one package name is specified, a dict of name/version pairs is returned. If the latest version of a given package is already installed, an empty string will be returned for that package. CLI Example: .. code-block:: bash salt '*' pkgutil.latest_version CSWpython salt '*' pkgutil.latest_version <package1> <package2> <package3> ... """ refresh = salt.utils.data.is_true(kwargs.pop("refresh", True)) if not names: return "" ret = {} # Initialize the dict with empty strings for name in names: ret[name] = "" # Refresh before looking for the latest version available if refresh: refresh_db() pkgs = list_pkgs() cmd = "/opt/csw/bin/pkgutil -a --parse {}".format(" ".join(names)) output = __salt__["cmd.run_all"](cmd).get("stdout", "").splitlines() for line in output: try: name, version_rev = line.split()[1:3] except ValueError: continue if name in names: cver = pkgs.get(name, "") nver = version_rev.split(",")[0] if not cver or salt.utils.versions.compare(ver1=cver, oper="<", ver2=nver): # Remove revision for version comparison ret[name] = version_rev # Return a string if only one package name passed if len(names) == 1: return ret[names[0]] return ret # available_version is being deprecated available_version = salt.utils.functools.alias_function( latest_version, "available_version" ) def install(name=None, refresh=False, version=None, pkgs=None, **kwargs): """ Install packages using the pkgutil tool. CLI Example: .. code-block:: bash salt '*' pkg.install <package_name> salt '*' pkg.install SMClgcc346 Multiple Package Installation Options: pkgs A list of packages to install from OpenCSW. Must be passed as a python list. CLI Example: .. 
code-block:: bash salt '*' pkg.install pkgs='["foo", "bar"]' salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3"}]' Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} """ if refresh: refresh_db() try: # Ignore 'sources' argument pkg_params = __salt__["pkg_resource.parse_targets"](name, pkgs, **kwargs)[0] except MinionError as exc: raise CommandExecutionError(exc) if pkg_params is None or len(pkg_params) == 0: return {} if pkgs is None and version and len(pkg_params) == 1: pkg_params = {name: version} targets = [] for param, pkgver in pkg_params.items(): if pkgver is None: targets.append(param) else: targets.append("{}-{}".format(param, pkgver)) cmd = "/opt/csw/bin/pkgutil -yu {}".format(" ".join(targets)) old = list_pkgs() __salt__["cmd.run_all"](cmd) __context__.pop("pkg.list_pkgs", None) new = list_pkgs() return salt.utils.data.compare_dicts(old, new) def remove(name=None, pkgs=None, **kwargs): """ Remove a package and all its dependencies which are not in use by other packages. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. 
code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' """ try: pkg_params = __salt__["pkg_resource.parse_targets"](name, pkgs)[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() targets = [x for x in pkg_params if x in old] if not targets: return {} cmd = "/opt/csw/bin/pkgutil -yr {}".format(" ".join(targets)) __salt__["cmd.run_all"](cmd) __context__.pop("pkg.list_pkgs", None) new = list_pkgs() return salt.utils.data.compare_dicts(old, new) def purge(name=None, pkgs=None, **kwargs): """ Package purges are not supported, this function is identical to ``remove()``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.purge <package name> salt '*' pkg.purge <package1>,<package2>,<package3> salt '*' pkg.purge pkgs='["foo", "bar"]' """ return remove(name=name, pkgs=pkgs)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/pkgutil.py
0.498535
0.155848
pkgutil.py
pypi
import logging import xml.etree.ElementTree as ET log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = "xml" def __virtual__(): """ Only load the module if all modules are imported correctly. """ return __virtualname__ def get_value(file, element): """ Returns the value of the matched xpath element CLI Example: .. code-block:: bash salt '*' xml.get_value /tmp/test.xml ".//element" """ try: root = ET.parse(file) element = root.find(element) return element.text except AttributeError: log.error("Unable to find element matching %s", element) return False def set_value(file, element, value): """ Sets the value of the matched xpath element CLI Example: .. code-block:: bash salt '*' xml.set_value /tmp/test.xml ".//element" "new value" """ try: root = ET.parse(file) relement = root.find(element) except AttributeError: log.error("Unable to find element matching %s", element) return False relement.text = str(value) root.write(file) return True def get_attribute(file, element): """ Return the attributes of the matched xpath element. CLI Example: .. code-block:: bash salt '*' xml.get_attribute /tmp/test.xml ".//element[@id='3']" """ try: root = ET.parse(file) element = root.find(element) return element.attrib except AttributeError: log.error("Unable to find element matching %s", element) return False def set_attribute(file, element, key, value): """ Set the requested attribute key and value for matched xpath element. CLI Example: .. code-block:: bash salt '*' xml.set_attribute /tmp/test.xml ".//element[@id='3']" editedby "gal" """ try: root = ET.parse(file) element = root.find(element) except AttributeError: log.error("Unable to find element matching %s", element) return False element.set(key, str(value)) root.write(file) return True
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/xml.py
0.683525
0.178383
xml.py
pypi
import ast import logging import os import salt.payload import salt.utils.files log = logging.getLogger(__name__) def clear(): """ Clear out all of the data in the minion datastore, this function is destructive! CLI Example: .. code-block:: bash salt '*' data.clear """ try: os.remove(os.path.join(__opts__["cachedir"], "datastore")) except OSError: pass return True def load(): """ Return all of the data in the minion datastore CLI Example: .. code-block:: bash salt '*' data.load """ try: datastore_path = os.path.join(__opts__["cachedir"], "datastore") with salt.utils.files.fopen(datastore_path, "rb") as rfh: return salt.payload.loads(rfh.read()) except (OSError, NameError): return {} def dump(new_data): """ Replace the entire datastore with a passed data structure CLI Example: .. code-block:: bash salt '*' data.dump '{'eggs': 'spam'}' """ if not isinstance(new_data, dict): if isinstance(ast.literal_eval(new_data), dict): new_data = ast.literal_eval(new_data) else: return False try: datastore_path = os.path.join(__opts__["cachedir"], "datastore") with salt.utils.files.fopen(datastore_path, "w+b") as fn_: salt.payload.dump(new_data, fn_) return True except (OSError, NameError): return False def update(key, value): """ Update a key with a value in the minion datastore CLI Example: .. code-block:: bash salt '*' data.update <key> <value> """ store = load() store[key] = value dump(store) return True def cas(key, value, old_value): """ Check and set a value in the minion datastore CLI Example: .. code-block:: bash salt '*' data.cas <key> <value> <old_value> """ store = load() if key not in store: return False if store[key] != old_value: return False store[key] = value dump(store) return True def pop(key, default=None): """ Pop (return & delete) a value from the minion datastore .. versionadded:: 2015.5.2 CLI Example: .. 
code-block:: bash salt '*' data.pop <key> "there was no val" """ store = load() val = store.pop(key, default) dump(store) return val def get(key, default=None): """ Get a (list of) value(s) from the minion datastore .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' data.get key salt '*' data.get '["key1", "key2"]' """ store = load() if isinstance(key, str): return store.get(key, default) elif default is None: return [store[k] for k in key if k in store] else: return [store.get(k, default) for k in key] def keys(): """ Get all keys from the minion datastore .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' data.keys """ store = load() return [k for k in store.keys()] def values(): """ Get values from the minion datastore .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' data.values """ store = load() return [v for v in store.values()] def items(): """ Get items from the minion datastore .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' data.items """ store = load() return store def has_key(key): """ Check if key is in the minion datastore .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' data.has_key <mykey> """ store = load() return key in store
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/data.py
0.604049
0.327077
data.py
pypi
import logging import salt.utils.args try: import jira HAS_JIRA = True except ImportError: HAS_JIRA = False log = logging.getLogger(__name__) __virtualname__ = "jira" __proxyenabled__ = ["*"] JIRA = None def __virtual__(): return ( __virtualname__ if HAS_JIRA else (False, "Please install the jira Python library from PyPI") ) def _get_credentials(server=None, username=None, password=None): """ Returns the credentials merged with the config data (opts + pillar). """ jira_cfg = __salt__["config.merge"]("jira", default={}) if not server: server = jira_cfg.get("server") if not username: username = jira_cfg.get("username") if not password: password = jira_cfg.get("password") return server, username, password def _get_jira(server=None, username=None, password=None): global JIRA if not JIRA: server, username, password = _get_credentials( server=server, username=username, password=password ) JIRA = jira.JIRA( basic_auth=(username, password), server=server, logging=True ) # We want logging return JIRA def create_issue( project, summary, description, template_engine="jinja", context=None, defaults=None, saltenv="base", issuetype="Bug", priority="Normal", labels=None, assignee=None, server=None, username=None, password=None, **kwargs ): """ Create a JIRA issue using the named settings. Return the JIRA ticket ID. project The name of the project to attach the JIRA ticket to. summary The summary (title) of the JIRA ticket. When the ``template_engine`` argument is set to a proper value of an existing Salt template engine (e.g., ``jinja``, ``mako``, etc.) it will render the ``summary`` before creating the ticket. description The full body description of the JIRA ticket. When the ``template_engine`` argument is set to a proper value of an existing Salt template engine (e.g., ``jinja``, ``mako``, etc.) it will render the ``description`` before creating the ticket. 
template_engine: ``jinja`` The name of the template engine to be used to render the values of the ``summary`` and ``description`` arguments. Default: ``jinja``. context: ``None`` The context to pass when rendering the ``summary`` and ``description``. This argument is ignored when ``template_engine`` is set as ``None`` defaults: ``None`` Default values to pass to the Salt rendering pipeline for the ``summary`` and ``description`` arguments. This argument is ignored when ``template_engine`` is set as ``None``. saltenv: ``base`` The Salt environment name (for the rendering system). issuetype: ``Bug`` The type of the JIRA ticket. Default: ``Bug``. priority: ``Normal`` The priority of the JIRA ticket. Default: ``Normal``. labels: ``None`` A list of labels to add to the ticket. assignee: ``None`` The name of the person to assign the ticket to. CLI Examples: .. code-block:: bash salt '*' jira.create_issue NET 'Ticket title' 'Ticket description' salt '*' jira.create_issue NET 'Issue on {{ opts.id }}' 'Error detected on {{ opts.id }}' template_engine=jinja """ if template_engine: summary = __salt__["file.apply_template_on_contents"]( summary, template=template_engine, context=context, defaults=defaults, saltenv=saltenv, ) description = __salt__["file.apply_template_on_contents"]( description, template=template_engine, context=context, defaults=defaults, saltenv=saltenv, ) jira_ = _get_jira(server=server, username=username, password=password) if not labels: labels = [] data = { "project": {"key": project}, "summary": summary, "description": description, "issuetype": {"name": issuetype}, "priority": {"name": priority}, "labels": labels, } data.update(salt.utils.args.clean_kwargs(**kwargs)) issue = jira_.create_issue(data) issue_key = str(issue) if assignee: assign_issue(issue_key, assignee) return issue_key def assign_issue(issue_key, assignee, server=None, username=None, password=None): """ Assign the issue to an existing user. 
Return ``True`` when the issue has been properly assigned. issue_key The JIRA ID of the ticket to manipulate. assignee The name of the user to assign the ticket to. CLI Example: .. code-block:: bash salt '*' jira.assign_issue NET-123 example_user """ jira_ = _get_jira(server=server, username=username, password=password) assigned = jira_.assign_issue(issue_key, assignee) return assigned def add_comment( issue_key, comment, visibility=None, is_internal=False, server=None, username=None, password=None, ): """ Add a comment to an existing ticket. Return ``True`` when it successfully added the comment. issue_key The issue ID to add the comment to. comment The body of the comment to be added. visibility: ``None`` A dictionary having two keys: - ``type``: is ``role`` (or ``group`` if the JIRA server has configured comment visibility for groups). - ``value``: the name of the role (or group) to which viewing of this comment will be restricted. is_internal: ``False`` Whether a comment has to be marked as ``Internal`` in Jira Service Desk. CLI Example: .. code-block:: bash salt '*' jira.add_comment NE-123 'This is a comment' """ jira_ = _get_jira(server=server, username=username, password=password) comm = jira_.add_comment( issue_key, comment, visibility=visibility, is_internal=is_internal ) return True def issue_closed(issue_key, server=None, username=None, password=None): """ Check if the issue is closed. issue_key The JIRA iD of the ticket to close. Returns: - ``True``: the ticket exists and it is closed. - ``False``: the ticket exists and it has not been closed. - ``None``: the ticket does not exist. CLI Example: .. code-block:: bash salt '*' jira.issue_closed NE-123 """ if not issue_key: return None jira_ = _get_jira(server=server, username=username, password=password) try: ticket = jira_.issue(issue_key) except jira.exceptions.JIRAError: # Ticket not found return None return ticket.fields().status.name == "Closed"
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/jira_mod.py
0.652352
0.221172
jira_mod.py
pypi
import logging import os import sys import traceback from collections.abc import Mapping import salt.channel.client import salt.crypt import salt.payload import salt.utils.event import salt.utils.zeromq __proxyenabled__ = ["*"] log = logging.getLogger(__name__) def _dict_subset(keys, master_dict): """ Return a dictionary of only the subset of keys/values specified in keys """ return {k: v for k, v in master_dict.items() if k in keys} def fire_master(data, tag, preload=None): """ Fire an event off up to the master server CLI Example: .. code-block:: bash salt '*' event.fire_master '{"data":"my event data"}' 'tag' """ if ( __opts__.get("local", None) or __opts__.get("file_client", None) == "local" ) and not __opts__.get("use_master_when_local", False): # We can't send an event if we're in masterless mode log.warning("Local mode detected. Event with tag %s will NOT be sent.", tag) return False if preload or __opts__.get("__cli") == "salt-call": # If preload is specified, we must send a raw event (this is # slower because it has to independently authenticate) if "master_uri" not in __opts__: __opts__["master_uri"] = "tcp://{ip}:{port}".format( ip=salt.utils.zeromq.ip_bracket(__opts__["interface"]), port=__opts__.get("ret_port", "4506"), # TODO, no fallback ) masters = list() ret = True if "master_uri_list" in __opts__: for master_uri in __opts__["master_uri_list"]: masters.append(master_uri) else: masters.append(__opts__["master_uri"]) auth = salt.crypt.SAuth(__opts__) load = { "id": __opts__["id"], "tag": tag, "data": data, "tok": auth.gen_token(b"salt"), "cmd": "_minion_event", } if isinstance(preload, dict): load.update(preload) for master in masters: with salt.channel.client.ReqChannel.factory( __opts__, master_uri=master ) as channel: try: channel.send(load) # channel.send was successful. # Ensure ret is True. 
ret = True except Exception: # pylint: disable=broad-except ret = False return ret else: # Usually, we can send the event via the minion, which is faster # because it is already authenticated try: return salt.utils.event.MinionEvent(__opts__, listen=False).fire_event( {"data": data, "tag": tag, "events": None, "pretag": None}, "fire_master", ) except Exception: # pylint: disable=broad-except exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) log.debug(lines) return False def fire(data, tag): """ Fire an event on the local minion event bus. Data must be formed as a dict. CLI Example: .. code-block:: bash salt '*' event.fire '{"data":"my event data"}' 'tag' """ try: with salt.utils.event.get_event( "minion", # was __opts__['id'] sock_dir=__opts__["sock_dir"], opts=__opts__, listen=False, ) as event: return event.fire_event(data, tag) except Exception: # pylint: disable=broad-except exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) log.debug(lines) return False def send( tag, data=None, preload=None, with_env=False, with_grains=False, with_pillar=False, with_env_opts=False, **kwargs ): """ Send an event to the Salt Master .. versionadded:: 2014.7.0 :param tag: A tag to give the event. Use slashes to create a namespace for related events. E.g., ``myco/build/buildserver1/start``, ``myco/build/buildserver1/success``, ``myco/build/buildserver1/failure``. :param data: A dictionary of data to send in the event. This is free-form. Send any data points that are needed for whoever is consuming the event. Arguments on the CLI are interpreted as YAML so complex data structures are possible. :param with_env: Include environment variables from the current shell environment in the event data as ``environ``.. This is a short-hand for working with systems that seed the environment with relevant data such as Jenkins. 
:type with_env: Specify ``True`` to include all environment variables, or specify a list of strings of variable names to include. :param with_grains: Include grains from the current minion in the event data as ``grains``. :type with_grains: Specify ``True`` to include all grains, or specify a list of strings of grain names to include. :param with_pillar: Include Pillar values from the current minion in the event data as ``pillar``. Remember Pillar data is often sensitive data so be careful. This is useful for passing ephemeral Pillar values through an event. Such as passing the ``pillar={}`` kwarg in :py:func:`state.sls <salt.modules.state.sls>` from the Master, through an event on the Minion, then back to the Master. :type with_pillar: Specify ``True`` to include all Pillar values, or specify a list of strings of Pillar keys to include. It is a best-practice to only specify a relevant subset of Pillar data. :param with_env_opts: Include ``saltenv`` and ``pillarenv`` set on minion at the moment when event is send into event data. :type with_env_opts: Specify ``True`` to include ``saltenv`` and ``pillarenv`` values or ``False`` to omit them. :param kwargs: Any additional keyword arguments passed to this function will be interpreted as key-value pairs and included in the event data. This provides a convenient alternative to YAML for simple values. CLI Example: .. code-block:: bash salt-call event.send myco/mytag foo=Foo bar=Bar salt-call event.send 'myco/mytag' '{foo: Foo, bar: Bar}' A convenient way to allow Jenkins to execute ``salt-call`` is via sudo. The following rule in sudoers will allow the ``jenkins`` user to run only the following command. ``/etc/sudoers`` (allow preserving the environment): .. code-block:: text jenkins ALL=(ALL) NOPASSWD:SETENV: /usr/bin/salt-call event.send* Call Jenkins via sudo (preserve the environment): .. 
code-block:: bash sudo -E salt-call event.send myco/jenkins/build/success with_env=[BUILD_ID, BUILD_URL, GIT_BRANCH, GIT_COMMIT] """ data_dict = {} if with_env: if isinstance(with_env, list): data_dict["environ"] = _dict_subset(with_env, dict(os.environ)) else: data_dict["environ"] = dict(os.environ) if with_grains: if isinstance(with_grains, list): data_dict["grains"] = _dict_subset(with_grains, __grains__) else: data_dict["grains"] = __grains__.value() if with_pillar: if isinstance(with_pillar, list): data_dict["pillar"] = _dict_subset(with_pillar, __pillar__) else: data_dict["pillar"] = __pillar__.value() if with_env_opts: data_dict["saltenv"] = __opts__.get("saltenv", "base") data_dict["pillarenv"] = __opts__.get("pillarenv") if kwargs: data_dict.update(kwargs) # Allow values in the ``data`` arg to override any of the above values. if isinstance(data, Mapping): data_dict.update(data) if ( __opts__.get("local") or __opts__.get("file_client") == "local" or __opts__.get("master_type") == "disable" ) and not __opts__.get("use_master_when_local"): return fire(data_dict, tag) else: return fire_master(data_dict, tag, preload=preload)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/event.py
0.447943
0.15428
event.py
pypi
# Support for editing macOS date/time/timezone settings by shelling out to
# the `systemsetup` and `date` command line tools.
# NOTE(review): `systemsetup` presumably requires admin rights -- confirm
# the minion runs privileged before relying on the set_* functions.
from datetime import datetime

import salt.utils.mac_utils
import salt.utils.platform
from salt.exceptions import SaltInvocationError

__virtualname__ = "timezone"


def __virtual__():
    """
    Only for macOS
    """
    # Loader gate: expose this module as ``timezone`` on Darwin only.
    if not salt.utils.platform.is_darwin():
        return (
            False,
            "The mac_timezone module could not be loaded: "
            "module only works on macOS systems.",
        )
    return __virtualname__


def _get_date_time_format(dt_string):
    """
    Function that detects the date/time format for the string passed.

    :param str dt_string:
        A date/time string

    :return: The format of the passed dt_string
    :rtype: str

    :raises: SaltInvocationError on Invalid Date/Time string
    """
    # Try each supported format in turn; the first one that parses wins.
    valid_formats = [
        "%H:%M",
        "%H:%M:%S",
        "%m:%d:%y",
        "%m:%d:%Y",
        "%m/%d/%y",
        "%m/%d/%Y",
    ]
    for dt_format in valid_formats:
        try:
            datetime.strptime(dt_string, dt_format)
            return dt_format
        except ValueError:
            # Not this format; keep trying the remaining candidates.
            continue
    msg = "Invalid Date/Time Format: {}".format(dt_string)
    raise SaltInvocationError(msg)


def get_date():
    """
    Displays the current date

    :return: the system date
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_date
    """
    ret = salt.utils.mac_utils.execute_return_result("systemsetup -getdate")
    return salt.utils.mac_utils.parse_return(ret)


def set_date(date):
    """
    Set the current month, day, and year

    :param str date: The date to set. Valid date formats are:

        - %m:%d:%y
        - %m:%d:%Y
        - %m/%d/%y
        - %m/%d/%Y

    :return: True if successful, False if not
    :rtype: bool

    :raises: SaltInvocationError on Invalid Date format
    :raises: CommandExecutionError on failure

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_date 1/13/2016
    """
    # Detect the caller's format, then normalize to the colon form that
    # `systemsetup -setdate` expects.
    date_format = _get_date_time_format(date)
    dt_obj = datetime.strptime(date, date_format)

    cmd = "systemsetup -setdate {}".format(dt_obj.strftime("%m:%d:%Y"))
    return salt.utils.mac_utils.execute_return_success(cmd)


def get_time():
    """
    Get the current system time.

    :return: The current time in 24 hour format
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_time
    """
    ret = salt.utils.mac_utils.execute_return_result("systemsetup -gettime")
    return salt.utils.mac_utils.parse_return(ret)


def set_time(time):
    """
    Sets the current time. Must be in 24 hour format.

    :param str time: The time to set in 24 hour format. The value must be
        double quoted. ie: '"17:46"'

    :return: True if successful, False if not
    :rtype: bool

    :raises: SaltInvocationError on Invalid Time format
    :raises: CommandExecutionError on failure

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_time '"17:34"'
    """
    # time must be double quoted '"17:46"'
    # Normalize to HH:MM:SS for `systemsetup -settime`.
    time_format = _get_date_time_format(time)
    dt_obj = datetime.strptime(time, time_format)

    cmd = "systemsetup -settime {}".format(dt_obj.strftime("%H:%M:%S"))
    return salt.utils.mac_utils.execute_return_success(cmd)


def get_zone():
    """
    Displays the current time zone

    :return: The current time zone
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zone
    """
    ret = salt.utils.mac_utils.execute_return_result("systemsetup -gettimezone")
    return salt.utils.mac_utils.parse_return(ret)


def get_zonecode():
    """
    Displays the current time zone abbreviated code

    :return: The current time zone code
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zonecode
    """
    return salt.utils.mac_utils.execute_return_result("date +%Z")


def get_offset():
    """
    Displays the current time zone offset

    :return: The current time zone offset
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_offset
    """
    return salt.utils.mac_utils.execute_return_result("date +%z")


def list_zones():
    """
    Displays a list of available time zones. Use this list when setting a
    time zone using ``timezone.set_zone``

    :return: a list of time zones
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.list_zones
    """
    ret = salt.utils.mac_utils.execute_return_result("systemsetup -listtimezones")
    zones = salt.utils.mac_utils.parse_return(ret)
    # One zone name per output line; strip the leading indentation.
    return [x.strip() for x in zones.splitlines()]


def set_zone(time_zone):
    """
    Set the local time zone. Use ``timezone.list_zones`` to list valid
    time_zone arguments

    :param str time_zone: The time zone to apply

    :return: True if successful, False if not
    :rtype: bool

    :raises: SaltInvocationError on Invalid Timezone
    :raises: CommandExecutionError on failure

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_zone America/Denver
    """
    # Validate against the system's own list before shelling out.
    if time_zone not in list_zones():
        raise SaltInvocationError("Invalid Timezone: {}".format(time_zone))

    salt.utils.mac_utils.execute_return_success(
        "systemsetup -settimezone {}".format(time_zone)
    )

    # Substring check against the reported zone confirms the change stuck.
    return time_zone in get_zone()


def zone_compare(time_zone):
    """
    Compares the given timezone name with the system timezone name.

    :return: True if they are the same, False if not
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.zone_compare America/Boise
    """
    return time_zone == get_zone()


def get_using_network_time():
    """
    Display whether network time is on or off

    :return: True if network time is on, False if off
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_using_network_time
    """
    ret = salt.utils.mac_utils.execute_return_result("systemsetup -getusingnetworktime")
    # validate_enabled normalizes the reported state to "on"/"off".
    return (
        salt.utils.mac_utils.validate_enabled(salt.utils.mac_utils.parse_return(ret))
        == "on"
    )


def set_using_network_time(enable):
    """
    Set whether network time is on or off.

    :param enable: True to enable, False to disable. Can also use 'on' or 'off'
    :type: str bool

    :return: True if successful, False if not
    :rtype: bool

    :raises: CommandExecutionError on failure

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_using_network_time True
    """
    state = salt.utils.mac_utils.validate_enabled(enable)

    cmd = "systemsetup -setusingnetworktime {}".format(state)
    salt.utils.mac_utils.execute_return_success(cmd)

    # Re-read the setting so the return reflects the actual system state.
    return state == salt.utils.mac_utils.validate_enabled(get_using_network_time())


def get_time_server():
    """
    Display the currently set network time server.

    :return: the network time server
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_time_server
    """
    ret = salt.utils.mac_utils.execute_return_result(
        "systemsetup -getnetworktimeserver"
    )
    return salt.utils.mac_utils.parse_return(ret)


def set_time_server(time_server="time.apple.com"):
    """
    Designates a network time server. Enter the IP address or DNS name for the
    network time server.

    :param time_server: IP or DNS name of the network time server. If nothing
        is passed the time server will be set to the macOS default of
        'time.apple.com'
    :type: str

    :return: True if successful, False if not
    :rtype: bool

    :raises: CommandExecutionError on failure

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_time_server time.acme.com
    """
    cmd = "systemsetup -setnetworktimeserver {}".format(time_server)
    salt.utils.mac_utils.execute_return_success(cmd)

    # Substring check against the reported server confirms the change stuck.
    return time_server in get_time_server()


def get_hwclock():
    """
    Get current hardware clock setting (UTC or localtime)

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_hwclock
    """
    # Need to search for a way to figure it out ...
    # Stub: macOS offers no obvious query for this; always reports False.
    return False


def set_hwclock(clock):
    """
    Sets the hardware clock to be either UTC or localtime

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_hwclock UTC
    """
    # Need to search for a way to figure it out ...
    # Stub: not implemented on macOS; always reports False.
    return False
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/mac_timezone.py
0.856122
0.1929
mac_timezone.py
pypi
# Manage local groups on AIX via the mkgroup/rmgroup/chgroup/chgrpmem tools.
import logging

try:
    import grp
except ImportError:
    # grp is Unix-only; the __virtual__ gate keeps this module off
    # platforms where it is unavailable.
    pass

log = logging.getLogger(__name__)

# Define the module's virtual name
__virtualname__ = "group"


def __virtual__():
    """
    Set the group module if the kernel is AIX
    """
    if __grains__["kernel"] == "AIX":
        return __virtualname__
    return (
        False,
        "The aix_group execution module failed to load: only available on AIX systems.",
    )


def add(name, gid=None, system=False, root=None):
    """
    Add the specified group

    :param str name: the name of the new group
    :param gid: numeric group id to assign
    :param bool system: create an administrative group (see note below)
    :param root: unused on AIX; kept for cross-platform signature parity

    :return: True if the command succeeded, False otherwise
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' group.add foo 3456
    """
    cmd = "mkgroup "
    # NOTE(review): '-a' (administrative group) is only added when *both*
    # ``system`` is truthy and ``root`` is not None -- confirm this coupling
    # is intended; callers passing only ``system=True`` get a regular group.
    if system and root is not None:
        cmd += "-a "

    if gid:
        cmd += "id={} ".format(gid)

    cmd += name

    ret = __salt__["cmd.run_all"](cmd, python_shell=False)

    # retcode 0 means success.
    return not ret["retcode"]


def delete(name):
    """
    Remove the named group

    :param str name: the group to remove

    :return: True if the command succeeded, False otherwise
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' group.delete foo
    """
    ret = __salt__["cmd.run_all"]("rmgroup {}".format(name), python_shell=False)

    return not ret["retcode"]


def info(name):
    """
    Return information about a group

    :param str name: the group to look up

    :return: name/passwd/gid/members mapping, or {} if the group is unknown
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt '*' group.info foo
    """
    try:
        grinfo = grp.getgrnam(name)
    except KeyError:
        # Unknown group: report an empty mapping rather than raising.
        return {}
    else:
        return {
            "name": grinfo.gr_name,
            "passwd": grinfo.gr_passwd,
            "gid": grinfo.gr_gid,
            "members": grinfo.gr_mem,
        }


def getent(refresh=False):
    """
    Return info on all groups

    :param bool refresh: bypass the per-run cache and re-read the database

    :return: a list of ``info()``-style dicts, one per group
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' group.getent
    """
    # Serve the cached result unless a refresh is requested.
    if "group.getent" in __context__ and not refresh:
        return __context__["group.getent"]

    ret = [info(grinfo.gr_name) for grinfo in grp.getgrall()]

    __context__["group.getent"] = ret
    return ret


def chgid(name, gid):
    """
    Change the gid for a named group

    :param str name: the group to modify
    :param gid: the new numeric group id

    :return: True if the gid is (now) the requested value, False otherwise
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' group.chgid foo 4376
    """
    pre_gid = __salt__["file.group_to_gid"](name)
    if gid == pre_gid:
        # Already at the requested gid; nothing to do.
        return True
    cmd = "chgroup id={} {}".format(gid, name)
    __salt__["cmd.run"](cmd, python_shell=False)
    post_gid = __salt__["file.group_to_gid"](name)
    if post_gid != pre_gid:
        return post_gid == gid
    # Command had no effect.
    return False


def adduser(name, username, root=None):
    """
    Add a user in the group.

    :param str name: the group to modify
    :param str username: the user to add
    :param root: unused on AIX; kept for cross-platform signature parity

    CLI Example:

    .. code-block:: bash

        salt '*' group.adduser foo bar

    Verifies if a valid username 'bar' as a member of an existing group 'foo',
    if not then adds it.
    """
    cmd = "chgrpmem -m + {} {}".format(username, name)

    retcode = __salt__["cmd.retcode"](cmd, python_shell=False)

    return not retcode


def deluser(name, username, root=None):
    """
    Remove a user from the group.

    :param str name: the group to modify
    :param str username: the user to remove
    :param root: unused on AIX; kept for cross-platform signature parity

    CLI Example:

    .. code-block:: bash

        salt '*' group.deluser foo bar

    Removes a member user 'bar' from a group 'foo'. If group is not present
    then returns True.
    """
    grp_info = __salt__["group.info"](name)
    try:
        if username in grp_info["members"]:
            cmd = "chgrpmem -m - {} {}".format(username, name)
            # BUGFIX: this previously called ``cmd.run`` (which returns the
            # command output as a *string*) and then indexed the result with
            # ``ret["retcode"]``, raising TypeError on every removal.
            # Use ``cmd.retcode`` like adduser()/members() do.
            retcode = __salt__["cmd.retcode"](cmd, python_shell=False)
            return not retcode
        else:
            # User is not a member; nothing to remove.
            return True
    except Exception:  # pylint: disable=broad-except
        # Missing group / malformed info is treated as "already absent".
        return True


def members(name, members_list, root=None):
    """
    Replaces members of the group with a provided list.

    :param str name: the group to modify
    :param str members_list: comma-separated list of usernames
    :param root: unused on AIX; kept for cross-platform signature parity

    CLI Example:

    .. code-block:: bash

        salt '*' group.members foo 'user1,user2,user3,...'

    Replaces a membership list for a local group 'foo'.
        foo:x:1234:user1,user2,user3,...
    """
    cmd = "chgrpmem -m = {} {}".format(members_list, name)

    retcode = __salt__["cmd.retcode"](cmd, python_shell=False)

    return not retcode
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/aix_group.py
0.552902
0.165492
aix_group.py
pypi
# Helpers for rendering OpenConfig-structured data in NAPALM formulas.
import copy
import fnmatch
import logging

import salt.utils.dictupdate

# Import salt modules
import salt.utils.napalm
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.data import traverse_dict_and_list as _traverse_dict_and_list

__proxyenabled__ = ["*"]
__virtualname__ = "napalm_formula"
log = logging.getLogger(__name__)


def __virtual__():
    """
    Available only on NAPALM Minions.
    """
    return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)


def _container_path(model, key=None, container=None, delim=DEFAULT_TARGET_DELIM):
    """
    Generate all the possible paths within an OpenConfig-like object, down to
    the ``container`` key (``config`` by default).

    This function returns a generator.
    """
    if not key:
        key = ""
    if not container:
        container = "config"
    for model_key, model_value in model.items():
        if key:
            key_depth = "{prev_key}{delim}{cur_key}".format(
                prev_key=key, delim=delim, cur_key=model_key
            )
        else:
            key_depth = model_key
        if model_key == container:
            # Reached the target container: emit the accumulated path.
            yield key_depth
        else:
            # Recurse one level deeper, carrying the path built so far.
            yield from _container_path(
                model_value, key=key_depth, container=container, delim=delim
            )


def container_path(model, key=None, container=None, delim=DEFAULT_TARGET_DELIM):
    """
    Return the list of all the possible paths in a container, down to the
    ``config`` container. This function can be used to verify that the
    ``model`` is a Python object correctly structured and respecting the
    OpenConfig hierarchy.

    model
        The OpenConfig-structured object to inspect.

    delim: ``:``
        The key delimiter. In particular cases, it is indicated to use ``//``
        as ``:`` might be already used in various cases, e.g., IPv6 addresses,
        interface name (e.g., Juniper QFX series), etc.

    CLI Example:

    .. code-block:: bash

        salt '*' napalm_formula.container_path "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}"

    The example above would return a list with the following element:
    ``interfaces:interface:Ethernet1:config`` which is the only possible path
    in that hierarchy.

    Other output examples:

    .. code-block:: text

        - interfaces:interface:Ethernet1:config
        - interfaces:interface:Ethernet1:subinterfaces:subinterface:0:config
        - interfaces:interface:Ethernet2:config
    """
    # BUGFIX: forward ``key``, ``container`` and ``delim`` to the generator;
    # they were previously accepted but silently dropped, so a custom
    # delimiter or container name had no effect on the output.
    return list(_container_path(model, key=key, container=container, delim=delim))


def setval(key, val, dict_=None, delim=DEFAULT_TARGET_DELIM):
    """
    Set a value under the dictionary hierarchy identified under the key. The
    target 'foo/bar/baz' returns the dictionary hierarchy
    {'foo': {'bar': {'baz': {}}}}.

    .. note::

        Currently this doesn't work with integers, i.e. cannot build lists
        dynamically.

    CLI Example:

    .. code-block:: bash

        salt '*' formula.setval foo:baz:bar True
    """
    if not dict_:
        dict_ = {}
    prev_hier = dict_
    dict_hier = key.split(delim)
    # Walk/create the intermediate levels, then attach a deep copy of the
    # value at the leaf so the caller's object is never aliased.
    for each in dict_hier[:-1]:
        if each not in prev_hier:
            prev_hier[each] = {}
        prev_hier = prev_hier[each]
    prev_hier[dict_hier[-1]] = copy.deepcopy(val)
    return dict_


def traverse(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):
    """
    Traverse a dict or list using a colon-delimited (or otherwise delimited,
    using the ``delimiter`` param) target string. The target ``foo:bar:0``
    will return ``data['foo']['bar'][0]`` if this value exists, and will
    otherwise return the dict in the default argument. Function will
    automatically determine the target type. The target ``foo:bar:0`` will
    return data['foo']['bar'][0] if data like ``{'foo':{'bar':['baz']}}``,
    if data like ``{'foo':{'bar':{'0':'baz'}}}`` then
    ``return data['foo']['bar']['0']``

    CLI Example:

    .. code-block:: bash

        salt '*' napalm_formula.traverse "{'foo': {'bar': {'baz': True}}}" foo:baz:bar
    """
    return _traverse_dict_and_list(data, key, default=default, delimiter=delimiter)


def dictupdate(dest, upd, recursive_update=True, merge_lists=False):
    """
    Recursive version of the default dict.update

    Merges upd recursively into dest

    If recursive_update=False, will use the classic dict.update, or fall back
    on a manual merge (helpful for non-dict types like ``FunctionWrapper``).

    If ``merge_lists=True``, will aggregate list object types instead of
    replace. The list in ``upd`` is added to the list in ``dest``, so the
    resulting list is ``dest[key] + upd[key]``. This behaviour is only
    activated when ``recursive_update=True``. By default ``merge_lists=False``.
    """
    return salt.utils.dictupdate.update(
        dest, upd, recursive_update=recursive_update, merge_lists=merge_lists
    )


def defaults(model, defaults_, delim="//", flipped_merge=False):
    """
    Apply the defaults to a Python dictionary having the structure as
    described in the OpenConfig standards.

    model
        The OpenConfig model to apply the defaults to.

    defaults
        The dictionary of defaults. This argument must equally be structured
        with respect to the OpenConfig standards. For ease of use, the keys
        of these support glob matching, therefore we don't have to provide
        the defaults for each entity but only for the entity type. See an
        example below.

    delim: ``//``
        The key delimiter to use. Generally, ``//`` should cover all the
        possible cases, and you don't need to override this value.

    flipped_merge: ``False``
        Whether should merge the model into the defaults, or the defaults
        into the model. Default: ``False`` (merge the model into the
        defaults, i.e., any defaults would be overridden by the values from
        the ``model``).

    CLI Example:

    .. code-block:: bash

        salt '*' napalm_formula.defaults "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}" "{'interfaces': {'interface': {'*': {'config': {'enabled': True}}}}}"

    As one can notice in the example above, the ``*`` corresponds to the
    interface name, therefore, the defaults will be applied on all the
    interfaces.
    """
    merged = {}
    log.debug("Applying the defaults:")
    log.debug(defaults_)
    log.debug("openconfig like dictionary:")
    log.debug(model)
    for model_path in _container_path(model, delim=delim):
        for default_path in _container_path(defaults_, delim=delim):
            log.debug("Comparing %s to %s", model_path, default_path)
            # A default applies only when it glob-matches the model path AND
            # both paths have the same depth (same number of delimited keys).
            if not fnmatch.fnmatch(model_path, default_path) or not len(
                model_path.split(delim)
            ) == len(default_path.split(delim)):
                continue
            log.debug("%s matches %s", model_path, default_path)
            # If there's a match, it will build the dictionary from the top
            default_val = _traverse_dict_and_list(
                defaults_, default_path, delimiter=delim
            )
            merged = setval(model_path, default_val, dict_=merged, delim=delim)
    log.debug("Complete default dictionary")
    log.debug(merged)
    log.debug("Merging with the model")
    log.debug(model)
    if flipped_merge:
        return salt.utils.dictupdate.update(model, merged)
    return salt.utils.dictupdate.update(merged, model)


def render_field(dictionary, field, prepend=None, append=None, quotes=False, **opts):
    """
    Render a field found under the ``field`` level of the hierarchy in the
    ``dictionary`` object. This is useful to render a field in a Jinja
    template without worrying that the hierarchy might not exist. For example
    if we do the following in Jinja:
    ``{{ interfaces.interface.Ethernet5.config.description }}`` for the
    following object:
    ``{'interfaces': {'interface': {'Ethernet1': {'config': {'enabled': True}}}}}``
    it would error, as the ``Ethernet5`` key does not exist. With this helper,
    we can skip this and avoid existence checks. This must be however used
    with care.

    dictionary
        The dictionary to traverse.

    field
        The key name or part to traverse in the ``dictionary``.

    prepend: ``None``
        The text to prepend in front of the text. Usually, we need to have
        the name of the field too when generating the configuration.

    append: ``None``
        Text to append at the end.

    quotes: ``False``
        Whether should wrap the text around quotes.

    CLI Example:

    .. code-block:: bash

        salt '*' napalm_formula.render_field "{'enabled': True}" enabled
        # This would return the value of the ``enabled`` leaf key
        salt '*' napalm_formula.render_field "{'enabled': True}" description
        # This would not error

    Jinja usage example:

    .. code-block:: jinja

        {%- set config = {'enabled': True, 'description': 'Interface description'} %}
        {{ salt.napalm_formula.render_field(config, 'description', quotes=True) }}

    The example above would be rendered on Arista / Cisco as:

    .. code-block:: text

        description "Interface description"

    While on Junos (the semicolon is important to be added, otherwise the
    configuration won't be accepted by Junos):

    .. code-block:: text

        description "Interface description";
    """
    value = traverse(dictionary, field)
    if value is None:
        # Missing field: render nothing instead of erroring.
        return ""
    if prepend is None:
        # Default the keyword to the field name, OpenConfig-style
        # (underscores become hyphens).
        prepend = field.replace("_", "-")
    if append is None:
        if __grains__["os"] in ("junos",):
            # Junos statements are semicolon-terminated.
            append = ";"
        else:
            append = ""
    if quotes:
        value = '"{value}"'.format(value=value)
    return "{prepend} {value}{append}".format(
        prepend=prepend, value=value, append=append
    )


def render_fields(dictionary, *fields, **opts):
    """
    This function works similarly to
    :mod:`render_field <salt.modules.napalm_formula.render_field>` but for a
    list of fields from the same dictionary, rendering, indenting and
    distributing them on separate lines.

    dictionary
        The dictionary to traverse.

    fields
        A list of field names or paths in the dictionary.

    indent: ``0``
        The indentation to use, prepended to the rendered field.

    separator: ``\\n``
        The separator to use between fields.

    CLI Example:

    .. code-block:: bash

        salt '*' napalm_formula.render_fields "{'mtu': 68, 'description': 'Interface description'}" mtu description

    Jinja usage example:

    .. code-block:: jinja

        {%- set config={'mtu': 68, 'description': 'Interface description'} %}
        {{ salt.napalm_formula.render_fields(config, 'mtu', 'description', quotes=True) }}

    The Jinja example above would generate the following configuration:

    .. code-block:: text

        mtu "68"
        description "Interface description"
    """
    results = []
    for field in fields:
        res = render_field(dictionary, field, **opts)
        if res:
            results.append(res)
    if "indent" not in opts:
        opts["indent"] = 0
    if "separator" not in opts:
        # Default: newline plus the requested indentation.
        opts["separator"] = "\n{ind}".format(ind=" " * opts["indent"])
    return opts["separator"].join(results)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/napalm_formula.py
0.674158
0.231745
napalm_formula.py
pypi
# Manage Samba accounts via the `pdbedit` command line tool.
import binascii
import hashlib
import logging
import re
import shlex

import salt.modules.cmdmod
import salt.utils.path

log = logging.getLogger(__name__)

# Define the module's virtual name
__virtualname__ = "pdbedit"

# Function aliases
__func_alias__ = {
    "list_users": "list",
    "get_user": "get",
}


def __virtual__():
    """
    Provides pdbedit if available
    """
    # NOTE: check for pdbedit command
    if not salt.utils.path.which("pdbedit"):
        return (False, "pdbedit command is not available")

    # NOTE: check version is >= 4.5.x
    ver = salt.modules.cmdmod.run("pdbedit -V")
    ver_regex = re.compile(r"^Version\s(\d+)\.(\d+)\.(\d+).*$")
    ver_match = ver_regex.match(ver)
    if not ver_match:
        return (False, "pdbedit -V returned an unknown version format")

    # BUGFIX: the old check ``major >= 4 and minor >= 5`` wrongly rejected
    # any release with a minor version below 5 regardless of the major
    # (e.g. 5.0.0). Compare the (major, minor) tuple instead.
    if (int(ver_match.group(1)), int(ver_match.group(2))) < (4, 5):
        return (False, "pdbedit is to old, 4.5.0 or newer is required")

    # Samba hashes rely on MD4, which some OpenSSL builds no longer ship.
    try:
        hashlib.new("md4", "".encode("utf-16le"))
    except ValueError:
        return (False, "Hash type md4 unsupported")

    return __virtualname__


def generate_nt_hash(password):
    """
    Generate a NT HASH

    :param str password: the plain-text password to hash

    :return: upper-case hex NT hash of ``password``
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' pdbedit.generate_nt_hash my_passwd
    """
    # BUGFIX: decode the hexlified digest to str. It was previously returned
    # as bytes, which never compared equal to the str hashes parsed from
    # pdbedit output in create()/modify(), and leaked a b'...' repr into the
    # --set-nt-hash command line.
    return (
        binascii.hexlify(hashlib.new("md4", password.encode("utf-16le")).digest())
        .upper()
        .decode("ascii")
    )


def list_users(verbose=True, hashes=False):
    """
    List user accounts

    verbose : boolean
        return all information

    hashes : boolean
        include NT HASH and LM HASH in verbose output

    CLI Example:

    .. code-block:: bash

        salt '*' pdbedit.list
    """
    users = {} if verbose else []

    if verbose:
        # parse detailed user data
        res = __salt__["cmd.run_all"](
            "pdbedit --list --verbose {hashes}".format(
                hashes="--smbpasswd-style" if hashes else ""
            ),
        )

        if res["retcode"] > 0:
            log.error(res["stderr"] if "stderr" in res else res["stdout"])
            return users

        # Records are separated by dashed lines; each record is a block of
        # "Label: value" lines.
        for batch in re.split("\n-+|-+\n", res["stdout"]):
            user_data = {}
            last_label = None
            for line in batch.splitlines():
                if not line.strip():
                    continue
                label, sep, data = line.partition(":")
                label = label.strip().lower()
                data = data.strip()
                if not sep:
                    # Continuation line: append to the previous field.
                    user_data[last_label] += line.strip()
                else:
                    last_label = label
                    user_data[label] = data
            if user_data:
                users[user_data["unix username"]] = user_data
    else:
        # list users
        res = __salt__["cmd.run_all"]("pdbedit --list")

        if res["retcode"] > 0:
            return {"Error": res["stderr"] if "stderr" in res else res["stdout"]}

        # One "login:uid:gecos" entry per line; keep the login only.
        for user in res["stdout"].splitlines():
            if ":" not in user:
                continue
            user_data = user.split(":")
            if len(user_data) >= 3:
                users.append(user_data[0])

    return users


def get_user(login, hashes=False):
    """
    Get user account details

    login : string
        login name

    hashes : boolean
        include NTHASH and LMHASH in verbose output

    CLI Example:

    .. code-block:: bash

        salt '*' pdbedit.get kaylee
    """
    users = list_users(verbose=True, hashes=hashes)
    return users[login] if login in users else {}


def delete(login):
    """
    Delete user account

    login : string
        login name

    CLI Example:

    .. code-block:: bash

        salt '*' pdbedit.delete wash
    """
    if login in list_users(False):
        res = __salt__["cmd.run_all"](
            "pdbedit --delete {login}".format(login=shlex.quote(login)),
        )

        if res["retcode"] > 0:
            return {login: res["stderr"] if "stderr" in res else res["stdout"]}

        return {login: "deleted"}

    return {login: "absent"}


def create(login, password, password_hashed=False, machine_account=False):
    """
    Create user account

    login : string
        login name

    password : string
        password

    password_hashed : boolean
        set if password is a nt hash instead of plain text

    machine_account : boolean
        set to create a machine trust account instead

    CLI Example:

    .. code-block:: bash

        salt '*' pdbedit.create zoe 9764951149F84E770889011E1DC4A927 nthash
        salt '*' pdbedit.create river 1sw4ll0w3d4bug
    """
    ret = "unchanged"

    # generate nt hash if needed
    if password_hashed:
        password_hash = password.upper()
        password = ""  # wipe password
    else:
        password_hash = generate_nt_hash(password)

    # create user
    if login not in list_users(False):
        # NOTE: --create requires a password, even if blank
        res = __salt__["cmd.run_all"](
            cmd="pdbedit --create --user {login} -t {machine}".format(
                login=shlex.quote(login),
                machine="--machine" if machine_account else "",
            ),
            stdin="{password}\n{password}\n".format(password=password),
        )

        if res["retcode"] > 0:
            return {login: res["stderr"] if "stderr" in res else res["stdout"]}

        ret = "created"

    # update password if needed
    # Both sides of this comparison are now str (see generate_nt_hash).
    user = get_user(login, True)
    if user["nt hash"] != password_hash:
        res = __salt__["cmd.run_all"](
            "pdbedit --modify --user {login} --set-nt-hash={nthash}".format(
                login=shlex.quote(login), nthash=shlex.quote(password_hash)
            ),
        )

        if res["retcode"] > 0:
            return {login: res["stderr"] if "stderr" in res else res["stdout"]}

        if ret != "created":
            ret = "updated"

    return {login: ret}


def modify(
    login,
    password=None,
    password_hashed=False,
    domain=None,
    profile=None,
    script=None,
    drive=None,
    homedir=None,
    fullname=None,
    account_desc=None,
    account_control=None,
    machine_sid=None,
    user_sid=None,
    reset_login_hours=False,
    reset_bad_password_count=False,
):
    """
    Modify user account

    login : string
        login name

    password : string
        password

    password_hashed : boolean
        set if password is a nt hash instead of plain text

    domain : string
        users domain

    profile : string
        profile path

    script : string
        logon script

    drive : string
        home drive

    homedir : string
        home directory

    fullname : string
        full name

    account_desc : string
        account description

    machine_sid : string
        specify the machines new primary group SID or rid

    user_sid : string
        specify the users new primary group SID or rid

    account_control : string
        specify user account control properties

        .. note::

            Only the following can be set:

            - N: No password required
            - D: Account disabled
            - H: Home directory required
            - L: Automatic Locking
            - X: Password does not expire

    reset_login_hours : boolean
        reset the users allowed logon hours

    reset_bad_password_count : boolean
        reset the stored bad login counter

    .. note::

        if user is absent and password is provided, the user will be created

    CLI Example:

    .. code-block:: bash

        salt '*' pdbedit.modify inara fullname='Inara Serra'
        salt '*' pdbedit.modify simon password=r1v3r
        salt '*' pdbedit.modify jane drive='V:' homedir='\\\\serenity\\jane\\profile'
        salt '*' pdbedit.modify mal account_control=NX
    """
    ret = "unchanged"

    # flag mapping: pdbedit field name (as reported by --list --verbose)
    # to the corresponding --modify command-line option.
    flags = {
        "domain": "--domain=",
        "full name": "--fullname=",
        "account desc": "--account-desc=",
        "home directory": "--homedir=",
        "homedir drive": "--drive=",
        "profile path": "--profile=",
        "logon script": "--script=",
        "account flags": "--account-control=",
        "user sid": "-U ",
        "machine sid": "-M ",
    }

    # field mapping: pdbedit field name to the value provided by the caller.
    provided = {
        "domain": domain,
        "full name": fullname,
        "account desc": account_desc,
        "home directory": homedir,
        "homedir drive": drive,
        "profile path": profile,
        "logon script": script,
        "account flags": account_control,
        "user sid": user_sid,
        "machine sid": machine_sid,
    }

    # update password (creates the account when absent)
    if password:
        ret = create(login, password, password_hashed)[login]
        if ret not in ["updated", "created", "unchanged"]:
            return {login: ret}
    elif login not in list_users(False):
        return {login: "absent"}

    # check for changes
    current = get_user(login, hashes=True)
    changes = {}
    for key, val in provided.items():
        if key in ["user sid", "machine sid"]:
            # SIDs only need to match on the trailing rid.
            if (
                val is not None
                and key in current
                and not current[key].endswith(str(val))
            ):
                changes[key] = str(val)
        elif key in ["account flags"]:
            if val is not None:
                if val.startswith("["):
                    val = val[1:-1]
                new = []
                for f in val.upper():
                    if f not in ["N", "D", "H", "L", "X"]:
                        log.warning(
                            "pdbedit.modify - unknown %s flag for account_control, ignored",
                            f,
                        )
                    else:
                        new.append(f)
                changes[key] = "[{flags}]".format(flags="".join(new))
        else:
            if val is not None and key in current and current[key] != val:
                changes[key] = val

    # apply changes
    if len(changes) > 0 or reset_login_hours or reset_bad_password_count:
        cmds = []
        for change in changes:
            cmds.append(
                "{flag}{value}".format(
                    flag=flags[change],
                    value=shlex.quote(changes[change]),
                )
            )
        if reset_login_hours:
            cmds.append("--logon-hours-reset")
        if reset_bad_password_count:
            cmds.append("--bad-password-count-reset")

        res = __salt__["cmd.run_all"](
            "pdbedit --modify --user {login} {changes}".format(
                login=shlex.quote(login),
                changes=" ".join(cmds),
            ),
        )

        if res["retcode"] > 0:
            return {login: res["stderr"] if "stderr" in res else res["stdout"]}

        if ret != "created":
            ret = "updated"

    return {login: ret}


# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/pdbedit.py
0.42656
0.17892
pdbedit.py
pypi
import salt.utils.args try: import redis HAS_REDIS = True except ImportError: HAS_REDIS = False __virtualname__ = "redis" def __virtual__(): """ Only load this module if redis python module is installed """ if HAS_REDIS: return __virtualname__ else: return ( False, "The redis execution module failed to load: the redis python library is not" " available.", ) def _connect(host=None, port=None, db=None, password=None): """ Returns an instance of the redis client """ if not host: host = __salt__["config.option"]("redis.host") if not port: port = __salt__["config.option"]("redis.port") if not db: db = __salt__["config.option"]("redis.db") if not password: password = __salt__["config.option"]("redis.password") return redis.StrictRedis(host, port, db, password, decode_responses=True) def _sconnect(host=None, port=None, password=None): """ Returns an instance of the redis client """ if host is None: host = __salt__["config.option"]("redis_sentinel.host", "localhost") if port is None: port = __salt__["config.option"]("redis_sentinel.port", 26379) if password is None: password = __salt__["config.option"]("redis_sentinel.password") return redis.StrictRedis(host, port, password=password, decode_responses=True) def bgrewriteaof(host=None, port=None, db=None, password=None): """ Asynchronously rewrite the append-only file CLI Example: .. code-block:: bash salt '*' redis.bgrewriteaof """ server = _connect(host, port, db, password) return server.bgrewriteaof() def bgsave(host=None, port=None, db=None, password=None): """ Asynchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.bgsave """ server = _connect(host, port, db, password) return server.bgsave() def config_get(pattern="*", host=None, port=None, db=None, password=None): """ Get redis server configuration values CLI Example: .. 
code-block:: bash salt '*' redis.config_get salt '*' redis.config_get port """ server = _connect(host, port, db, password) return server.config_get(pattern) def config_set(name, value, host=None, port=None, db=None, password=None): """ Set redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_set masterauth luv_kittens """ server = _connect(host, port, db, password) return server.config_set(name, value) def dbsize(host=None, port=None, db=None, password=None): """ Return the number of keys in the selected database CLI Example: .. code-block:: bash salt '*' redis.dbsize """ server = _connect(host, port, db, password) return server.dbsize() def delete(*keys, **connection_args): """ Deletes the keys from redis, returns number of keys deleted CLI Example: .. code-block:: bash salt '*' redis.delete foo """ # Get connection args from keywords if set conn_args = {} for arg in ["host", "port", "db", "password"]: if arg in connection_args: conn_args[arg] = connection_args[arg] server = _connect(**conn_args) return server.delete(*keys) def exists(key, host=None, port=None, db=None, password=None): """ Return true if the key exists in redis CLI Example: .. code-block:: bash salt '*' redis.exists foo """ server = _connect(host, port, db, password) return server.exists(key) def expire(key, seconds, host=None, port=None, db=None, password=None): """ Set a keys time to live in seconds CLI Example: .. code-block:: bash salt '*' redis.expire foo 300 """ server = _connect(host, port, db, password) return server.expire(key, seconds) def expireat(key, timestamp, host=None, port=None, db=None, password=None): """ Set a keys expire at given UNIX time CLI Example: .. code-block:: bash salt '*' redis.expireat foo 1400000000 """ server = _connect(host, port, db, password) return server.expireat(key, timestamp) def flushall(host=None, port=None, db=None, password=None): """ Remove all keys from all databases CLI Example: .. 
code-block:: bash salt '*' redis.flushall """ server = _connect(host, port, db, password) return server.flushall() def flushdb(host=None, port=None, db=None, password=None): """ Remove all keys from the selected database CLI Example: .. code-block:: bash salt '*' redis.flushdb """ server = _connect(host, port, db, password) return server.flushdb() def get_key(key, host=None, port=None, db=None, password=None): """ Get redis key value CLI Example: .. code-block:: bash salt '*' redis.get_key foo """ server = _connect(host, port, db, password) return server.get(key) def hdel(key, *fields, **options): """ Delete one of more hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hdel foo_hash bar_field1 bar_field2 """ host = options.get("host", None) port = options.get("port", None) database = options.get("db", None) password = options.get("password", None) server = _connect(host, port, database, password) return server.hdel(key, *fields) def hexists(key, field, host=None, port=None, db=None, password=None): """ Determine if a hash fields exists. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hexists foo_hash bar_field """ server = _connect(host, port, db, password) return server.hexists(key, field) def hget(key, field, host=None, port=None, db=None, password=None): """ Get specific field value from a redis hash, returns dict CLI Example: .. code-block:: bash salt '*' redis.hget foo_hash bar_field """ server = _connect(host, port, db, password) return server.hget(key, field) def hgetall(key, host=None, port=None, db=None, password=None): """ Get all fields and values from a redis hash, returns dict CLI Example: .. code-block:: bash salt '*' redis.hgetall foo_hash """ server = _connect(host, port, db, password) return server.hgetall(key) def hincrby(key, field, increment=1, host=None, port=None, db=None, password=None): """ Increment the integer value of a hash field by the given number. .. 
versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrby foo_hash bar_field 5 """ server = _connect(host, port, db, password) return server.hincrby(key, field, amount=increment) def hincrbyfloat( key, field, increment=1.0, host=None, port=None, db=None, password=None ): """ Increment the float value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrbyfloat foo_hash bar_field 5.17 """ server = _connect(host, port, db, password) return server.hincrbyfloat(key, field, amount=increment) def hlen(key, host=None, port=None, db=None, password=None): """ Returns number of fields of a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hlen foo_hash """ server = _connect(host, port, db, password) return server.hlen(key) def hmget(key, *fields, **options): """ Returns the values of all the given hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmget foo_hash bar_field1 bar_field2 """ host = options.get("host", None) port = options.get("port", None) database = options.get("db", None) password = options.get("password", None) server = _connect(host, port, database, password) return server.hmget(key, *fields) def hmset(key, **fieldsvals): """ Sets multiple hash fields to multiple values. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2 """ host = fieldsvals.pop("host", None) port = fieldsvals.pop("port", None) database = fieldsvals.pop("db", None) password = fieldsvals.pop("password", None) server = _connect(host, port, database, password) return server.hmset(key, salt.utils.args.clean_kwargs(**fieldsvals)) def hset(key, field, value, host=None, port=None, db=None, password=None): """ Set the value of a hash field. .. versionadded:: 2017.7.0 CLI Example: .. 
code-block:: bash salt '*' redis.hset foo_hash bar_field bar_value """ server = _connect(host, port, db, password) return server.hset(key, field, value) def hsetnx(key, field, value, host=None, port=None, db=None, password=None): """ Set the value of a hash field only if the field does not exist. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hsetnx foo_hash bar_field bar_value """ server = _connect(host, port, db, password) return server.hsetnx(key, field, value) def hvals(key, host=None, port=None, db=None, password=None): """ Return all the values in a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hvals foo_hash bar_field1 bar_value1 """ server = _connect(host, port, db, password) return server.hvals(key) def hscan( key, cursor=0, match=None, count=None, host=None, port=None, db=None, password=None ): """ Incrementally iterate hash fields and associated values. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hscan foo_hash match='field_prefix_*' count=1 """ server = _connect(host, port, db, password) return server.hscan(key, cursor=cursor, match=match, count=count) def info(host=None, port=None, db=None, password=None): """ Get information and statistics about the server CLI Example: .. code-block:: bash salt '*' redis.info """ server = _connect(host, port, db, password) return server.info() def keys(pattern="*", host=None, port=None, db=None, password=None): """ Get redis keys, supports glob style patterns CLI Example: .. code-block:: bash salt '*' redis.keys salt '*' redis.keys test* """ server = _connect(host, port, db, password) return server.keys(pattern) def key_type(key, host=None, port=None, db=None, password=None): """ Get redis key type CLI Example: .. 
code-block:: bash salt '*' redis.type foo """ server = _connect(host, port, db, password) return server.type(key) def lastsave(host=None, port=None, db=None, password=None): """ Get the UNIX time in seconds of the last successful save to disk CLI Example: .. code-block:: bash salt '*' redis.lastsave """ # Use of %s to get the timestamp is not supported by Python. The reason it # works is because it's passed to the system strftime which may not support # it. See: https://stackoverflow.com/a/11743262 server = _connect(host, port, db, password) return int(server.lastsave().timestamp()) def llen(key, host=None, port=None, db=None, password=None): """ Get the length of a list in Redis CLI Example: .. code-block:: bash salt '*' redis.llen foo_list """ server = _connect(host, port, db, password) return server.llen(key) def lrange(key, start, stop, host=None, port=None, db=None, password=None): """ Get a range of values from a list in Redis CLI Example: .. code-block:: bash salt '*' redis.lrange foo_list 0 10 """ server = _connect(host, port, db, password) return server.lrange(key, start, stop) def ping(host=None, port=None, db=None, password=None): """ Ping the server, returns False on connection errors CLI Example: .. code-block:: bash salt '*' redis.ping """ server = _connect(host, port, db, password) try: return server.ping() except redis.ConnectionError: return False def save(host=None, port=None, db=None, password=None): """ Synchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.save """ server = _connect(host, port, db, password) return server.save() def set_key(key, value, host=None, port=None, db=None, password=None): """ Set redis key value CLI Example: .. code-block:: bash salt '*' redis.set_key foo bar """ server = _connect(host, port, db, password) return server.set(key, value) def shutdown(host=None, port=None, db=None, password=None): """ Synchronously save the dataset to disk and then shut down the server CLI Example: .. 
code-block:: bash salt '*' redis.shutdown """ server = _connect(host, port, db, password) try: # Return false if unable to ping server server.ping() except redis.ConnectionError: return False server.shutdown() try: # This should fail now if the server is shutdown, which we want server.ping() except redis.ConnectionError: return True return False def slaveof( master_host=None, master_port=None, host=None, port=None, db=None, password=None ): """ Make the server a slave of another instance, or promote it as master CLI Example: .. code-block:: bash # Become slave of redis-n01.example.com:6379 salt '*' redis.slaveof redis-n01.example.com 6379 salt '*' redis.slaveof redis-n01.example.com # Become master salt '*' redis.slaveof """ if master_host and not master_port: master_port = 6379 server = _connect(host, port, db, password) return server.slaveof(master_host, master_port) def smembers(key, host=None, port=None, db=None, password=None): """ Get members in a Redis set CLI Example: .. code-block:: bash salt '*' redis.smembers foo_set """ server = _connect(host, port, db, password) return list(server.smembers(key)) def time(host=None, port=None, db=None, password=None): """ Return the current server UNIX time in seconds CLI Example: .. code-block:: bash salt '*' redis.time """ server = _connect(host, port, db, password) return server.time()[0] def zcard(key, host=None, port=None, db=None, password=None): """ Get the length of a sorted set in Redis CLI Example: .. code-block:: bash salt '*' redis.zcard foo_sorted """ server = _connect(host, port, db, password) return server.zcard(key) def zrange(key, start, stop, host=None, port=None, db=None, password=None): """ Get a range of values from a sorted set in Redis by index CLI Example: .. 
code-block:: bash salt '*' redis.zrange foo_sorted 0 10 """ server = _connect(host, port, db, password) return server.zrange(key, start, stop) def sentinel_get_master_ip(master, host=None, port=None, password=None): """ Get ip for sentinel master .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.sentinel_get_master_ip 'mymaster' """ server = _sconnect(host, port, password) ret = server.sentinel_get_master_addr_by_name(master) return dict(list(zip(("master_host", "master_port"), ret))) def get_master_ip(host=None, port=None, password=None): """ Get host information about slave .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.get_master_ip """ server = _connect(host, port, password) srv_info = server.info() ret = (srv_info.get("master_host", ""), srv_info.get("master_port", "")) return dict(list(zip(("master_host", "master_port"), ret)))
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/redismod.py
0.729809
0.177347
redismod.py
pypi
HAS_SHADE = False try: import shade from shade.exc import OpenStackCloudException HAS_SHADE = True except ImportError: pass __virtualname__ = "keystoneng" def __virtual__(): """ Only load this module if shade python module is installed """ if HAS_SHADE: return __virtualname__ return ( False, "The keystoneng execution module failed to load: shade python module is not" " available", ) def compare_changes(obj, **kwargs): """ Compare two dicts returning only keys that exist in the first dict and are different in the second one """ changes = {} for k, v in obj.items(): if k in kwargs: if v != kwargs[k]: changes[k] = kwargs[k] return changes def get_entity(ent_type, **kwargs): """ Attempt to query Keystone for more information about an entity """ try: func = "keystoneng.{}_get".format(ent_type) ent = __salt__[func](**kwargs) except OpenStackCloudException as e: # NOTE(SamYaple): If this error was something other than Forbidden we # reraise the issue since we are not prepared to handle it if "HTTP 403" not in e.inner_exception[1][0]: raise # NOTE(SamYaple): The user may be authorized to perform the function # they are trying to do, but not authorized to search. 
In such a # situation we want to trust that the user has passed a valid id, even # though we cannot validate that this is a valid id ent = kwargs["name"] return ent def _clean_kwargs(keep_name=False, **kwargs): """ Sanatize the arguments for use with shade """ if "name" in kwargs and not keep_name: kwargs["name_or_id"] = kwargs.pop("name") return __utils__["args.clean_kwargs"](**kwargs) def setup_clouds(auth=None): """ Call functions to create Shade cloud objects in __context__ to take advantage of Shade's in-memory caching across several states """ get_operator_cloud(auth) get_openstack_cloud(auth) def get_operator_cloud(auth=None): """ Return an operator_cloud """ if auth is None: auth = __salt__["config.option"]("keystone", {}) if "shade_opcloud" in __context__: if __context__["shade_opcloud"].auth == auth: return __context__["shade_opcloud"] __context__["shade_opcloud"] = shade.operator_cloud(**auth) return __context__["shade_opcloud"] def get_openstack_cloud(auth=None): """ Return an openstack_cloud """ if auth is None: auth = __salt__["config.option"]("keystone", {}) if "shade_oscloud" in __context__: if __context__["shade_oscloud"].auth == auth: return __context__["shade_oscloud"] __context__["shade_oscloud"] = shade.openstack_cloud(**auth) return __context__["shade_oscloud"] def group_create(auth=None, **kwargs): """ Create a group CLI Example: .. code-block:: bash salt '*' keystoneng.group_create name=group1 salt '*' keystoneng.group_create name=group2 domain=domain1 description='my group2' """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(keep_name=True, **kwargs) return cloud.create_group(**kwargs) def group_delete(auth=None, **kwargs): """ Delete a group CLI Example: .. 
code-block:: bash salt '*' keystoneng.group_delete name=group1 salt '*' keystoneng.group_delete name=group2 domain_id=b62e76fbeeff4e8fb77073f591cf211e salt '*' keystoneng.group_delete name=0e4febc2a5ab4f2c8f374b054162506d """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.delete_group(**kwargs) def group_update(auth=None, **kwargs): """ Update a group CLI Example: .. code-block:: bash salt '*' keystoneng.group_update name=group1 description='new description' salt '*' keystoneng.group_create name=group2 domain_id=b62e76fbeeff4e8fb77073f591cf211e new_name=newgroupname salt '*' keystoneng.group_create name=0e4febc2a5ab4f2c8f374b054162506d new_name=newgroupname """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) if "new_name" in kwargs: kwargs["name"] = kwargs.pop("new_name") return cloud.update_group(**kwargs) def group_list(auth=None, **kwargs): """ List groups CLI Example: .. code-block:: bash salt '*' keystoneng.group_list salt '*' keystoneng.group_list domain_id=b62e76fbeeff4e8fb77073f591cf211e """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.list_groups(**kwargs) def group_search(auth=None, **kwargs): """ Search for groups CLI Example: .. code-block:: bash salt '*' keystoneng.group_search name=group1 salt '*' keystoneng.group_search domain_id=b62e76fbeeff4e8fb77073f591cf211e """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.search_groups(**kwargs) def group_get(auth=None, **kwargs): """ Get a single group CLI Example: .. code-block:: bash salt '*' keystoneng.group_get name=group1 salt '*' keystoneng.group_get name=group2 domain_id=b62e76fbeeff4e8fb77073f591cf211e salt '*' keystoneng.group_get name=0e4febc2a5ab4f2c8f374b054162506d """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.get_group(**kwargs) def project_create(auth=None, **kwargs): """ Create a project CLI Example: .. 
code-block:: bash salt '*' keystoneng.project_create name=project1 salt '*' keystoneng.project_create name=project2 domain_id=b62e76fbeeff4e8fb77073f591cf211e salt '*' keystoneng.project_create name=project3 enabled=False description='my project3' """ cloud = get_openstack_cloud(auth) kwargs = _clean_kwargs(keep_name=True, **kwargs) return cloud.create_project(**kwargs) def project_delete(auth=None, **kwargs): """ Delete a project CLI Example: .. code-block:: bash salt '*' keystoneng.project_delete name=project1 salt '*' keystoneng.project_delete name=project2 domain_id=b62e76fbeeff4e8fb77073f591cf211e salt '*' keystoneng.project_delete name=f315afcf12f24ad88c92b936c38f2d5a """ cloud = get_openstack_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.delete_project(**kwargs) def project_update(auth=None, **kwargs): """ Update a project CLI Example: .. code-block:: bash salt '*' keystoneng.project_update name=project1 new_name=newproject salt '*' keystoneng.project_update name=project2 enabled=False description='new description' """ cloud = get_openstack_cloud(auth) kwargs = _clean_kwargs(**kwargs) if "new_name" in kwargs: kwargs["name"] = kwargs.pop("new_name") return cloud.update_project(**kwargs) def project_list(auth=None, **kwargs): """ List projects CLI Example: .. code-block:: bash salt '*' keystoneng.project_list salt '*' keystoneng.project_list domain_id=b62e76fbeeff4e8fb77073f591cf211e """ cloud = get_openstack_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.list_projects(**kwargs) def project_search(auth=None, **kwargs): """ Search projects CLI Example: .. code-block:: bash salt '*' keystoneng.project_search salt '*' keystoneng.project_search name=project1 salt '*' keystoneng.project_search domain_id=b62e76fbeeff4e8fb77073f591cf211e """ cloud = get_openstack_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.search_projects(**kwargs) def project_get(auth=None, **kwargs): """ Get a single project CLI Example: .. 
code-block:: bash salt '*' keystoneng.project_get name=project1 salt '*' keystoneng.project_get name=project2 domain_id=b62e76fbeeff4e8fb77073f591cf211e salt '*' keystoneng.project_get name=f315afcf12f24ad88c92b936c38f2d5a """ cloud = get_openstack_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.get_project(**kwargs) def domain_create(auth=None, **kwargs): """ Create a domain CLI Example: .. code-block:: bash salt '*' keystoneng.domain_create name=domain1 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(keep_name=True, **kwargs) return cloud.create_domain(**kwargs) def domain_delete(auth=None, **kwargs): """ Delete a domain CLI Example: .. code-block:: bash salt '*' keystoneng.domain_delete name=domain1 salt '*' keystoneng.domain_delete name=b62e76fbeeff4e8fb77073f591cf211e """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.delete_domain(**kwargs) def domain_update(auth=None, **kwargs): """ Update a domain CLI Example: .. code-block:: bash salt '*' keystoneng.domain_update name=domain1 new_name=newdomain salt '*' keystoneng.domain_update name=domain1 enabled=True description='new description' """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) if "new_name" in kwargs: kwargs["name"] = kwargs.pop("new_name") return cloud.update_domain(**kwargs) def domain_list(auth=None, **kwargs): """ List domains CLI Example: .. code-block:: bash salt '*' keystoneng.domain_list """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.list_domains(**kwargs) def domain_search(auth=None, **kwargs): """ Search domains CLI Example: .. code-block:: bash salt '*' keystoneng.domain_search salt '*' keystoneng.domain_search name=domain1 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.search_domains(**kwargs) def domain_get(auth=None, **kwargs): """ Get a single domain CLI Example: .. 
code-block:: bash salt '*' keystoneng.domain_get name=domain1 salt '*' keystoneng.domain_get name=b62e76fbeeff4e8fb77073f591cf211e """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.get_domain(**kwargs) def role_create(auth=None, **kwargs): """ Create a role CLI Example: .. code-block:: bash salt '*' keystoneng.role_create name=role1 salt '*' keystoneng.role_create name=role1 domain_id=b62e76fbeeff4e8fb77073f591cf211e """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(keep_name=True, **kwargs) return cloud.create_role(**kwargs) def role_delete(auth=None, **kwargs): """ Delete a role CLI Example: .. code-block:: bash salt '*' keystoneng.role_delete name=role1 domain_id=b62e76fbeeff4e8fb77073f591cf211e salt '*' keystoneng.role_delete name=1eb6edd5525e4ac39af571adee673559 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.delete_role(**kwargs) def role_update(auth=None, **kwargs): """ Update a role CLI Example: .. code-block:: bash salt '*' keystoneng.role_update name=role1 new_name=newrole salt '*' keystoneng.role_update name=1eb6edd5525e4ac39af571adee673559 new_name=newrole """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) if "new_name" in kwargs: kwargs["name"] = kwargs.pop("new_name") return cloud.update_role(**kwargs) def role_list(auth=None, **kwargs): """ List roles CLI Example: .. code-block:: bash salt '*' keystoneng.role_list salt '*' keystoneng.role_list domain_id=b62e76fbeeff4e8fb77073f591cf211e """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.list_roles(**kwargs) def role_search(auth=None, **kwargs): """ Search roles CLI Example: .. 
code-block:: bash salt '*' keystoneng.role_search salt '*' keystoneng.role_search name=role1 salt '*' keystoneng.role_search domain_id=b62e76fbeeff4e8fb77073f591cf211e """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.search_roles(**kwargs) def role_get(auth=None, **kwargs): """ Get a single role CLI Example: .. code-block:: bash salt '*' keystoneng.role_get name=role1 salt '*' keystoneng.role_get name=role1 domain_id=b62e76fbeeff4e8fb77073f591cf211e salt '*' keystoneng.role_get name=1eb6edd5525e4ac39af571adee673559 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.get_role(**kwargs) def user_create(auth=None, **kwargs): """ Create a user CLI Example: .. code-block:: bash salt '*' keystoneng.user_create name=user1 salt '*' keystoneng.user_create name=user2 password=1234 enabled=False salt '*' keystoneng.user_create name=user3 domain_id=b62e76fbeeff4e8fb77073f591cf211e """ cloud = get_openstack_cloud(auth) kwargs = _clean_kwargs(keep_name=True, **kwargs) return cloud.create_user(**kwargs) def user_delete(auth=None, **kwargs): """ Delete a user CLI Example: .. code-block:: bash salt '*' keystoneng.user_delete name=user1 salt '*' keystoneng.user_delete name=user2 domain_id=b62e76fbeeff4e8fb77073f591cf211e salt '*' keystoneng.user_delete name=a42cbbfa1e894e839fd0f584d22e321f """ cloud = get_openstack_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.delete_user(**kwargs) def user_update(auth=None, **kwargs): """ Update a user CLI Example: .. code-block:: bash salt '*' keystoneng.user_update name=user1 enabled=False description='new description' salt '*' keystoneng.user_update name=user1 new_name=newuser """ cloud = get_openstack_cloud(auth) kwargs = _clean_kwargs(**kwargs) if "new_name" in kwargs: kwargs["name"] = kwargs.pop("new_name") return cloud.update_user(**kwargs) def user_list(auth=None, **kwargs): """ List users CLI Example: .. 
code-block:: bash salt '*' keystoneng.user_list salt '*' keystoneng.user_list domain_id=b62e76fbeeff4e8fb77073f591cf211e """ cloud = get_openstack_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.list_users(**kwargs) def user_search(auth=None, **kwargs): """ List users CLI Example: .. code-block:: bash salt '*' keystoneng.user_list salt '*' keystoneng.user_list domain_id=b62e76fbeeff4e8fb77073f591cf211e """ cloud = get_openstack_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.search_users(**kwargs) def user_get(auth=None, **kwargs): """ Get a single user CLI Example: .. code-block:: bash salt '*' keystoneng.user_get name=user1 salt '*' keystoneng.user_get name=user1 domain_id=b62e76fbeeff4e8fb77073f591cf211e salt '*' keystoneng.user_get name=02cffaa173b2460f98e40eda3748dae5 """ cloud = get_openstack_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.get_user(**kwargs) def endpoint_create(auth=None, **kwargs): """ Create an endpoint CLI Example: .. code-block:: bash salt '*' keystoneng.endpoint_create interface=admin service=glance url=https://example.org:9292 salt '*' keystoneng.endpoint_create interface=public service=glance region=RegionOne url=https://example.org:9292 salt '*' keystoneng.endpoint_create interface=admin service=glance url=https://example.org:9292 enabled=True """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(keep_name=True, **kwargs) return cloud.create_endpoint(**kwargs) def endpoint_delete(auth=None, **kwargs): """ Delete an endpoint CLI Example: .. code-block:: bash salt '*' keystoneng.endpoint_delete id=3bee4bd8c2b040ee966adfda1f0bfca9 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.delete_endpoint(**kwargs) def endpoint_update(auth=None, **kwargs): """ Update an endpoint CLI Example: .. 
code-block:: bash salt '*' keystoneng.endpoint_update endpoint_id=4f961ad09d2d48948896bbe7c6a79717 interface=public enabled=False salt '*' keystoneng.endpoint_update endpoint_id=4f961ad09d2d48948896bbe7c6a79717 region=newregion salt '*' keystoneng.endpoint_update endpoint_id=4f961ad09d2d48948896bbe7c6a79717 service_name_or_id=glance url=https://example.org:9292 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.update_endpoint(**kwargs) def endpoint_list(auth=None, **kwargs): """ List endpoints CLI Example: .. code-block:: bash salt '*' keystoneng.endpoint_list """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.list_endpoints(**kwargs) def endpoint_search(auth=None, **kwargs): """ Search endpoints CLI Example: .. code-block:: bash salt '*' keystoneng.endpoint_search salt '*' keystoneng.endpoint_search id=02cffaa173b2460f98e40eda3748dae5 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.search_endpoints(**kwargs) def endpoint_get(auth=None, **kwargs): """ Get a single endpoint CLI Example: .. code-block:: bash salt '*' keystoneng.endpoint_get id=02cffaa173b2460f98e40eda3748dae5 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.get_endpoint(**kwargs) def service_create(auth=None, **kwargs): """ Create a service CLI Example: .. code-block:: bash salt '*' keystoneng.service_create name=glance type=image salt '*' keystoneng.service_create name=glance type=image description="Image" """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(keep_name=True, **kwargs) return cloud.create_service(**kwargs) def service_delete(auth=None, **kwargs): """ Delete a service CLI Example: .. 
code-block:: bash salt '*' keystoneng.service_delete name=glance salt '*' keystoneng.service_delete name=39cc1327cdf744ab815331554430e8ec """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.delete_service(**kwargs) def service_update(auth=None, **kwargs): """ Update a service CLI Example: .. code-block:: bash salt '*' keystoneng.service_update name=cinder type=volumev2 salt '*' keystoneng.service_update name=cinder description='new description' salt '*' keystoneng.service_update name=ab4d35e269f147b3ae2d849f77f5c88f enabled=False """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.update_service(**kwargs) def service_list(auth=None, **kwargs): """ List services CLI Example: .. code-block:: bash salt '*' keystoneng.service_list """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.list_services(**kwargs) def service_search(auth=None, **kwargs): """ Search services CLI Example: .. code-block:: bash salt '*' keystoneng.service_search salt '*' keystoneng.service_search name=glance salt '*' keystoneng.service_search name=135f0403f8e544dc9008c6739ecda860 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.search_services(**kwargs) def service_get(auth=None, **kwargs): """ Get a single service CLI Example: .. code-block:: bash salt '*' keystoneng.service_get name=glance salt '*' keystoneng.service_get name=75a5804638944b3ab54f7fbfcec2305a """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.get_service(**kwargs) def role_assignment_list(auth=None, **kwargs): """ List role assignments CLI Example: .. code-block:: bash salt '*' keystoneng.role_assignment_list """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.list_role_assignments(**kwargs) def role_grant(auth=None, **kwargs): """ Grant a role in a project/domain to a user/group CLI Example: .. 
code-block:: bash salt '*' keystoneng.role_grant name=role1 user=user1 project=project1 salt '*' keystoneng.role_grant name=ddbe3e0ed74e4c7f8027bad4af03339d group=user1 project=project1 domain=domain1 salt '*' keystoneng.role_grant name=ddbe3e0ed74e4c7f8027bad4af03339d group=19573afd5e4241d8b65c42215bae9704 project=1dcac318a83b4610b7a7f7ba01465548 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.grant_role(**kwargs) def role_revoke(auth=None, **kwargs): """ Grant a role in a project/domain to a user/group CLI Example: .. code-block:: bash salt '*' keystoneng.role_revoke name=role1 user=user1 project=project1 salt '*' keystoneng.role_revoke name=ddbe3e0ed74e4c7f8027bad4af03339d group=user1 project=project1 domain=domain1 salt '*' keystoneng.role_revoke name=ddbe3e0ed74e4c7f8027bad4af03339d group=19573afd5e4241d8b65c42215bae9704 project=1dcac318a83b4610b7a7f7ba01465548 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.revoke_role(**kwargs)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/keystoneng.py
0.622115
0.154567
keystoneng.py
pypi
import logging import salt.utils.path log = logging.getLogger(__name__) def __virtual__(): """ Only work on Gentoo systems with eselect installed """ if __grains__["os"] == "Gentoo" and salt.utils.path.which("eselect"): return "eselect" return ( False, "The eselect execution module cannot be loaded: either the system is not Gentoo" " or the eselect binary is not in the path.", ) def exec_action( module, action, module_parameter=None, action_parameter=None, state_only=False ): """ Execute an arbitrary action on a module. module name of the module to be executed action name of the module's action to be run module_parameter additional params passed to the defined module action_parameter additional params passed to the defined action state_only don't return any output but only the success/failure of the operation CLI Example (updating the ``php`` implementation used for ``apache2``): .. code-block:: bash salt '*' eselect.exec_action php update action_parameter='apache2' """ out = __salt__["cmd.run"]( "eselect --brief --colour=no {} {} {} {}".format( module, module_parameter or "", action, action_parameter or "" ), python_shell=False, ) out = out.strip().split("\n") if out[0].startswith("!!! Error"): return False if state_only: return True if not out: return False if len(out) == 1 and not out[0].strip(): return False return out def get_modules(): """ List available ``eselect`` modules. CLI Example: .. code-block:: bash salt '*' eselect.get_modules """ modules = [] module_list = exec_action("modules", "list", action_parameter="--only-names") if not module_list: return None for module in module_list: if module not in ["help", "usage", "version"]: modules.append(module) return modules def get_target_list(module, action_parameter=None): """ List available targets for the given module. module name of the module to be queried for its targets action_parameter additional params passed to the defined action .. versionadded:: 2016.11.0 CLI Example: .. 
code-block:: bash salt '*' eselect.get_target_list kernel """ exec_output = exec_action(module, "list", action_parameter=action_parameter) if not exec_output: return None target_list = [] if isinstance(exec_output, list): for item in exec_output: target_list.append(item.split(None, 1)[0]) return target_list return None def get_current_target(module, module_parameter=None, action_parameter=None): """ Get the currently selected target for the given module. module name of the module to be queried for its current target module_parameter additional params passed to the defined module action_parameter additional params passed to the 'show' action CLI Example (current target of system-wide ``java-vm``): .. code-block:: bash salt '*' eselect.get_current_target java-vm action_parameter='system' CLI Example (current target of ``kernel`` symlink): .. code-block:: bash salt '*' eselect.get_current_target kernel """ result = exec_action( module, "show", module_parameter=module_parameter, action_parameter=action_parameter, )[0] if not result: return None if result == "(unset)": return None return result def set_target(module, target, module_parameter=None, action_parameter=None): """ Set the target for the given module. Target can be specified by index or name. module name of the module for which a target should be set target name of the target to be set for this module module_parameter additional params passed to the defined module action_parameter additional params passed to the defined action CLI Example (setting target of system-wide ``java-vm``): .. code-block:: bash salt '*' eselect.set_target java-vm icedtea-bin-7 action_parameter='system' CLI Example (setting target of ``kernel`` symlink): .. 
code-block:: bash salt '*' eselect.set_target kernel linux-3.17.5-gentoo """ if action_parameter: action_parameter = "{} {}".format(action_parameter, target) else: action_parameter = target # get list of available modules if module not in get_modules(): log.error("Module %s not available", module) return False exec_result = exec_action( module, "set", module_parameter=module_parameter, action_parameter=action_parameter, state_only=True, ) if exec_result: return exec_result return False
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/eselect.py
0.725551
0.221067
eselect.py
pypi
import salt.utils.decorators as decorators import salt.utils.path from salt.exceptions import SaltException __func_alias__ = {"list_": "list"} # Cache the output of running which('ipvsadm') @decorators.memoize def __detect_os(): return salt.utils.path.which("ipvsadm") def __virtual__(): """ Only load if ipvsadm command exists on the system. """ if not __detect_os(): return ( False, "The lvs execution module cannot be loaded: the ipvsadm binary is not in" " the path.", ) return "lvs" def _build_cmd(**kwargs): """ Build a well-formatted ipvsadm command based on kwargs. """ cmd = "" if "service_address" in kwargs: if kwargs["service_address"]: if "protocol" in kwargs: if kwargs["protocol"] == "tcp": cmd += " -t {}".format(kwargs["service_address"]) elif kwargs["protocol"] == "udp": cmd += " -u {}".format(kwargs["service_address"]) elif kwargs["protocol"] == "fwmark": cmd += " -f {}".format(kwargs["service_address"]) else: raise SaltException( "Error: Only support tcp, udp and fwmark service protocol" ) del kwargs["protocol"] else: raise SaltException("Error: protocol should specified") if "scheduler" in kwargs: if kwargs["scheduler"]: cmd += " -s {}".format(kwargs["scheduler"]) del kwargs["scheduler"] else: raise SaltException("Error: service_address should specified") del kwargs["service_address"] if "server_address" in kwargs: if kwargs["server_address"]: cmd += " -r {}".format(kwargs["server_address"]) if "packet_forward_method" in kwargs and kwargs["packet_forward_method"]: if kwargs["packet_forward_method"] == "dr": cmd += " -g" elif kwargs["packet_forward_method"] == "tunnel": cmd += " -i" elif kwargs["packet_forward_method"] == "nat": cmd += " -m" else: raise SaltException("Error: only support dr, tunnel and nat") del kwargs["packet_forward_method"] if "weight" in kwargs and kwargs["weight"]: cmd += " -w {}".format(kwargs["weight"]) del kwargs["weight"] else: raise SaltException("Error: server_address should specified") del kwargs["server_address"] return cmd 
def add_service(protocol=None, service_address=None, scheduler="wlc"): """ Add a virtual service. protocol The service protocol(only support tcp, udp and fwmark service). service_address The LVS service address. scheduler Algorithm for allocating TCP connections and UDP datagrams to real servers. CLI Example: .. code-block:: bash salt '*' lvs.add_service tcp 1.1.1.1:80 rr """ cmd = "{} -A {}".format( __detect_os(), _build_cmd( protocol=protocol, service_address=service_address, scheduler=scheduler ), ) out = __salt__["cmd.run_all"](cmd, python_shell=False) # A non-zero return code means fail if out["retcode"]: ret = out["stderr"].strip() else: ret = True return ret def edit_service(protocol=None, service_address=None, scheduler=None): """ Edit the virtual service. protocol The service protocol(only support tcp, udp and fwmark service). service_address The LVS service address. scheduler Algorithm for allocating TCP connections and UDP datagrams to real servers. CLI Example: .. code-block:: bash salt '*' lvs.edit_service tcp 1.1.1.1:80 rr """ cmd = "{} -E {}".format( __detect_os(), _build_cmd( protocol=protocol, service_address=service_address, scheduler=scheduler ), ) out = __salt__["cmd.run_all"](cmd, python_shell=False) # A non-zero return code means fail if out["retcode"]: ret = out["stderr"].strip() else: ret = True return ret def delete_service(protocol=None, service_address=None): """ Delete the virtual service. protocol The service protocol(only support tcp, udp and fwmark service). service_address The LVS service address. CLI Example: .. 
code-block:: bash salt '*' lvs.delete_service tcp 1.1.1.1:80 """ cmd = "{} -D {}".format( __detect_os(), _build_cmd(protocol=protocol, service_address=service_address) ) out = __salt__["cmd.run_all"](cmd, python_shell=False) # A non-zero return code means fail if out["retcode"]: ret = out["stderr"].strip() else: ret = True return ret def add_server( protocol=None, service_address=None, server_address=None, packet_forward_method="dr", weight=1, **kwargs ): """ Add a real server to a virtual service. protocol The service protocol(only support ``tcp``, ``udp`` and ``fwmark`` service). service_address The LVS service address. server_address The real server address. packet_forward_method The LVS packet forwarding method(``dr`` for direct routing, ``tunnel`` for tunneling, ``nat`` for network access translation). weight The capacity of a server relative to the others in the pool. CLI Example: .. code-block:: bash salt '*' lvs.add_server tcp 1.1.1.1:80 192.168.0.11:8080 nat 1 """ cmd = "{} -a {}".format( __detect_os(), _build_cmd( protocol=protocol, service_address=service_address, server_address=server_address, packet_forward_method=packet_forward_method, weight=weight, **kwargs ), ) out = __salt__["cmd.run_all"](cmd, python_shell=False) # A non-zero return code means fail if out["retcode"]: ret = out["stderr"].strip() else: ret = True return ret def edit_server( protocol=None, service_address=None, server_address=None, packet_forward_method=None, weight=None, **kwargs ): """ Edit a real server to a virtual service. protocol The service protocol(only support ``tcp``, ``udp`` and ``fwmark`` service). service_address The LVS service address. server_address The real server address. packet_forward_method The LVS packet forwarding method(``dr`` for direct routing, ``tunnel`` for tunneling, ``nat`` for network access translation). weight The capacity of a server relative to the others in the pool. CLI Example: .. 
code-block:: bash salt '*' lvs.edit_server tcp 1.1.1.1:80 192.168.0.11:8080 nat 1 """ cmd = "{} -e {}".format( __detect_os(), _build_cmd( protocol=protocol, service_address=service_address, server_address=server_address, packet_forward_method=packet_forward_method, weight=weight, **kwargs ), ) out = __salt__["cmd.run_all"](cmd, python_shell=False) # A non-zero return code means fail if out["retcode"]: ret = out["stderr"].strip() else: ret = True return ret def delete_server(protocol=None, service_address=None, server_address=None): """ Delete the realserver from the virtual service. protocol The service protocol(only support ``tcp``, ``udp`` and ``fwmark`` service). service_address The LVS service address. server_address The real server address. CLI Example: .. code-block:: bash salt '*' lvs.delete_server tcp 1.1.1.1:80 192.168.0.11:8080 """ cmd = "{} -d {}".format( __detect_os(), _build_cmd( protocol=protocol, service_address=service_address, server_address=server_address, ), ) out = __salt__["cmd.run_all"](cmd, python_shell=False) # A non-zero return code means fail if out["retcode"]: ret = out["stderr"].strip() else: ret = True return ret def clear(): """ Clear the virtual server table CLI Example: .. code-block:: bash salt '*' lvs.clear """ cmd = "{} -C".format(__detect_os()) out = __salt__["cmd.run_all"](cmd, python_shell=False) # A non-zero return code means fail if out["retcode"]: ret = out["stderr"].strip() else: ret = True return ret def get_rules(): """ Get the virtual server rules CLI Example: .. code-block:: bash salt '*' lvs.get_rules """ cmd = "{} -S -n".format(__detect_os()) ret = __salt__["cmd.run"](cmd, python_shell=False) return ret def list_(protocol=None, service_address=None): """ List the virtual server table if service_address is not specified. If a service_address is selected, list this service only. CLI Example: .. 
code-block:: bash salt '*' lvs.list """ if service_address: cmd = "{} -L {} -n".format( __detect_os(), _build_cmd(protocol=protocol, service_address=service_address), ) else: cmd = "{} -L -n".format(__detect_os()) out = __salt__["cmd.run_all"](cmd, python_shell=False) # A non-zero return code means fail if out["retcode"]: ret = out["stderr"].strip() else: ret = out["stdout"].strip() return ret def zero(protocol=None, service_address=None): """ Zero the packet, byte and rate counters in a service or all services. CLI Example: .. code-block:: bash salt '*' lvs.zero """ if service_address: cmd = "{} -Z {}".format( __detect_os(), _build_cmd(protocol=protocol, service_address=service_address), ) else: cmd = "{} -Z".format(__detect_os()) out = __salt__["cmd.run_all"](cmd, python_shell=False) # A non-zero return code means fail if out["retcode"]: ret = out["stderr"].strip() else: ret = True return ret def check_service(protocol=None, service_address=None, **kwargs): """ Check the virtual service exists. CLI Example: .. code-block:: bash salt '*' lvs.check_service tcp 1.1.1.1:80 """ cmd = "{}".format( _build_cmd(protocol=protocol, service_address=service_address, **kwargs) ) # Exact match if not kwargs: cmd += " " all_rules = get_rules() out = all_rules.find(cmd) if out != -1: ret = True else: ret = "Error: service not exists" return ret def check_server(protocol=None, service_address=None, server_address=None, **kwargs): """ Check the real server exists in the specified service. CLI Example: .. code-block:: bash salt '*' lvs.check_server tcp 1.1.1.1:80 192.168.0.11:8080 """ cmd = "{}".format( _build_cmd( protocol=protocol, service_address=service_address, server_address=server_address, **kwargs ) ) # Exact match if not kwargs: cmd += " " all_rules = get_rules() out = all_rules.find(cmd) if out != -1: ret = True else: ret = "Error: server not exists" return ret
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/lvs.py
0.681197
0.167389
lvs.py
pypi
import re import salt.utils.path # Function alias to make sure not to shadow built-in's __func_alias__ = { "id_": "id", "reload_": "reload", } def __virtual__(): if salt.utils.path.which("monit") is not None: # The monit binary exists, let the module load return True return ( False, "The monit execution module cannot be loaded: the monit binary is not in the" " path.", ) def start(name): """ CLI Example: .. code-block:: bash salt '*' monit.start <service name> """ cmd = "monit start {}".format(name) return not __salt__["cmd.retcode"](cmd, python_shell=False) def stop(name): """ Stops service via monit CLI Example: .. code-block:: bash salt '*' monit.stop <service name> """ cmd = "monit stop {}".format(name) return not __salt__["cmd.retcode"](cmd, python_shell=False) def restart(name): """ Restart service via monit CLI Example: .. code-block:: bash salt '*' monit.restart <service name> """ cmd = "monit restart {}".format(name) return not __salt__["cmd.retcode"](cmd, python_shell=False) def unmonitor(name): """ Unmonitor service via monit CLI Example: .. code-block:: bash salt '*' monit.unmonitor <service name> """ cmd = "monit unmonitor {}".format(name) return not __salt__["cmd.retcode"](cmd, python_shell=False) def monitor(name): """ monitor service via monit CLI Example: .. code-block:: bash salt '*' monit.monitor <service name> """ cmd = "monit monitor {}".format(name) return not __salt__["cmd.retcode"](cmd, python_shell=False) def summary(svc_name=""): """ Display a summary from monit CLI Example: .. 
code-block:: bash salt '*' monit.summary salt '*' monit.summary <service name> """ ret = {} cmd = "monit summary" res = __salt__["cmd.run"](cmd).splitlines() for line in res: if "daemon is not running" in line: return dict(monit="daemon is not running", result=False) elif not line or svc_name not in line or "The Monit daemon" in line: continue else: parts = line.split("'") if len(parts) == 3: resource, name, status_ = (parts[0].strip(), parts[1], parts[2].strip()) if svc_name != "" and svc_name != name: continue if resource not in ret: ret[resource] = {} ret[resource][name] = status_ return ret def status(svc_name=""): """ Display a process status from monit CLI Example: .. code-block:: bash salt '*' monit.status salt '*' monit.status <service name> """ cmd = "monit status" res = __salt__["cmd.run"](cmd) # Monit uses a different separator since 5.18.0 if version() < "5.18.0": fieldlength = 33 else: fieldlength = 28 separator = 3 + fieldlength prostr = "Process" + " " * fieldlength s = res.replace("Process", prostr).replace("'", "").split("\n\n") entries = {} for process in s[1:-1]: pro = process.splitlines() tmp = {} for items in pro: key = items[:separator].strip() tmp[key] = items[separator - 1 :].strip() entries[pro[0].split()[1]] = tmp if svc_name == "": ret = entries else: ret = entries.get(svc_name, "No such service") return ret def reload_(): """ .. versionadded:: 2016.3.0 Reload monit configuration CLI Example: .. code-block:: bash salt '*' monit.reload """ cmd = "monit reload" return not __salt__["cmd.retcode"](cmd, python_shell=False) def configtest(): """ .. versionadded:: 2016.3.0 Test monit configuration syntax CLI Example: .. 
code-block:: bash salt '*' monit.configtest """ ret = {} cmd = "monit -t" out = __salt__["cmd.run_all"](cmd) if out["retcode"] != 0: ret["comment"] = "Syntax Error" ret["stderr"] = out["stderr"] ret["result"] = False return ret ret["comment"] = "Syntax OK" ret["stdout"] = out["stdout"] ret["result"] = True return ret def version(): """ .. versionadded:: 2016.3.0 Return version from monit -V CLI Example: .. code-block:: bash salt '*' monit.version """ cmd = "monit -V" out = __salt__["cmd.run"](cmd).splitlines() ret = out[0].split() return ret[-1] def id_(reset=False): """ .. versionadded:: 2016.3.0 Return monit unique id. reset : False Reset current id and generate a new id when it's True. CLI Example: .. code-block:: bash salt '*' monit.id [reset=True] """ if reset: id_pattern = re.compile(r"Monit id (?P<id>[^ ]+)") cmd = "echo y|monit -r" out = __salt__["cmd.run_all"](cmd, python_shell=True) ret = id_pattern.search(out["stdout"]).group("id") return ret if ret else False else: cmd = "monit -i" out = __salt__["cmd.run"](cmd) ret = out.split(":")[-1].strip() return ret def validate(): """ .. versionadded:: 2016.3.0 Check all services CLI Example: .. code-block:: bash salt '*' monit.validate """ cmd = "monit validate" return not __salt__["cmd.retcode"](cmd, python_shell=False)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/monit.py
0.586286
0.252885
monit.py
pypi
import logging import re import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = "license" def __virtual__(): """ Only work on Windows """ if salt.utils.platform.is_windows(): return __virtualname__ return (False, "Module win_license: module only works on Windows systems.") def installed(product_key): """ Check to see if the product key is already installed. Note: This is not 100% accurate as we can only see the last 5 digits of the license. CLI Example: .. code-block:: bash salt '*' license.installed XXXXX-XXXXX-XXXXX-XXXXX-XXXXX """ cmd = r"cscript C:\Windows\System32\slmgr.vbs /dli" out = __salt__["cmd.run"](cmd) return product_key[-5:] in out def install(product_key): """ Install the given product key CLI Example: .. code-block:: bash salt '*' license.install XXXXX-XXXXX-XXXXX-XXXXX-XXXXX """ cmd = r"cscript C:\Windows\System32\slmgr.vbs /ipk {}".format(product_key) return __salt__["cmd.run"](cmd) def uninstall(): """ Uninstall the current product key CLI Example: .. code-block:: bash salt '*' license.uninstall """ cmd = r"cscript C:\Windows\System32\slmgr.vbs /upk" return __salt__["cmd.run"](cmd) def activate(): """ Attempt to activate the current machine via Windows Activation CLI Example: .. code-block:: bash salt '*' license.activate """ cmd = r"cscript C:\Windows\System32\slmgr.vbs /ato" return __salt__["cmd.run"](cmd) def licensed(): """ Return true if the current machine is licensed correctly CLI Example: .. code-block:: bash salt '*' license.licensed """ cmd = r"cscript C:\Windows\System32\slmgr.vbs /dli" out = __salt__["cmd.run"](cmd) return "License Status: Licensed" in out def info(): """ Return information about the license, if the license is not correctly activated this will return None. CLI Example: .. 
code-block:: bash salt '*' license.info """ cmd = r"cscript C:\Windows\System32\slmgr.vbs /dli" out = __salt__["cmd.run"](cmd) match = re.search( r"Name: (.*)\r\nDescription: (.*)\r\nPartial Product Key: (.*)\r\nLicense" r" Status: (.*)", out, re.MULTILINE, ) if match is not None: groups = match.groups() return { "name": groups[0], "description": groups[1], "partial_key": groups[2], "licensed": "Licensed" in groups[3], } return None
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/win_license.py
0.666931
0.175609
win_license.py
pypi
HAS_SHADE = False try: import shade HAS_SHADE = True except ImportError: pass __virtualname__ = "neutronng" def __virtual__(): """ Only load this module if shade python module is installed """ if HAS_SHADE: return __virtualname__ return ( False, "The neutronng execution module failed to load: shade python module is not available", ) def compare_changes(obj, **kwargs): """ Compare two dicts returning only keys that exist in the first dict and are different in the second one """ changes = {} for key, value in obj.items(): if key in kwargs: if value != kwargs[key]: changes[key] = kwargs[key] return changes def _clean_kwargs(keep_name=False, **kwargs): """ Sanatize the arguments for use with shade """ if "name" in kwargs and not keep_name: kwargs["name_or_id"] = kwargs.pop("name") return __utils__["args.clean_kwargs"](**kwargs) def setup_clouds(auth=None): """ Call functions to create Shade cloud objects in __context__ to take advantage of Shade's in-memory caching across several states """ get_operator_cloud(auth) get_openstack_cloud(auth) def get_operator_cloud(auth=None): """ Return an operator_cloud """ if auth is None: auth = __salt__["config.option"]("neutron", {}) if "shade_opcloud" in __context__: if __context__["shade_opcloud"].auth == auth: return __context__["shade_opcloud"] __context__["shade_opcloud"] = shade.operator_cloud(**auth) return __context__["shade_opcloud"] def get_openstack_cloud(auth=None): """ Return an openstack_cloud """ if auth is None: auth = __salt__["config.option"]("neutron", {}) if "shade_oscloud" in __context__: if __context__["shade_oscloud"].auth == auth: return __context__["shade_oscloud"] __context__["shade_oscloud"] = shade.openstack_cloud(**auth) return __context__["shade_oscloud"] def network_create(auth=None, **kwargs): """ Create a network name Name of the network being created shared : False If ``True``, set the network as shared admin_state_up : True If ``True``, Set the network administrative state to "up" external : False 
Control whether or not this network is externally accessible provider An optional Python dictionary of network provider options project_id The project ID on which this network will be created CLI Example: .. code-block:: bash salt '*' neutronng.network_create name=network2 \ shared=True admin_state_up=True external=True salt '*' neutronng.network_create name=network3 \ provider='{"network_type": "vlan",\ "segmentation_id": "4010",\ "physical_network": "provider"}' \ project_id=1dcac318a83b4610b7a7f7ba01465548 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(keep_name=True, **kwargs) return cloud.create_network(**kwargs) def network_delete(auth=None, **kwargs): """ Delete a network name_or_id Name or ID of the network being deleted CLI Example: .. code-block:: bash salt '*' neutronng.network_delete name_or_id=network1 salt '*' neutronng.network_delete name_or_id=1dcac318a83b4610b7a7f7ba01465548 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.delete_network(**kwargs) def list_networks(auth=None, **kwargs): """ List networks filters A Python dictionary of filter conditions to push down CLI Example: .. code-block:: bash salt '*' neutronng.list_networks salt '*' neutronng.list_networks \ filters='{"tenant_id": "1dcac318a83b4610b7a7f7ba01465548"}' """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.list_networks(**kwargs) def network_get(auth=None, **kwargs): """ Get a single network filters A Python dictionary of filter conditions to push down CLI Example: .. code-block:: bash salt '*' neutronng.network_get name=XLB4 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.get_network(**kwargs) def subnet_create(auth=None, **kwargs): """ Create a subnet network_name_or_id The unique name or ID of the attached network. If a non-unique name is supplied, an exception is raised. cidr The CIDR ip_version The IP version, which is 4 or 6. 
enable_dhcp : False Set to ``True`` if DHCP is enabled and ``False`` if disabled subnet_name The name of the subnet tenant_id The ID of the tenant who owns the network. Only administrative users can specify a tenant ID other than their own. allocation_pools A list of dictionaries of the start and end addresses for the allocation pools. gateway_ip The gateway IP address. When you specify both ``allocation_pools`` and ``gateway_ip``, you must ensure that the gateway IP does not overlap with the specified allocation pools. disable_gateway_ip : False Set to ``True`` if gateway IP address is disabled and ``False`` if enabled. It is not allowed with ``gateway_ip``. dns_nameservers A list of DNS name servers for the subnet host_routes A list of host route dictionaries for the subnet ipv6_ra_mode IPv6 Router Advertisement mode. Valid values are ``dhcpv6-stateful``, ``dhcpv6-stateless``, or ``slaac``. ipv6_address_mode IPv6 address mode. Valid values are ``dhcpv6-stateful``, ``dhcpv6-stateless``, or ``slaac``. use_default_subnetpool If ``True``, use the default subnetpool for ``ip_version`` to obtain a CIDR. It is required to pass ``None`` to the ``cidr`` argument when enabling this option. CLI Example: .. 
code-block:: bash salt '*' neutronng.subnet_create network_name_or_id=network1 subnet_name=subnet1 salt '*' neutronng.subnet_create subnet_name=subnet2\ network_name_or_id=network2 enable_dhcp=True \ allocation_pools='[{"start": "192.168.199.2",\ "end": "192.168.199.254"}]'\ gateway_ip='192.168.199.1' cidr=192.168.199.0/24 salt '*' neutronng.subnet_create network_name_or_id=network1 \ subnet_name=subnet1 dns_nameservers='["8.8.8.8", "8.8.8.7"]' """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.create_subnet(**kwargs) def subnet_update(auth=None, **kwargs): """ Update a subnet name_or_id Name or ID of the subnet to update subnet_name The new name of the subnet enable_dhcp Set to ``True`` if DHCP is enabled and ``False`` if disabled gateway_ip The gateway IP address. When you specify both allocation_pools and gateway_ip, you must ensure that the gateway IP does not overlap with the specified allocation pools. disable_gateway_ip : False Set to ``True`` if gateway IP address is disabled and False if enabled. It is not allowed with ``gateway_ip``. allocation_pools A list of dictionaries of the start and end addresses for the allocation pools. dns_nameservers A list of DNS name servers for the subnet host_routes A list of host route dictionaries for the subnet .. code-block:: bash salt '*' neutronng.subnet_update name=subnet1 subnet_name=subnet2 salt '*' neutronng.subnet_update name=subnet1 dns_nameservers='["8.8.8.8", "8.8.8.7"]' """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.update_subnet(**kwargs) def subnet_delete(auth=None, **kwargs): """ Delete a subnet name Name or ID of the subnet to update CLI Example: .. 
code-block:: bash salt '*' neutronng.subnet_delete name=subnet1 salt '*' neutronng.subnet_delete \ name=1dcac318a83b4610b7a7f7ba01465548 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.delete_subnet(**kwargs) def list_subnets(auth=None, **kwargs): """ List subnets filters A Python dictionary of filter conditions to push down CLI Example: .. code-block:: bash salt '*' neutronng.list_subnets salt '*' neutronng.list_subnets \ filters='{"tenant_id": "1dcac318a83b4610b7a7f7ba01465548"}' """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.list_subnets(**kwargs) def subnet_get(auth=None, **kwargs): """ Get a single subnet filters A Python dictionary of filter conditions to push down CLI Example: .. code-block:: bash salt '*' neutronng.subnet_get name=subnet1 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.get_subnet(**kwargs) def security_group_create(auth=None, **kwargs): """ Create a security group. Use security_group_get to create default. project_id The project ID on which this security group will be created CLI Example: .. code-block:: bash salt '*' neutronng.security_group_create name=secgroup1 \ description="Very secure security group" salt '*' neutronng.security_group_create name=secgroup1 \ description="Very secure security group" \ project_id=1dcac318a83b4610b7a7f7ba01465548 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(keep_name=True, **kwargs) return cloud.create_security_group(**kwargs) def security_group_update(secgroup=None, auth=None, **kwargs): """ Update a security group secgroup Name, ID or Raw Object of the security group to update name New name for the security group description New description for the security group CLI Example: .. 
code-block:: bash salt '*' neutronng.security_group_update secgroup=secgroup1 \ description="Very secure security group" salt '*' neutronng.security_group_update secgroup=secgroup1 \ description="Very secure security group" \ project_id=1dcac318a83b4610b7a7f7ba01465548 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(keep_name=True, **kwargs) return cloud.update_security_group(secgroup, **kwargs) def security_group_delete(auth=None, **kwargs): """ Delete a security group name_or_id The name or unique ID of the security group CLI Example: .. code-block:: bash salt '*' neutronng.security_group_delete name_or_id=secgroup1 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.delete_security_group(**kwargs) def security_group_get(auth=None, **kwargs): """ Get a single security group. This will create a default security group if one does not exist yet for a particular project id. filters A Python dictionary of filter conditions to push down CLI Example: .. code-block:: bash salt '*' neutronng.security_group_get \ name=1dcac318a83b4610b7a7f7ba01465548 salt '*' neutronng.security_group_get \ name=default\ filters='{"tenant_id":"2e778bb64ca64a199eb526b5958d8710"}' """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.get_security_group(**kwargs) def security_group_rule_create(auth=None, **kwargs): """ Create a rule in a security group secgroup_name_or_id The security group name or ID to associate with this security group rule. If a non-unique group name is given, an exception is raised. port_range_min The minimum port number in the range that is matched by the security group rule. If the protocol is TCP or UDP, this value must be less than or equal to the port_range_max attribute value. If nova is used by the cloud provider for security groups, then a value of None will be transformed to -1. port_range_max The maximum port number in the range that is matched by the security group rule. 
The port_range_min attribute constrains the port_range_max attribute. If nova is used by the cloud provider for security groups, then a value of None will be transformed to -1. protocol The protocol that is matched by the security group rule. Valid values are ``None``, ``tcp``, ``udp``, and ``icmp``. remote_ip_prefix The remote IP prefix to be associated with this security group rule. This attribute matches the specified IP prefix as the source IP address of the IP packet. remote_group_id The remote group ID to be associated with this security group rule direction Either ``ingress`` or ``egress``; the direction in which the security group rule is applied. For a compute instance, an ingress security group rule is applied to incoming (ingress) traffic for that instance. An egress rule is applied to traffic leaving the instance ethertype Must be IPv4 or IPv6, and addresses represented in CIDR must match the ingress or egress rules project_id Specify the project ID this security group will be created on (admin-only) CLI Example: .. code-block:: bash salt '*' neutronng.security_group_rule_create\ secgroup_name_or_id=secgroup1 salt '*' neutronng.security_group_rule_create\ secgroup_name_or_id=secgroup2 port_range_min=8080\ port_range_max=8080 direction='egress' salt '*' neutronng.security_group_rule_create\ secgroup_name_or_id=c0e1d1ce-7296-405e-919d-1c08217be529\ protocol=icmp project_id=1dcac318a83b4610b7a7f7ba01465548 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.create_security_group_rule(**kwargs) def security_group_rule_delete(auth=None, **kwargs): """ Delete a security group name_or_id The unique ID of the security group rule CLI Example: .. code-block:: bash salt '*' neutronng.security_group_rule_delete name_or_id=1dcac318a83b4610b7a7f7ba01465548 """ cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.delete_security_group_rule(**kwargs)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/neutronng.py
0.742235
0.184345
neutronng.py
pypi
import logging

import salt.utils.compat
import salt.utils.data  # fix: filter_falsey is used below but was never imported
import salt.utils.json
import salt.utils.versions
from salt.exceptions import SaltInvocationError
from salt.utils.decorators import depends

try:
    # Disable unused import-errors as these are only used for dependency checking
    # pylint: disable=unused-import
    import boto3
    import botocore

    # pylint: enable=unused-import
    from botocore.exceptions import ClientError, ParamValidationError, WaiterError

    # Quieten boto3's own debug chatter; INFO and above is enough.
    logging.getLogger("boto3").setLevel(logging.INFO)
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False

log = logging.getLogger(__name__)


def __virtual__():
    """
    Only load if boto libraries exist and if boto libraries are greater than
    a given version.
    """
    return HAS_BOTO and salt.utils.versions.check_boto_reqs(
        boto3_ver="1.2.7", check_boto=False
    )


def __init__(opts):
    """
    Bind the generic boto3 helper functions (``_get_conn``, ...) into this
    module's namespace for the Elasticsearch ("es") service.
    """
    _ = opts
    if HAS_BOTO:
        __utils__["boto3.assign_funcs"](__name__, "es")


def add_tags(
    domain_name=None,
    arn=None,
    tags=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Attaches tags to an existing Elasticsearch domain.
    Tags are a set of case-sensitive key value pairs.
    An Elasticsearch domain may have up to 10 tags.

    :param str domain_name: The name of the Elasticsearch domain you want
        to add tags to.
    :param str arn: The ARN of the Elasticsearch domain you want to add
        tags to. Specifying this overrides ``domain_name``.
    :param dict tags: The dict of tags to add to the Elasticsearch domain.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon failure, also contains a key 'error' with
        the error message as value.

    .. versionadded:: 3001

    CLI Example:

    .. code-block:: bash

        salt myminion boto3_elasticsearch.add_tags domain_name=mydomain tags='{"foo": "bar", "baz": "qux"}'
    """
    if not any((arn, domain_name)):
        raise SaltInvocationError(
            "At least one of domain_name or arn must be specified."
        )
    ret = {"result": False}
    if arn is None:
        # Resolve the ARN from the domain name.
        res = describe_elasticsearch_domain(
            domain_name=domain_name,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        if "error" in res:
            ret.update(res)
        elif not res["result"]:
            ret.update(
                {
                    "error": 'The domain with name "{}" does not exist.'.format(
                        domain_name
                    )
                }
            )
        else:
            arn = res["response"].get("ARN")
    if arn:
        boto_params = {
            "ARN": arn,
            "TagList": [
                {"Key": k, "Value": value} for k, value in (tags or {}).items()
            ],
        }
        try:
            conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
            conn.add_tags(**boto_params)
            ret["result"] = True
        except (ParamValidationError, ClientError) as exp:
            ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
    return ret


@depends("botocore", version="1.12.21")
def cancel_elasticsearch_service_software_update(
    domain_name, region=None, keyid=None, key=None, profile=None
):
    """
    Cancels a scheduled service software update for an Amazon ES domain.
    You can only perform this operation before the AutomatedUpdateDate and
    when the UpdateStatus is in the PENDING_UPDATE state.

    :param str domain_name: The name of the domain that you want to stop
        the latest service software update on.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with the current service software options. Upon failure, also
        contains a key 'error' with the error message as value.

    .. versionadded:: 3001
    """
    ret = {"result": False}
    try:
        conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
        res = conn.cancel_elasticsearch_service_software_update(DomainName=domain_name)
        ret["result"] = True
        # Bugfix: the service software options must be stored in *ret* under
        # "response" (the original wrote them back into the boto result dict,
        # so the caller never saw them).
        ret["response"] = res["ServiceSoftwareOptions"]
    except (ParamValidationError, ClientError) as exp:
        ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
    return ret
def create_elasticsearch_domain(
    domain_name,
    elasticsearch_version=None,
    elasticsearch_cluster_config=None,
    ebs_options=None,
    access_policies=None,
    snapshot_options=None,
    vpc_options=None,
    cognito_options=None,
    encryption_at_rest_options=None,
    node_to_node_encryption_options=None,
    advanced_options=None,
    log_publishing_options=None,
    blocking=False,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Given a valid config, create an Elasticsearch domain.

    :param str domain_name: The name of the Elasticsearch domain to create.
        Domain names are unique across the domains owned by an account within
        an AWS region. Names must start with a letter or number and may only
        contain a-z (lowercase), 0-9 and - (hyphen).
    :param str elasticsearch_version: String of format X.Y to specify the
        version for the Elasticsearch domain, e.g. "1.5" or "2.3".
    :param dict elasticsearch_cluster_config: Cluster configuration options.
        Case-sensitive keys: InstanceType, InstanceCount,
        DedicatedMasterEnabled, ZoneAwarenessEnabled, ZoneAwarenessConfig
        (with AvailabilityZoneCount), DedicatedMasterType,
        DedicatedMasterCount.
    :param dict ebs_options: EBS storage volume options. Case-sensitive keys:
        EBSEnabled, VolumeType, VolumeSize, Iops.
    :type access_policies: str or dict
    :param access_policies: Dict or JSON string with the IAM access policy.
    :param dict snapshot_options: Snapshot options; case-sensitive key:
        AutomatedSnapshotStartHour (UTC hour of the daily automated snapshot,
        default 0).
    :param dict vpc_options: Subnets and security groups for the VPC
        endpoint. Case-sensitive keys: SubnetIds, SecurityGroupIds.
    :param dict cognito_options: Cognito user/identity pools for Kibana
        authentication. Case-sensitive keys: Enabled, UserPoolId,
        IdentityPoolId, RoleArn.
    :param dict encryption_at_rest_options: Encryption-at-rest options.
        Case-sensitive keys: Enabled, KmsKeyId.
    :param dict node_to_node_encryption_options: Node-to-node encryption
        options. Case-sensitive key: Enabled.
    :param dict advanced_options: Advanced options, e.g.
        ``rest.action.multi.allow_explicit_index``.
    :param dict log_publishing_options: Log publishing options per log type
        (INDEX_SLOW_LOGS, SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS), each a dict
        with CloudWatchLogsLogGroupArn and Enabled.
    :param bool blocking: Whether or not to wait (block) until the
        Elasticsearch domain has been created.

    Note: Not all instance types allow enabling encryption at rest.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with the domain status configuration. Upon failure, also contains
        a key 'error' with the error message as value.

    .. versionadded:: 3001

    CLI Example:

    .. code-block:: bash

        salt myminion boto3_elasticsearch.create_elasticsearch_domain mydomain \\
            elasticsearch_cluster_config='{"InstanceType": "t2.micro.elasticsearch", "InstanceCount": 1}' \\
            ebs_options='{"EBSEnabled": True, "VolumeType": "gp2", "VolumeSize": 10}' \\
            snapshot_options='{"AutomatedSnapshotStartHour": 0}'
    """
    # A dict policy is serialized to the JSON string boto expects.
    policy_document = access_policies
    if isinstance(policy_document, dict):
        policy_document = salt.utils.json.dumps(policy_document)
    # Drop every unspecified (falsy) argument so boto only sees real values.
    boto_kwargs = salt.utils.data.filter_falsey(
        {
            "DomainName": domain_name,
            "ElasticsearchVersion": str(elasticsearch_version or ""),
            "ElasticsearchClusterConfig": elasticsearch_cluster_config,
            "EBSOptions": ebs_options,
            "AccessPolicies": policy_document,
            "SnapshotOptions": snapshot_options,
            "VPCOptions": vpc_options,
            "CognitoOptions": cognito_options,
            "EncryptionAtRestOptions": encryption_at_rest_options,
            "NodeToNodeEncryptionOptions": node_to_node_encryption_options,
            "AdvancedOptions": advanced_options,
            "LogPublishingOptions": log_publishing_options,
        }
    )
    ret = {"result": False}
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        res = conn.create_elasticsearch_domain(**boto_kwargs)
        if res and "DomainStatus" in res:
            ret["result"] = True
            ret["response"] = res["DomainStatus"]
        if blocking:
            # Block until AWS reports the domain as available.
            conn.get_waiter("ESDomainAvailable").wait(DomainName=domain_name)
    except (ParamValidationError, ClientError, WaiterError) as exc:
        ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret
def delete_elasticsearch_domain(
    domain_name, blocking=False, region=None, key=None, keyid=None, profile=None
):
    """
    Permanently deletes the specified Elasticsearch domain and all of its
    data. Once a domain is deleted, it cannot be recovered.

    :param str domain_name: The name of the domain to delete.
    :param bool blocking: Whether or not to wait (block) until the
        Elasticsearch domain has been deleted.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon failure, also contains a key 'error' with
        the error message as value.

    .. versionadded:: 3001
    """
    ret = {"result": False}
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        conn.delete_elasticsearch_domain(DomainName=domain_name)
        ret["result"] = True
        if blocking:
            # Deletion is asynchronous; wait until AWS confirms it is gone.
            conn.get_waiter("ESDomainDeleted").wait(DomainName=domain_name)
    except (ParamValidationError, ClientError, WaiterError) as exc:
        ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret


@depends("botocore", version="1.7.30")
def delete_elasticsearch_service_role(region=None, keyid=None, key=None, profile=None):
    """
    Deletes the service-linked role that Elasticsearch Service uses to manage
    and maintain VPC domains. Role deletion will fail if any existing VPC
    domains use the role. You must delete any such Elasticsearch domains
    before deleting the role.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon failure, also contains a key 'error' with
        the error message as value.

    .. versionadded:: 3001
    """
    ret = {"result": False}
    try:
        conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
        conn.delete_elasticsearch_service_role()
        ret["result"] = True
    except (ParamValidationError, ClientError) as exc:
        ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret


def describe_elasticsearch_domain(
    domain_name, region=None, keyid=None, key=None, profile=None
):
    """
    Given a domain name, gets its status description.

    :param str domain_name: The name of the domain to get the status of.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with the domain status information. Upon failure, also contains a
        key 'error' with the error message as value.

    .. versionadded:: 3001
    """
    ret = {"result": False}
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        res = conn.describe_elasticsearch_domain(DomainName=domain_name)
        if res and "DomainStatus" in res:
            ret["result"] = True
            ret["response"] = res["DomainStatus"]
    except (ParamValidationError, ClientError) as exc:
        ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret
def describe_elasticsearch_domain_config(
    domain_name, region=None, keyid=None, key=None, profile=None
):
    """
    Provides cluster configuration information about the specified
    Elasticsearch domain, such as the state, creation date, update version,
    and update date for cluster options.

    :param str domain_name: The name of the domain to describe.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with the current configuration information. Upon failure, also
        contains a key 'error' with the error message as value.

    .. versionadded:: 3001
    """
    ret = {"result": False}
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        res = conn.describe_elasticsearch_domain_config(DomainName=domain_name)
        if res and "DomainConfig" in res:
            ret["result"] = True
            ret["response"] = res["DomainConfig"]
    except (ParamValidationError, ClientError) as exc:
        ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret
def describe_elasticsearch_domains(
    domain_names, region=None, keyid=None, key=None, profile=None
):
    """
    Returns domain configuration information about the specified
    Elasticsearch domains, including the domain ID, domain endpoint, and
    domain ARN.

    :param list domain_names: List of domain names to get information for.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with the list of domain status information. Upon failure, also
        contains a key 'error' with the error message as value.

    .. versionadded:: 3001

    CLI Example:

    .. code-block:: bash

        salt myminion boto3_elasticsearch.describe_elasticsearch_domains '["domain_a", "domain_b"]'
    """
    ret = {"result": False}
    try:
        conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
        res = conn.describe_elasticsearch_domains(DomainNames=domain_names)
        if res and "DomainStatusList" in res:
            ret["result"] = True
            ret["response"] = res["DomainStatusList"]
    except (ParamValidationError, ClientError) as exc:
        ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret


@depends("botocore", version="1.5.18")
def describe_elasticsearch_instance_type_limits(
    instance_type,
    elasticsearch_version,
    domain_name=None,
    region=None,
    keyid=None,
    key=None,
    profile=None,
):
    """
    Describe Elasticsearch Limits for a given InstanceType and
    ElasticsearchVersion. When modifying an existing domain, specify
    ``domain_name`` to know what limits are supported for modifying.

    :param str instance_type: The instance type for an Elasticsearch cluster
        for which Elasticsearch ``Limits`` are needed.
    :param str elasticsearch_version: Version of Elasticsearch for which
        ``Limits`` are needed.
    :param str domain_name: Name of the domain we are trying to modify.
        Only provide this when querying ``Limits`` for an existing domain.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with the limits information. Upon failure, also contains a key
        'error' with the error message as value.

    .. versionadded:: 3001

    CLI Example:

    .. code-block:: bash

        salt myminion boto3_elasticsearch.describe_elasticsearch_instance_type_limits \\
            instance_type=r3.8xlarge.elasticsearch \\
            elasticsearch_version='6.2'
    """
    ret = {"result": False}
    # Omit DomainName entirely when it was not supplied.
    boto_params = salt.utils.data.filter_falsey(
        {
            "DomainName": domain_name,
            "InstanceType": instance_type,
            "ElasticsearchVersion": str(elasticsearch_version),
        }
    )
    try:
        conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
        res = conn.describe_elasticsearch_instance_type_limits(**boto_params)
        if res and "LimitsByRole" in res:
            ret["result"] = True
            ret["response"] = res["LimitsByRole"]
    except (ParamValidationError, ClientError) as exc:
        ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret
@depends("botocore", version="1.10.15")
def describe_reserved_elasticsearch_instance_offerings(
    reserved_elasticsearch_instance_offering_id=None,
    region=None,
    keyid=None,
    key=None,
    profile=None,
):
    """
    Lists available reserved Elasticsearch instance offerings.

    :param str reserved_elasticsearch_instance_offering_id: The offering
        identifier filter value. Use this parameter to show only the
        available offering that matches the specified reservation identifier.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with the list of offerings information. Upon failure, also contains
        a key 'error' with the error message as value.

    .. versionadded:: 3001
    """
    ret = {"result": False}
    try:
        conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
        # Fix: drop the filter when it was not provided. Passing the default
        # ``None`` straight to botocore raises ParamValidationError, which
        # made it impossible to list *all* offerings.
        boto_params = salt.utils.data.filter_falsey(
            {
                "ReservedElasticsearchInstanceOfferingId": (
                    reserved_elasticsearch_instance_offering_id
                )
            }
        )
        res = []
        for page in conn.get_paginator(
            "describe_reserved_elasticsearch_instance_offerings"
        ).paginate(**boto_params):
            res.extend(page["ReservedElasticsearchInstanceOfferings"])
        if res:
            ret["result"] = True
            ret["response"] = res
    except (ParamValidationError, ClientError) as exp:
        ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
    return ret


@depends("botocore", version="1.10.15")
def describe_reserved_elasticsearch_instances(
    reserved_elasticsearch_instance_id=None,
    region=None,
    keyid=None,
    key=None,
    profile=None,
):
    """
    Returns information about reserved Elasticsearch instances for this
    account.

    :param str reserved_elasticsearch_instance_id: The reserved instance
        identifier filter value. Use this parameter to show only the
        reservation that matches the specified reserved Elasticsearch
        instance ID.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with a list of information on reserved instances. Upon failure,
        also contains a key 'error' with the error message as value.

    :note: Version 1.9.174 of boto3 has a bug in that
        reserved_elasticsearch_instance_id is considered a required argument,
        even though the documentation says otherwise.

    .. versionadded:: 3001
    """
    ret = {"result": False}
    try:
        conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
        # Fix: as above, omit the filter entirely when it is None so the
        # paginator lists every reservation instead of erroring out.
        boto_params = salt.utils.data.filter_falsey(
            {
                "ReservedElasticsearchInstanceId": reserved_elasticsearch_instance_id,
            }
        )
        res = []
        for page in conn.get_paginator(
            "describe_reserved_elasticsearch_instances"
        ).paginate(**boto_params):
            res.extend(page["ReservedElasticsearchInstances"])
        if res:
            ret["result"] = True
            ret["response"] = res
    except (ParamValidationError, ClientError) as exp:
        ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
    return ret
@depends("botocore", version="1.10.77")
def get_compatible_elasticsearch_versions(
    domain_name=None, region=None, keyid=None, key=None, profile=None
):
    """
    Returns a list of upgrade-compatible Elasticsearch versions.
    You can optionally pass a ``domain_name`` to get all upgrade-compatible
    Elasticsearch versions for that specific domain.

    :param str domain_name: The name of an Elasticsearch domain.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with a list of compatible versions. Upon failure, also contains a
        key 'error' with the error message as value.

    .. versionadded:: 3001
    """
    ret = {"result": False}
    # Omit DomainName entirely when not supplied.
    boto_params = salt.utils.data.filter_falsey({"DomainName": domain_name})
    try:
        conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
        res = conn.get_compatible_elasticsearch_versions(**boto_params)
        if res and "CompatibleElasticsearchVersions" in res:
            ret["result"] = True
            ret["response"] = res["CompatibleElasticsearchVersions"]
    except (ParamValidationError, ClientError) as exc:
        ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret


@depends("botocore", version="1.10.77")
def get_upgrade_history(domain_name, region=None, keyid=None, key=None, profile=None):
    """
    Retrieves the complete history of the last 10 upgrades that were
    performed on the domain.

    :param str domain_name: The name of an Elasticsearch domain. Domain names
        are unique across the domains owned by an account within an AWS
        region. Domain names start with a letter or number and can contain
        the following characters: a-z (lowercase), 0-9, and - (hyphen).

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with a list of upgrade histories. Upon failure, also contains a key
        'error' with the error message as value.

    .. versionadded:: 3001
    """
    ret = {"result": False}
    try:
        conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
        pages = conn.get_paginator("get_upgrade_history").paginate(
            DomainName=domain_name
        )
        history = []
        for page in pages:
            history.extend(page["UpgradeHistories"])
        if history:
            ret["result"] = True
            ret["response"] = history
    except (ParamValidationError, ClientError) as exc:
        ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret
@depends("botocore", version="1.10.77")
def get_upgrade_status(domain_name, region=None, keyid=None, key=None, profile=None):
    """
    Retrieves the latest status of the last upgrade or upgrade eligibility
    check that was performed on the domain.

    :param str domain_name: The name of an Elasticsearch domain. Domain names
        are unique across the domains owned by an account within an AWS
        region. Domain names start with a letter or number and can contain
        the following characters: a-z (lowercase), 0-9, and - (hyphen).

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with upgrade status information. Upon failure, also contains a key
        'error' with the error message as value.

    .. versionadded:: 3001
    """
    ret = {"result": False}
    try:
        conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
        res = conn.get_upgrade_status(DomainName=domain_name)
        ret["result"] = True
        # Strip the boto bookkeeping before handing the result back.
        del res["ResponseMetadata"]
        ret["response"] = res
    except (ParamValidationError, ClientError) as exc:
        ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret


def list_domain_names(region=None, keyid=None, key=None, profile=None):
    """
    Returns the names of all Elasticsearch domains owned by the current
    user's account.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with a list of domain names. Upon failure, also contains a key
        'error' with the error message as value.

    .. versionadded:: 3001
    """
    ret = {"result": False}
    try:
        conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
        res = conn.list_domain_names()
        if res and "DomainNames" in res:
            ret["result"] = True
            ret["response"] = [entry["DomainName"] for entry in res["DomainNames"]]
    except (ParamValidationError, ClientError) as exc:
        ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret


@depends("botocore", version="1.5.18")
def list_elasticsearch_instance_types(
    elasticsearch_version,
    domain_name=None,
    region=None,
    keyid=None,
    key=None,
    profile=None,
):
    """
    List all Elasticsearch instance types that are supported for a given
    ElasticsearchVersion.

    :param str elasticsearch_version: Version of Elasticsearch for which the
        list of supported instance types is needed.
    :param str domain_name: Name of the domain we are trying to modify.
        Only provide this when querying the available instance types for
        modifying an existing domain.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with a list of Elasticsearch instance types. Upon failure, also
        contains a key 'error' with the error message as value.

    .. versionadded:: 3001
    """
    ret = {"result": False}
    try:
        conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
        # Omit DomainName entirely when not supplied.
        boto_params = salt.utils.data.filter_falsey(
            {
                "ElasticsearchVersion": str(elasticsearch_version),
                "DomainName": domain_name,
            }
        )
        types = []
        for page in conn.get_paginator("list_elasticsearch_instance_types").paginate(
            **boto_params
        ):
            types.extend(page["ElasticsearchInstanceTypes"])
        if types:
            ret["result"] = True
            ret["response"] = types
    except (ParamValidationError, ClientError) as exc:
        ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret
@depends("botocore", version="1.5.18")
def list_elasticsearch_versions(region=None, keyid=None, key=None, profile=None):
    """
    List all supported Elasticsearch versions.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with a list of Elasticsearch versions. Upon failure, also contains
        a key 'error' with the error message as value.

    .. versionadded:: 3001
    """
    ret = {"result": False}
    try:
        conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
        versions = []
        for page in conn.get_paginator("list_elasticsearch_versions").paginate():
            versions.extend(page["ElasticsearchVersions"])
        if versions:
            ret["result"] = True
            ret["response"] = versions
    except (ParamValidationError, ClientError) as exc:
        ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret


def list_tags(
    domain_name=None, arn=None, region=None, key=None, keyid=None, profile=None
):
    """
    Returns all tags for the given Elasticsearch domain.

    :param str domain_name: The name of the Elasticsearch domain to list
        the tags of.
    :param str arn: The ARN of the Elasticsearch domain to list the tags of.
        Specifying this overrides ``domain_name``.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with a dict of tags. Upon failure, also contains a key 'error' with
        the error message as value.

    .. versionadded:: 3001
    """
    if not any((arn, domain_name)):
        raise SaltInvocationError(
            "At least one of domain_name or arn must be specified."
        )
    ret = {"result": False}
    if arn is None:
        # Resolve the ARN from the domain name.
        lookup = describe_elasticsearch_domain(
            domain_name=domain_name,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        if "error" in lookup:
            ret.update(lookup)
        elif not lookup["result"]:
            ret["error"] = f'The domain with name "{domain_name}" does not exist.'
        else:
            arn = lookup["response"].get("ARN")
    if arn:
        try:
            conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
            res = conn.list_tags(ARN=arn)
            ret["result"] = True
            ret["response"] = {
                entry["Key"]: entry["Value"] for entry in res.get("TagList", [])
            }
        except (ParamValidationError, ClientError) as exc:
            ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret


@depends("botocore", version="1.10.15")
def purchase_reserved_elasticsearch_instance_offering(
    reserved_elasticsearch_instance_offering_id,
    reservation_name,
    instance_count=None,
    region=None,
    keyid=None,
    key=None,
    profile=None,
):
    """
    Allows you to purchase reserved Elasticsearch instances.

    :param str reserved_elasticsearch_instance_offering_id: The ID of the
        reserved Elasticsearch instance offering to purchase.
    :param str reservation_name: A customer-specified identifier to track
        this reservation.
    :param int instance_count: The number of Elasticsearch instances to
        reserve.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with purchase information. Upon failure, also contains a key
        'error' with the error message as value.

    .. versionadded:: 3001
    """
    ret = {"result": False}
    # Drop unspecified (falsy) values so boto only sees real arguments.
    boto_params = salt.utils.data.filter_falsey(
        {
            "ReservedElasticsearchInstanceOfferingId": (
                reserved_elasticsearch_instance_offering_id
            ),
            "ReservationName": reservation_name,
            "InstanceCount": instance_count,
        }
    )
    try:
        conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
        res = conn.purchase_reserved_elasticsearch_instance_offering(**boto_params)
        if res:
            ret["result"] = True
            ret["response"] = res
    except (ParamValidationError, ClientError) as exc:
        ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret
def remove_tags(
    tag_keys,
    domain_name=None,
    arn=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Removes the specified set of tags from the specified Elasticsearch
    domain.

    :param list tag_keys: List with tag keys you want to remove from the
        Elasticsearch domain.
    :param str domain_name: The name of the Elasticsearch domain you want to
        remove tags from.
    :param str arn: The ARN of the Elasticsearch domain you want to remove
        tags from. Specifying this overrides ``domain_name``.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon failure, also contains a key 'error' with
        the error message as value.

    .. versionadded:: 3001

    CLI Example:

    .. code-block:: bash

        salt myminion boto3_elasticsearch.remove_tags '["foo", "bar"]' domain_name=my_domain
    """
    if not any((arn, domain_name)):
        raise SaltInvocationError(
            "At least one of domain_name or arn must be specified."
        )
    ret = {"result": False}
    if arn is None:
        # Resolve the ARN from the domain name.
        lookup = describe_elasticsearch_domain(
            domain_name=domain_name,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        if "error" in lookup:
            ret.update(lookup)
        elif not lookup["result"]:
            ret["error"] = f'The domain with name "{domain_name}" does not exist.'
        else:
            arn = lookup["response"].get("ARN")
    if arn:
        try:
            conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
            conn.remove_tags(ARN=arn, TagKeys=tag_keys)
            ret["result"] = True
        except (ParamValidationError, ClientError) as exc:
            ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret


@depends("botocore", version="1.12.21")
def start_elasticsearch_service_software_update(
    domain_name, region=None, keyid=None, key=None, profile=None
):
    """
    Schedules a service software update for an Amazon ES domain.

    :param str domain_name: The name of the domain that you want to update
        to the latest service software.

    :rtype: dict
    :return: Dictionary with key 'result' and as value a boolean denoting
        success or failure. Upon success, also contains a key 'response'
        with service software information. Upon failure, also contains a
        key 'error' with the error message as value.

    .. versionadded:: 3001
    """
    ret = {"result": False}
    try:
        conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile)
        res = conn.start_elasticsearch_service_software_update(
            DomainName=domain_name
        )
        if res and "ServiceSoftwareOptions" in res:
            ret["result"] = True
            ret["response"] = res["ServiceSoftwareOptions"]
    except (ParamValidationError, ClientError) as exc:
        ret["error"] = __utils__["boto3.get_error"](exc)["message"]
    return ret
versionadded:: 3001 """ ret = {"result": False} boto_params = {"DomainName": domain_name} try: conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile) res = conn.start_elasticsearch_service_software_update(**boto_params) if res and "ServiceSoftwareOptions" in res: ret["result"] = True ret["response"] = res["ServiceSoftwareOptions"] except (ParamValidationError, ClientError) as exp: ret.update({"error": __utils__["boto3.get_error"](exp)["message"]}) return ret def update_elasticsearch_domain_config( domain_name, elasticsearch_cluster_config=None, ebs_options=None, vpc_options=None, access_policies=None, snapshot_options=None, cognito_options=None, advanced_options=None, log_publishing_options=None, blocking=False, region=None, key=None, keyid=None, profile=None, ): """ Modifies the cluster configuration of the specified Elasticsearch domain, for example setting the instance type and the number of instances. :param str domain_name: The name of the Elasticsearch domain that you are creating. Domain names are unique across the domains owned by an account within an AWS region. Domain names must start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen). :param dict elasticsearch_cluster_config: Dictionary specifying the configuration options for an Elasticsearch domain. Keys (case sensitive) in here are: - InstanceType (str): The instance type for an Elasticsearch cluster. - InstanceCount (int): The instance type for an Elasticsearch cluster. - DedicatedMasterEnabled (bool): Indicate whether a dedicated master node is enabled. - ZoneAwarenessEnabled (bool): Indicate whether zone awareness is enabled. - ZoneAwarenessConfig (dict): Specifies the zone awareness configuration for a domain when zone awareness is enabled. Keys (case sensitive) in here are: - AvailabilityZoneCount (int): An integer value to indicate the number of availability zones for a domain when zone awareness is enabled. 
This should be equal to number of subnets if VPC endpoints is enabled. - DedicatedMasterType (str): The instance type for a dedicated master node. - DedicatedMasterCount (int): Total number of dedicated master nodes, active and on standby, for the cluster. :param dict ebs_options: Dict specifying the options to enable or disable and specifying the type and size of EBS storage volumes. Keys (case sensitive) in here are: - EBSEnabled (bool): Specifies whether EBS-based storage is enabled. - VolumeType (str): Specifies the volume type for EBS-based storage. - VolumeSize (int): Integer to specify the size of an EBS volume. - Iops (int): Specifies the IOPD for a Provisioned IOPS EBS volume (SSD). :param dict snapshot_options: Dict specifying the snapshot options. Keys (case sensitive) in here are: - AutomatedSnapshotStartHour (int): Specifies the time, in UTC format, when the service takes a daily automated snapshot of the specified Elasticsearch domain. Default value is 0 hours. :param dict vpc_options: Dict with the options to specify the subnets and security groups for the VPC endpoint. Keys (case sensitive) in here are: - SubnetIds (list): The list of subnets for the VPC endpoint. - SecurityGroupIds (list): The list of security groups for the VPC endpoint. :param dict cognito_options: Dict with options to specify the cognito user and identity pools for Kibana authentication. Keys (case sensitive) in here are: - Enabled (bool): Specifies the option to enable Cognito for Kibana authentication. - UserPoolId (str): Specifies the Cognito user pool ID for Kibana authentication. - IdentityPoolId (str): Specifies the Cognito identity pool ID for Kibana authentication. - RoleArn (str): Specifies the role ARN that provides Elasticsearch permissions for accessing Cognito resources. :param dict advanced_options: Dict with option to allow references to indices in an HTTP request body. Must be False when configuring access to individual sub-resources. 
By default, the value is True. See http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide\ /es-createupdatedomains.html#es-createdomain-configure-advanced-options for more information. :param str/dict access_policies: Dict or JSON string with the IAM access policy. :param dict log_publishing_options: Dict with options for various type of logs. The keys denote the type of log file and can be one of the following: INDEX_SLOW_LOGS, SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS. The value assigned to each key is a dict with the following case sensitive keys: - CloudWatchLogsLogGroupArn (str): The ARN of the Cloudwatch log group to which the log needs to be published. - Enabled (bool): Specifies whether given log publishing option is enabled or not. :param bool blocking: Whether or not to wait (block) until the Elasticsearch domain has been updated. :rtype: dict :return: Dictionary with key 'result' and as value a boolean denoting success or failure. Upon success, also contains a key 'reponse' with the domain configuration. Upon failure, also contains a key 'error' with the error message as value. .. versionadded:: 3001 CLI Example: .. 
code-block:: bash salt myminion boto3_elasticsearch.update_elasticsearch_domain_config mydomain \\ elasticsearch_cluster_config='{\\ "InstanceType": "t2.micro.elasticsearch", \\ "InstanceCount": 1, \\ "DedicatedMasterEnabled": false, "ZoneAwarenessEnabled": false}' \\ ebs_options='{\\ "EBSEnabled": true, \\ "VolumeType": "gp2", \\ "VolumeSize": 10, \\ "Iops": 0}' \\ access_policies='{"Version": "2012-10-17", "Statement": [{\\ "Effect": "Allow", "Principal": {"AWS": "*"}, "Action": "es:*", \\ "Resource": "arn:aws:es:us-east-1:111111111111:domain/mydomain/*", \\ "Condition": {"IpAddress": {"aws:SourceIp": ["127.0.0.1"]}}}]}' \\ snapshot_options='{"AutomatedSnapshotStartHour": 0}' \\ advanced_options='{"rest.action.multi.allow_explicit_index": "true"}' """ ret = {"result": False} boto_kwargs = salt.utils.data.filter_falsey( { "DomainName": domain_name, "ElasticsearchClusterConfig": elasticsearch_cluster_config, "EBSOptions": ebs_options, "SnapshotOptions": snapshot_options, "VPCOptions": vpc_options, "CognitoOptions": cognito_options, "AdvancedOptions": advanced_options, "AccessPolicies": ( salt.utils.json.dumps(access_policies) if isinstance(access_policies, dict) else access_policies ), "LogPublishingOptions": log_publishing_options, } ) try: conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile) res = conn.update_elasticsearch_domain_config(**boto_kwargs) if not res or "DomainConfig" not in res: log.warning("Domain was not updated") else: ret["result"] = True ret["response"] = res["DomainConfig"] if blocking: conn.get_waiter("ESDomainAvailable").wait(DomainName=domain_name) except (ParamValidationError, ClientError, WaiterError) as exp: ret.update({"error": __utils__["boto3.get_error"](exp)["message"]}) return ret @depends("botocore", version="1.10.77") def upgrade_elasticsearch_domain( domain_name, target_version, perform_check_only=None, blocking=False, region=None, keyid=None, key=None, profile=None, ): """ Allows you to either upgrade your 
domain or perform an Upgrade eligibility check to a compatible Elasticsearch version. :param str domain_name: The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen). :param str target_version: The version of Elasticsearch that you intend to upgrade the domain to. :param bool perform_check_only: This flag, when set to True, indicates that an Upgrade Eligibility Check needs to be performed. This will not actually perform the Upgrade. :param bool blocking: Whether or not to wait (block) until the Elasticsearch domain has been upgraded. :rtype: dict :return: Dictionary with key 'result' and as value a boolean denoting success or failure. Upon success, also contains a key 'reponse' with the domain configuration. Upon failure, also contains a key 'error' with the error message as value. .. versionadded:: 3001 CLI Example: .. code-block:: bash salt myminion boto3_elasticsearch.upgrade_elasticsearch_domain mydomain \\ target_version='6.7' \\ perform_check_only=True """ ret = {"result": False} boto_params = salt.utils.data.filter_falsey( { "DomainName": domain_name, "TargetVersion": str(target_version), "PerformCheckOnly": perform_check_only, } ) try: conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile) res = conn.upgrade_elasticsearch_domain(**boto_params) if res: ret["result"] = True ret["response"] = res if blocking: conn.get_waiter("ESUpgradeFinished").wait(DomainName=domain_name) except (ParamValidationError, ClientError, WaiterError) as exp: ret.update({"error": __utils__["boto3.get_error"](exp)["message"]}) return ret def exists(domain_name, region=None, key=None, keyid=None, profile=None): """ Given a domain name, check to see if the given domain exists. :param str domain_name: The name of the domain to check. 
:rtype: dict :return: Dictionary with key 'result' and as value a boolean denoting success or failure. Upon failure, also contains a key 'error' with the error message as value. .. versionadded:: 3001 """ ret = {"result": False} try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.describe_elasticsearch_domain(DomainName=domain_name) ret["result"] = True except (ParamValidationError, ClientError) as exp: if exp.response.get("Error", {}).get("Code") != "ResourceNotFoundException": ret.update({"error": __utils__["boto3.get_error"](exp)["message"]}) return ret def wait_for_upgrade(domain_name, region=None, keyid=None, key=None, profile=None): """ Block until an upgrade-in-progress for domain ``name`` is finished. :param str name: The name of the domain to wait for. :rtype dict: :return: Dictionary with key 'result' and as value a boolean denoting success or failure. Upon failure, also contains a key 'error' with the error message as value. .. versionadded:: 3001 """ ret = {"result": False} try: conn = _get_conn(region=region, keyid=keyid, key=key, profile=profile) conn.get_waiter("ESUpgradeFinished").wait(DomainName=domain_name) ret["result"] = True except (ParamValidationError, ClientError, WaiterError) as exp: ret.update({"error": __utils__["boto3.get_error"](exp)["message"]}) return ret @depends("botocore", version="1.10.77") def check_upgrade_eligibility( domain_name, elasticsearch_version, region=None, keyid=None, key=None, profile=None ): """ Helper function to determine in one call if an Elasticsearch domain can be upgraded to the specified Elasticsearch version. This assumes that the Elasticsearch domain is at rest at the moment this function is called. I.e. The domain is not in the process of : - being created. - being updated. - another upgrade running, or a check thereof. - being deleted. Behind the scenes, this does 3 things: - Check if ``elasticsearch_version`` is among the compatible elasticsearch versions. 
- Perform a check if the Elasticsearch domain is eligible for the upgrade. - Check the result of the check and return the result as a boolean. :param str name: The Elasticsearch domain name to check. :param str elasticsearch_version: The Elasticsearch version to upgrade to. :rtype: dict :return: Dictionary with key 'result' and as value a boolean denoting success or failure. Upon success, also contains a key 'reponse' with boolean result of the check. Upon failure, also contains a key 'error' with the error message as value. .. versionadded:: 3001 CLI Example: .. code-block:: bash salt myminion boto3_elasticsearch.check_upgrade_eligibility mydomain '6.7' """ ret = {"result": False} # Check if the desired version is in the list of compatible versions res = get_compatible_elasticsearch_versions( domain_name, region=region, keyid=keyid, key=key, profile=profile ) if "error" in res: return res compatible_versions = res["response"][0]["TargetVersions"] if str(elasticsearch_version) not in compatible_versions: ret["result"] = True ret["response"] = False ret["error"] = 'Desired version "{}" not in compatible versions: {}.'.format( elasticsearch_version, compatible_versions ) return ret # Check if the domain is eligible to upgrade to the desired version res = upgrade_elasticsearch_domain( domain_name, elasticsearch_version, perform_check_only=True, blocking=True, region=region, keyid=keyid, key=key, profile=profile, ) if "error" in res: return res res = wait_for_upgrade( domain_name, region=region, keyid=keyid, key=key, profile=profile ) if "error" in res: return res res = get_upgrade_status( domain_name, region=region, keyid=keyid, key=key, profile=profile ) ret["result"] = True ret["response"] = ( res["response"]["UpgradeStep"] == "PRE_UPGRADE_CHECK" and res["response"]["StepStatus"] == "SUCCEEDED" ) return ret
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/boto3_elasticsearch.py
0.677474
0.202838
boto3_elasticsearch.py
pypi
import logging import salt.utils.json import salt.utils.mattermost from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) __virtualname__ = "mattermost" def __virtual__(): """ Return virtual name of the module. :return: The virtual name of the module. """ return __virtualname__ def _get_hook(): """ Retrieves and return the Mattermost's configured hook :return: String: the hook string """ hook = __salt__["config.get"]("mattermost.hook") or __salt__["config.get"]( "mattermost:hook" ) if not hook: raise SaltInvocationError("No Mattermost Hook found") return hook def _get_api_url(): """ Retrieves and return the Mattermost's configured api url :return: String: the api url string """ api_url = __salt__["config.get"]("mattermost.api_url") or __salt__["config.get"]( "mattermost:api_url" ) if not api_url: raise SaltInvocationError("No Mattermost API URL found") return api_url def _get_channel(): """ Retrieves the Mattermost's configured channel :return: String: the channel string """ channel = __salt__["config.get"]("mattermost.channel") or __salt__["config.get"]( "mattermost:channel" ) return channel def _get_username(): """ Retrieves the Mattermost's configured username :return: String: the username string """ username = __salt__["config.get"]("mattermost.username") or __salt__["config.get"]( "mattermost:username" ) return username def post_message(message, channel=None, username=None, api_url=None, hook=None): """ Send a message to a Mattermost channel. :param channel: The channel name, either will work. :param username: The username of the poster. :param message: The message to send to the Mattermost channel. :param api_url: The Mattermost api url, if not specified in the configuration. :param hook: The Mattermost hook, if not specified in the configuration. :return: Boolean if message was sent successfully. CLI Example: .. 
code-block:: bash salt '*' mattermost.post_message message='Build is done' """ if not api_url: api_url = _get_api_url() if not hook: hook = _get_hook() if not username: username = _get_username() if not channel: channel = _get_channel() if not message: log.error("message is a required option.") parameters = dict() if channel: parameters["channel"] = channel if username: parameters["username"] = username parameters["text"] = "```" + message + "```" # pre-formatted, fixed-width text log.debug("Parameters: %s", parameters) data = "payload={}".format( salt.utils.json.dumps(parameters) ) # pylint: disable=blacklisted-function result = salt.utils.mattermost.query(api_url=api_url, hook=hook, data=data) return bool(result)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/mattermost.py
0.842669
0.188922
mattermost.py
pypi
import salt.utils.path __authorized_modules__ = ["version", "namenode", "dfsadmin", "dfs", "fs"] def __virtual__(): """ Check if hadoop is present, then load the module """ if salt.utils.path.which("hadoop") or salt.utils.path.which("hdfs"): return "hadoop" return ( False, "The hadoop execution module cannot be loaded: hadoop or hdfs binary not in" " path.", ) def _hadoop_cmd(module, command, *args): """ Hadoop/hdfs command wrapper As Hadoop command has been deprecated this module will default to use hdfs command and fall back to hadoop if it is not found In order to prevent random execution the module name is checked Follows hadoop command template: hadoop module -command args E.g.: hadoop dfs -ls / """ tool = "hadoop" if salt.utils.path.which("hdfs"): tool = "hdfs" out = None if module and command: if module in __authorized_modules__: mappings = { "tool": tool, "module": module, "command": command, "args": " ".join(args), } cmd = "{tool} {module} -{command} {args}".format(**mappings) out = __salt__["cmd.run"](cmd, python_shell=False) else: return "Error: Unknown module" else: return "Error: Module and command not defined" return out def version(): """ Return version from hadoop version CLI Example: .. code-block:: bash salt '*' hadoop.version """ module = "version" out = _hadoop_cmd(module, True).split() return out[1] def dfs(command=None, *args): """ Execute a command on DFS CLI Example: .. code-block:: bash salt '*' hadoop.dfs ls / """ if command: return _hadoop_cmd("dfs", command, *args) else: return "Error: command must be provided" def dfsadmin_report(arg=None): """ .. versionadded:: 2019.2.0 Reports basic filesystem information and statistics. Optional flags may be used to filter the list of displayed DataNodes. arg [live] [dead] [decommissioning] CLI Example: .. 
code-block:: bash salt '*' hadoop.dfsadmin -report """ if arg is not None: if arg in ["live", "dead", "decommissioning"]: return _hadoop_cmd("dfsadmin", "report", arg) else: return ( "Error: the arg is wrong, it must be in ['live', 'dead'," " 'decommissioning']" ) else: return _hadoop_cmd("dfsadmin", "report") def dfs_present(path): """ Check if a file or directory is present on the distributed FS. CLI Example: .. code-block:: bash salt '*' hadoop.dfs_present /some_random_file Returns True if the file is present """ cmd_return = _hadoop_cmd("dfs", "stat", path) match = "No such file or directory" return False if match in cmd_return else True def dfs_absent(path): """ Check if a file or directory is absent on the distributed FS. CLI Example: .. code-block:: bash salt '*' hadoop.dfs_absent /some_random_file Returns True if the file is absent """ cmd_return = _hadoop_cmd("dfs", "stat", path) match = "No such file or directory" return True if match in cmd_return else False def namenode_format(force=None): """ Format a name node .. code-block:: bash salt '*' hadoop.namenode_format force=True """ force_param = "" if force: force_param = "-force" return _hadoop_cmd("namenode", "format", "-nonInteractive", force_param)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/hadoop.py
0.554229
0.168515
hadoop.py
pypi
import logging from datetime import datetime import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import CommandExecutionError try: import pwd HAS_PWD = True except ImportError: HAS_PWD = False log = logging.getLogger(__name__) # Start logging __virtualname__ = "shadow" def __virtual__(): # Is this macOS? if not salt.utils.platform.is_darwin(): return False, "Not macOS" if HAS_PWD: return __virtualname__ else: return (False, "The pwd module failed to load.") def _get_account_policy(name): """ Get the entire accountPolicy and return it as a dictionary. For use by this module only :param str name: The user name :return: a dictionary containing all values for the accountPolicy :rtype: dict :raises: CommandExecutionError on user not found or any other unknown error """ cmd = "pwpolicy -u {} -getpolicy".format(name) try: ret = salt.utils.mac_utils.execute_return_result(cmd) except CommandExecutionError as exc: if "Error: user <{}> not found".format(name) in exc.strerror: raise CommandExecutionError("User not found: {}".format(name)) raise CommandExecutionError("Unknown error: {}".format(exc.strerror)) try: policy_list = ret.split("\n")[1].split(" ") policy_dict = {} for policy in policy_list: if "=" in policy: key, value = policy.split("=") policy_dict[key] = value return policy_dict except IndexError: return {} def _set_account_policy(name, policy): """ Set a value in the user accountPolicy. 
For use by this module only :param str name: The user name :param str policy: The policy to apply :return: True if success, otherwise False :rtype: bool :raises: CommandExecutionError on user not found or any other unknown error """ cmd = 'pwpolicy -u {} -setpolicy "{}"'.format(name, policy) try: return salt.utils.mac_utils.execute_return_success(cmd) except CommandExecutionError as exc: if "Error: user <{}> not found".format(name) in exc.strerror: raise CommandExecutionError("User not found: {}".format(name)) raise CommandExecutionError("Unknown error: {}".format(exc.strerror)) def _get_account_policy_data_value(name, key): """ Return the value for a key in the accountPolicy section of the user's plist file. For use by this module only :param str name: The username :param str key: The accountPolicy key :return: The value contained within the key :rtype: str :raises: CommandExecutionError on user not found or any other unknown error """ cmd = "dscl . -readpl /Users/{} accountPolicyData {}".format(name, key) try: ret = salt.utils.mac_utils.execute_return_result(cmd) except CommandExecutionError as exc: if "eDSUnknownNodeName" in exc.strerror: raise CommandExecutionError("User not found: {}".format(name)) raise CommandExecutionError("Unknown error: {}".format(exc.strerror)) return ret def _convert_to_datetime(unix_timestamp): """ Converts a unix timestamp to a human readable date/time :param float unix_timestamp: A unix timestamp :return: A date/time in the format YYYY-mm-dd HH:MM:SS :rtype: str """ try: unix_timestamp = float(unix_timestamp) return datetime.fromtimestamp(unix_timestamp).strftime("%Y-%m-%d %H:%M:%S") except (ValueError, TypeError): return "Invalid Timestamp" def info(name): """ Return information for the specified user :param str name: The username :return: A dictionary containing the user's shadow information :rtype: dict CLI Example: .. 
code-block:: bash salt '*' shadow.info admin """ try: data = pwd.getpwnam(name) return { "name": data.pw_name, "passwd": data.pw_passwd, "account_created": get_account_created(name), "login_failed_count": get_login_failed_count(name), "login_failed_last": get_login_failed_last(name), "lstchg": get_last_change(name), "max": get_maxdays(name), "expire": get_expire(name), "change": get_change(name), "min": "Unavailable", "warn": "Unavailable", "inact": "Unavailable", } except KeyError: log.debug("User not found: %s", name) return { "name": "", "passwd": "", "account_created": "", "login_failed_count": "", "login_failed_last": "", "lstchg": "", "max": "", "expire": "", "change": "", "min": "", "warn": "", "inact": "", } def get_account_created(name): """ Get the date/time the account was created :param str name: The username of the account :return: The date/time the account was created (yyyy-mm-dd hh:mm:ss) :rtype: str :raises: CommandExecutionError on user not found or any other unknown error CLI Example: .. code-block:: bash salt '*' shadow.get_account_created admin """ ret = _get_account_policy_data_value(name, "creationTime") unix_timestamp = salt.utils.mac_utils.parse_return(ret) date_text = _convert_to_datetime(unix_timestamp) return date_text def get_last_change(name): """ Get the date/time the account was changed :param str name: The username of the account :return: The date/time the account was modified (yyyy-mm-dd hh:mm:ss) :rtype: str :raises: CommandExecutionError on user not found or any other unknown error CLI Example: .. 
code-block:: bash salt '*' shadow.get_last_change admin """ ret = _get_account_policy_data_value(name, "passwordLastSetTime") unix_timestamp = salt.utils.mac_utils.parse_return(ret) date_text = _convert_to_datetime(unix_timestamp) return date_text def get_login_failed_count(name): """ Get the number of failed login attempts :param str name: The username of the account :return: The number of failed login attempts :rtype: int :raises: CommandExecutionError on user not found or any other unknown error CLI Example: .. code-block:: bash salt '*' shadow.get_login_failed_count admin """ ret = _get_account_policy_data_value(name, "failedLoginCount") return salt.utils.mac_utils.parse_return(ret) def get_login_failed_last(name): """ Get the date/time of the last failed login attempt :param str name: The username of the account :return: The date/time of the last failed login attempt on this account (yyyy-mm-dd hh:mm:ss) :rtype: str :raises: CommandExecutionError on user not found or any other unknown error CLI Example: .. code-block:: bash salt '*' shadow.get_login_failed_last admin """ ret = _get_account_policy_data_value(name, "failedLoginTimestamp") unix_timestamp = salt.utils.mac_utils.parse_return(ret) date_text = _convert_to_datetime(unix_timestamp) return date_text def set_maxdays(name, days): """ Set the maximum age of the password in days :param str name: The username of the account :param int days: The maximum age of the account in days :return: True if successful, False if not :rtype: bool :raises: CommandExecutionError on user not found or any other unknown error CLI Example: .. 
code-block:: bash salt '*' shadow.set_maxdays admin 90 """ minutes = days * 24 * 60 _set_account_policy(name, "maxMinutesUntilChangePassword={}".format(minutes)) return get_maxdays(name) == days def get_maxdays(name): """ Get the maximum age of the password :param str name: The username of the account :return: The maximum age of the password in days :rtype: int :raises: CommandExecutionError on user not found or any other unknown error CLI Example: .. code-block:: bash salt '*' shadow.get_maxdays admin 90 """ policies = _get_account_policy(name) if "maxMinutesUntilChangePassword" in policies: max_minutes = policies["maxMinutesUntilChangePassword"] return int(max_minutes) / 24 / 60 return 0 def set_mindays(name, days): """ Set the minimum password age in days. Not available in macOS. :param str name: The user name :param int days: The number of days :return: Will always return False until macOS supports this feature. :rtype: bool CLI Example: .. code-block:: bash salt '*' shadow.set_mindays admin 90 """ return False def set_inactdays(name, days): """ Set the number if inactive days before the account is locked. Not available in macOS :param str name: The user name :param int days: The number of days :return: Will always return False until macOS supports this feature. :rtype: bool CLI Example: .. code-block:: bash salt '*' shadow.set_inactdays admin 90 """ return False def set_warndays(name, days): """ Set the number of days before the password expires that the user will start to see a warning. Not available in macOS :param str name: The user name :param int days: The number of days :return: Will always return False until macOS supports this feature. :rtype: bool CLI Example: .. code-block:: bash salt '*' shadow.set_warndays admin 90 """ return False def set_change(name, date): """ Sets the date on which the password expires. The user will be required to change their password. 
Format is mm/dd/yyyy :param str name: The name of the user account :param date date: The date the password will expire. Must be in mm/dd/yyyy format. :return: True if successful, otherwise False :rtype: bool :raises: CommandExecutionError on user not found or any other unknown error CLI Example: .. code-block:: bash salt '*' shadow.set_change username 09/21/2016 """ _set_account_policy(name, "usingExpirationDate=1 expirationDateGMT={}".format(date)) return get_change(name) == date def get_change(name): """ Gets the date on which the password expires :param str name: The name of the user account :return: The date the password will expire :rtype: str :raises: CommandExecutionError on user not found or any other unknown error CLI Example: .. code-block:: bash salt '*' shadow.get_change username """ policies = _get_account_policy(name) if "expirationDateGMT" in policies: return policies["expirationDateGMT"] return "Value not set" def set_expire(name, date): """ Sets the date on which the account expires. The user will not be able to login after this date. Date format is mm/dd/yyyy :param str name: The name of the user account :param datetime date: The date the account will expire. Format must be mm/dd/yyyy. :return: True if successful, False if not :rtype: bool :raises: CommandExecutionError on user not found or any other unknown error CLI Example: .. code-block:: bash salt '*' shadow.set_expire username 07/23/2015 """ _set_account_policy( name, "usingHardExpirationDate=1 hardExpireDateGMT={}".format(date) ) return get_expire(name) == date def get_expire(name): """ Gets the date on which the account expires :param str name: The name of the user account :return: The date the account expires :rtype: str :raises: CommandExecutionError on user not found or any other unknown error CLI Example: .. 
code-block:: bash salt '*' shadow.get_expire username """ policies = _get_account_policy(name) if "hardExpireDateGMT" in policies: return policies["hardExpireDateGMT"] return "Value not set" def del_password(name): """ Deletes the account password :param str name: The user name of the account :return: True if successful, otherwise False :rtype: bool :raises: CommandExecutionError on user not found or any other unknown error CLI Example: .. code-block:: bash salt '*' shadow.del_password username """ # This removes the password cmd = "dscl . -passwd /Users/{} ''".format(name) try: salt.utils.mac_utils.execute_return_success(cmd) except CommandExecutionError as exc: if "eDSUnknownNodeName" in exc.strerror: raise CommandExecutionError("User not found: {}".format(name)) raise CommandExecutionError("Unknown error: {}".format(exc.strerror)) # This is so it looks right in shadow.info cmd = "dscl . -create /Users/{} Password '*'".format(name) salt.utils.mac_utils.execute_return_success(cmd) return info(name)["passwd"] == "*" def set_password(name, password): """ Set the password for a named user (insecure, the password will be in the process list while the command is running) :param str name: The name of the local user, which is assumed to be in the local directory service :param str password: The plaintext password to set :return: True if successful, otherwise False :rtype: bool :raises: CommandExecutionError on user not found or any other unknown error CLI Example: .. code-block:: bash salt '*' mac_shadow.set_password macuser macpassword """ cmd = "dscl . -passwd /Users/{} '{}'".format(name, password) try: salt.utils.mac_utils.execute_return_success(cmd) except CommandExecutionError as exc: if "eDSUnknownNodeName" in exc.strerror: raise CommandExecutionError("User not found: {}".format(name)) raise CommandExecutionError("Unknown error: {}".format(exc.strerror)) return True
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/mac_shadow.py
0.603581
0.177847
mac_shadow.py
pypi
import copy import logging import os import salt.fileclient import salt.utils.data import salt.utils.dictupdate as dictupdate import salt.utils.files import salt.utils.json import salt.utils.url import salt.utils.yaml __virtualname__ = "defaults" log = logging.getLogger(__name__) def _mk_client(): """ Create a file client and add it to the context """ if "cp.fileclient" not in __context__: __context__["cp.fileclient"] = salt.fileclient.get_file_client(__opts__) def _load(formula): """ Generates a list of salt://<formula>/defaults.(json|yaml) files and fetches them from the Salt master. Returns first defaults file as python dict. """ # Compute possibilities _mk_client() paths = [] for ext in ("yaml", "json"): source_url = salt.utils.url.create(formula + "/defaults." + ext) paths.append(source_url) # Fetch files from master defaults_files = __context__["cp.fileclient"].cache_files(paths) for file_ in defaults_files: if not file_: # Skip empty string returned by cp.fileclient.cache_files. continue suffix = file_.rsplit(".", 1)[-1] if suffix == "yaml": loader = salt.utils.yaml.safe_load elif suffix == "json": loader = salt.utils.json.load else: log.debug("Failed to determine loader for %r", file_) continue if os.path.exists(file_): log.debug("Reading defaults from %r", file_) with salt.utils.files.fopen(file_) as fhr: defaults = loader(fhr) log.debug("Read defaults %r", defaults) return defaults or {} def get(key, default=""): """ defaults.get is used much like pillar.get except that it will read a default value for a pillar from defaults.json or defaults.yaml files that are stored in the root of a salt formula. CLI Example: .. code-block:: bash salt '*' defaults.get core:users:root The defaults is computed from pillar key. The first entry is considered as the formula namespace. For example, querying ``core:users:root`` will try to load ``salt://core/defaults.yaml`` and ``salt://core/defaults.json``. 
""" # Determine formula namespace from query if ":" in key: namespace, key = key.split(":", 1) else: namespace, key = key, None # Fetch and load defaults formula files from states. defaults = _load(namespace) # Fetch value if key: return salt.utils.data.traverse_dict_and_list(defaults, key, default) else: return defaults def merge(dest, src, merge_lists=False, in_place=True, convert_none=True): """ defaults.merge Allows deep merging of dicts in formulas. merge_lists : False If True, it will also merge lists instead of replace their items. in_place : True If True, it will merge into dest dict, if not it will make a new copy from that dict and return it. convert_none : True If True, it will convert src and dest to empty dicts if they are None. If True and dest is None but in_place is True, raises TypeError. If False it will make a new copy from that dict and return it. .. versionadded:: 3005 CLI Example: .. code-block:: bash salt '*' defaults.merge '{a: b}' '{d: e}' It is more typical to use this in a templating language in formulas, instead of directly on the command-line. """ # Force empty dicts if applicable (useful for cleaner templating) src = {} if (src is None and convert_none) else src if dest is None and convert_none: if in_place: raise TypeError("Can't perform in-place merge into NoneType") else: dest = {} if in_place: merged = dest else: merged = copy.deepcopy(dest) return dictupdate.update(merged, src, merge_lists=merge_lists) def deepcopy(source): """ defaults.deepcopy Allows deep copy of objects in formulas. By default, Python does not copy objects, it creates bindings between a target and an object. It is more typical to use this in a templating language in formulas, instead of directly on the command-line. """ return copy.deepcopy(source) def update(dest, defaults, merge_lists=True, in_place=True, convert_none=True): """ defaults.update Allows setting defaults for group of data set e.g. group for nodes. 
This function is a combination of defaults.merge and defaults.deepcopy to avoid redundant in jinja. Example: .. code-block:: yaml group01: defaults: enabled: True extra: - test - stage nodes: host01: index: foo upstream: bar host02: index: foo2 upstream: bar2 .. code-block:: jinja {% do salt['defaults.update'](group01.nodes, group01.defaults) %} Each node will look like the following: .. code-block:: yaml host01: enabled: True index: foo upstream: bar extra: - test - stage merge_lists : True If True, it will also merge lists instead of replace their items. in_place : True If True, it will merge into dest dict. if not it will make a new copy from that dict and return it. convert_none : True If True, it will convert src and dest to empty dicts if they are None. If True and dest is None but in_place is True, raises TypeError. If False it will make a new copy from that dict and return it. .. versionadded:: 3005 It is more typical to use this in a templating language in formulas, instead of directly on the command-line. """ # Force empty dicts if applicable here if in_place: if dest is None: raise TypeError("Can't perform in-place update into NoneType") else: nodes = dest else: dest = {} if (dest is None and convert_none) else dest nodes = deepcopy(dest) defaults = {} if (defaults is None and convert_none) else defaults for node_name, node_vars in nodes.items(): defaults_vars = deepcopy(defaults) node_vars = merge( defaults_vars, node_vars, merge_lists=merge_lists, convert_none=convert_none ) nodes[node_name] = node_vars return nodes
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/defaults.py
0.603348
0.201381
defaults.py
pypi
import datetime import logging import re import time import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils log = logging.getLogger(__name__) __virtualname__ = "at" def __virtual__(): """ We only deal with Solaris' specific version of at """ if not salt.utils.platform.is_sunos(): return (False, "The at module could not be loaded: unsupported platform") if ( not salt.utils.path.which("at") or not salt.utils.path.which("atq") or not salt.utils.path.which("atrm") ): return (False, "The at module could not be loaded: at command not found") return __virtualname__ def atq(tag=None): """ List all queued and running jobs or only those with an optional 'tag'. CLI Example: .. code-block:: bash salt '*' at.atq salt '*' at.atq [tag] salt '*' at.atq [job number] """ jobs = [] res = __salt__["cmd.run_all"]("atq") if res["retcode"] > 0: return {"error": res["stderr"]} # No jobs so return if res["stdout"] == "no files in queue.": return {"jobs": jobs} # Jobs created with at.at() will use the following # comment to denote a tagged job. 
job_kw_regex = re.compile(r"^### SALT: (\w+)") # Split each job into a dictionary and handle # pulling out tags or only listing jobs with a certain # tag for line in res["stdout"].splitlines(): job_tag = "" # skip header if line.startswith(" Rank"): continue # parse job output tmp = line.split() timestr = " ".join(tmp[1:5]) job = tmp[6] specs = ( datetime.datetime(*(time.strptime(timestr, "%b %d, %Y %H:%M")[0:5])) .isoformat() .split("T") ) specs.append(tmp[7]) specs.append(tmp[5]) # make sure job is str job = str(job) # search for any tags atjob_file = "/var/spool/cron/atjobs/{job}".format(job=job) if __salt__["file.file_exists"](atjob_file): with salt.utils.files.fopen(atjob_file, "r") as atjob: for line in atjob: line = salt.utils.stringutils.to_unicode(line) tmp = job_kw_regex.match(line) if tmp: job_tag = tmp.groups()[0] # filter on tags if not tag: jobs.append( { "job": job, "date": specs[0], "time": specs[1], "queue": specs[2], "user": specs[3], "tag": job_tag, } ) elif tag and tag in [job_tag, job]: jobs.append( { "job": job, "date": specs[0], "time": specs[1], "queue": specs[2], "user": specs[3], "tag": job_tag, } ) return {"jobs": jobs} def atrm(*args): """ Remove jobs from the queue. CLI Example: .. code-block:: bash salt '*' at.atrm <jobid> <jobid> .. 
<jobid> salt '*' at.atrm all salt '*' at.atrm all [tag] """ if not args: return {"jobs": {"removed": [], "tag": None}} if args[0] == "all": if len(args) > 1: opts = list(list(map(str, [j["job"] for j in atq(args[1])["jobs"]]))) ret = {"jobs": {"removed": opts, "tag": args[1]}} else: opts = list(list(map(str, [j["job"] for j in atq()["jobs"]]))) ret = {"jobs": {"removed": opts, "tag": None}} else: opts = list( list(map(str, [i["job"] for i in atq()["jobs"] if i["job"] in args])) ) ret = {"jobs": {"removed": opts, "tag": None}} # call atrm for each job in ret['jobs']['removed'] for job in ret["jobs"]["removed"]: res_job = __salt__["cmd.run_all"]("atrm {job}".format(job=job)) if res_job["retcode"] > 0: if "failed" not in ret["jobs"]: ret["jobs"]["failed"] = {} ret["jobs"]["failed"][job] = res_job["stderr"] # remove failed from list if "failed" in ret["jobs"]: for job in ret["jobs"]["failed"]: ret["jobs"]["removed"].remove(job) return ret def at(*args, **kwargs): # pylint: disable=C0103 """ Add a job to the queue. The 'timespec' follows the format documented in the at(1) manpage. CLI Example: .. 
code-block:: bash salt '*' at.at <timespec> <cmd> [tag=<tag>] [runas=<user>] salt '*' at.at 12:05am '/sbin/reboot' tag=reboot salt '*' at.at '3:05am +3 days' 'bin/myscript' tag=nightly runas=jim """ # check args if len(args) < 2: return {"jobs": []} # build job if "tag" in kwargs: stdin = "### SALT: {}\n{}".format(kwargs["tag"], " ".join(args[1:])) else: stdin = " ".join(args[1:]) cmd_kwargs = {"stdin": stdin, "python_shell": False} if "runas" in kwargs: cmd_kwargs["runas"] = kwargs["runas"] res = __salt__["cmd.run_all"]( 'at "{timespec}"'.format(timespec=args[0]), **cmd_kwargs ) # verify job creation if res["retcode"] > 0: if "bad time specification" in res["stderr"]: return {"jobs": [], "error": "invalid timespec"} return {"jobs": [], "error": res["stderr"]} else: jobid = res["stderr"].splitlines()[1] jobid = str(jobid.split()[1]) return atq(jobid) def atc(jobid): """ Print the at(1) script that will run for the passed job id. This is mostly for debugging so the output will just be text. CLI Example: .. 
code-block:: bash salt '*' at.atc <jobid> """ atjob_file = "/var/spool/cron/atjobs/{job}".format(job=jobid) if __salt__["file.file_exists"](atjob_file): with salt.utils.files.fopen(atjob_file, "r") as rfh: return "".join( [salt.utils.stringutils.to_unicode(x) for x in rfh.readlines()] ) else: return {"error": "invalid job id '{}'".format(jobid)} def _atq(**kwargs): """ Return match jobs list """ jobs = [] runas = kwargs.get("runas", None) tag = kwargs.get("tag", None) hour = kwargs.get("hour", None) minute = kwargs.get("minute", None) day = kwargs.get("day", None) month = kwargs.get("month", None) year = kwargs.get("year", None) if year and len(str(year)) == 2: year = "20{}".format(year) jobinfo = atq()["jobs"] if not jobinfo: return {"jobs": jobs} for job in jobinfo: if not runas: pass elif runas == job["user"]: pass else: continue if not tag: pass elif tag == job["tag"]: pass else: continue if not hour: pass elif "{:02d}".format(int(hour)) == job["time"].split(":")[0]: pass else: continue if not minute: pass elif "{:02d}".format(int(minute)) == job["time"].split(":")[1]: pass else: continue if not day: pass elif "{:02d}".format(int(day)) == job["date"].split("-")[2]: pass else: continue if not month: pass elif "{:02d}".format(int(month)) == job["date"].split("-")[1]: pass else: continue if not year: pass elif year == job["date"].split("-")[0]: pass else: continue jobs.append(job) if not jobs: note = "No match jobs or time format error" return {"jobs": jobs, "note": note} return {"jobs": jobs} def jobcheck(**kwargs): """ Check the job from queue. The kwargs dict include 'hour minute day month year tag runas' Other parameters will be ignored. CLI Example: .. code-block:: bash salt '*' at.jobcheck runas=jam day=13 salt '*' at.jobcheck day=13 month=12 year=13 tag=rose """ if not kwargs: return {"error": "You have given a condition"} return _atq(**kwargs) # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/at_solaris.py
0.483161
0.196768
at_solaris.py
pypi
import salt.utils.path def __virtual__(): """ Only available on systems with Riak installed. """ if salt.utils.path.which("riak"): return True return ( False, "The riak execution module failed to load: the riak binary is not in the path.", ) def __execute_cmd(name, cmd): """ Execute Riak commands """ return __salt__["cmd.run_all"]("{} {}".format(salt.utils.path.which(name), cmd)) def start(): """ Start Riak CLI Example: .. code-block:: bash salt '*' riak.start """ ret = {"comment": "", "success": False} cmd = __execute_cmd("riak", "start") if cmd["retcode"] != 0: ret["comment"] = cmd["stderr"] else: ret["comment"] = cmd["stdout"] ret["success"] = True return ret def stop(): """ Stop Riak .. versionchanged:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' riak.stop """ ret = {"comment": "", "success": False} cmd = __execute_cmd("riak", "stop") if cmd["retcode"] != 0: ret["comment"] = cmd["stderr"] else: ret["comment"] = cmd["stdout"] ret["success"] = True return ret def cluster_join(username, hostname): """ Join a Riak cluster .. versionchanged:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' riak.cluster_join <user> <host> username - The riak username to join the cluster hostname - The riak hostname you are connecting to """ ret = {"comment": "", "success": False} cmd = __execute_cmd("riak-admin", "cluster join {}@{}".format(username, hostname)) if cmd["retcode"] != 0: ret["comment"] = cmd["stdout"] else: ret["comment"] = cmd["stdout"] ret["success"] = True return ret def cluster_leave(username, hostname): """ Leave a Riak cluster .. versionadded:: 2015.8.0 CLI Example: .. 
code-block:: bash salt '*' riak.cluster_leave <username> <host> username - The riak username to join the cluster hostname - The riak hostname you are connecting to """ ret = {"comment": "", "success": False} cmd = __execute_cmd("riak-admin", "cluster leave {}@{}".format(username, hostname)) if cmd["retcode"] != 0: ret["comment"] = cmd["stdout"] else: ret["comment"] = cmd["stdout"] ret["success"] = True return ret def cluster_plan(): """ Review Cluster Plan .. versionchanged:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' riak.cluster_plan """ cmd = __execute_cmd("riak-admin", "cluster plan") if cmd["retcode"] != 0: return False return True def cluster_commit(): """ Commit Cluster Changes .. versionchanged:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' riak.cluster_commit """ ret = {"comment": "", "success": False} cmd = __execute_cmd("riak-admin", "cluster commit") if cmd["retcode"] != 0: ret["comment"] = cmd["stdout"] else: ret["comment"] = cmd["stdout"] ret["success"] = True return ret def member_status(): """ Get cluster member status .. versionchanged:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' riak.member_status """ ret = { "membership": {}, "summary": {"Valid": 0, "Leaving": 0, "Exiting": 0, "Joining": 0, "Down": 0}, } out = __execute_cmd("riak-admin", "member-status")["stdout"].splitlines() for line in out: if line.startswith(("=", "-", "Status")): continue if "/" in line: # We're in the summary line for item in line.split("/"): key, val = item.split(":") ret["summary"][key.strip()] = val.strip() if len(line.split()) == 4: # We're on a node status line (status, ring, pending, node) = line.split() ret["membership"][node] = { "Status": status, "Ring": ring, "Pending": pending, } return ret def status(): """ Current node status .. versionadded:: 2015.8.0 CLI Example: .. 
code-block:: bash salt '*' riak.status """ ret = {} cmd = __execute_cmd("riak-admin", "status") for i in cmd["stdout"].splitlines(): if ":" in i: (name, val) = i.split(":", 1) ret[name.strip()] = val.strip() return ret def test(): """ Runs a test of a few standard Riak operations .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' riak.test """ ret = {"comment": "", "success": False} cmd = __execute_cmd("riak-admin", "test") if cmd["retcode"] != 0: ret["comment"] = cmd["stdout"] else: ret["comment"] = cmd["stdout"] ret["success"] = True return ret def services(): """ List available services on a node .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' riak.services """ cmd = __execute_cmd("riak-admin", "services") return cmd["stdout"][1:-1].split(",")
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/riak.py
0.645567
0.220154
riak.py
pypi
import salt.utils.nacl __virtualname__ = "nacl" def __virtual__(): if __opts__["fips_mode"] is True: return False, "nacl module not available in FIPS mode" return salt.utils.nacl.check_requirements() def keygen(sk_file=None, pk_file=None, **kwargs): """ Use libnacl to generate a keypair. If no `sk_file` is defined return a keypair. If only the `sk_file` is defined `pk_file` will use the same name with a postfix `.pub`. When the `sk_file` is already existing, but `pk_file` is not. The `pk_file` will be generated using the `sk_file`. CLI Examples: .. code-block:: bash salt-call nacl.keygen salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub salt-call --local nacl.keygen """ kwargs["opts"] = __opts__ return salt.utils.nacl.keygen(sk_file, pk_file, **kwargs) def enc(data, **kwargs): """ Alias to `{box_type}_encrypt` box_type: secretbox, sealedbox(default) """ kwargs["opts"] = __opts__ return salt.utils.nacl.enc(data, **kwargs) def enc_file(name, out=None, **kwargs): """ This is a helper function to encrypt a file and return its contents. You can provide an optional output file using `out` `name` can be a local file or when not using `salt-run` can be a url like `salt://`, `https://` etc. CLI Examples: .. code-block:: bash salt-run nacl.enc_file name=/tmp/id_rsa salt-call nacl.enc_file name=salt://crt/mycert out=/tmp/cert salt-run nacl.enc_file name=/tmp/id_rsa box_type=secretbox \ sk_file=/etc/salt/pki/master/nacl.pub """ kwargs["opts"] = __opts__ return salt.utils.nacl.enc_file(name, out, **kwargs) def dec(data, **kwargs): """ Alias to `{box_type}_decrypt` box_type: secretbox, sealedbox(default) """ kwargs["opts"] = __opts__ return salt.utils.nacl.dec(data, **kwargs) def dec_file(name, out=None, **kwargs): """ This is a helper function to decrypt a file and return its contents. 
You can provide an optional output file using `out` `name` can be a local file or when not using `salt-run` can be a url like `salt://`, `https://` etc. CLI Examples: .. code-block:: bash salt-run nacl.dec_file name=/tmp/id_rsa.nacl salt-call nacl.dec_file name=salt://crt/mycert.nacl out=/tmp/id_rsa salt-run nacl.dec_file name=/tmp/id_rsa.nacl box_type=secretbox \ sk_file=/etc/salt/pki/master/nacl.pub """ kwargs["opts"] = __opts__ return salt.utils.nacl.dec_file(name, out, **kwargs) def sealedbox_encrypt(data, **kwargs): """ Encrypt data using a public key generated from `nacl.keygen`. The encryptd data can be decrypted using `nacl.sealedbox_decrypt` only with the secret key. CLI Examples: .. code-block:: bash salt-run nacl.sealedbox_encrypt datatoenc salt-call --local nacl.sealedbox_encrypt datatoenc pk_file=/etc/salt/pki/master/nacl.pub salt-call --local nacl.sealedbox_encrypt datatoenc pk='vrwQF7cNiNAVQVAiS3bvcbJUnF0cN6fU9YTZD9mBfzQ=' """ kwargs["opts"] = __opts__ return salt.utils.nacl.sealedbox_encrypt(data, **kwargs) def sealedbox_decrypt(data, **kwargs): """ Decrypt data using a secret key that was encrypted using a public key with `nacl.sealedbox_encrypt`. CLI Examples: .. code-block:: bash salt-call nacl.sealedbox_decrypt pEXHQM6cuaF7A= salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo=' """ kwargs["opts"] = __opts__ return salt.utils.nacl.sealedbox_decrypt(data, **kwargs) def secretbox_encrypt(data, **kwargs): """ Encrypt data using a secret key generated from `nacl.keygen`. The same secret key can be used to decrypt the data using `nacl.secretbox_decrypt`. CLI Examples: .. 
code-block:: bash salt-run nacl.secretbox_encrypt datatoenc salt-call --local nacl.secretbox_encrypt datatoenc sk_file=/etc/salt/pki/master/nacl salt-call --local nacl.secretbox_encrypt datatoenc sk='YmFkcGFzcwo=' """ kwargs["opts"] = __opts__ return salt.utils.nacl.secretbox_encrypt(data, **kwargs) def secretbox_decrypt(data, **kwargs): """ Decrypt data that was encrypted using `nacl.secretbox_encrypt` using the secret key that was generated from `nacl.keygen`. CLI Examples: .. code-block:: bash salt-call nacl.secretbox_decrypt pEXHQM6cuaF7A= salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo=' """ kwargs["opts"] = __opts__ return salt.utils.nacl.secretbox_decrypt(data, **kwargs)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/nacl.py
0.743354
0.171546
nacl.py
pypi
import logging as logger import salt.utils.http import salt.utils.json import salt.utils.path # Import Salt lobs from salt.utils.decorators import memoize # Setup the logger log = logger.getLogger(__name__) def __virtual__(): if salt.utils.path.which("kapacitor"): return "kapacitor" else: return (False, "Missing dependency: kapacitor") @memoize def version(): """ Get the kapacitor version. """ version = __salt__["pkg.version"]("kapacitor") if not version: version = str(__salt__["config.option"]("kapacitor.version", "latest")) return version def _get_url(): """ Get the kapacitor URL. """ protocol = __salt__["config.option"]("kapacitor.protocol", "http") host = __salt__["config.option"]("kapacitor.host", "localhost") port = __salt__["config.option"]("kapacitor.port", 9092) return "{}://{}:{}".format(protocol, host, port) def get_task(name): """ Get a dict of data on a task. name Name of the task to get information about. CLI Example: .. code-block:: bash salt '*' kapacitor.get_task cpu """ url = _get_url() if version() < "0.13": task_url = "{}/task?name={}".format(url, name) else: task_url = "{}/kapacitor/v1/tasks/{}?skip-format=true".format(url, name) response = salt.utils.http.query(task_url, status=True) if response["status"] == 404: return None data = salt.utils.json.loads(response["body"]) if version() < "0.13": return { "script": data["TICKscript"], "type": data["Type"], "dbrps": data["DBRPs"], "enabled": data["Enabled"], } return { "script": data["script"], "type": data["type"], "dbrps": data["dbrps"], "enabled": data["status"] == "enabled", } def _run_cmd(cmd): """ Run a Kapacitor task and return a dictionary of info. 
""" ret = {} env_vars = { "KAPACITOR_URL": _get_url(), "KAPACITOR_UNSAFE_SSL": __salt__["config.option"]( "kapacitor.unsafe_ssl", "false" ), } result = __salt__["cmd.run_all"](cmd, env=env_vars) if result.get("stdout"): ret["stdout"] = result["stdout"] if result.get("stderr"): ret["stderr"] = result["stderr"] ret["success"] = result["retcode"] == 0 return ret def define_task( name, tick_script, task_type="stream", database=None, retention_policy="default", dbrps=None, ): """ Define a task. Serves as both create/update. name Name of the task. tick_script Path to the TICK script for the task. Can be a salt:// source. task_type Task type. Defaults to 'stream' dbrps A list of databases and retention policies in "dbname"."rpname" format to fetch data from. For backward compatibility, the value of 'database' and 'retention_policy' will be merged as part of dbrps. .. versionadded:: 2019.2.0 database Which database to fetch data from. retention_policy Which retention policy to fetch data from. Defaults to 'default'. CLI Example: .. code-block:: bash salt '*' kapacitor.define_task cpu salt://kapacitor/cpu.tick database=telegraf """ if not database and not dbrps: log.error("Providing database name or dbrps is mandatory.") return False if version() < "0.13": cmd = "kapacitor define -name {}".format(name) else: cmd = "kapacitor define {}".format(name) if tick_script.startswith("salt://"): tick_script = __salt__["cp.cache_file"](tick_script, __env__) cmd += " -tick {}".format(tick_script) if task_type: cmd += " -type {}".format(task_type) if not dbrps: dbrps = [] if database and retention_policy: dbrp = "{}.{}".format(database, retention_policy) dbrps.append(dbrp) if dbrps: for dbrp in dbrps: cmd += " -dbrp {}".format(dbrp) return _run_cmd(cmd) def delete_task(name): """ Delete a kapacitor task. name Name of the task to delete. CLI Example: .. 
code-block:: bash salt '*' kapacitor.delete_task cpu """ return _run_cmd("kapacitor delete tasks {}".format(name)) def enable_task(name): """ Enable a kapacitor task. name Name of the task to enable. CLI Example: .. code-block:: bash salt '*' kapacitor.enable_task cpu """ return _run_cmd("kapacitor enable {}".format(name)) def disable_task(name): """ Disable a kapacitor task. name Name of the task to disable. CLI Example: .. code-block:: bash salt '*' kapacitor.disable_task cpu """ return _run_cmd("kapacitor disable {}".format(name))
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/kapacitor.py
0.484868
0.189259
kapacitor.py
pypi
import logging import os import re # Import salt libraries import salt.utils.files import salt.utils.platform import salt.utils.stringutils from salt.exceptions import CommandExecutionError # Set up logger log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = "cryptdev" def __virtual__(): """ Only load on POSIX-like systems """ if salt.utils.platform.is_windows(): return (False, "The cryptdev module cannot be loaded: not a POSIX-like system") return True class _crypttab_entry: """ Utility class for manipulating crypttab entries. Primarily we're parsing, formatting, and comparing lines. Parsing emits dicts expected from crypttab() or raises a ValueError. """ class ParseError(ValueError): """Error raised when a line isn't parsible as a crypttab entry""" crypttab_keys = ("name", "device", "password", "options") crypttab_format = "{name: <12} {device: <44} {password: <22} {options}\n" @classmethod def dict_from_line(cls, line, keys=crypttab_keys): if len(keys) != 4: raise ValueError("Invalid key array: {}".format(keys)) if line.startswith("#"): raise cls.ParseError("Comment!") comps = line.split() # If there are only three entries, then the options have been omitted. 
if len(comps) == 3: comps += [""] if len(comps) != 4: raise cls.ParseError("Invalid Entry!") return dict(zip(keys, comps)) @classmethod def from_line(cls, *args, **kwargs): return cls(**cls.dict_from_line(*args, **kwargs)) @classmethod def dict_to_line(cls, entry): return cls.crypttab_format.format(**entry) def __str__(self): """String value, only works for full repr""" return self.dict_to_line(self.criteria) def __repr__(self): """Always works""" return repr(self.criteria) def pick(self, keys): """Returns an instance with just those keys""" subset = {key: self.criteria[key] for key in keys} return self.__class__(**subset) def __init__(self, **criteria): """Store non-empty, non-null values to use as filter""" self.criteria = { key: salt.utils.stringutils.to_unicode(value) for key, value in criteria.items() if value is not None } @staticmethod def norm_path(path): """Resolve equivalent paths equivalently""" return os.path.normcase(os.path.normpath(path)) def match(self, line): """Compare potentially partial criteria against a complete line""" entry = self.dict_from_line(line) for key, value in self.criteria.items(): if entry[key] != value: return False return True def active(): """ List existing device-mapper device details. """ ret = {} # TODO: This command should be extended to collect more information, such as UUID. devices = __salt__["cmd.run_stdout"]("dmsetup ls --target crypt") out_regex = re.compile(r"(?P<devname>\w+)\W+\((?P<major>\d+), (?P<minor>\d+)\)") log.debug(devices) for line in devices.split("\n"): match = out_regex.match(line) if match: dev_info = match.groupdict() ret[dev_info["devname"]] = dev_info else: log.warning("dmsetup output does not match expected format") return ret def crypttab(config="/etc/crypttab"): """ List the contents of the crypttab CLI Example: .. 
code-block:: bash salt '*' cryptdev.crypttab """ ret = {} if not os.path.isfile(config): return ret with salt.utils.files.fopen(config) as ifile: for line in ifile: line = salt.utils.stringutils.to_unicode(line).rstrip("\n") try: entry = _crypttab_entry.dict_from_line(line) entry["options"] = entry["options"].split(",") # Handle duplicate names by appending `_` while entry["name"] in ret: entry["name"] += "_" ret[entry.pop("name")] = entry except _crypttab_entry.ParseError: pass return ret def rm_crypttab(name, config="/etc/crypttab"): """ Remove the named mapping from the crypttab. If the described entry does not exist, nothing is changed, but the command succeeds by returning ``'absent'``. If a line is removed, it returns ``'change'``. CLI Example: .. code-block:: bash salt '*' cryptdev.rm_crypttab foo """ modified = False criteria = _crypttab_entry(name=name) # For each line in the config that does not match the criteria, add it to # the list. At the end, re-create the config from just those lines. lines = [] try: with salt.utils.files.fopen(config, "r") as ifile: for line in ifile: line = salt.utils.stringutils.to_unicode(line) try: if criteria.match(line): modified = True else: lines.append(line) except _crypttab_entry.ParseError: lines.append(line) except OSError as exc: msg = "Could not read from {0}: {1}" raise CommandExecutionError(msg.format(config, exc)) if modified: try: with salt.utils.files.fopen(config, "w+") as ofile: ofile.writelines(salt.utils.stringutils.to_str(line) for line in lines) except OSError as exc: msg = "Could not write to {0}: {1}" raise CommandExecutionError(msg.format(config, exc)) # If we reach this point, the changes were successful return "change" if modified else "absent" def set_crypttab( name, device, password="none", options="", config="/etc/crypttab", test=False, match_on="name", ): """ Verify that this device is represented in the crypttab, change the device to match the name passed, or add the name if it is not present. 
CLI Example: .. code-block:: bash salt '*' cryptdev.set_crypttab foo /dev/sdz1 mypassword swap,size=256 """ # Fix the options type if it is not a string if options is None: options = "" elif isinstance(options, str): pass elif isinstance(options, list): options = ",".join(options) else: msg = "options must be a string or list of strings" raise CommandExecutionError(msg) # preserve arguments for updating entry_args = { "name": name, "device": device, "password": password if password is not None else "none", "options": options, } lines = [] ret = None # Transform match_on into list--items will be checked later if isinstance(match_on, list): pass elif not isinstance(match_on, str): msg = "match_on must be a string or list of strings" raise CommandExecutionError(msg) else: match_on = [match_on] # generate entry and criteria objects, handle invalid keys in match_on entry = _crypttab_entry(**entry_args) try: criteria = entry.pick(match_on) except KeyError: filterFn = lambda key: key not in _crypttab_entry.crypttab_keys invalid_keys = filter(filterFn, match_on) msg = 'Unrecognized keys in match_on: "{}"'.format(invalid_keys) raise CommandExecutionError(msg) # parse file, use ret to cache status if not os.path.isfile(config): raise CommandExecutionError('Bad config file "{}"'.format(config)) try: with salt.utils.files.fopen(config, "r") as ifile: for line in ifile: line = salt.utils.stringutils.to_unicode(line) try: if criteria.match(line): # Note: If ret isn't None here, # we've matched multiple lines ret = "present" if entry.match(line): lines.append(line) else: ret = "change" lines.append(str(entry)) else: lines.append(line) except _crypttab_entry.ParseError: lines.append(line) except OSError as exc: msg = "Couldn't read from {0}: {1}" raise CommandExecutionError(msg.format(config, exc)) # add line if not present or changed if ret is None: lines.append(str(entry)) ret = "new" if ret != "present": # ret in ['new', 'change']: if not test: try: with 
salt.utils.files.fopen(config, "w+") as ofile: # The line was changed, commit it! ofile.writelines( salt.utils.stringutils.to_str(line) for line in lines ) except OSError: msg = "File not writable {0}" raise CommandExecutionError(msg.format(config)) return ret def open(name, device, keyfile): """ Open a crypt device using ``cryptsetup``. The ``keyfile`` must not be ``None`` or ``'none'``, because ``cryptsetup`` will otherwise ask for the password interactively. CLI Example: .. code-block:: bash salt '*' cryptdev.open foo /dev/sdz1 /path/to/keyfile """ if keyfile is None or keyfile == "none" or keyfile == "-": raise CommandExecutionError( "For immediate crypt device mapping, keyfile must not be none" ) code = __salt__["cmd.retcode"]( "cryptsetup open --key-file {} {} {}".format(keyfile, device, name) ) return code == 0 def close(name): """ Close a crypt device using ``cryptsetup``. CLI Example: .. code-block:: bash salt '*' cryptdev.close foo """ code = __salt__["cmd.retcode"]("cryptsetup close {}".format(name)) return code == 0
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/cryptdev.py
0.583441
0.241037
cryptdev.py
pypi
import functools import logging import re try: from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import CommandExecutionError HAS_REQUIRED_LIBS = True except ImportError: HAS_REQUIRED_LIBS = False log = logging.getLogger(__name__) __virtualname__ = "kernelpkg" def __virtual__(): """ Load this module on Debian-based systems only """ if not HAS_REQUIRED_LIBS: return (False, "Required library could not be imported") if __grains__.get("os_family", "") in ("Kali", "Debian"): return __virtualname__ elif __grains__.get("os_family", "") == "Cumulus": return __virtualname__ return (False, "Module kernelpkg_linux_apt: no APT based system detected") def active(): """ Return the version of the running kernel. CLI Example: .. code-block:: bash salt '*' kernelpkg.active """ if "pkg.normalize_name" in __salt__: return __salt__["pkg.normalize_name"](__grains__["kernelrelease"]) return __grains__["kernelrelease"] def list_installed(): """ Return a list of all installed kernels. CLI Example: .. code-block:: bash salt '*' kernelpkg.list_installed """ pkg_re = re.compile(r"^{}-[\d.-]+-{}$".format(_package_prefix(), _kernel_type())) pkgs = __salt__["pkg.list_pkgs"](versions_as_list=True) if pkgs is None: pkgs = [] result = list(filter(pkg_re.match, pkgs)) if result is None: return [] prefix_len = len(_package_prefix()) + 1 return sorted( (pkg[prefix_len:] for pkg in result), key=functools.cmp_to_key(_cmp_version) ) def latest_available(): """ Return the version of the latest kernel from the package repositories. CLI Example: .. code-block:: bash salt '*' kernelpkg.latest_available """ result = __salt__["pkg.latest_version"]( "{}-{}".format(_package_prefix(), _kernel_type()) ) if result == "": return latest_installed() version = re.match(r"^(\d+\.\d+\.\d+)\.(\d+)", result) return "{}-{}-{}".format(version.group(1), version.group(2), _kernel_type()) def latest_installed(): """ Return the version of the latest installed kernel. CLI Example: .. 
code-block:: bash salt '*' kernelpkg.latest_installed .. note:: This function may not return the same value as :py:func:`~salt.modules.kernelpkg_linux_apt.active` if a new kernel has been installed and the system has not yet been rebooted. The :py:func:`~salt.modules.kernelpkg_linux_apt.needs_reboot` function exists to detect this condition. """ pkgs = list_installed() if pkgs: return pkgs[-1] return None def needs_reboot(): """ Detect if a new kernel version has been installed but is not running. Returns True if a new kernel is installed, False otherwise. CLI Example: .. code-block:: bash salt '*' kernelpkg.needs_reboot """ return _LooseVersion(active()) < _LooseVersion(latest_installed()) def upgrade(reboot=False, at_time=None): """ Upgrade the kernel and optionally reboot the system. reboot : False Request a reboot if a new kernel is available. at_time : immediate Schedule the reboot at some point in the future. This argument is ignored if ``reboot=False``. See :py:func:`~salt.modules.system.reboot` for more details on this argument. CLI Example: .. code-block:: bash salt '*' kernelpkg.upgrade salt '*' kernelpkg.upgrade reboot=True at_time=1 .. note:: An immediate reboot often shuts down the system before the minion has a chance to return, resulting in errors. A minimal delay (1 minute) is useful to ensure the result is delivered to the master. """ result = __salt__["pkg.install"]( name="{}-{}".format(_package_prefix(), latest_available()) ) _needs_reboot = needs_reboot() ret = { "upgrades": result, "active": active(), "latest_installed": latest_installed(), "reboot_requested": reboot, "reboot_required": _needs_reboot, } if reboot and _needs_reboot: log.warning("Rebooting system due to kernel upgrade") __salt__["system.reboot"](at_time=at_time) return ret def upgrade_available(): """ Detect if a new kernel version is available in the repositories. Returns True if a new kernel is available, False otherwise. CLI Example: .. 
code-block:: bash salt '*' kernelpkg.upgrade_available """ return _LooseVersion(latest_available()) > _LooseVersion(latest_installed()) def remove(release): """ Remove a specific version of the kernel. release The release number of an installed kernel. This must be the entire release number as returned by :py:func:`~salt.modules.kernelpkg_linux_apt.list_installed`, not the package name. CLI Example: .. code-block:: bash salt '*' kernelpkg.remove 4.4.0-70-generic """ if release not in list_installed(): raise CommandExecutionError( "Kernel release '{}' is not installed".format(release) ) if release == active(): raise CommandExecutionError("Active kernel cannot be removed") target = "{}-{}".format(_package_prefix(), release) log.info("Removing kernel package %s", target) __salt__["pkg.purge"](target) return {"removed": [target]} def cleanup(keep_latest=True): """ Remove all unused kernel packages from the system. keep_latest : True In the event that the active kernel is not the latest one installed, setting this to True will retain the latest kernel package, in addition to the active one. If False, all kernel packages other than the active one will be removed. CLI Example: .. 
code-block:: bash salt '*' kernelpkg.cleanup """ removed = [] # Loop over all installed kernel packages for kernel in list_installed(): # Keep the active kernel package if kernel == active(): continue # Optionally keep the latest kernel package if keep_latest and kernel == latest_installed(): continue # Remove the kernel package removed.extend(remove(kernel)["removed"]) return {"removed": removed} def _package_prefix(): """ Return static string for the package prefix """ return "linux-image" def _kernel_type(): """ Parse the kernel name and return its type """ return re.match(r"^[\d.-]+-(.+)$", active()).group(1) def _cmp_version(item1, item2): """ Compare function for package version sorting """ vers1 = _LooseVersion(item1) vers2 = _LooseVersion(item2) if vers1 < vers2: return -1 if vers1 > vers2: return 1 return 0
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/kernelpkg_linux_apt.py
0.725162
0.196094
kernelpkg_linux_apt.py
pypi
import fnmatch import logging import re import salt.utils.platform log = logging.getLogger(__name__) __func_alias__ = {"list_": "list"} # Define the module's virtual name __virtualname__ = "service" def __virtual__(): """ Only work on systems that are a proxy minion """ try: if ( salt.utils.platform.is_proxy() and __opts__["proxy"]["proxytype"] == "ssh_sample" ): return __virtualname__ except KeyError: return ( False, "The ssh_service execution module failed to load. Check the " "proxy key in pillar.", ) return ( False, "The ssh_service execution module failed to load: only works on an " "ssh_sample proxy minion.", ) def get_all(): """ Return a list of all available services CLI Example: .. code-block:: bash salt '*' service.get_all """ proxy_fn = "ssh_sample.service_list" return __proxy__[proxy_fn]() def list_(): """ Return a list of all available services. CLI Example: .. code-block:: bash salt '*' service.list """ return get_all() def start(name, sig=None): """ Start the specified service on the ssh_sample CLI Example: .. code-block:: bash salt '*' service.start <service name> """ proxy_fn = "ssh_sample.service_start" return __proxy__[proxy_fn](name) def stop(name, sig=None): """ Stop the specified service on the rest_sample CLI Example: .. code-block:: bash salt '*' service.stop <service name> """ proxy_fn = "ssh_sample.service_stop" return __proxy__[proxy_fn](name) def restart(name, sig=None): """ Restart the specified service with rest_sample CLI Example: .. code-block:: bash salt '*' service.restart <service name> """ proxy_fn = "ssh_sample.service_restart" return __proxy__[proxy_fn](name) def status(name, sig=None): """ Return the status for a service via ssh_sample. If the name contains globbing, a dict mapping service name to True/False values is returned. .. versionchanged:: 2018.3.0 The service name can now be a glob (e.g. 
``salt*``) Args: name (str): The name of the service to check sig (str): Not implemented Returns: bool: True if running, False otherwise dict: Maps service name to True if running, False otherwise CLI Example: .. code-block:: bash salt '*' service.status <service name> """ proxy_fn = "ssh_sample.service_status" contains_globbing = bool(re.search(r"\*|\?|\[.+\]", name)) if contains_globbing: services = fnmatch.filter(get_all(), name) else: services = [name] results = {} for service in services: resp = __proxy__[proxy_fn](service) if resp["comment"] == "running": results[service] = True else: results[service] = False if contains_globbing: return results return results[name] def running(name, sig=None): """ Return whether this service is running. """ return status(name).get(name, False) def enabled(name, sig=None): """ Only the 'redbull' service is 'enabled' in the test """ return name == "redbull"
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/ssh_service.py
0.647352
0.199133
ssh_service.py
pypi
import logging # Import salt modules try: import scp import paramiko HAS_SCP = True except ImportError: HAS_SCP = False __proxyenabled__ = ["*"] __virtualname__ = "scp" log = logging.getLogger(__name__) def __virtual__(): if not HAS_SCP: return False, "Please install SCP for this modules: pip install scp" return __virtualname__ def _select_kwargs(**kwargs): paramiko_kwargs = {} scp_kwargs = {} paramiko_args = __utils__["args.get_function_argspec"](paramiko.SSHClient.connect)[ 0 ] paramiko_args.append("auto_add_policy") scp_args = __utils__["args.get_function_argspec"](scp.SCPClient.__init__)[0] scp_args.pop(0) # strip transport arg (it is passed in _prepare_connection) for key, val in kwargs.items(): if key in paramiko_args and val is not None: paramiko_kwargs[key] = val if key in scp_args and val is not None: scp_kwargs[key] = val return paramiko_kwargs, scp_kwargs def _prepare_connection(**kwargs): """ Prepare the underlying SSH connection with the remote target. """ paramiko_kwargs, scp_kwargs = _select_kwargs(**kwargs) ssh = paramiko.SSHClient() if paramiko_kwargs.pop("auto_add_policy", False): ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(**paramiko_kwargs) scp_client = scp.SCPClient(ssh.get_transport(), **scp_kwargs) return scp_client def get(remote_path, local_path="", recursive=False, preserve_times=False, **kwargs): """ Transfer files and directories from remote host to the localhost of the Minion. remote_path Path to retrieve from remote host. Since this is evaluated by scp on the remote host, shell wildcards and environment variables may be used. recursive: ``False`` Transfer files and directories recursively. preserve_times: ``False`` Preserve ``mtime`` and ``atime`` of transferred files and directories. hostname The hostname of the remote device. port: ``22`` The port of the remote device. username The username required for SSH authentication on the device. password Used for password authentication. 
It is also used for private key decryption if ``passphrase`` is not given. passphrase Used for decrypting private keys. pkey An optional private key to use for authentication. key_filename The filename, or list of filenames, of optional private key(s) and/or certificates to try for authentication. timeout An optional timeout (in seconds) for the TCP connect. socket_timeout: ``10`` The channel socket timeout in seconds. buff_size: ``16384`` The size of the SCP send buffer. allow_agent: ``True`` Set to ``False`` to disable connecting to the SSH agent. look_for_keys: ``True`` Set to ``False`` to disable searching for discoverable private key files in ``~/.ssh/`` banner_timeout An optional timeout (in seconds) to wait for the SSH banner to be presented. auth_timeout An optional timeout (in seconds) to wait for an authentication response. auto_add_policy: ``False`` Automatically add the host to the ``known_hosts``. CLI Example: .. code-block:: bash salt '*' scp.get /var/tmp/file /tmp/file hostname=10.10.10.1 auto_add_policy=True """ scp_client = _prepare_connection(**kwargs) get_kwargs = {"recursive": recursive, "preserve_times": preserve_times} if local_path: get_kwargs["local_path"] = local_path return scp_client.get(remote_path, **get_kwargs) def put( files, remote_path=None, recursive=False, preserve_times=False, saltenv="base", **kwargs ): """ Transfer files and directories to remote host. files A single path or a list of paths to be transferred. remote_path The path on the remote device where to store the files. recursive: ``True`` Transfer files and directories recursively. preserve_times: ``False`` Preserve ``mtime`` and ``atime`` of transferred files and directories. hostname The hostname of the remote device. port: ``22`` The port of the remote device. username The username required for SSH authentication on the device. password Used for password authentication. It is also used for private key decryption if ``passphrase`` is not given. 
passphrase Used for decrypting private keys. pkey An optional private key to use for authentication. key_filename The filename, or list of filenames, of optional private key(s) and/or certificates to try for authentication. timeout An optional timeout (in seconds) for the TCP connect. socket_timeout: ``10`` The channel socket timeout in seconds. buff_size: ``16384`` The size of the SCP send buffer. allow_agent: ``True`` Set to ``False`` to disable connecting to the SSH agent. look_for_keys: ``True`` Set to ``False`` to disable searching for discoverable private key files in ``~/.ssh/`` banner_timeout An optional timeout (in seconds) to wait for the SSH banner to be presented. auth_timeout An optional timeout (in seconds) to wait for an authentication response. auto_add_policy: ``False`` Automatically add the host to the ``known_hosts``. CLI Example: .. code-block:: bash salt '*' scp.put /path/to/file /var/tmp/file hostname=server1 auto_add_policy=True """ scp_client = _prepare_connection(**kwargs) put_kwargs = {"recursive": recursive, "preserve_times": preserve_times} if remote_path: put_kwargs["remote_path"] = remote_path cached_files = [] if not isinstance(files, (list, tuple)): files = [files] for file_ in files: cached_file = __salt__["cp.cache_file"](file_, saltenv=saltenv) cached_files.append(cached_file) return scp_client.put(cached_files, **put_kwargs)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/scp_mod.py
0.610221
0.161353
scp_mod.py
pypi
import logging import salt.utils.path log = logging.getLogger(__name__) def __virtual__(): """ Only works with systemd or on supported POSIX-like systems """ if salt.utils.path.which("localectl") or __grains__["os_family"] in ( "RedHat", "Debian", "Gentoo", ): return True return ( False, "The keyboard exeuction module cannot be loaded: " "only works on Redhat, Debian or Gentoo systems or if localectl binary in" " path.", ) def get_sys(): """ Get current system keyboard setting CLI Example: .. code-block:: bash salt '*' keyboard.get_sys """ cmd = "" if salt.utils.path.which("localectl"): cmd = 'localectl | grep Keymap | sed -e"s/: /=/" -e"s/^[ \t]*//"' elif "RedHat" in __grains__["os_family"]: cmd = 'grep LAYOUT /etc/sysconfig/keyboard | grep -vE "^#"' elif "Debian" in __grains__["os_family"]: cmd = 'grep XKBLAYOUT /etc/default/keyboard | grep -vE "^#"' elif "Gentoo" in __grains__["os_family"]: cmd = 'grep "^keymap" /etc/conf.d/keymaps | grep -vE "^#"' out = __salt__["cmd.run"](cmd, python_shell=True).split("=") ret = out[1].replace('"', "") return ret def set_sys(layout): """ Set current system keyboard setting CLI Example: .. code-block:: bash salt '*' keyboard.set_sys dvorak """ if salt.utils.path.which("localectl"): __salt__["cmd.run"]("localectl set-keymap {}".format(layout)) elif "RedHat" in __grains__["os_family"]: __salt__["file.sed"]( "/etc/sysconfig/keyboard", "^LAYOUT=.*", "LAYOUT={}".format(layout) ) elif "Debian" in __grains__["os_family"]: __salt__["file.sed"]( "/etc/default/keyboard", "^XKBLAYOUT=.*", "XKBLAYOUT={}".format(layout) ) elif "Gentoo" in __grains__["os_family"]: __salt__["file.sed"]( "/etc/conf.d/keymaps", "^keymap=.*", "keymap={}".format(layout) ) return layout def get_x(): """ Get current X keyboard setting CLI Example: .. 
code-block:: bash salt '*' keyboard.get_x """ cmd = "setxkbmap -query | grep layout" out = __salt__["cmd.run"](cmd, python_shell=True).split(":") return out[1].strip() def set_x(layout): """ Set current X keyboard setting CLI Example: .. code-block:: bash salt '*' keyboard.set_x dvorak """ cmd = "setxkbmap {}".format(layout) __salt__["cmd.run"](cmd) return layout
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/keyboard.py
0.421314
0.161684
keyboard.py
pypi
import logging import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = "macdefaults" def __virtual__(): """ Only work on Mac OS """ if salt.utils.platform.is_darwin(): return __virtualname__ return False def write(domain, key, value, type="string", user=None): """ Write a default to the system CLI Example: .. code-block:: bash salt '*' macdefaults.write com.apple.CrashReporter DialogType Server salt '*' macdefaults.write NSGlobalDomain ApplePersistence True type=bool domain The name of the domain to write to key The key of the given domain to write to value The value to write to the given key type The type of value to be written, valid types are string, data, int[eger], float, bool[ean], date, array, array-add, dict, dict-add user The user to write the defaults to """ if type == "bool" or type == "boolean": if value is True: value = "TRUE" elif value is False: value = "FALSE" cmd = 'defaults write "{}" "{}" -{} "{}"'.format(domain, key, type, value) return __salt__["cmd.run_all"](cmd, runas=user) def read(domain, key, user=None): """ Read a default from the system CLI Example: .. code-block:: bash salt '*' macdefaults.read com.apple.CrashReporter DialogType salt '*' macdefaults.read NSGlobalDomain ApplePersistence domain The name of the domain to read from key The key of the given domain to read from user The user to read the defaults as """ cmd = 'defaults read "{}" "{}"'.format(domain, key) return __salt__["cmd.run"](cmd, runas=user) def delete(domain, key, user=None): """ Delete a default from the system CLI Example: .. code-block:: bash salt '*' macdefaults.delete com.apple.CrashReporter DialogType salt '*' macdefaults.delete NSGlobalDomain ApplePersistence domain The name of the domain to delete from key The key of the given domain to delete user The user to delete the defaults with """ cmd = 'defaults delete "{}" "{}"'.format(domain, key) return __salt__["cmd.run_all"](cmd, runas=user, output_loglevel="debug")
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/macdefaults.py
0.492188
0.214733
macdefaults.py
pypi
import logging

CAN_USE_NAMECHEAP = True

try:
    import salt.utils.namecheap
except ImportError:
    CAN_USE_NAMECHEAP = False

log = logging.getLogger(__name__)


def __virtual__():
    """
    Check to make sure requests and xml are installed and requests
    """
    if CAN_USE_NAMECHEAP:
        return "namecheap_domains"
    return False


def reactivate(domain_name):
    """
    Try to reactivate the expired domain name

    Returns the following information:

    - Whether or not the domain was reactivated successfully
    - The amount charged for reactivation
    - The order ID
    - The transaction ID

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' namecheap_domains.reactivate my-domain-name
    """
    opts = salt.utils.namecheap.get_opts("namecheap.domains.reactivate")
    opts["DomainName"] = domain_name

    response_xml = salt.utils.namecheap.post_request(opts)
    if response_xml is None:
        return {}

    domainreactivateresult = response_xml.getElementsByTagName(
        "DomainReactivateResult"
    )[0]
    return salt.utils.namecheap.xml_to_dict(domainreactivateresult)


def renew(domain_name, years, promotion_code=None):
    """
    Try to renew the specified expiring domain name for a specified number
    of years

    domain_name
        The domain name to be renewed

    years
        Number of years to renew

    Returns the following information:

    - Whether or not the domain was renewed successfully
    - The domain ID
    - The order ID
    - The transaction ID
    - The amount charged for renewal

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' namecheap_domains.renew my-domain-name 5
    """
    opts = salt.utils.namecheap.get_opts("namecheap.domains.renew")
    opts["DomainName"] = domain_name
    opts["Years"] = years

    if promotion_code is not None:
        opts["PromotionCode"] = promotion_code

    response_xml = salt.utils.namecheap.post_request(opts)
    if response_xml is None:
        return {}

    domainrenewresult = response_xml.getElementsByTagName("DomainRenewResult")[0]
    return salt.utils.namecheap.xml_to_dict(domainrenewresult)


def create(domain_name, years, **kwargs):
    """
    Try to register the specified domain name

    domain_name
        The domain name to be registered

    years
        Number of years to register

    Returns the following information:

    - Whether or not the domain was renewed successfully
    - Whether or not WhoisGuard is enabled
    - Whether or not registration is instant
    - The amount charged for registration
    - The domain ID
    - The order ID
    - The transaction ID

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' namecheap_domains.create my-domain-name 2
    """
    # Valid ISO 639-2 style language codes accepted by the API's IdnCode
    # parameter for internationalized domain names.
    idn_codes = (
        "afr", "alb", "ara", "arg", "arm", "asm", "ast", "ave", "awa", "aze",
        "bak", "bal", "ban", "baq", "bas", "bel", "ben", "bho", "bos", "bul",
        "bur", "car", "cat", "che", "chi", "chv", "cop", "cos", "cze", "dan",
        "div", "doi", "dut", "eng", "est", "fao", "fij", "fin", "fre", "fry",
        "geo", "ger", "gla", "gle", "gon", "gre", "guj", "heb", "hin", "hun",
        "inc", "ind", "inh", "isl", "ita", "jav", "jpn", "kas", "kaz", "khm",
        "kir", "kor", "kur", "lao", "lav", "lit", "ltz", "mal", "mkd", "mlt",
        "mol", "mon", "mri", "msa", "nep", "nor", "ori", "oss", "pan", "per",
        "pol", "por", "pus", "raj", "rum", "rus", "san", "scr", "sin", "slo",
        "slv", "smo", "snd", "som", "spa", "srd", "srp", "swa", "swe", "syr",
        "tam", "tel", "tgk", "tha", "tib", "tur", "ukr", "urd", "uzb", "vie",
        "wel", "yid",
    )

    require_opts = [
        "AdminAddress1",
        "AdminCity",
        "AdminCountry",
        "AdminEmailAddress",
        "AdminFirstName",
        "AdminLastName",
        "AdminPhone",
        "AdminPostalCode",
        "AdminStateProvince",
        "AuxBillingAddress1",
        "AuxBillingCity",
        "AuxBillingCountry",
        "AuxBillingEmailAddress",
        "AuxBillingFirstName",
        "AuxBillingLastName",
        "AuxBillingPhone",
        "AuxBillingPostalCode",
        "AuxBillingStateProvince",
        "RegistrantAddress1",
        "RegistrantCity",
        "RegistrantCountry",
        "RegistrantEmailAddress",
        "RegistrantFirstName",
        "RegistrantLastName",
        "RegistrantPhone",
        "RegistrantPostalCode",
        "RegistrantStateProvince",
        "TechAddress1",
        "TechCity",
        "TechCountry",
        "TechEmailAddress",
        "TechFirstName",
        "TechLastName",
        "TechPhone",
        "TechPostalCode",
        "TechStateProvince",
        "Years",
    ]
    opts = salt.utils.namecheap.get_opts("namecheap.domains.create")
    opts["DomainName"] = domain_name
    opts["Years"] = str(years)

    def add_to_opts(opts_dict, kwargs, value, suffix, prefices):
        # Copy a contact field (e.g. RegistrantCity) to every sibling contact
        # type (Tech/Admin/...) that was not explicitly supplied by the caller.
        for prefix in prefices:
            nextkey = prefix + suffix
            if nextkey not in kwargs:
                opts_dict[nextkey] = value

    for key, value in kwargs.items():
        if key.startswith("Registrant"):
            add_to_opts(
                opts,
                kwargs,
                value,
                key[10:],
                ["Tech", "Admin", "AuxBilling", "Billing"],
            )

        if key.startswith("Tech"):
            add_to_opts(
                opts,
                kwargs,
                value,
                key[4:],
                ["Registrant", "Admin", "AuxBilling", "Billing"],
            )

        if key.startswith("Admin"):
            add_to_opts(
                opts,
                kwargs,
                value,
                key[5:],
                ["Registrant", "Tech", "AuxBilling", "Billing"],
            )

        if key.startswith("AuxBilling"):
            add_to_opts(
                opts,
                kwargs,
                value,
                key[10:],
                ["Registrant", "Tech", "Admin", "Billing"],
            )

        if key.startswith("Billing"):
            add_to_opts(
                opts,
                kwargs,
                value,
                key[7:],
                ["Registrant", "Tech", "Admin", "AuxBilling"],
            )

        # BUG FIX: the original tested ``key not in idn_codes`` which compared
        # the literal string "IdnCode" against the code list, so *every*
        # IdnCode value was rejected.  Validate the supplied value instead.
        if key == "IdnCode" and value not in idn_codes:
            log.error("Invalid IdnCode")
            raise Exception("Invalid IdnCode")

        opts[key] = value

    for requiredkey in require_opts:
        if requiredkey not in opts:
            log.error("Missing required parameter '%s'", requiredkey)
            raise Exception("Missing required parameter '" + requiredkey + "'")

    response_xml = salt.utils.namecheap.post_request(opts)
    if response_xml is None:
        return {}

    domainresult = response_xml.getElementsByTagName("DomainCreateResult")[0]
    return salt.utils.namecheap.atts_to_dict(domainresult)


def check(*domains_to_check):
    """
    Checks the availability of domains

    domains_to_check
        array of strings  List of domains to check

    Returns a dictionary mapping the each domain name to a boolean denoting
    whether or not it is available.

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' namecheap_domains.check domain-to-check
    """
    opts = salt.utils.namecheap.get_opts("namecheap.domains.check")
    opts["DomainList"] = ",".join(domains_to_check)

    response_xml = salt.utils.namecheap.get_request(opts)
    if response_xml is None:
        return {}

    domains_checked = {}
    for result in response_xml.getElementsByTagName("DomainCheckResult"):
        available = result.getAttribute("Available")
        domains_checked[
            result.getAttribute("Domain").lower()
        ] = salt.utils.namecheap.string_to_value(available)

    return domains_checked


def get_info(domain_name):
    """
    Returns information about the requested domain

    returns a dictionary of information about the domain_name

    domain_name
        string  Domain name to get information about

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' namecheap_domains.get_info my-domain-name
    """
    opts = salt.utils.namecheap.get_opts("namecheap.domains.getinfo")
    opts["DomainName"] = domain_name

    response_xml = salt.utils.namecheap.get_request(opts)
    if response_xml is None:
        return []

    domaingetinforesult = response_xml.getElementsByTagName("DomainGetInfoResult")[0]
    return salt.utils.namecheap.xml_to_dict(domaingetinforesult)


def get_tld_list():
    """
    Returns a list of TLDs as objects

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' namecheap_domains.get_tld_list
    """
    response_xml = salt.utils.namecheap.get_request(
        salt.utils.namecheap.get_opts("namecheap.domains.gettldlist")
    )
    if response_xml is None:
        return []

    tldresult = response_xml.getElementsByTagName("Tlds")[0]
    tlds = []

    for e in tldresult.getElementsByTagName("Tld"):
        tld = salt.utils.namecheap.atts_to_dict(e)
        tld["data"] = e.firstChild.data
        categories = []
        subcategories = e.getElementsByTagName("Categories")[0]
        for c in subcategories.getElementsByTagName("TldCategory"):
            categories.append(salt.utils.namecheap.atts_to_dict(c))
        tld["categories"] = categories
        tlds.append(tld)

    return tlds


def get_list(list_type=None, search_term=None, page=None, page_size=None, sort_by=None):
    """
    Returns a list of domains for the particular user as a list of objects
    offset by ``page`` length of ``page_size``

    list_type : ALL
        One of ``ALL``, ``EXPIRING``, ``EXPIRED``

    search_term
        Keyword to look for on the domain list

    page : 1
        Number of result page to return

    page_size : 20
        Number of domains to be listed per page (minimum: ``10``, maximum:
        ``100``)

    sort_by
        One of ``NAME``, ``NAME_DESC``, ``EXPIREDATE``, ``EXPIREDATE_DESC``,
        ``CREATEDATE``, or ``CREATEDATE_DESC``

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' namecheap_domains.get_list
    """
    opts = salt.utils.namecheap.get_opts("namecheap.domains.getList")

    if list_type is not None:
        if list_type not in ["ALL", "EXPIRING", "EXPIRED"]:
            log.error("Invalid option for list_type")
            raise Exception("Invalid option for list_type")
        opts["ListType"] = list_type

    if search_term is not None:
        if len(search_term) > 70:
            log.warning("search_term trimmed to first 70 characters")
            search_term = search_term[0:70]
        opts["SearchTerm"] = search_term

    if page is not None:
        opts["Page"] = page

    if page_size is not None:
        if page_size > 100 or page_size < 10:
            # BUG FIX: the original reported "Invalid option for page" here,
            # which pointed the user at the wrong parameter.
            log.error("Invalid option for page_size")
            raise Exception("Invalid option for page_size")
        opts["PageSize"] = page_size

    if sort_by is not None:
        if sort_by not in [
            "NAME",
            "NAME_DESC",
            "EXPIREDATE",
            "EXPIREDATE_DESC",
            "CREATEDATE",
            "CREATEDATE_DESC",
        ]:
            log.error("Invalid option for sort_by")
            raise Exception("Invalid option for sort_by")
        opts["SortBy"] = sort_by

    response_xml = salt.utils.namecheap.get_request(opts)
    if response_xml is None:
        return []

    domainresult = response_xml.getElementsByTagName("DomainGetListResult")[0]
    domains = []
    for d in domainresult.getElementsByTagName("Domain"):
        domains.append(salt.utils.namecheap.atts_to_dict(d))

    return domains
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/namecheap_domains.py
0.596903
0.219149
namecheap_domains.py
pypi
import os


def running_service_owners(
    exclude=("/dev", "/home", "/media", "/proc", "/run", "/sys/", "/tmp", "/var")
):
    """
    Determine which packages own the currently running services. By default,
    excludes files whose full path starts with ``/dev``, ``/home``, ``/media``,
    ``/proc``, ``/run``, ``/sys``, ``/tmp`` and ``/var``. This can be
    overridden by passing in a new list to ``exclude``.

    CLI Example:

    .. code-block:: bash

        salt myminion introspect.running_service_owners
    """
    error = {}
    if "pkg.owner" not in __salt__:
        error["Unsupported Package Manager"] = (
            "The module for the package manager on this system does not "
            "support looking up which package(s) owns which file(s)"
        )

    if "file.open_files" not in __salt__:
        error["Unsupported File Module"] = (
            "The file module on this system does not "
            "support looking up open files on the system"
        )

    if error:
        return {"Error": error}

    ret = {}
    open_files = __salt__["file.open_files"]()
    execs = __salt__["service.execs"]()

    for path in open_files:
        # Skip anything under an excluded prefix, and anything that is not
        # executable (service binaries must be).
        if any(path.startswith(prefix) for prefix in exclude):
            continue
        if not os.access(path, os.X_OK):
            continue

        for service, exec_path in execs.items():
            if path == exec_path:
                pkg = __salt__["pkg.owner"](path)
                ret[service] = next(iter(pkg.values()))

    return ret


def enabled_service_owners():
    """
    Return which packages own each of the services that are currently enabled.

    CLI Example:

    .. code-block:: bash

        salt myminion introspect.enabled_service_owners
    """
    error = {}
    if "pkg.owner" not in __salt__:
        error["Unsupported Package Manager"] = (
            "The module for the package manager on this system does not "
            "support looking up which package(s) owns which file(s)"
        )

    if "service.show" not in __salt__:
        error["Unsupported Service Manager"] = (
            "The module for the service manager on this system does not "
            "support showing descriptive service data"
        )

    if error:
        return {"Error": error}

    ret = {}
    for service in __salt__["service.get_enabled"]():
        data = __salt__["service.show"](service)
        # Services without an ExecStart entry cannot be mapped to a binary.
        if "ExecStart" not in data:
            continue
        start_cmd = data["ExecStart"]["path"]
        pkg = __salt__["pkg.owner"](start_cmd)
        ret[service] = next(iter(pkg.values()))

    return ret


def service_highstate(requires=True):
    """
    Return running and enabled services in a highstate structure. By default
    also returns package dependencies for those services, which means that
    package definitions must be created outside this function. To drop the
    package dependencies, set ``requires`` to False.

    CLI Example:

    .. code-block:: bash

        salt myminion introspect.service_highstate
        salt myminion introspect.service_highstate requires=False
    """
    ret = {}

    running = running_service_owners()
    for service, owner in running.items():
        ret[service] = {"service": ["running"]}
        if requires:
            ret[service]["service"].append({"require": {"pkg": owner}})

    enabled = enabled_service_owners()
    for service, owner in enabled.items():
        if service in ret:
            ret[service]["service"].append({"enabled": True})
        else:
            ret[service] = {"service": [{"enabled": True}]}

        if requires:
            # Only add a pkg requirement if one was not already attached by
            # the running-services pass above.
            has_require = any(
                isinstance(item, dict) and next(iter(item.keys())) == "require"
                for item in ret[service]["service"]
            )
            if not has_require:
                ret[service]["service"].append({"require": {"pkg": owner}})

    return ret
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/introspect.py
0.545286
0.249068
introspect.py
pypi
import fnmatch
import logging
import os
import re

import salt.utils.data
import salt.utils.files

log = logging.getLogger(__name__)

# XXX enable/disable support would be nice

# Define the module's virtual name
__virtualname__ = "service"

__func_alias__ = {"reload_": "reload"}


def __virtual__():
    """
    Only work on OpenBSD
    """
    if __grains__["os"] == "OpenBSD" and os.path.exists("/etc/rc.d/rc.subr"):
        krel = [int(part) for part in __grains__["kernelrelease"].split(".")]
        # The -f flag, used to force a script to run even if disabled,
        # was added after the 5.0 release.
        # the rcctl(8) command is the preferred way to manage services.
        if krel[0] > 5 or (krel[0] == 5 and krel[1] > 0):
            if not os.path.exists("/usr/sbin/rcctl"):
                return __virtualname__
    return (
        False,
        "The openbsdservice execution module cannot be loaded: "
        "only available on OpenBSD systems.",
    )


def start(name):
    """
    Start the specified service

    CLI Example:

    .. code-block:: bash

        salt '*' service.start <service name>
    """
    return not __salt__["cmd.retcode"]("/etc/rc.d/{} -f start".format(name))


def stop(name):
    """
    Stop the specified service

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop <service name>
    """
    return not __salt__["cmd.retcode"]("/etc/rc.d/{} -f stop".format(name))


def restart(name):
    """
    Restart the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart <service name>
    """
    return not __salt__["cmd.retcode"]("/etc/rc.d/{} -f restart".format(name))


def status(name, sig=None):
    """
    Return the status for a service.
    If the name contains globbing, a dict mapping service name to True/False
    values is returned.

    .. versionchanged:: 2018.3.0
        The service name can now be a glob (e.g. ``salt*``)

    Args:
        name (str): The name of the service to check
        sig (str): Signature to use to find the service via ps

    Returns:
        bool: True if running, False otherwise
        dict: Maps service name to True if running, False otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.status <service name> [service signature]
    """
    if sig:
        return bool(__salt__["status.pid"](sig))

    uses_glob = bool(re.search(r"\*|\?|\[.+\]", name))
    targets = fnmatch.filter(get_all(), name) if uses_glob else [name]

    results = {}
    for svc in targets:
        cmd = "/etc/rc.d/{} -f check".format(svc)
        results[svc] = not __salt__["cmd.retcode"](cmd, ignore_retcode=True)

    if uses_glob:
        return results
    return results[name]


def reload_(name):
    """
    .. versionadded:: 2014.7.0

    Reload the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.reload <service name>
    """
    return not __salt__["cmd.retcode"]("/etc/rc.d/{} -f reload".format(name))


service_flags_regex = re.compile(r"^\s*(\w[\d\w]*)_flags=(?:(NO)|.*)$")
pkg_scripts_regex = re.compile(r"^\s*pkg_scripts=\'(.*)\'$")
start_daemon_call_regex = re.compile(r"(\s*start_daemon(?!\(\)))")
start_daemon_parameter_regex = re.compile(r"(?:\s+(\w[\w\d]*))")


def _get_rc():
    """
    Returns a dict where the key is the daemon's name and the value a boolean
    indicating its status (True: enabled or False: disabled).
    Check the daemons started by the system in /etc/rc and
    configured in /etc/rc.conf and /etc/rc.conf.local.
    Also add to the dict all the localy enabled daemons via $pkg_scripts.
    """
    daemons_flags = {}

    try:
        # now read the system startup script /etc/rc
        # to know what are the system enabled daemons
        with salt.utils.files.fopen("/etc/rc", "r") as handle:
            lines = salt.utils.data.decode(handle.readlines())
    except OSError:
        log.error("Unable to read /etc/rc")
    else:
        for line in lines:
            match = start_daemon_call_regex.match(line)
            if not match:
                continue
            # the matched line is a call to start_daemon(); strip the
            # function name and mark every daemon parameter as enabled
            remainder = line[len(match.group(1)) :]
            for daemon in start_daemon_parameter_regex.findall(remainder):
                daemons_flags[daemon] = True

    # this will execute rc.conf and rc.conf.local
    # used in /etc/rc at boot to start the daemons
    variables = __salt__["cmd.run"](
        "(. /etc/rc.conf && set)",
        clean_env=True,
        output_loglevel="quiet",
        python_shell=True,
    ).split("\n")

    for var in variables:
        flags_match = service_flags_regex.match(var)
        if flags_match:
            # the matched var looks like daemon_name_flags=; NO means
            # disabled, everything else enabled.  Do not create a new key if
            # the service wasn't found in /etc/rc, see $pkg_scripts
            if flags_match.group(2) == "NO":
                daemons_flags[flags_match.group(1)] = False
        else:
            scripts_match = pkg_scripts_regex.match(var)
            if scripts_match:
                # pkg_scripts lists locally enabled daemons that weren't
                # hand started via /etc/rc; mark each as enabled
                for daemon in scripts_match.group(1).split():
                    daemons_flags[daemon] = True

    return daemons_flags


def available(name):
    """
    .. versionadded:: 2014.7.0

    Returns ``True`` if the specified service is available, otherwise returns
    ``False``.

    CLI Example:

    .. code-block:: bash

        salt '*' service.available sshd
    """
    script = "/etc/rc.d/{}".format(name)
    return os.path.isfile(script) and os.access(script, os.X_OK)


def missing(name):
    """
    .. versionadded:: 2014.7.0

    The inverse of service.available.
    Returns ``True`` if the specified service is not available, otherwise
    returns ``False``.

    CLI Example:

    .. code-block:: bash

        salt '*' service.missing sshd
    """
    return not available(name)


def get_all():
    """
    .. versionadded:: 2014.7.0

    Return all available boot services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    """
    if not os.path.isdir("/etc/rc.d"):
        return []
    # available() filters out rc.subr and all non-executable files
    return sorted(svc for svc in os.listdir("/etc/rc.d") if available(svc))


def get_enabled():
    """
    .. versionadded:: 2014.7.0

    Return a list of service that are enabled on boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_enabled
    """
    enabled_daemons = [name for name, on in _get_rc().items() if on]
    return sorted(set(get_all()) & set(enabled_daemons))


def enabled(name, **kwargs):
    """
    .. versionadded:: 2014.7.0

    Return True if the named service is enabled, false otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled <service name>
    """
    return name in get_enabled()


def get_disabled():
    """
    .. versionadded:: 2014.7.0

    Return a set of services that are installed but disabled

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_disabled
    """
    disabled_daemons = [name for name, on in _get_rc().items() if not on]
    return sorted(set(get_all()) & set(disabled_daemons))


def disabled(name):
    """
    .. versionadded:: 2014.7.0

    Return True if the named service is disabled, false otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled <service name>
    """
    return name in get_disabled()
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/openbsdservice.py
0.55941
0.153486
openbsdservice.py
pypi
import logging import os import pprint import shlex import uuid import salt.syspaths import salt.utils.kickstart import salt.utils.path import salt.utils.preseed import salt.utils.stringutils import salt.utils.validate.path import salt.utils.yast from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) CMD_MAP = { "yum": ("yum", "rpm"), "deb": ("debootstrap",), "pacman": ("pacman",), } EPEL_URL = ( "http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm" ) def __virtual__(): """ By default, this will be available on all platforms; but not all distros will necessarily be supported """ return True def bootstrap( platform, root, img_format="dir", fs_format="ext2", fs_opts=None, arch=None, flavor=None, repo_url=None, static_qemu=None, img_size=None, mount_dir=None, pkg_cache=None, pkgs=None, exclude_pkgs=None, epel_url=EPEL_URL, ): """ Create an image for a specific platform. Please note that this function *MUST* be run as root, as images that are created make files belonging to root. platform Which platform to use to create the image. Currently supported platforms are rpm, deb and pacman. root Local path to create the root of the image filesystem. img_format Which format to create the image in. By default, just copies files into a directory on the local filesystem (``dir``). Future support will exist for ``sparse``. fs_format When using a non-``dir`` ``img_format``, which filesystem to format the image to. By default, ``ext2``. fs_opts When using a non-``dir`` ``img_format``, a dict of opts may be specified. arch Architecture to install packages for, if supported by the underlying bootstrap tool. Currently only used for deb. flavor Which flavor of operating system to install. This correlates to a specific directory on the distribution repositories. For instance, ``wheezy`` on Debian. repo_url Mainly important for Debian-based repos. Base URL for the mirror to install from. 
(e.x.: http://ftp.debian.org/debian/) static_qemu Local path to the static qemu binary required for this arch. (e.x.: /usr/bin/qemu-amd64-static) pkg_confs The location of the conf files to copy into the image, to point the installer to the right repos and configuration. img_size If img_format is not ``dir``, then the size of the image must be specified. mount_dir If img_format is not ``dir``, then the image must be mounted somewhere. If the ``mount_dir`` is not specified, then it will be created at ``/opt/salt-genesis.<random_uuid>``. This directory will be unmounted and removed when the process is finished. pkg_cache This points to a directory containing a cache of package files to be copied to the image. It does not need to be specified. pkgs A list of packages to be installed on this image. For RedHat, this will include ``yum``, ``centos-release`` and ``iputils`` by default. exclude_pkgs A list of packages to be excluded. If you do not want to install the defaults, you need to include them in this list. epel_url The URL to download the EPEL release package from. CLI Examples: .. 
code-block:: bash salt myminion genesis.bootstrap pacman /root/arch salt myminion genesis.bootstrap rpm /root/redhat salt myminion genesis.bootstrap deb /root/wheezy arch=amd64 \ flavor=wheezy static_qemu=/usr/bin/qemu-x86_64-static """ if img_format not in ("dir", "sparse"): raise SaltInvocationError('The img_format must be "sparse" or "dir"') if img_format == "dir": # We can just use the root as the root if not __salt__["file.directory_exists"](root): try: __salt__["file.mkdir"](root) except Exception as exc: # pylint: disable=broad-except return {"Error": salt.utils.stringutils.to_unicode(pprint.pformat(exc))} elif img_format == "sparse": if not img_size: raise SaltInvocationError("An img_size must be specified for a sparse file") if not mount_dir: mount_dir = "/opt/salt-genesis.{}".format(uuid.uuid4()) __salt__["file.mkdir"](mount_dir, "root", "root", "755") __salt__["cmd.run"](("fallocate", "-l", img_size, root), python_shell=False) _mkpart(root, fs_format, fs_opts, mount_dir) loop1 = __salt__["cmd.run"]("losetup -f") log.debug("First loop device is %s", loop1) __salt__["cmd.run"]("losetup {} {}".format(loop1, root)) loop2 = __salt__["cmd.run"]("losetup -f") log.debug("Second loop device is %s", loop2) start = str(2048 * 2048) __salt__["cmd.run"]("losetup -o {} {} {}".format(start, loop2, loop1)) __salt__["mount.mount"](mount_dir, loop2) _populate_cache(platform, pkg_cache, mount_dir) if mount_dir: root = mount_dir if pkgs is None: pkgs = [] if exclude_pkgs is None: exclude_pkgs = [] if platform in ("rpm", "yum"): _bootstrap_yum( root, pkgs=pkgs, exclude_pkgs=exclude_pkgs, epel_url=epel_url, ) elif platform == "deb": _bootstrap_deb( root, arch=arch, flavor=flavor, repo_url=repo_url, static_qemu=static_qemu, pkgs=pkgs, exclude_pkgs=exclude_pkgs, ) elif platform == "pacman": _bootstrap_pacman( root, img_format=img_format, pkgs=pkgs, exclude_pkgs=exclude_pkgs, ) if img_format != "dir": blkinfo = __salt__["disk.blkid"](loop2) __salt__["file.replace"]( 
"{}/boot/grub/grub.cfg".format(mount_dir), "ad4103fa-d940-47ca-8506-301d8071d467", # This seems to be the default blkinfo[loop2]["UUID"], ) __salt__["mount.umount"](root) __salt__["cmd.run"]("losetup -d {}".format(loop2)) __salt__["cmd.run"]("losetup -d {}".format(loop1)) __salt__["file.rmdir"](mount_dir) def _mkpart(root, fs_format, fs_opts, mount_dir): """ Make a partition, and make it bootable .. versionadded:: 2015.8.0 """ __salt__["partition.mklabel"](root, "msdos") loop1 = __salt__["cmd.run"]("losetup -f") log.debug("First loop device is %s", loop1) __salt__["cmd.run"]("losetup {} {}".format(loop1, root)) part_info = __salt__["partition.list"](loop1) start = str(2048 * 2048) + "B" end = part_info["info"]["size"] __salt__["partition.mkpart"](loop1, "primary", start=start, end=end) __salt__["partition.set"](loop1, "1", "boot", "on") part_info = __salt__["partition.list"](loop1) loop2 = __salt__["cmd.run"]("losetup -f") log.debug("Second loop device is %s", loop2) start = start.rstrip("B") __salt__["cmd.run"]("losetup -o {} {} {}".format(start, loop2, loop1)) _mkfs(loop2, fs_format, fs_opts) __salt__["mount.mount"](mount_dir, loop2) __salt__["cmd.run"]( ( "grub-install", "--target=i386-pc", "--debug", "--no-floppy", "--modules=part_msdos linux", "--boot-directory={}/boot".format(mount_dir), loop1, ), python_shell=False, ) __salt__["mount.umount"](mount_dir) __salt__["cmd.run"]("losetup -d {}".format(loop2)) __salt__["cmd.run"]("losetup -d {}".format(loop1)) return part_info def _mkfs(root, fs_format, fs_opts=None): """ Make a filesystem using the appropriate module .. 
versionadded:: 2015.8.0 """ if fs_opts is None: fs_opts = {} if fs_format in ("ext2", "ext3", "ext4"): __salt__["extfs.mkfs"](root, fs_format, **fs_opts) elif fs_format in ("btrfs",): __salt__["btrfs.mkfs"](root, **fs_opts) elif fs_format in ("xfs",): __salt__["xfs.mkfs"](root, **fs_opts) def _populate_cache(platform, pkg_cache, mount_dir): """ If a ``pkg_cache`` directory is specified, then use it to populate the disk image. """ if not pkg_cache: return if not os.path.isdir(pkg_cache): return if platform == "pacman": cache_dir = "{}/var/cache/pacman/pkg".format(mount_dir) __salt__["file.mkdir"](cache_dir, "root", "root", "755") __salt__["file.copy"](pkg_cache, cache_dir, recurse=True, remove_existing=True) def _bootstrap_yum( root, pkg_confs="/etc/yum*", pkgs=None, exclude_pkgs=None, epel_url=EPEL_URL, ): """ Bootstrap an image using the yum tools root The root of the image to install to. Will be created as a directory if it does not exist. (e.x.: /root/arch) pkg_confs The location of the conf files to copy into the image, to point yum to the right repos and configuration. pkgs A list of packages to be installed on this image. For RedHat, this will include ``yum``, ``centos-release`` and ``iputils`` by default. exclude_pkgs A list of packages to be excluded. If you do not want to install the defaults, you need to include them in this list. epel_url The URL to download the EPEL release package from. TODO: Set up a pre-install overlay, to copy files into /etc/ and so on, which are required for the install to work. 
""" if pkgs is None: pkgs = [] elif isinstance(pkgs, str): pkgs = pkgs.split(",") default_pkgs = ("yum", "centos-release", "iputils") for pkg in default_pkgs: if pkg not in pkgs: pkgs.append(pkg) if exclude_pkgs is None: exclude_pkgs = [] elif isinstance(exclude_pkgs, str): exclude_pkgs = exclude_pkgs.split(",") for pkg in exclude_pkgs: pkgs.remove(pkg) _make_nodes(root) release_files = [rf for rf in os.listdir("/etc") if rf.endswith("release")] __salt__["cmd.run"]( "cp /etc/resolv/conf {rfs} {root}/etc".format( root=shlex.quote(root), rfs=" ".join(release_files) ) ) __salt__["cmd.run"]( "cp -r {rfs} {root}/etc".format( root=shlex.quote(root), rfs=" ".join(release_files) ) ) __salt__["cmd.run"]( "cp -r {confs} {root}/etc".format( root=shlex.quote(root), confs=shlex.quote(pkg_confs) ) ) yum_args = [ "yum", "install", "--installroot={}".format(shlex.quote(root)), "-y", ] + pkgs __salt__["cmd.run"](yum_args, python_shell=False) if "epel-release" not in exclude_pkgs: __salt__["cmd.run"]( ("rpm", "--root={}".format(shlex.quote(root)), "-Uvh", epel_url), python_shell=False, ) def _bootstrap_deb( root, arch, flavor, repo_url=None, static_qemu=None, pkgs=None, exclude_pkgs=None, ): """ Bootstrap an image using the Debian tools root The root of the image to install to. Will be created as a directory if it does not exist. (e.x.: /root/wheezy) arch Architecture of the target image. (e.x.: amd64) flavor Flavor of Debian to install. (e.x.: wheezy) repo_url Base URL for the mirror to install from. (e.x.: http://ftp.debian.org/debian/) static_qemu Local path to the static qemu binary required for this arch. (e.x.: /usr/bin/qemu-amd64-static) pkgs A list of packages to be installed on this image. exclude_pkgs A list of packages to be excluded. 
""" if repo_url is None: repo_url = "http://ftp.debian.org/debian/" if not salt.utils.path.which("debootstrap"): log.error("Required tool debootstrap is not installed.") return False if static_qemu and not salt.utils.validate.path.is_executable(static_qemu): log.error("Required tool qemu not present/readable at: %s", static_qemu) return False if isinstance(pkgs, (list, tuple)): pkgs = ",".join(pkgs) if isinstance(exclude_pkgs, (list, tuple)): exclude_pkgs = ",".join(exclude_pkgs) deb_args = ["debootstrap", "--foreign", "--arch", shlex.quote(arch)] if pkgs: deb_args += ["--include", shlex.quote(pkgs)] if exclude_pkgs: deb_args += ["--exclude", shlex.quote(exclude_pkgs)] deb_args += [ shlex.quote(flavor), shlex.quote(root), shlex.quote(repo_url), ] __salt__["cmd.run"](deb_args, python_shell=False) if static_qemu: __salt__["cmd.run"]( "cp {qemu} {root}/usr/bin/".format( qemu=shlex.quote(static_qemu), root=shlex.quote(root) ) ) env = { "DEBIAN_FRONTEND": "noninteractive", "DEBCONF_NONINTERACTIVE_SEEN": "true", "LC_ALL": "C", "LANGUAGE": "C", "LANG": "C", "PATH": "/sbin:/bin:/usr/bin", } __salt__["cmd.run"]( "chroot {root} /debootstrap/debootstrap --second-stage".format( root=shlex.quote(root) ), env=env, ) __salt__["cmd.run"]( "chroot {root} dpkg --configure -a".format(root=shlex.quote(root)), env=env ) def _bootstrap_pacman( root, pkg_confs="/etc/pacman*", img_format="dir", pkgs=None, exclude_pkgs=None, ): """ Bootstrap an image using the pacman tools root The root of the image to install to. Will be created as a directory if it does not exist. (e.x.: /root/arch) pkg_confs The location of the conf files to copy into the image, to point pacman to the right repos and configuration. img_format The image format to be used. The ``dir`` type needs no special treatment, but others need special treatment. pkgs A list of packages to be installed on this image. For Arch Linux, this will include ``pacman``, ``linux``, ``grub``, and ``systemd-sysvcompat`` by default. 
exclude_pkgs A list of packages to be excluded. If you do not want to install the defaults, you need to include them in this list. """ _make_nodes(root) if pkgs is None: pkgs = [] elif isinstance(pkgs, str): pkgs = pkgs.split(",") default_pkgs = ("pacman", "linux", "systemd-sysvcompat", "grub") for pkg in default_pkgs: if pkg not in pkgs: pkgs.append(pkg) if exclude_pkgs is None: exclude_pkgs = [] elif isinstance(exclude_pkgs, str): exclude_pkgs = exclude_pkgs.split(",") for pkg in exclude_pkgs: pkgs.remove(pkg) if img_format != "dir": __salt__["mount.mount"]("{}/proc".format(root), "/proc", fstype="", opts="bind") __salt__["mount.mount"]("{}/dev".format(root), "/dev", fstype="", opts="bind") __salt__["file.mkdir"]( "{}/var/lib/pacman/local".format(root), "root", "root", "755" ) pac_files = [rf for rf in os.listdir("/etc") if rf.startswith("pacman.")] for pac_file in pac_files: __salt__["cmd.run"]("cp -r /etc/{} {}/etc".format(pac_file, shlex.quote(root))) __salt__["file.copy"]( "/var/lib/pacman/sync", "{}/var/lib/pacman/sync".format(root), recurse=True ) pacman_args = ["pacman", "--noconfirm", "-r", shlex.quote(root), "-S"] + pkgs __salt__["cmd.run"](pacman_args, python_shell=False) if img_format != "dir": __salt__["mount.umount"]("{}/proc".format(root)) __salt__["mount.umount"]("{}/dev".format(root)) def _make_nodes(root): """ Make the minimum number of nodes inside of /dev/. 
Based on: https://wiki.archlinux.org/index.php/Linux_Containers """ dirs = ( ("{}/etc".format(root), "root", "root", "755"), ("{}/dev".format(root), "root", "root", "755"), ("{}/proc".format(root), "root", "root", "755"), ("{}/dev/pts".format(root), "root", "root", "755"), ("{}/dev/shm".format(root), "root", "root", "1755"), ) nodes = ( ("{}/dev/null".format(root), "c", 1, 3, "root", "root", "666"), ("{}/dev/zero".format(root), "c", 1, 5, "root", "root", "666"), ("{}/dev/random".format(root), "c", 1, 8, "root", "root", "666"), ("{}/dev/urandom".format(root), "c", 1, 9, "root", "root", "666"), ("{}/dev/tty".format(root), "c", 5, 0, "root", "root", "666"), ("{}/dev/tty0".format(root), "c", 4, 0, "root", "root", "666"), ("{}/dev/console".format(root), "c", 5, 1, "root", "root", "600"), ("{}/dev/full".format(root), "c", 1, 7, "root", "root", "666"), ("{}/dev/initctl".format(root), "p", 0, 0, "root", "root", "600"), ("{}/dev/ptmx".format(root), "c", 5, 2, "root", "root", "666"), ) for path in dirs: __salt__["file.mkdir"](*path) for path in nodes: __salt__["file.mknod"](*path) def avail_platforms(): """ Return which platforms are available CLI Example: .. code-block:: bash salt myminion genesis.avail_platforms """ ret = {} for platform in CMD_MAP: ret[platform] = True for cmd in CMD_MAP[platform]: if not salt.utils.path.which(cmd): ret[platform] = False return ret def pack(name, root, path=None, pack_format="tar", compress="bzip2"): """ Pack up a directory structure, into a specific format CLI Examples: .. code-block:: bash salt myminion genesis.pack centos /root/centos salt myminion genesis.pack centos /root/centos pack_format='tar' """ if pack_format == "tar": _tar(name, root, path, compress) def unpack(name, dest=None, path=None, pack_format="tar", compress="bz2"): """ Unpack an image into a directory structure CLI Example: .. 
code-block:: bash salt myminion genesis.unpack centos /root/centos """ if pack_format == "tar": _untar(name, dest, path, compress) def _tar(name, root, path=None, compress="bzip2"): """ Pack up image in a tar format """ if path is None: path = os.path.join(salt.syspaths.BASE_FILE_ROOTS_DIR, "img") if not __salt__["file.directory_exists"](path): try: __salt__["file.mkdir"](path) except Exception as exc: # pylint: disable=broad-except return {"Error": salt.utils.stringutils.to_unicode(pprint.pformat(exc))} compression, ext = _compress(compress) tarfile = "{}/{}.tar.{}".format(path, name, ext) out = __salt__["archive.tar"]( options="{}pcf".format(compression), tarfile=tarfile, sources=".", dest=root, ) def _untar(name, dest=None, path=None, compress="bz2"): """ Unpack a tarball to be used as a container """ if path is None: path = os.path.join(salt.syspaths.BASE_FILE_ROOTS_DIR, "img") if not dest: dest = path if not __salt__["file.directory_exists"](dest): try: __salt__["file.mkdir"](dest) except Exception as exc: # pylint: disable=broad-except return {"Error": salt.utils.stringutils.to_unicode(pprint.pformat(exc))} compression, ext = _compress(compress) tarfile = "{}/{}.tar.{}".format(path, name, ext) out = __salt__["archive.tar"]( options="{}xf".format(compression), tarfile=tarfile, dest=dest, ) def _compress(compress): """ Resolve compression flags """ if compress in ("bz2", "bzip2", "j"): compression = "j" ext = "bz2" elif compress in ("gz", "gzip", "z"): compression = "z" ext = "gz" elif compress in ("xz", "a", "J"): compression = "J" ext = "xz" return compression, ext def ldd_deps(filename, ret=None): """ Recurse through a set of dependencies reported by ``ldd``, to find associated dependencies. Please note that this does not necessarily resolve all (non-package) dependencies for a file; but it does help. CLI Example: .. 
code-block:: bash salt myminion genesis.ldd_deps bash salt myminion genesis.ldd_deps /bin/bash """ if not os.path.exists(filename): filename = salt.utils.path.which(filename) if ret is None: ret = [] out = __salt__["cmd.run"](("ldd", filename), python_shell=False) for line in out.splitlines(): if not line.strip(): continue dep_path = "" if "=>" in line: comps = line.split(" => ") dep_comps = comps[1].strip().split() if os.path.exists(dep_comps[0]): dep_path = dep_comps[0] else: dep_comps = line.strip().split() if os.path.exists(dep_comps[0]): dep_path = dep_comps[0] if dep_path: if dep_path not in ret: ret.append(dep_path) new_deps = ldd_deps(dep_path, ret) for dep in new_deps: if dep not in ret: ret.append(dep) return ret def mksls(fmt, src, dst=None): """ Convert an installation file/script to an SLS file. Currently supports ``kickstart``, ``preseed``, and ``autoyast``. CLI Examples: .. code-block:: bash salt <minion> genesis.mksls kickstart /path/to/kickstart.cfg salt <minion> genesis.mksls kickstart /path/to/kickstart.cfg /path/to/dest.sls .. versionadded:: 2015.8.0 """ if fmt == "kickstart": return salt.utils.kickstart.mksls(src, dst) elif fmt == "preseed": return salt.utils.preseed.mksls(src, dst) elif fmt == "autoyast": return salt.utils.yast.mksls(src, dst)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/genesis.py
0.505127
0.174129
genesis.py
pypi
import os
import sys
import time

# Explicit import: kill() uses salt.defaults.exitcodes.EX_SOFTWARE /
# EX_TEMPFAIL, which previously resolved only via a transitive import
# through salt.key and would break if that internal dependency changed.
import salt.defaults.exitcodes
import salt.key
import salt.utils.data

# Don't shadow built-ins.
__func_alias__ = {"list_": "list"}


def list_():
    """
    Return a list of accepted, denied, unaccepted and rejected keys.
    This is the same output as `salt-key -L`

    CLI Example:

    .. code-block:: bash

        salt 'master' minion.list
    """
    pki_dir = __salt__["config.get"]("pki_dir", "")

    # We have to replace the minion/master directories
    pki_dir = pki_dir.replace("minion", "master")

    # The source code below is (nearly) a copy of salt.key.Key.list_keys
    key_dirs = _check_minions_directories(pki_dir)

    ret = {}

    for dir_ in key_dirs:
        ret[os.path.basename(dir_)] = []
        try:
            for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(dir_)):
                if not fn_.startswith("."):
                    if os.path.isfile(os.path.join(dir_, fn_)):
                        ret[os.path.basename(dir_)].append(fn_)
        except OSError:
            # key dir kind is not created yet, just skip
            continue

    return ret


def _check_minions_directories(pki_dir):
    """
    Return the minion keys directory paths.

    This function is a copy of salt.key.Key._check_minions_directories.
    """
    minions_accepted = os.path.join(pki_dir, salt.key.Key.ACC)
    minions_pre = os.path.join(pki_dir, salt.key.Key.PEND)
    minions_rejected = os.path.join(pki_dir, salt.key.Key.REJ)
    minions_denied = os.path.join(pki_dir, salt.key.Key.DEN)
    return minions_accepted, minions_pre, minions_rejected, minions_denied


def kill(timeout=15):
    """
    Kill the salt minion.

    timeout
        int seconds to wait for the minion to die.

    If you have a monitor that restarts ``salt-minion`` when it dies then this
    is a great way to restart after a minion upgrade.

    CLI Example:

    .. code-block:: bash

        salt minion[12] minion.kill

        minion1:
            ----------
            killed:
                7874
            retcode:
                0
        minion2:
            ----------
            killed:
                29071
            retcode:
                0

    The result of the salt command shows the process ID of the minions and the
    results of a kill signal to the minion in as the ``retcode`` value: ``0``
    is success, anything else is a failure.
    """
    ret = {
        "killed": None,
        "retcode": 1,
    }
    comment = []
    pid = __grains__.get("pid")
    if not pid:
        comment.append('Unable to find "pid" in grains')
        ret["retcode"] = salt.defaults.exitcodes.EX_SOFTWARE
    elif "ps.kill_pid" not in __salt__:
        comment.append("Missing command: ps.kill_pid")
        ret["retcode"] = salt.defaults.exitcodes.EX_SOFTWARE
    else:
        # The retcode status comes from the first kill signal
        ret["retcode"] = int(not __salt__["ps.kill_pid"](pid))

        # If the signal was successfully delivered then wait for the
        # process to die - check by sending signals until signal delivery
        # fails.
        if ret["retcode"]:
            comment.append("ps.kill_pid failed")
        else:
            for _ in range(timeout):
                time.sleep(1)
                signaled = __salt__["ps.kill_pid"](pid)
                if not signaled:
                    ret["killed"] = pid
                    break
            else:
                # The process did not exit before the timeout
                comment.append("Timed out waiting for minion to exit")
                ret["retcode"] = salt.defaults.exitcodes.EX_TEMPFAIL
    if comment:
        ret["comment"] = comment
    return ret


def restart():
    """
    Kill and restart the salt minion.

    The configuration key ``minion_restart_command`` is an argv list for the
    command to restart the minion. If ``minion_restart_command`` is not
    specified or empty then the ``argv`` of the current process will be used.

    If the configuration value ``minion_restart_command`` is not set and the
    ``-d`` (daemonize) argument is missing from ``argv`` then the minion
    *will* be killed but will *not* be restarted and will require the parent
    process to perform the restart. This behavior is intended for managed
    salt minion processes.

    CLI Example:

    .. code-block:: bash

        salt minion[12] minion.restart

        minion1:
            ----------
            comment:
                - Restart using process argv:
                -     /home/omniture/install/bin/salt-minion
                -     -d
                -     -c
                -     /home/omniture/install/etc/salt
            killed:
                10070
            restart:
                ----------
                stderr:
                stdout:
            retcode:
                0
        minion2:
            ----------
            comment:
                - Using configuration minion_restart_command:
                -     /home/omniture/install/bin/salt-minion
                -     --not-an-option
                -     -d
                -     -c
                -     /home/omniture/install/etc/salt
                - Restart failed
            killed:
                10896
            restart:
                ----------
                stderr:
                    Usage: salt-minion
                    salt-minion: error: no such option: --not-an-option
                stdout:
            retcode:
                64

    The result of the command shows the process ID of ``minion1`` that is
    shutdown (killed) and the results of the restart. If there is a failure
    in the restart it will be reflected in a non-zero ``retcode`` and
    possibly output in the ``stderr`` and/or ``stdout`` values along with
    addition information in the ``comment`` field as is demonstrated with
    ``minion2``.
    """
    should_kill = True
    should_restart = True
    comment = []
    ret = {
        "killed": None,
        "restart": {},
        "retcode": 0,
    }

    restart_cmd = __salt__["config.get"]("minion_restart_command")
    if restart_cmd:
        comment.append("Using configuration minion_restart_command:")
        comment.extend([" {}".format(arg) for arg in restart_cmd])
    else:
        if "-d" in sys.argv:
            restart_cmd = sys.argv
            comment.append("Restart using process argv:")
            comment.extend([" {}".format(arg) for arg in restart_cmd])
        else:
            should_restart = False
            comment.append(
                "Not running in daemon mode - will not restart process after killing"
            )

    if should_kill:
        ret.update(kill())
        if "comment" in ret and ret["comment"]:
            if isinstance(ret["comment"], str):
                comment.append(ret["comment"])
            else:
                comment.extend(ret["comment"])
        if ret["retcode"]:
            comment.append("Kill failed - not restarting")
            should_restart = False

    if should_restart:
        ret["restart"] = __salt__["cmd.run_all"](restart_cmd, env=os.environ)
        # Do not want to mislead users to think that the returned PID from
        # cmd.run_all() is the PID of the new salt minion - just delete the
        # returned PID.
        if "pid" in ret["restart"]:
            del ret["restart"]["pid"]
        if ret["restart"].get("retcode", None):
            comment.append("Restart failed")
            ret["retcode"] = ret["restart"]["retcode"]
        if "retcode" in ret["restart"]:
            # Just want a single retcode
            del ret["restart"]["retcode"]

    if comment:
        ret["comment"] = comment

    return ret
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/minion.py
0.488039
0.229622
minion.py
pypi
import logging

import salt.utils.decorators as decorators
import salt.utils.path
import salt.utils.platform

log = logging.getLogger(__name__)

# Function aliases
__func_alias__ = {
    "list_": "list",
    "get_": "get",
    "put_": "put",
    "delete_": "delete",
}

# Define the module's virtual name
__virtualname__ = "mdata"


@decorators.memoize
def _check_mdata_list():
    """
    looks to see if mdata-list is present on the system
    """
    return salt.utils.path.which("mdata-list")


@decorators.memoize
def _check_mdata_get():
    """
    looks to see if mdata-get is present on the system
    """
    return salt.utils.path.which("mdata-get")


@decorators.memoize
def _check_mdata_put():
    """
    looks to see if mdata-put is present on the system
    """
    return salt.utils.path.which("mdata-put")


@decorators.memoize
def _check_mdata_delete():
    """
    looks to see if mdata-delete is present on the system
    """
    return salt.utils.path.which("mdata-delete")


def __virtual__():
    """
    Provides mdata only on SmartOS
    """
    if _check_mdata_list() and not salt.utils.platform.is_smartos_globalzone():
        return __virtualname__
    return (
        False,
        "{} module can only be loaded on SmartOS zones".format(__virtualname__),
    )


def list_():
    """
    List available metadata

    CLI Example:

    .. code-block:: bash

        salt '*' mdata.list
    """
    mdata = _check_mdata_list()
    if mdata:
        cmd = "{}".format(mdata)
        return __salt__["cmd.run"](cmd, ignore_retcode=True).splitlines()
    return {}


def get_(*keyname):
    """
    Get metadata

    keyname : string
        name of key

    .. note::

        If no keynames are specified, we get all (public) properties

    CLI Example:

    .. code-block:: bash

        salt '*' mdata.get salt:role
        salt '*' mdata.get user-script salt:role
    """
    mdata = _check_mdata_get()
    ret = {}

    if not keyname:
        keyname = list_()

    for k in keyname:
        if mdata:
            cmd = "{} {}".format(mdata, k)
            res = __salt__["cmd.run_all"](cmd, ignore_retcode=True)
            # A non-zero retcode means the key is absent; report it as empty.
            ret[k] = res["stdout"] if res["retcode"] == 0 else ""
        else:
            ret[k] = ""

    return ret


def put_(keyname, val):
    """
    Put metadata

    keyname : string
        name of key
    val : string
        value to set

    CLI Example:

    .. code-block:: bash

        salt '*' mdata.put salt:role salt-minion
    """
    mdata = _check_mdata_put()

    # Bug fix: previously this fell through to ret["retcode"] on an empty
    # dict (KeyError) when the mdata-put tool was not present.
    if not mdata:
        return False

    # NOTE(review): keyname and val are interpolated unquoted into a shell
    # pipeline (python_shell=True); values containing shell metacharacters
    # will be interpreted by the shell. Confirm callers pass trusted input.
    cmd = "echo {2} | {0} {1}".format(mdata, keyname, val)
    ret = __salt__["cmd.run_all"](cmd, python_shell=True, ignore_retcode=True)

    return ret["retcode"] == 0


def delete_(*keyname):
    """
    Delete metadata

    keyname : string
        name of key

    CLI Example:

    .. code-block:: bash

        salt '*' mdata.delete salt:role
        salt '*' mdata.delete user-script salt:role
    """
    mdata = _check_mdata_delete()
    valid_keynames = list_()
    ret = {}

    for k in keyname:
        if mdata and k in valid_keynames:
            cmd = "{} {}".format(mdata, k)
            ret[k] = __salt__["cmd.run_all"](cmd, ignore_retcode=True)["retcode"] == 0
        else:
            # Deleting a key that does not exist is treated as success.
            ret[k] = True

    return ret


# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/mdata.py
0.547222
0.226057
mdata.py
pypi
import logging
import re

import salt.utils.path

log = logging.getLogger(__name__)

# Define the module's virtual name
__virtualname__ = "varnish"


def __virtual__():
    """
    Only load the module if varnish is installed
    """
    if salt.utils.path.which("varnishd") and salt.utils.path.which("varnishadm"):
        return __virtualname__
    return (
        False,
        "The varnish execution module failed to load: either varnishd or varnishadm is"
        " not in the path.",
    )


def _run_varnishadm(cmd, params=(), **kwargs):
    """
    Execute varnishadm command return the output of the command

    cmd
        The command to run in varnishadm

    params
        Any additional args to add to the command line

    kwargs
        Additional options to pass to the salt cmd.run_all function
    """
    cmd = ["varnishadm", cmd]
    # None entries are silently dropped so callers can pass optional params.
    cmd.extend([param for param in params if param is not None])
    log.debug("Executing: %s", " ".join(cmd))
    return __salt__["cmd.run_all"](cmd, python_shell=False, **kwargs)


def version():
    """
    Return server version from varnishd -V

    CLI Example:

    .. code-block:: bash

        salt '*' varnish.version
    """
    cmd = ["varnishd", "-V"]
    out = __salt__["cmd.run"](cmd, python_shell=False)
    ret = re.search(r"\(varnish-([^\)]+)\)", out).group(1)
    return ret


def ban(ban_expression):
    """
    Add ban to the varnish cache

    CLI Example:

    .. code-block:: bash

        salt '*' varnish.ban ban_expression
    """
    return _run_varnishadm("ban", [ban_expression])["retcode"] == 0


def ban_list():
    """
    List varnish cache current bans

    CLI Example:

    .. code-block:: bash

        salt '*' varnish.ban_list
    """
    ret = _run_varnishadm("ban.list")
    if ret["retcode"]:
        return False
    # First line of ban.list output is a header; skip it.
    return ret["stdout"].split("\n")[1:]


def purge():
    """
    Purge the varnish cache

    CLI Example:

    .. code-block:: bash

        salt '*' varnish.purge
    """
    return ban("req.url ~ .")


def param_set(param, value):
    """
    Set a param in varnish cache

    CLI Example:

    .. code-block:: bash

        salt '*' varnish.param_set param value
    """
    return _run_varnishadm("param.set", [param, str(value)])["retcode"] == 0


def param_show(param=None):
    """
    Show params of varnish cache

    CLI Example:

    .. code-block:: bash

        salt '*' varnish.param_show param
    """
    ret = _run_varnishadm("param.show", [param])
    if ret["retcode"]:
        return False
    result = {}
    for line in ret["stdout"].split("\n"):
        m = re.search(r"^(\w+)\s+(.*)$", line)
        if m is None:
            # Bug fix: blank or continuation lines (e.g. the empty element
            # left by a trailing newline) do not match and previously caused
            # an AttributeError on m.group(); skip them instead.
            continue
        result[m.group(1)] = m.group(2)
        if param:
            # When we ask to varnishadm for a specific param, it gives full
            # info on what that parameter is, so we just process the first
            # line and we get out of the loop
            break
    return result
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/varnish.py
0.547706
0.182062
varnish.py
pypi
import time

import salt.utils.http
from salt.exceptions import CommandExecutionError


def query(url, **kwargs):
    """
    .. versionadded:: 2015.5.0

    Query a resource, and decode the return data

    Passes through all the parameters described in the
    :py:func:`utils.http.query function <salt.utils.http.query>`:

    .. autofunction:: salt.utils.http.query

    raise_error : True
        If ``False``, and if a connection cannot be made, the error will be
        suppressed and the body of the return will simply be ``None``.

    CLI Example:

    .. code-block:: bash

        salt '*' http.query http://somelink.com/
        salt '*' http.query http://somelink.com/ method=POST \
            params='{"key1": "val1", "key2": "val2"}'
        salt '*' http.query http://somelink.com/ method=POST \
            data='<xml>somecontent</xml>'
    """
    opts = __opts__.copy()
    if "opts" in kwargs:
        opts.update(kwargs["opts"])
        del kwargs["opts"]

    try:
        return salt.utils.http.query(url=url, opts=opts, **kwargs)
    except Exception as exc:  # pylint: disable=broad-except
        # Chain the original exception so the root cause remains visible in
        # tracebacks instead of being flattened to its string form only.
        raise CommandExecutionError(str(exc)) from exc


def wait_for_successful_query(url, wait_for=300, **kwargs):
    """
    Query a resource until a successful response, and decode the return data

    wait_for : 300
        Total number of seconds to keep retrying before giving up.

    request_interval
        Optional number of seconds to sleep between attempts.

    CLI Example:

    .. code-block:: bash

        salt '*' http.wait_for_successful_query http://somelink.com/ wait_for=160 request_interval=1
    """
    starttime = time.time()

    while True:
        caught_exception = None
        result = None
        try:
            result = query(url=url, **kwargs)
            if not result.get("Error") and not result.get("error"):
                return result
        except Exception as exc:  # pylint: disable=broad-except
            caught_exception = exc

        if time.time() > starttime + wait_for:
            if not result and caught_exception:
                # workaround pylint bug https://www.logilab.org/ticket/3207
                raise caught_exception  # pylint: disable=E0702
            return result
        elif "request_interval" in kwargs:
            # Space requests out by delaying for an interval
            time.sleep(kwargs["request_interval"])


def update_ca_bundle(target=None, source=None, merge_files=None):
    """
    Update the local CA bundle file from a URL

    .. versionadded:: 2015.5.0

    CLI Example:

    .. code-block:: bash

        salt '*' http.update_ca_bundle
        salt '*' http.update_ca_bundle target=/path/to/cacerts.pem
        salt '*' http.update_ca_bundle source=https://example.com/cacerts.pem

    If the ``target`` is not specified, it will be pulled from the
    ``ca_bundle`` configuration variable available to the minion. If it
    cannot be found there, it will be placed at
    ``<<FILE_ROOTS>>/cacerts.pem``.

    If the ``source`` is not specified, it will be pulled from the
    ``ca_bundle_url`` configuration variable available to the minion. If it
    cannot be found, it will be downloaded from the cURL website, using an
    http (not https) URL. USING THE DEFAULT URL SHOULD BE AVOIDED!

    ``merge_files`` may also be specified, which includes a string or list
    of strings representing a file or files to be appended to the end of the
    CA bundle, once it is downloaded.

    CLI Example:

    .. code-block:: bash

        salt '*' http.update_ca_bundle merge_files=/path/to/mycert.pem
    """
    if target is None:
        target = __salt__["config.get"]("ca_bundle", None)

    if source is None:
        source = __salt__["config.get"]("ca_bundle_url", None)

    return salt.utils.http.update_ca_bundle(target, source, __opts__, merge_files)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/http.py
0.737347
0.237167
http.py
pypi
import logging import re from salt.exceptions import CommandExecutionError try: import pynetbox HAS_PYNETBOX = True except ImportError: HAS_PYNETBOX = False log = logging.getLogger(__name__) AUTH_ENDPOINTS = ("secrets",) __func_alias__ = {"filter_": "filter", "get_": "get"} def __virtual__(): """ pynetbox must be installed. """ if not HAS_PYNETBOX: return ( False, "The netbox execution module cannot be loaded: " "pynetbox library is not installed.", ) else: return True def _config(): config = __salt__["config.get"]("netbox") if not config: raise CommandExecutionError( "NetBox execution module configuration could not be found" ) return config def _nb_obj(auth_required=False): pynb_kwargs = {} pynb_kwargs["token"] = _config().get("token") if auth_required: pynb_kwargs["private_key_file"] = _config().get("keyfile") return pynetbox.api(_config().get("url"), **pynb_kwargs) def _strip_url_field(input_dict): if "url" in input_dict.keys(): del input_dict["url"] for k, v in input_dict.items(): if isinstance(v, dict): _strip_url_field(v) return input_dict def _dict(iterable): if iterable: return dict(iterable) else: return {} def _add(app, endpoint, payload): """ POST a payload """ nb = _nb_obj(auth_required=True) try: return getattr(getattr(nb, app), endpoint).create(**payload) except pynetbox.RequestError as e: log.error("%s, %s, %s", e.req.request.headers, e.request_body, e.error) return False def slugify(value): """' Slugify given value. Credit to Djangoproject https://docs.djangoproject.com/en/2.0/_modules/django/utils/text/#slugify """ value = re.sub(r"[^\w\s-]", "", value).strip().lower() return re.sub(r"[-\s]+", "-", value) def _get(app, endpoint, id=None, auth_required=False, **kwargs): """ Helper function to do a GET request to Netbox. Returns the actual pynetbox object, which allows manipulation from other functions. 
""" nb = _nb_obj(auth_required=auth_required) if id: item = getattr(getattr(nb, app), endpoint).get(id) else: kwargs = __utils__["args.clean_kwargs"](**kwargs) item = getattr(getattr(nb, app), endpoint).get(**kwargs) return item def _if_name_unit(if_name): if_name_split = if_name.split(".") if len(if_name_split) == 2: return if_name_split return if_name, "0" def filter_(app, endpoint, **kwargs): """ Get a list of items from NetBox. app String of netbox app, e.g., ``dcim``, ``circuits``, ``ipam`` endpoint String of app endpoint, e.g., ``sites``, ``regions``, ``devices`` kwargs Optional arguments that can be used to filter. All filter keywords are available in Netbox, which can be found by surfing to the corresponding API endpoint, and clicking Filters. e.g., ``role=router`` Returns a list of dictionaries .. code-block:: bash salt myminion netbox.filter dcim devices status=1 role=router """ ret = [] nb = _nb_obj(auth_required=True if app in AUTH_ENDPOINTS else False) nb_query = getattr(getattr(nb, app), endpoint).filter( **__utils__["args.clean_kwargs"](**kwargs) ) if nb_query: ret = [_strip_url_field(dict(i)) for i in nb_query] return ret def get_(app, endpoint, id=None, **kwargs): """ Get a single item from NetBox. app String of netbox app, e.g., ``dcim``, ``circuits``, ``ipam`` endpoint String of app endpoint, e.g., ``sites``, ``regions``, ``devices`` Returns a single dictionary To get an item based on ID. .. code-block:: bash salt myminion netbox.get dcim devices id=123 Or using named arguments that correspond with accepted filters on the NetBox endpoint. .. code-block:: bash salt myminion netbox.get dcim devices name=my-router """ return _dict( _get( app, endpoint, id=id, auth_required=True if app in AUTH_ENDPOINTS else False, **kwargs ) ) def create_manufacturer(name): """ .. versionadded:: 2019.2.0 Create a device manufacturer. name The name of the manufacturer, e.g., ``Juniper`` CLI Example: .. 
code-block:: bash salt myminion netbox.create_manufacturer Juniper """ nb_man = get_("dcim", "manufacturers", name=name) if nb_man: return False else: payload = {"name": name, "slug": slugify(name)} man = _add("dcim", "manufacturers", payload) if man: return {"dcim": {"manufacturers": payload}} else: return False def create_device_type(model, manufacturer): """ .. versionadded:: 2019.2.0 Create a device type. If the manufacturer doesn't exist, create a new manufacturer. model String of device model, e.g., ``MX480`` manufacturer String of device manufacturer, e.g., ``Juniper`` CLI Example: .. code-block:: bash salt myminion netbox.create_device_type MX480 Juniper """ nb_type = get_("dcim", "device-types", model=model) if nb_type: return False nb_man = get_("dcim", "manufacturers", name=manufacturer) new_man = None if not nb_man: new_man = create_manufacturer(manufacturer) payload = {"model": model, "manufacturer": nb_man["id"], "slug": slugify(model)} typ = _add("dcim", "device-types", payload) ret_dict = {"dcim": {"device-types": payload}} if new_man: ret_dict["dcim"].update(new_man["dcim"]) if typ: return ret_dict else: return False def create_device_role(role, color): """ .. versionadded:: 2019.2.0 Create a device role role String of device role, e.g., ``router`` CLI Example: .. code-block:: bash salt myminion netbox.create_device_role router """ nb_role = get_("dcim", "device-roles", name=role) if nb_role: return False else: payload = {"name": role, "slug": slugify(role), "color": color} role = _add("dcim", "device-roles", payload) if role: return {"dcim": {"device-roles": payload}} else: return False def create_platform(platform): """ .. versionadded:: 2019.2.0 Create a new device platform platform String of device platform, e.g., ``junos`` CLI Example: .. 
code-block:: bash salt myminion netbox.create_platform junos """ nb_platform = get_("dcim", "platforms", slug=slugify(platform)) if nb_platform: return False else: payload = {"name": platform, "slug": slugify(platform)} plat = _add("dcim", "platforms", payload) if plat: return {"dcim": {"platforms": payload}} else: return False def create_site(site): """ .. versionadded:: 2019.2.0 Create a new device site site String of device site, e.g., ``BRU`` CLI Example: .. code-block:: bash salt myminion netbox.create_site BRU """ nb_site = get_("dcim", "sites", name=site) if nb_site: return False else: payload = {"name": site, "slug": slugify(site)} site = _add("dcim", "sites", payload) if site: return {"dcim": {"sites": payload}} else: return False def create_device(name, role, model, manufacturer, site): """ .. versionadded:: 2019.2.0 Create a new device with a name, role, model, manufacturer and site. All these components need to be already in Netbox. name The name of the device, e.g., ``edge_router`` role String of device role, e.g., ``router`` model String of device model, e.g., ``MX480`` manufacturer String of device manufacturer, e.g., ``Juniper`` site String of device site, e.g., ``BRU`` CLI Example: .. 
code-block:: bash salt myminion netbox.create_device edge_router router MX480 Juniper BRU """ try: nb_role = get_("dcim", "device-roles", name=role) if not nb_role: return False nb_type = get_("dcim", "device-types", model=model) if not nb_type: return False nb_site = get_("dcim", "sites", name=site) if not nb_site: return False status = {"label": "Active", "value": 1} except pynetbox.RequestError as e: log.error("%s, %s, %s", e.req.request.headers, e.request_body, e.error) return False payload = { "name": name, "display_name": name, "slug": slugify(name), "device_type": nb_type["id"], "device_role": nb_role["id"], "site": nb_site["id"], } new_dev = _add("dcim", "devices", payload) if new_dev: return {"dcim": {"devices": payload}} else: return False def update_device(name, **kwargs): """ .. versionadded:: 2019.2.0 Add attributes to an existing device, identified by name. name The name of the device, e.g., ``edge_router`` kwargs Arguments to change in device, e.g., ``serial=JN2932930`` CLI Example: .. code-block:: bash salt myminion netbox.update_device edge_router serial=JN2932920 """ kwargs = __utils__["args.clean_kwargs"](**kwargs) nb_device = _get("dcim", "devices", auth_required=True, name=name) for k, v in kwargs.items(): setattr(nb_device, k, v) try: nb_device.save() return {"dcim": {"devices": kwargs}} except pynetbox.RequestError as e: log.error("%s, %s, %s", e.req.request.headers, e.request_body, e.error) return False def create_inventory_item( device_name, item_name, manufacturer_name=None, serial="", part_id="", description="", ): """ .. versionadded:: 2019.2.0 Add an inventory item to an existing device. device_name The name of the device, e.g., ``edge_router``. item_name String of inventory item name, e.g., ``Transceiver``. manufacturer_name String of inventory item manufacturer, e.g., ``Fiberstore``. serial String of inventory item serial, e.g., ``FS1238931``. part_id String of inventory item part id, e.g., ``740-01234``. 
description String of inventory item description, e.g., ``SFP+-10G-LR``. CLI Example: .. code-block:: bash salt myminion netbox.create_inventory_item edge_router Transceiver part_id=740-01234 """ nb_device = get_("dcim", "devices", name=device_name) if not nb_device: return False if manufacturer_name: nb_man = get_("dcim", "manufacturers", name=manufacturer_name) if not nb_man: create_manufacturer(manufacturer_name) nb_man = get_("dcim", "manufacturers", name=manufacturer_name) payload = { "device": nb_device["id"], "name": item_name, "description": description, "serial": serial, "part_id": part_id, "parent": None, } if manufacturer_name: payload["manufacturer"] = nb_man["id"] done = _add("dcim", "inventory-items", payload) if done: return {"dcim": {"inventory-items": payload}} else: return done def delete_inventory_item(item_id): """ .. versionadded:: 2019.2.0 Remove an item from a devices inventory. Identified by the netbox id item_id Integer of item to be deleted CLI Example: .. code-block:: bash salt myminion netbox.delete_inventory_item 1354 """ nb_inventory_item = _get("dcim", "inventory-items", auth_required=True, id=item_id) nb_inventory_item.delete() return {"DELETE": {"dcim": {"inventory-items": item_id}}} def create_interface_connection(interface_a, interface_b): """ .. versionadded:: 2019.2.0 Create an interface connection between 2 interfaces interface_a Interface id for Side A interface_b Interface id for Side B CLI Example: .. code-block:: bash salt myminion netbox.create_interface_connection 123 456 """ payload = {"interface_a": interface_a, "interface_b": interface_b} ret = _add("dcim", "interface-connections", payload) if ret: return {"dcim": {"interface-connections": {ret["id"]: payload}}} else: return ret def get_interfaces(device_name=None, **kwargs): """ .. 
versionadded:: 2019.2.0 Returns interfaces for a specific device using arbitrary netbox filters device_name The name of the device, e.g., ``edge_router`` kwargs Optional arguments to be used for filtering CLI Example: .. code-block:: bash salt myminion netbox.get_interfaces edge_router name="et-0/0/5" """ if not device_name: device_name = __opts__["id"] netbox_device = get_("dcim", "devices", name=device_name) return filter_("dcim", "interfaces", device_id=netbox_device["id"], **kwargs) def openconfig_interfaces(device_name=None): """ .. versionadded:: 2019.2.0 Return a dictionary structured as standardised in the `openconfig-interfaces <http://ops.openconfig.net/branches/models/master/openconfig-interfaces.html>`_ YANG model, containing physical and configuration data available in Netbox, e.g., IP addresses, MTU, enabled / disabled, etc. device_name: ``None`` The name of the device to query the interface data for. If not provided, will use the Minion ID. CLI Example: .. code-block:: bash salt '*' netbox.openconfig_interfaces salt '*' netbox.openconfig_interfaces device_name=cr1.thn.lon """ oc_if = {} interfaces = get_interfaces(device_name=device_name) ipaddresses = get_ipaddresses(device_name=device_name) for interface in interfaces: if_name, if_unit = _if_name_unit(interface["name"]) if if_name not in oc_if: oc_if[if_name] = { "config": {"name": if_name}, "subinterfaces": {"subinterface": {}}, } if if_unit == "0": oc_if[if_name]["config"]["enabled"] = interface["enabled"] if interface["description"]: if if_name == interface["name"]: # When that's a real unit 0 interface # Otherwise it will inherit the description from the subif oc_if[if_name]["config"]["description"] = str( interface["description"] ) else: subif_descr = { "subinterfaces": { "subinterface": { if_unit: { "config": { "description": str(interface["description"]) } } } } } oc_if[if_name] = __utils__["dictupdate.update"]( oc_if[if_name], subif_descr ) if interface["mtu"]: 
oc_if[if_name]["config"]["mtu"] = int(interface["mtu"]) else: oc_if[if_name]["subinterfaces"]["subinterface"][if_unit] = { "config": {"index": int(if_unit), "enabled": interface["enabled"]} } if interface["description"]: oc_if[if_name]["subinterfaces"]["subinterface"][if_unit]["config"][ "description" ] = str(interface["description"]) for ipaddress in ipaddresses: ip, prefix_length = ipaddress["address"].split("/") if_name = ipaddress["interface"]["name"] if_name, if_unit = _if_name_unit(if_name) ipvkey = "ipv{}".format(ipaddress["family"]) if if_unit not in oc_if[if_name]["subinterfaces"]["subinterface"]: oc_if[if_name]["subinterfaces"]["subinterface"][if_unit] = { "config": {"index": int(if_unit), "enabled": True} } if ipvkey not in oc_if[if_name]["subinterfaces"]["subinterface"][if_unit]: oc_if[if_name]["subinterfaces"]["subinterface"][if_unit][ipvkey] = { "addresses": {"address": {}} } oc_if[if_name]["subinterfaces"]["subinterface"][if_unit][ipvkey]["addresses"][ "address" ][ip] = {"config": {"ip": ip, "prefix_length": int(prefix_length)}} return {"interfaces": {"interface": oc_if}} def openconfig_lacp(device_name=None): """ .. versionadded:: 2019.2.0 Return a dictionary structured as standardised in the `openconfig-lacp <http://ops.openconfig.net/branches/models/master/openconfig-lacp.html>`_ YANG model, with configuration data for Link Aggregation Control Protocol (LACP) for aggregate interfaces. .. note:: The ``interval`` and ``lacp_mode`` keys have the values set as ``SLOW`` and ``ACTIVE`` respectively, as this data is not currently available in Netbox, therefore defaulting to the values defined in the standard. See `interval <http://ops.openconfig.net/branches/models/master/docs/openconfig-lacp.html#lacp-interfaces-interface-config-interval>`_ and `lacp-mode <http://ops.openconfig.net/branches/models/master/docs/openconfig-lacp.html#lacp-interfaces-interface-config-lacp-mode>`_ for further details. 
device_name: ``None`` The name of the device to query the LACP information for. If not provided, will use the Minion ID. CLI Example: .. code-block:: bash salt '*' netbox.openconfig_lacp salt '*' netbox.openconfig_lacp device_name=cr1.thn.lon """ oc_lacp = {} interfaces = get_interfaces(device_name=device_name) for interface in interfaces: if not interface["lag"]: continue if_name, if_unit = _if_name_unit(interface["name"]) parent_if = interface["lag"]["name"] if parent_if not in oc_lacp: oc_lacp[parent_if] = { "config": { "name": parent_if, "interval": "SLOW", "lacp_mode": "ACTIVE", }, "members": {"member": {}}, } oc_lacp[parent_if]["members"]["member"][if_name] = {} return {"lacp": {"interfaces": {"interface": oc_lacp}}} def create_interface( device_name, interface_name, mac_address=None, description=None, enabled=None, lag=None, lag_parent=None, form_factor=None, ): """ .. versionadded:: 2019.2.0 Attach an interface to a device. If not all arguments are provided, they will default to Netbox defaults. device_name The name of the device, e.g., ``edge_router`` interface_name The name of the interface, e.g., ``TenGigE0/0/0/0`` mac_address String of mac address, e.g., ``50:87:89:73:92:C8`` description String of interface description, e.g., ``NTT`` enabled String of boolean interface status, e.g., ``True`` lag: Boolean of interface lag status, e.g., ``True`` lag_parent String of interface lag parent name, e.g., ``ae13`` form_factor Integer of form factor id, obtained through _choices API endpoint, e.g., ``200`` CLI Example: .. code-block:: bash salt myminion netbox.create_interface edge_router ae13 description="Core uplink" """ nb_device = get_("dcim", "devices", name=device_name) if not nb_device: return False if lag_parent: lag_interface = get_( "dcim", "interfaces", device_id=nb_device["id"], name=lag_parent ) if not lag_interface: return False if not description: description = "" if not enabled: enabled = "false" # Set default form factor to 1200. 
This maps to SFP+ (10GE). This should be addressed by # the _choices endpoint. payload = { "device": nb_device["id"], "name": interface_name, "description": description, "enabled": enabled, "form_factor": 1200, } if form_factor is not None: payload["form_factor"] = form_factor if lag: payload["form_factor"] = 200 if lag_parent: payload["lag"] = lag_interface["id"] if mac_address: payload["mac_address"] = mac_address nb_interface = get_( "dcim", "interfaces", device_id=nb_device["id"], name=interface_name ) if not nb_interface: nb_interface = _add("dcim", "interfaces", payload) if nb_interface: return {"dcim": {"interfaces": {nb_interface["id"]: payload}}} else: return nb_interface def update_interface(device_name, interface_name, **kwargs): """ .. versionadded:: 2019.2.0 Update an existing interface with new attributes. device_name The name of the device, e.g., ``edge_router`` interface_name The name of the interface, e.g., ``ae13`` kwargs Arguments to change in interface, e.g., ``mac_address=50:87:69:53:32:D0`` CLI Example: .. code-block:: bash salt myminion netbox.update_interface edge_router ae13 mac_address=50:87:69:53:32:D0 """ nb_device = get_("dcim", "devices", name=device_name) nb_interface = _get( "dcim", "interfaces", auth_required=True, device_id=nb_device["id"], name=interface_name, ) if not nb_device: return False if not nb_interface: return False else: for k, v in __utils__["args.clean_kwargs"](**kwargs).items(): setattr(nb_interface, k, v) try: nb_interface.save() return {"dcim": {"interfaces": {nb_interface.id: dict(nb_interface)}}} except pynetbox.RequestError as e: log.error("%s, %s, %s", e.req.request.headers, e.request_body, e.error) return False def delete_interface(device_name, interface_name): """ .. versionadded:: 2019.2.0 Delete an interface from a device. device_name The name of the device, e.g., ``edge_router``. interface_name The name of the interface, e.g., ``ae13`` CLI Example: .. 
code-block:: bash salt myminion netbox.delete_interface edge_router ae13 """ nb_device = get_("dcim", "devices", name=device_name) nb_interface = _get( "dcim", "interfaces", auth_required=True, device_id=nb_device["id"], name=interface_name, ) if nb_interface: nb_interface.delete() return { "DELETE": {"dcim": {"interfaces": {nb_interface.id: nb_interface.name}}} } return False def make_interface_lag(device_name, interface_name): """ .. versionadded:: 2019.2.0 Update an interface to be a LAG. device_name The name of the device, e.g., ``edge_router``. interface_name The name of the interface, e.g., ``ae13``. CLI Example: .. code-block:: bash salt myminion netbox.make_interface_lag edge_router ae13 """ return update_interface(device_name, interface_name, form_factor=200) def make_interface_child(device_name, interface_name, parent_name): """ .. versionadded:: 2019.2.0 Set an interface as part of a LAG. device_name The name of the device, e.g., ``edge_router``. interface_name The name of the interface to be attached to LAG, e.g., ``xe-1/0/2``. parent_name The name of the LAG interface, e.g., ``ae13``. CLI Example: .. code-block:: bash salt myminion netbox.make_interface_child xe-1/0/2 ae13 """ nb_device = get_("dcim", "devices", name=device_name) nb_parent = get_("dcim", "interfaces", device_id=nb_device["id"], name=parent_name) if nb_device and nb_parent: return update_interface(device_name, interface_name, lag=nb_parent["id"]) else: return False def get_ipaddresses(device_name=None, **kwargs): """ .. versionadded:: 2019.2.0 Filters for an IP address using specified filters device_name The name of the device to check for the IP address kwargs Optional arguments that can be used to filter, e.g., ``family=4`` CLI Example: .. 
code-block:: bash salt myminion netbox.get_ipaddresses device_name family=4 """ if not device_name: device_name = __opts__["id"] netbox_device = get_("dcim", "devices", name=device_name) return filter_("ipam", "ip-addresses", device_id=netbox_device["id"], **kwargs) def create_ipaddress(ip_address, family, device=None, interface=None): """ .. versionadded:: 2019.2.0 Add an IP address, and optionally attach it to an interface. ip_address The IP address and CIDR, e.g., ``192.168.1.1/24`` family Integer of IP family, e.g., ``4`` device The name of the device to attach IP to, e.g., ``edge_router`` interface The name of the interface to attach IP to, e.g., ``ae13`` CLI Example: .. code-block:: bash salt myminion netbox.create_ipaddress 192.168.1.1/24 4 device=edge_router interface=ae13 """ nb_addr = None payload = {"family": family, "address": ip_address} if interface and device: nb_device = get_("dcim", "devices", name=device) if not nb_device: return False nb_interface = get_( "dcim", "interfaces", device_id=nb_device["id"], name=interface ) if not nb_interface: return False nb_addr = get_( "ipam", "ip-addresses", q=ip_address, interface_id=nb_interface["id"], family=family, ) if nb_addr: log.error(nb_addr) return False else: payload["interface"] = nb_interface["id"] ipaddr = _add("ipam", "ip-addresses", payload) if ipaddr: return {"ipam": {"ip-addresses": payload}} else: return ipaddr def delete_ipaddress(ipaddr_id): """ .. versionadded:: 2019.2.0 Delete an IP address. IP addresses in Netbox are a combination of address and the interface it is assigned to. id The Netbox id for the IP address. CLI Example: .. code-block:: bash salt myminion netbox.delete_ipaddress 9002 """ nb_ipaddr = _get("ipam", "ip-addresses", auth_required=True, id=ipaddr_id) if nb_ipaddr: nb_ipaddr.delete() return {"DELETE": {"ipam": {"ip-address": ipaddr_id}}} return False def create_circuit_provider(name, asn=None): """ .. 
versionadded:: 2019.2.0 Create a new Netbox circuit provider name The name of the circuit provider asn The ASN of the circuit provider CLI Example: .. code-block:: bash salt myminion netbox.create_circuit_provider Telia 1299 """ nb_circuit_provider = get_("circuits", "providers", name=name) payload = {} if nb_circuit_provider: if nb_circuit_provider["asn"] == asn: return False else: log.error("Duplicate provider with different ASN: %s: %s", name, asn) raise CommandExecutionError( "Duplicate provider with different ASN: {}: {}".format(name, asn) ) else: payload = {"name": name, "slug": slugify(name)} if asn: payload["asn"] = asn circuit_provider = _add("circuits", "providers", payload) if circuit_provider: return {"circuits": {"providers": {circuit_provider["id"]: payload}}} else: return circuit_provider def get_circuit_provider(name, asn=None): """ .. versionadded:: 2019.2.0 Get a circuit provider with a given name and optional ASN. name The name of the circuit provider asn The ASN of the circuit provider CLI Example: .. code-block:: bash salt myminion netbox.get_circuit_provider Telia 1299 """ if asn: nb_circuit_provider = get_("circuits", "providers", asn=asn) else: nb_circuit_provider = get_("circuits", "providers", name=name) return nb_circuit_provider def create_circuit_type(name): """ .. versionadded:: 2019.2.0 Create a new Netbox circuit type. name The name of the circuit type CLI Example: .. code-block:: bash salt myminion netbox.create_circuit_type Transit """ nb_circuit_type = get_("circuits", "circuit-types", slug=slugify(name)) if nb_circuit_type: return False else: payload = {"name": name, "slug": slugify(name)} circuit_type = _add("circuits", "circuit-types", payload) if circuit_type: return {"circuits": {"circuit-types": {circuit_type["id"]: payload}}} else: return circuit_type def create_circuit(name, provider_id, circuit_type, description=None): """ .. 
versionadded:: 2019.2.0 Create a new Netbox circuit name Name of the circuit provider_id The netbox id of the circuit provider circuit_type The name of the circuit type asn The ASN of the circuit provider description The description of the circuit CLI Example: .. code-block:: bash salt myminion netbox.create_circuit NEW_CIRCUIT_01 Telia Transit 1299 "New Telia circuit" """ nb_circuit_provider = get_("circuits", "providers", provider_id) nb_circuit_type = get_("circuits", "circuit-types", slug=slugify(circuit_type)) if nb_circuit_provider and nb_circuit_type: payload = { "cid": name, "provider": nb_circuit_provider["id"], "type": nb_circuit_type["id"], } if description: payload["description"] = description nb_circuit = get_("circuits", "circuits", cid=name) if nb_circuit: return False circuit = _add("circuits", "circuits", payload) if circuit: return {"circuits": {"circuits": {circuit["id"]: payload}}} else: return circuit else: return False def create_circuit_termination( circuit, interface, device, speed, xconnect_id=None, term_side="A" ): """ .. versionadded:: 2019.2.0 Terminate a circuit on an interface circuit The name of the circuit interface The name of the interface to terminate on device The name of the device the interface belongs to speed The speed of the circuit, in Kbps xconnect_id The cross-connect identifier term_side The side of the circuit termination CLI Example: .. 
code-block:: bash salt myminion netbox.create_circuit_termination NEW_CIRCUIT_01 xe-0/0/1 myminion 10000 xconnect_id=XCON01 """ nb_device = get_("dcim", "devices", name=device) nb_interface = get_("dcim", "interfaces", device_id=nb_device["id"], name=interface) nb_circuit = get_("circuits", "circuits", cid=circuit) if nb_circuit and nb_device: nb_termination = get_("circuits", "circuit-terminations", q=nb_circuit["cid"]) if nb_termination: return False payload = { "circuit": nb_circuit["id"], "interface": nb_interface["id"], "site": nb_device["site"]["id"], "port_speed": speed, "term_side": term_side, } if xconnect_id: payload["xconnect_id"] = xconnect_id circuit_termination = _add("circuits", "circuit-terminations", payload) if circuit_termination: return { "circuits": { "circuit-terminations": {circuit_termination["id"]: payload} } } else: return circuit_termination
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/netbox.py
0.604282
0.150528
netbox.py
pypi
import logging import salt.utils.compat import salt.utils.json import salt.utils.versions from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) # pylint: disable=import-error try: # pylint: disable=unused-import import boto import boto3 # pylint: enable=unused-import from botocore.exceptions import ClientError logging.getLogger("boto").setLevel(logging.CRITICAL) logging.getLogger("boto3").setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False # pylint: enable=import-error def __virtual__(): """ Only load if boto libraries exist and if boto libraries are greater than a given version. """ # the boto_lambda execution module relies on the connect_to_region() method # which was added in boto 2.8.0 # https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12 return salt.utils.versions.check_boto_reqs(boto_ver="2.8.0", boto3_ver="1.4.0") def __init__(opts): if HAS_BOTO: __utils__["boto3.assign_funcs"](__name__, "es") def exists(DomainName, region=None, key=None, keyid=None, profile=None): """ Given a domain name, check to see if the given domain exists. Returns True if the given domain exists and returns False if the given function does not exist. CLI Example: .. code-block:: bash salt myminion boto_elasticsearch_domain.exists mydomain """ conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: domain = conn.describe_elasticsearch_domain(DomainName=DomainName) return {"exists": True} except ClientError as e: if e.response.get("Error", {}).get("Code") == "ResourceNotFoundException": return {"exists": False} return {"error": __utils__["boto3.get_error"](e)} def status(DomainName, region=None, key=None, keyid=None, profile=None): """ Given a domain name describe its status. Returns a dictionary of interesting properties. CLI Example: .. 
code-block:: bash salt myminion boto_elasticsearch_domain.status mydomain """ conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: domain = conn.describe_elasticsearch_domain(DomainName=DomainName) if domain and "DomainStatus" in domain: domain = domain.get("DomainStatus", {}) keys = ( "Endpoint", "Created", "Deleted", "DomainName", "DomainId", "EBSOptions", "SnapshotOptions", "AccessPolicies", "Processing", "AdvancedOptions", "ARN", "ElasticsearchVersion", ) return {"domain": {k: domain.get(k) for k in keys if k in domain}} else: return {"domain": None} except ClientError as e: return {"error": __utils__["boto3.get_error"](e)} def describe(DomainName, region=None, key=None, keyid=None, profile=None): """ Given a domain name describe its properties. Returns a dictionary of interesting properties. CLI Example: .. code-block:: bash salt myminion boto_elasticsearch_domain.describe mydomain """ conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: domain = conn.describe_elasticsearch_domain_config(DomainName=DomainName) if domain and "DomainConfig" in domain: domain = domain["DomainConfig"] keys = ( "ElasticsearchClusterConfig", "EBSOptions", "AccessPolicies", "SnapshotOptions", "AdvancedOptions", ) return { "domain": { k: domain.get(k, {}).get("Options") for k in keys if k in domain } } else: return {"domain": None} except ClientError as e: return {"error": __utils__["boto3.get_error"](e)} def create( DomainName, ElasticsearchClusterConfig=None, EBSOptions=None, AccessPolicies=None, SnapshotOptions=None, AdvancedOptions=None, region=None, key=None, keyid=None, profile=None, ElasticsearchVersion=None, ): """ Given a valid config, create a domain. Returns {created: true} if the domain was created and returns {created: False} if the domain was not created. CLI Example: .. 
code-block:: bash salt myminion boto_elasticsearch_domain.create mydomain \\ {'InstanceType': 't2.micro.elasticsearch', 'InstanceCount': 1, \\ 'DedicatedMasterEnabled': false, 'ZoneAwarenessEnabled': false} \\ {'EBSEnabled': true, 'VolumeType': 'gp2', 'VolumeSize': 10, \\ 'Iops': 0} \\ {"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Principal": {"AWS": "*"}, "Action": "es:*", \\ "Resource": "arn:aws:es:us-east-1:111111111111:domain/mydomain/*", \\ "Condition": {"IpAddress": {"aws:SourceIp": ["127.0.0.1"]}}}]} \\ {"AutomatedSnapshotStartHour": 0} \\ {"rest.action.multi.allow_explicit_index": "true"} """ try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) kwargs = {} for k in ( "ElasticsearchClusterConfig", "EBSOptions", "AccessPolicies", "SnapshotOptions", "AdvancedOptions", "ElasticsearchVersion", ): if locals()[k] is not None: val = locals()[k] if isinstance(val, str): try: val = salt.utils.json.loads(val) except ValueError as e: return { "updated": False, "error": "Error parsing {}: {}".format(k, e.message), } kwargs[k] = val if "AccessPolicies" in kwargs: kwargs["AccessPolicies"] = salt.utils.json.dumps(kwargs["AccessPolicies"]) if "ElasticsearchVersion" in kwargs: kwargs["ElasticsearchVersion"] = str(kwargs["ElasticsearchVersion"]) domain = conn.create_elasticsearch_domain(DomainName=DomainName, **kwargs) if domain and "DomainStatus" in domain: return {"created": True} else: log.warning("Domain was not created") return {"created": False} except ClientError as e: return {"created": False, "error": __utils__["boto3.get_error"](e)} def delete(DomainName, region=None, key=None, keyid=None, profile=None): """ Given a domain name, delete it. Returns {deleted: true} if the domain was deleted and returns {deleted: false} if the domain was not deleted. CLI Example: .. 
code-block:: bash salt myminion boto_elasticsearch_domain.delete mydomain """ try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.delete_elasticsearch_domain(DomainName=DomainName) return {"deleted": True} except ClientError as e: return {"deleted": False, "error": __utils__["boto3.get_error"](e)} def update( DomainName, ElasticsearchClusterConfig=None, EBSOptions=None, AccessPolicies=None, SnapshotOptions=None, AdvancedOptions=None, region=None, key=None, keyid=None, profile=None, ): """ Update the named domain to the configuration. Returns {updated: true} if the domain was updated and returns {updated: False} if the domain was not updated. CLI Example: .. code-block:: bash salt myminion boto_elasticsearch_domain.update mydomain \\ {'InstanceType': 't2.micro.elasticsearch', 'InstanceCount': 1, \\ 'DedicatedMasterEnabled': false, 'ZoneAwarenessEnabled': false} \\ {'EBSEnabled': true, 'VolumeType': 'gp2', 'VolumeSize': 10, \\ 'Iops': 0} \\ {"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Principal": {"AWS": "*"}, "Action": "es:*", \\ "Resource": "arn:aws:es:us-east-1:111111111111:domain/mydomain/*", \\ "Condition": {"IpAddress": {"aws:SourceIp": ["127.0.0.1"]}}}]} \\ {"AutomatedSnapshotStartHour": 0} \\ {"rest.action.multi.allow_explicit_index": "true"} """ call_args = {} for k in ( "ElasticsearchClusterConfig", "EBSOptions", "AccessPolicies", "SnapshotOptions", "AdvancedOptions", ): if locals()[k] is not None: val = locals()[k] if isinstance(val, str): try: val = salt.utils.json.loads(val) except ValueError as e: return { "updated": False, "error": "Error parsing {}: {}".format(k, e.message), } call_args[k] = val if "AccessPolicies" in call_args: call_args["AccessPolicies"] = salt.utils.json.dumps(call_args["AccessPolicies"]) try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) domain = conn.update_elasticsearch_domain_config( DomainName=DomainName, **call_args ) if not domain or "DomainConfig" not in 
domain: log.warning("Domain was not updated") return {"updated": False} return {"updated": True} except ClientError as e: return {"updated": False, "error": __utils__["boto3.get_error"](e)} def add_tags( DomainName=None, ARN=None, region=None, key=None, keyid=None, profile=None, **kwargs ): """ Add tags to a domain Returns {tagged: true} if the domain was tagged and returns {tagged: False} if the domain was not tagged. CLI Example: .. code-block:: bash salt myminion boto_elasticsearch_domain.add_tags mydomain tag_a=tag_value tag_b=tag_value """ try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) tagslist = [] for k, v in kwargs.items(): if str(k).startswith("__"): continue tagslist.append({"Key": str(k), "Value": str(v)}) if ARN is None: if DomainName is None: raise SaltInvocationError( "One (but not both) of ARN or domain must be specified." ) domaindata = status( DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile, ) if not domaindata or "domain" not in domaindata: log.warning("Domain tags not updated") return {"tagged": False} ARN = domaindata.get("domain", {}).get("ARN") elif DomainName is not None: raise SaltInvocationError( "One (but not both) of ARN or domain must be specified." ) conn.add_tags(ARN=ARN, TagList=tagslist) return {"tagged": True} except ClientError as e: return {"tagged": False, "error": __utils__["boto3.get_error"](e)} def remove_tags( TagKeys, DomainName=None, ARN=None, region=None, key=None, keyid=None, profile=None ): """ Remove tags from a trail Returns {tagged: true} if the trail was tagged and returns {tagged: False} if the trail was not tagged. CLI Example: .. code-block:: bash salt myminion boto_cloudtrail.remove_tags my_trail tag_a=tag_value tag_b=tag_value """ try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if ARN is None: if DomainName is None: raise SaltInvocationError( "One (but not both) of ARN or domain must be specified." 
) domaindata = status( DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile, ) if not domaindata or "domain" not in domaindata: log.warning("Domain tags not updated") return {"tagged": False} ARN = domaindata.get("domain", {}).get("ARN") elif DomainName is not None: raise SaltInvocationError( "One (but not both) of ARN or domain must be specified." ) conn.remove_tags(ARN=domaindata.get("domain", {}).get("ARN"), TagKeys=TagKeys) return {"tagged": True} except ClientError as e: return {"tagged": False, "error": __utils__["boto3.get_error"](e)} def list_tags( DomainName=None, ARN=None, region=None, key=None, keyid=None, profile=None ): """ List tags of a trail Returns: tags: - {...} - {...} CLI Example: .. code-block:: bash salt myminion boto_cloudtrail.list_tags my_trail """ try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if ARN is None: if DomainName is None: raise SaltInvocationError( "One (but not both) of ARN or domain must be specified." ) domaindata = status( DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile, ) if not domaindata or "domain" not in domaindata: log.warning("Domain tags not updated") return {"tagged": False} ARN = domaindata.get("domain", {}).get("ARN") elif DomainName is not None: raise SaltInvocationError( "One (but not both) of ARN or domain must be specified." ) ret = conn.list_tags(ARN=ARN) log.warning(ret) tlist = ret.get("TagList", []) tagdict = {} for tag in tlist: tagdict[tag.get("Key")] = tag.get("Value") return {"tags": tagdict} except ClientError as e: return {"error": __utils__["boto3.get_error"](e)}
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/boto_elasticsearch_domain.py
0.586641
0.200558
boto_elasticsearch_domain.py
pypi
import logging try: import ethtool HAS_ETHTOOL = True except ImportError: HAS_ETHTOOL = False log = logging.getLogger(__name__) ethtool_coalesce_map = { "pkt_rate_high": "pkt_rate_high", "pkt_rate_low": "pkt_rate_low", "sample_interval": "rate_sample_interval", "rx_usecs": "rx_coalesce_usecs", "rx_usecs_high": "rx_coalesce_usecs_high", "rx_usecs_irq": "rx_coalesce_usecs_irq", "rx_usecs_low": "rx_coalesce_usecs_low", "rx_frames": "rx_max_coalesced_frames", "rx_frames_high": "rx_max_coalesced_frames_high", "rx_frames_irg": "rx_max_coalesced_frames_irq", "rx_frames_low": "rx_max_coalesced_frames_low", "stats_block_usecs": "stats_block_coalesce_usecs", "tx_usecs": "tx_coalesce_usecs", "tx_usecs_high": "tx_coalesce_usecs_high", "tx_usecs_irq": "tx_coalesce_usecs_irq", "tx_usecs_low": "tx_coalesce_usecs_low", "tx_frames": "tx_max_coalesced_frames", "tx_frames_high": "tx_max_coalesced_frames_high", "tx_frames_irq": "tx_max_coalesced_frames_irq", "tx_frames_low": "tx_max_coalesced_frames_low", "adaptive_rx": "use_adaptive_rx_coalesce", "adaptive_tx": "use_adaptive_tx_coalesce", } ethtool_coalesce_remap = {} for k, v in ethtool_coalesce_map.items(): ethtool_coalesce_remap[v] = k ethtool_ring_map = { "rx": "rx_pending", "rx_max": "rx_max_pending", "rx_mini": "rx_mini_pending", "rx_mini_max": "rx_mini_max_pending", "rx_jumbo": "rx_jumbo_pending", "rx_jumbo_max": "rx_jumbo_max_pending", "tx": "tx_pending", "tx_max": "tx_max_pending", } ethtool_ring_remap = {} for k, v in ethtool_ring_map.items(): ethtool_ring_remap[v] = k # Define the module's virtual name __virtualname__ = "ethtool" def __virtual__(): """ Only load this module if python-ethtool is installed """ if HAS_ETHTOOL: return __virtualname__ else: return ( False, "The ethtool module could not be loaded: ethtool " "python libraries not found.", ) def show_ring(devname): """ Queries the specified network device for rx/tx ring parameter information CLI Example: .. 
code-block:: bash salt '*' ethtool.show_ring <devname> """ try: ring = ethtool.get_ringparam(devname) except OSError: log.error("Ring parameters not supported on %s", devname) return "Not supported" ret = {} for key, value in ring.items(): ret[ethtool_ring_remap[key]] = ring[key] return ret def show_coalesce(devname): """ Queries the specified network device for coalescing information CLI Example: .. code-block:: bash salt '*' ethtool.show_coalesce <devname> """ try: coalesce = ethtool.get_coalesce(devname) except OSError: log.error("Interrupt coalescing not supported on %s", devname) return "Not supported" ret = {} for key, value in coalesce.items(): ret[ethtool_coalesce_remap[key]] = coalesce[key] return ret def show_driver(devname): """ Queries the specified network device for associated driver information CLI Example: .. code-block:: bash salt '*' ethtool.show_driver <devname> """ try: module = ethtool.get_module(devname) except OSError: log.error("Driver information not implemented on %s", devname) return "Not implemented" try: businfo = ethtool.get_businfo(devname) except OSError: log.error("Bus information no available on %s", devname) return "Not available" ret = { "driver": module, "bus_info": businfo, } return ret def set_ring(devname, **kwargs): """ Changes the rx/tx ring parameters of the specified network device CLI Example: .. 
code-block:: bash salt '*' ethtool.set_ring <devname> [rx=N] [rx_mini=N] [rx_jumbo=N] [tx=N] """ try: ring = ethtool.get_ringparam(devname) except OSError: log.error("Ring parameters not supported on %s", devname) return "Not supported" changed = False for param, value in kwargs.items(): if param in ethtool_ring_map: param = ethtool_ring_map[param] if param in ring: if ring[param] != value: ring[param] = value changed = True try: if changed: ethtool.set_ringparam(devname, ring) return show_ring(devname) except OSError: log.error("Invalid ring arguments on %s: %s", devname, ring) return "Invalid arguments" def set_coalesce(devname, **kwargs): """ Changes the coalescing settings of the specified network device CLI Example: .. code-block:: bash salt '*' ethtool.set_coalesce <devname> [adaptive_rx=on|off] [adaptive_tx=on|off] [rx_usecs=N] [rx_frames=N] [rx_usecs_irq=N] [rx_frames_irq=N] [tx_usecs=N] [tx_frames=N] [tx_usecs_irq=N] [tx_frames_irq=N] [stats_block_usecs=N] [pkt_rate_low=N] [rx_usecs_low=N] [rx_frames_low=N] [tx_usecs_low=N] [tx_frames_low=N] [pkt_rate_high=N] [rx_usecs_high=N] [rx_frames_high=N] [tx_usecs_high=N] [tx_frames_high=N] [sample_interval=N] """ try: coalesce = ethtool.get_coalesce(devname) except OSError: log.error("Interrupt coalescing not supported on %s", devname) return "Not supported" changed = False for param, value in kwargs.items(): if param in ethtool_coalesce_map: param = ethtool_coalesce_map[param] if param in coalesce: if coalesce[param] != value: coalesce[param] = value changed = True try: if changed: # pylint: disable=too-many-function-args ethtool.set_coalesce(devname, coalesce) # pylint: enable=too-many-function-args return show_coalesce(devname) except OSError: log.error("Invalid coalesce arguments on %s: %s", devname, coalesce) return "Invalid arguments" def show_offload(devname): """ Queries the specified network device for the state of protocol offload and other features CLI Example: .. 
code-block:: bash salt '*' ethtool.show_offload <devname> """ try: sg = ethtool.get_sg(devname) and "on" or "off" except OSError: sg = "not supported" try: tso = ethtool.get_tso(devname) and "on" or "off" except OSError: tso = "not supported" try: ufo = ethtool.get_ufo(devname) and "on" or "off" except OSError: ufo = "not supported" try: gso = ethtool.get_gso(devname) and "on" or "off" except OSError: gso = "not supported" offload = { "scatter_gather": sg, "tcp_segmentation_offload": tso, "udp_fragmentation_offload": ufo, "generic_segmentation_offload": gso, } return offload def set_offload(devname, **kwargs): """ Changes the offload parameters and other features of the specified network device CLI Example: .. code-block:: bash salt '*' ethtool.set_offload <devname> tcp_segmentation_offload=on """ for param, value in kwargs.items(): if param == "tcp_segmentation_offload": value = value == "on" and 1 or 0 try: ethtool.set_tso(devname, value) except OSError: return "Not supported" return show_offload(devname)
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/ethtool.py
0.528533
0.237167
ethtool.py
pypi
CAN_USE_NAMECHEAP = True try: import salt.utils.namecheap except ImportError: CAN_USE_NAMECHEAP = False __virtualname__ = "namecheap_domains_dns" def __virtual__(): """ Check to make sure requests and xml are installed and requests """ if CAN_USE_NAMECHEAP: return "namecheap_domains_dns" return False def get_hosts(sld, tld): """ Retrieves DNS host record settings for the requested domain. returns a dictionary of information about the requested domain sld SLD of the domain name tld TLD of the domain name CLI Example: .. code-block:: bash salt 'my-minion' namecheap_domains_dns.get_hosts sld tld """ opts = salt.utils.namecheap.get_opts("namecheap.domains.dns.gethosts") opts["TLD"] = tld opts["SLD"] = sld response_xml = salt.utils.namecheap.get_request(opts) if response_xml is None: return {} domaindnsgethostsresult = response_xml.getElementsByTagName( "DomainDNSGetHostsResult" )[0] return salt.utils.namecheap.xml_to_dict(domaindnsgethostsresult) def get_list(sld, tld): """ Gets a list of DNS servers associated with the requested domain. returns a dictionary of information about requested domain sld SLD of the domain name tld TLD of the domain name CLI Example: .. code-block:: bash salt 'my-minion' namecheap_domains_dns.get_list sld tld """ opts = salt.utils.namecheap.get_opts("namecheap.domains.dns.getlist") opts["TLD"] = tld opts["SLD"] = sld response_xml = salt.utils.namecheap.get_request(opts) if response_xml is None: return {} domaindnsgetlistresult = response_xml.getElementsByTagName( "DomainDNSGetListResult" )[0] return salt.utils.namecheap.xml_to_dict(domaindnsgetlistresult) def set_hosts(sld, tld, hosts): """ Sets DNS host records settings for the requested domain. 
returns True if the host records were set successfully sld SLD of the domain name tld TLD of the domain name hosts Must be passed as a list of Python dictionaries, with each dictionary containing the following keys: - **hostname** - **recordtype** - One of ``A``, ``AAAA``, ``CNAME``, ``MX``, ``MXE``, ``TXT``, ``URL``, ``URL301``, or ``FRAME`` - **address** - URL or IP address - **ttl** - An integer between 60 and 60000 (default: ``1800``) Additionally, the ``mxpref`` key can be present, but must be accompanied by an ``emailtype`` key. CLI Example: .. code-block:: bash salt 'my-minion' namecheap_domains_dns.set_hosts sld tld hosts """ opts = salt.utils.namecheap.get_opts("namecheap.domains.dns.setHosts") opts["SLD"] = sld opts["TLD"] = tld i = 1 for hostrecord in hosts: str_i = str(i) opts["HostName" + str_i] = hostrecord["hostname"] opts["RecordType" + str_i] = hostrecord["recordtype"] opts["Address" + str_i] = hostrecord["address"] if "ttl" in hostrecord: opts["TTL" + str_i] = hostrecord["ttl"] if "mxpref" in hostrecord: opts["MXPref" + str_i] = hostrecord["mxpref"] opts["EmailType"] = hostrecord["emailtype"] i += 1 response_xml = salt.utils.namecheap.post_request(opts) if response_xml is None: return False dnsresult = response_xml.getElementsByTagName("DomainDNSSetHostsResult")[0] return salt.utils.namecheap.string_to_value(dnsresult.getAttribute("IsSuccess")) def set_custom(sld, tld, nameservers): """ Sets domain to use custom DNS servers. returns True if the custom nameservers were set successfully sld SLD of the domain name tld TLD of the domain name nameservers array of strings List of nameservers to be associated with this domain CLI Example: .. 
code-block:: bash salt 'my-minion' namecheap_domains_dns.set_custom sld tld nameserver """ opts = salt.utils.namecheap.get_opts("namecheap.domains.dns.setCustom") opts["SLD"] = sld opts["TLD"] = tld opts["Nameservers"] = ",".join(nameservers) response_xml = salt.utils.namecheap.post_request(opts) if response_xml is None: return False dnsresult = response_xml.getElementsByTagName("DomainDNSSetCustomResult")[0] return salt.utils.namecheap.string_to_value(dnsresult.getAttribute("Update")) def set_default(sld, tld): """ Sets domain to use namecheap default DNS servers. Required for free services like Host record management, URL forwarding, email forwarding, dynamic DNS and other value added services. sld SLD of the domain name tld TLD of the domain name Returns ``True`` if the domain was successfully pointed at the default DNS servers. CLI Example: .. code-block:: bash salt 'my-minion' namecheap_domains_dns.set_default sld tld """ opts = salt.utils.namecheap.get_opts("namecheap.domains.dns.setDefault") opts["SLD"] = sld opts["TLD"] = tld response_xml = salt.utils.namecheap.post_request(opts) if response_xml is None: return False dnsresult = response_xml.getElementsByTagName("DomainDNSSetDefaultResult")[0] return salt.utils.namecheap.string_to_value(dnsresult.getAttribute("Updated"))
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/namecheap_domains_dns.py
0.705379
0.156427
namecheap_domains_dns.py
pypi
import copy import logging import salt.utils.args import salt.utils.data import salt.utils.user from salt.exceptions import CommandExecutionError try: import pwd HAS_PWD = True except ImportError: HAS_PWD = False log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = "user" def __virtual__(): """ Set the user module if the kernel is FreeBSD or DragonFly """ if HAS_PWD and __grains__["kernel"] in ("FreeBSD", "DragonFly"): return __virtualname__ return ( False, "The pw_user execution module cannot be loaded: the pwd python module is not" " available or the system is not FreeBSD.", ) def _get_gecos(name): """ Retrieve GECOS field info and return it in dictionary form """ try: gecos_field = pwd.getpwnam(name).pw_gecos.split(",", 3) except KeyError: raise CommandExecutionError("User '{}' does not exist".format(name)) if not gecos_field: return {} else: # Assign empty strings for any unspecified trailing GECOS fields while len(gecos_field) < 4: gecos_field.append("") return { "fullname": salt.utils.data.decode(gecos_field[0]), "roomnumber": salt.utils.data.decode(gecos_field[1]), "workphone": salt.utils.data.decode(gecos_field[2]), "homephone": salt.utils.data.decode(gecos_field[3]), } def _build_gecos(gecos_dict): """ Accepts a dictionary entry containing GECOS field names and their values, and returns a full GECOS comment string, to be used with pw usermod. 
""" return "{},{},{},{}".format( gecos_dict.get("fullname", ""), gecos_dict.get("roomnumber", ""), gecos_dict.get("workphone", ""), gecos_dict.get("homephone", ""), ) def _update_gecos(name, key, value): """ Common code to change a user's GECOS information """ if not isinstance(value, str): value = str(value) pre_info = _get_gecos(name) if not pre_info: return False if value == pre_info[key]: return True gecos_data = copy.deepcopy(pre_info) gecos_data[key] = value cmd = ["pw", "usermod", name, "-c", _build_gecos(gecos_data)] __salt__["cmd.run"](cmd, python_shell=False) post_info = info(name) return _get_gecos(name).get(key) == value def add( name, uid=None, gid=None, groups=None, home=None, shell=None, unique=True, fullname="", roomnumber="", workphone="", homephone="", createhome=True, loginclass=None, **kwargs ): """ Add a user to the minion CLI Example: .. code-block:: bash salt '*' user.add name <uid> <gid> <groups> <home> <shell> """ kwargs = salt.utils.args.clean_kwargs(**kwargs) if salt.utils.data.is_true(kwargs.pop("system", False)): log.warning("pw_user module does not support the 'system' argument") if kwargs: log.warning("Invalid kwargs passed to user.add") if isinstance(groups, str): groups = groups.split(",") cmd = ["pw", "useradd"] if uid: cmd.extend(["-u", uid]) if gid: cmd.extend(["-g", gid]) if groups: cmd.extend(["-G", ",".join(groups)]) if home is not None: cmd.extend(["-d", home]) if createhome is True: cmd.append("-m") if loginclass: cmd.extend(["-L", loginclass]) if shell: cmd.extend(["-s", shell]) if not salt.utils.data.is_true(unique): cmd.append("-o") gecos_field = _build_gecos( { "fullname": fullname, "roomnumber": roomnumber, "workphone": workphone, "homephone": homephone, } ) cmd.extend(["-c", gecos_field]) cmd.extend(["-n", name]) return __salt__["cmd.retcode"](cmd, python_shell=False) == 0 def delete(name, remove=False, force=False): """ Remove a user from the minion CLI Example: .. 
code-block:: bash salt '*' user.delete name remove=True force=True """ if salt.utils.data.is_true(force): log.error( "pw userdel does not support force-deleting user while user is logged in" ) cmd = ["pw", "userdel"] if remove: cmd.append("-r") cmd.extend(["-n", name]) return __salt__["cmd.retcode"](cmd, python_shell=False) == 0 def getent(refresh=False): """ Return the list of all info for all users CLI Example: .. code-block:: bash salt '*' user.getent """ if "user.getent" in __context__ and not refresh: return __context__["user.getent"] ret = [] for data in pwd.getpwall(): ret.append(info(data.pw_name)) __context__["user.getent"] = ret return ret def chuid(name, uid): """ Change the uid for a named user CLI Example: .. code-block:: bash salt '*' user.chuid foo 4376 """ pre_info = info(name) if not pre_info: raise CommandExecutionError("User '{}' does not exist".format(name)) if uid == pre_info["uid"]: return True cmd = ["pw", "usermod", "-u", uid, "-n", name] __salt__["cmd.run"](cmd, python_shell=False) return info(name).get("uid") == uid def chgid(name, gid): """ Change the default group of the user CLI Example: .. code-block:: bash salt '*' user.chgid foo 4376 """ pre_info = info(name) if not pre_info: raise CommandExecutionError("User '{}' does not exist".format(name)) if gid == pre_info["gid"]: return True cmd = ["pw", "usermod", "-g", gid, "-n", name] __salt__["cmd.run"](cmd, python_shell=False) return info(name).get("gid") == gid def chshell(name, shell): """ Change the default shell of the user CLI Example: .. 
code-block:: bash salt '*' user.chshell foo /bin/zsh """ pre_info = info(name) if not pre_info: raise CommandExecutionError("User '{}' does not exist".format(name)) if shell == pre_info["shell"]: return True cmd = ["pw", "usermod", "-s", shell, "-n", name] __salt__["cmd.run"](cmd, python_shell=False) return info(name).get("shell") == shell def chhome(name, home, persist=False): """ Set a new home directory for an existing user name Username to modify home New home directory to set persist : False Set to ``True`` to prevent configuration files in the new home directory from being overwritten by the files from the skeleton directory. CLI Example: .. code-block:: bash salt '*' user.chhome foo /home/users/foo True """ pre_info = info(name) if not pre_info: raise CommandExecutionError("User '{}' does not exist".format(name)) if home == pre_info["home"]: return True cmd = ["pw", "usermod", name, "-d", home] if persist: cmd.append("-m") __salt__["cmd.run"](cmd, python_shell=False) return info(name).get("home") == home def chgroups(name, groups, append=False): """ Change the groups to which a user belongs name Username to modify groups List of groups to set for the user. Can be passed as a comma-separated list or a Python list. append : False Set to ``True`` to append these groups to the user's existing list of groups. Otherwise, the specified groups will replace any existing groups for the user. CLI Example: .. code-block:: bash salt '*' user.chgroups foo wheel,root True """ if isinstance(groups, str): groups = groups.split(",") ugrps = set(list_groups(name)) if ugrps == set(groups): return True if append: groups += ugrps cmd = ["pw", "usermod", "-G", ",".join(groups), "-n", name] return __salt__["cmd.retcode"](cmd, python_shell=False) == 0 def chfullname(name, fullname): """ Change the user's Full Name CLI Example: .. 
code-block:: bash salt '*' user.chfullname foo "Foo Bar" """ return _update_gecos(name, "fullname", fullname) def chroomnumber(name, roomnumber): """ Change the user's Room Number CLI Example: .. code-block:: bash salt '*' user.chroomnumber foo 123 """ return _update_gecos(name, "roomnumber", roomnumber) def chworkphone(name, workphone): """ Change the user's Work Phone CLI Example: .. code-block:: bash salt '*' user.chworkphone foo "7735550123" """ return _update_gecos(name, "workphone", workphone) def chhomephone(name, homephone): """ Change the user's Home Phone CLI Example: .. code-block:: bash salt '*' user.chhomephone foo "7735551234" """ return _update_gecos(name, "homephone", homephone) def chloginclass(name, loginclass, root=None): """ Change the default login class of the user .. versionadded:: 2016.3.5 CLI Example: .. code-block:: bash salt '*' user.chloginclass foo staff """ if loginclass == get_loginclass(name): return True cmd = ["pw", "usermod", "-L", "{}".format(loginclass), "-n", "{}".format(name)] __salt__["cmd.run"](cmd, python_shell=False) return get_loginclass(name) == loginclass def info(name): """ Return user information CLI Example: .. code-block:: bash salt '*' user.info root """ ret = {} try: data = pwd.getpwnam(name) ret["gid"] = data.pw_gid ret["groups"] = list_groups(name) ret["home"] = data.pw_dir ret["name"] = data.pw_name ret["passwd"] = data.pw_passwd ret["shell"] = data.pw_shell ret["uid"] = data.pw_uid # Put GECOS info into a list gecos_field = data.pw_gecos.split(",", 3) # Assign empty strings for any unspecified GECOS fields while len(gecos_field) < 4: gecos_field.append("") ret["fullname"] = gecos_field[0] ret["roomnumber"] = gecos_field[1] ret["workphone"] = gecos_field[2] ret["homephone"] = gecos_field[3] except KeyError: return {} return ret def get_loginclass(name): """ Get the login class of the user .. versionadded:: 2016.3.0 CLI Example: .. 
code-block:: bash salt '*' user.get_loginclass foo """ userinfo = __salt__["cmd.run_stdout"](["pw", "usershow", "-n", name]) userinfo = userinfo.split(":") return userinfo[4] if len(userinfo) == 10 else "" def list_groups(name): """ Return a list of groups the named user belongs to CLI Example: .. code-block:: bash salt '*' user.list_groups foo """ return salt.utils.user.get_group_list(name) def list_users(): """ Return a list of all users CLI Example: .. code-block:: bash salt '*' user.list_users """ return sorted(user.pw_name for user in pwd.getpwall()) def rename(name, new_name): """ Change the username for a named user CLI Example: .. code-block:: bash salt '*' user.rename name new_name """ current_info = info(name) if not current_info: raise CommandExecutionError("User '{}' does not exist".format(name)) new_info = info(new_name) if new_info: raise CommandExecutionError("User '{}' already exists".format(new_name)) cmd = ["pw", "usermod", "-l", new_name, "-n", name] __salt__["cmd.run"](cmd) post_info = info(new_name) if post_info["name"] != current_info["name"]: return post_info["name"] == new_name return False
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/pw_user.py
0.438785
0.171998
pw_user.py
pypi
import salt.utils.functools import salt.utils.json import salt.utils.pagerduty def __virtual__(): """ No dependencies outside of what Salt itself requires """ return True def list_services(profile=None, api_key=None): """ List services belonging to this account CLI Example: .. code-block:: bash salt myminion pagerduty.list_services my-pagerduty-account """ return salt.utils.pagerduty.list_items( "services", "name", __salt__["config.option"](profile), api_key, opts=__opts__ ) def list_incidents(profile=None, api_key=None): """ List incidents belonging to this account CLI Example: .. code-block:: bash salt myminion pagerduty.list_incidents my-pagerduty-account """ return salt.utils.pagerduty.list_items( "incidents", "id", __salt__["config.option"](profile), api_key, opts=__opts__ ) def list_users(profile=None, api_key=None): """ List users belonging to this account CLI Example: .. code-block:: bash salt myminion pagerduty.list_users my-pagerduty-account """ return salt.utils.pagerduty.list_items( "users", "id", __salt__["config.option"](profile), api_key, opts=__opts__ ) def list_schedules(profile=None, api_key=None): """ List schedules belonging to this account CLI Example: .. code-block:: bash salt myminion pagerduty.list_schedules my-pagerduty-account """ return salt.utils.pagerduty.list_items( "schedules", "id", __salt__["config.option"](profile), api_key, opts=__opts__ ) def list_windows(profile=None, api_key=None): """ List maintenance windows belonging to this account CLI Example: .. 
code-block:: bash salt myminion pagerduty.list_windows my-pagerduty-account salt myminion pagerduty.list_maintenance_windows my-pagerduty-account """ return salt.utils.pagerduty.list_items( "maintenance_windows", "id", __salt__["config.option"](profile), api_key, opts=__opts__, ) # The long version, added for consistency list_maintenance_windows = salt.utils.functools.alias_function( list_windows, "list_maintenance_windows" ) def list_policies(profile=None, api_key=None): """ List escalation policies belonging to this account CLI Example: .. code-block:: bash salt myminion pagerduty.list_policies my-pagerduty-account salt myminion pagerduty.list_escalation_policies my-pagerduty-account """ return salt.utils.pagerduty.list_items( "escalation_policies", "id", __salt__["config.option"](profile), api_key, opts=__opts__, ) # The long version, added for consistency list_escalation_policies = salt.utils.functools.alias_function( list_policies, "list_escalation_policies" ) def create_event( service_key=None, description=None, details=None, incident_key=None, profile=None ): """ Create an event in PagerDuty. Designed for use in states. CLI Example: .. code-block:: yaml salt myminion pagerduty.create_event <service_key> <description> <details> \ profile=my-pagerduty-account The following parameters are required: service_key This key can be found by using pagerduty.list_services. description This is a short description of the event. details This can be a more detailed description of the event. profile This refers to the configuration profile to use to connect to the PagerDuty service. 
""" trigger_url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" if isinstance(details, str): details = salt.utils.yaml.safe_load(details) if isinstance(details, str): details = {"details": details} ret = salt.utils.json.loads( salt.utils.pagerduty.query( method="POST", profile_dict=__salt__["config.option"](profile), api_key=service_key, data={ "service_key": service_key, "incident_key": incident_key, "event_type": "trigger", "description": description, "details": details, }, url=trigger_url, opts=__opts__, ) ) return ret
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/pagerduty.py
0.699768
0.184951
pagerduty.py
pypi
import fnmatch import glob import logging import os import re import shlex import salt.utils.systemd __func_alias__ = {"reload_": "reload"} # Define the module's virtual name __virtualname__ = "service" log = logging.getLogger(__name__) def __virtual__(): """ Only work on Debian and when systemd isn't running """ if ( __grains__["os"] in ( "Debian", "Raspbian", "Devuan", "NILinuxRT", ) and not salt.utils.systemd.booted(__context__) ): return __virtualname__ else: return ( False, "The debian_service module could not be loaded: " "unsupported OS family and/or systemd running.", ) def _service_cmd(*args): return "service {} {}".format(args[0], " ".join(args[1:])) def _get_runlevel(): """ returns the current runlevel """ out = __salt__["cmd.run"]("runlevel") # unknown can be returned while inside a container environment, since # this is due to a lack of init, it should be safe to assume runlevel # 2, which is Debian's default. If not, all service related states # will throw an out of range exception here which will cause # other functions to fail. if "unknown" in out: return "2" else: return out.split()[1] def get_enabled(): """ Return a list of service that are enabled on boot CLI Example: .. code-block:: bash salt '*' service.get_enabled """ prefix = "/etc/rc[S{}].d/S".format(_get_runlevel()) ret = set() for line in [x.rsplit(os.sep, 1)[-1] for x in glob.glob("{}*".format(prefix))]: ret.add(re.split(r"\d+", line)[-1]) return sorted(ret) def get_disabled(): """ Return a set of services that are installed but disabled CLI Example: .. code-block:: bash salt '*' service.get_disabled """ return sorted(set(get_all()) - set(get_enabled())) def available(name): """ Returns ``True`` if the specified service is available, otherwise returns ``False``. CLI Example: .. code-block:: bash salt '*' service.available sshd """ return name in get_all() def missing(name): """ The inverse of service.available. 
Returns ``True`` if the specified service is not available, otherwise returns ``False``. CLI Example: .. code-block:: bash salt '*' service.missing sshd """ return name not in get_all() def get_all(): """ Return all available boot services CLI Example: .. code-block:: bash salt '*' service.get_all """ ret = set() lines = glob.glob("/etc/init.d/*") for line in lines: service = line.split("/etc/init.d/")[1] # Remove README. If it's an enabled service, it will be added back in. if service != "README": ret.add(service) return sorted(ret | set(get_enabled())) def start(name): """ Start the specified service CLI Example: .. code-block:: bash salt '*' service.start <service name> """ cmd = _service_cmd(name, "start") return not __salt__["cmd.retcode"](cmd) def stop(name): """ Stop the specified service CLI Example: .. code-block:: bash salt '*' service.stop <service name> """ cmd = _service_cmd(name, "stop") return not __salt__["cmd.retcode"](cmd) def restart(name): """ Restart the named service CLI Example: .. code-block:: bash salt '*' service.restart <service name> """ cmd = _service_cmd(name, "restart") return not __salt__["cmd.retcode"](cmd) def reload_(name): """ Reload the named service CLI Example: .. code-block:: bash salt '*' service.reload <service name> """ cmd = _service_cmd(name, "reload") return not __salt__["cmd.retcode"](cmd) def force_reload(name): """ Force-reload the named service CLI Example: .. code-block:: bash salt '*' service.force_reload <service name> """ cmd = _service_cmd(name, "force-reload") return not __salt__["cmd.retcode"](cmd) def status(name, sig=None): """ Return the status for a service. If the name contains globbing, a dict mapping service name to True/False values is returned. .. versionchanged:: 2018.3.0 The service name can now be a glob (e.g. 
``salt*``) Args: name (str): The name of the service to check sig (str): Signature to use to find the service via ps Returns: bool: True if running, False otherwise dict: Maps service name to True if running, False otherwise CLI Example: .. code-block:: bash salt '*' service.status <service name> [service signature] """ if sig: return bool(__salt__["status.pid"](sig)) contains_globbing = bool(re.search(r"\*|\?|\[.+\]", name)) if contains_globbing: services = fnmatch.filter(get_all(), name) else: services = [name] results = {} for service in services: cmd = _service_cmd(service, "status") results[service] = not __salt__["cmd.retcode"](cmd, ignore_retcode=True) if contains_globbing: return results return results[name] def enable(name, **kwargs): """ Enable the named service to start at boot CLI Example: .. code-block:: bash salt '*' service.enable <service name> """ cmd = "insserv {0} && update-rc.d {0} enable".format(shlex.quote(name)) return not __salt__["cmd.retcode"](cmd, python_shell=True) def disable(name, **kwargs): """ Disable the named service to start at boot CLI Example: .. code-block:: bash salt '*' service.disable <service name> """ cmd = "update-rc.d {} disable".format(name) return not __salt__["cmd.retcode"](cmd) def enabled(name, **kwargs): """ Return True if the named service is enabled, false otherwise CLI Example: .. code-block:: bash salt '*' service.enabled <service name> """ return name in get_enabled() def disabled(name): """ Return True if the named service is disabled, false otherwise CLI Example: .. code-block:: bash salt '*' service.disabled <service name> """ return name in get_disabled()
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/debian_service.py
0.604282
0.173849
debian_service.py
pypi