code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import collections
def __virtual__():
    """
    Only load if the mssql module is present
    """
    if "mssql.version" not in __salt__:
        return (False, "mssql module could not be loaded")
    return True
def _normalize_options(options):
if type(options) in [dict, collections.OrderedDict]:
return ["{}={}".format(k, v) for k, v in options.items()]
if type(options) is list and (not options or type(options[0]) is str):
return options
# Invalid options
if type(options) is not list or type(options[0]) not in [
dict,
collections.OrderedDict,
]:
return []
return [o for d in options for o in _normalize_options(d)]
def present(name, containment="NONE", options=None, **kwargs):
    """
    Ensure that the named database is present with the specified options

    name
        The name of the database to manage
    containment
        Defaults to NONE
    options
        Can be a list of strings, a dictionary, or a list of dictionaries
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    if __salt__["mssql.db_exists"](name, **kwargs):
        ret["comment"] = (
            "Database {} is already present"
            " (Not going to try to set its options)".format(name)
        )
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Database {} is set to be added".format(name)
        return ret

    created = __salt__["mssql.db_create"](
        name,
        containment=containment,
        new_database_options=_normalize_options(options),
        **kwargs
    )
    # mssql.db_create returns an error string on failure; non-empty strings
    # are truthy, so compare against True explicitly.
    if created is not True:
        ret["result"] = False
        ret["comment"] += "Database {} failed to be created: {}".format(name, created)
        return ret
    ret["comment"] += "Database {} has been added".format(name)
    ret["changes"][name] = "Present"
    return ret
def absent(name, **kwargs):
    """
    Ensure that the named database is absent

    name
        The name of the database to remove
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    if not __salt__["mssql.db_exists"](name):
        ret["comment"] = "Database {} is not present".format(name)
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Database {} is set to be removed".format(name)
        return ret
    if __salt__["mssql.db_remove"](name, **kwargs):
        ret["changes"][name] = "Absent"
        ret["comment"] = "Database {} has been removed".format(name)
        return ret
    ret["result"] = False
    ret["comment"] = "Database {} failed to be removed".format(name)
    return ret
def __virtual__():
    """
    Only load if the influxdb08 module is available
    """
    if "influxdb08.db_exists" not in __salt__:
        return (False, "influxdb08 module could not be loaded")
    return "influxdb08_database"
def present(name, user=None, password=None, host=None, port=None):
    """
    Ensure that the named database is present

    name
        The name of the database to create
    user
        The user to connect as
    password
        The password of the user
    host
        The host to connect to
    port
        The port to connect to
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    # Nothing to do if the database already exists.
    if __salt__["influxdb08.db_exists"](name, user, password, host, port):
        ret["comment"] = "Database {} is already present, so cannot be created".format(
            name
        )
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Database {} is absent and needs to be created".format(name)
        return ret
    if not __salt__["influxdb08.db_create"](name, user, password, host, port):
        ret["result"] = False
        ret["comment"] = "Failed to create database {}".format(name)
        return ret
    ret["changes"][name] = "Present"
    ret["comment"] = "Database {} has been created".format(name)
    return ret
def absent(name, user=None, password=None, host=None, port=None):
    """
    Ensure that the named database is absent

    name
        The name of the database to remove
    user
        The user to connect as (must be able to remove the database)
    password
        The password of the user
    host
        The host to connect to
    port
        The port to connect to
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    # Nothing to do if the database does not exist.
    if not __salt__["influxdb08.db_exists"](name, user, password, host, port):
        ret["comment"] = "Database {} is not present, so it cannot be removed".format(
            name
        )
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Database {} is present and needs to be removed".format(name)
        return ret
    if not __salt__["influxdb08.db_remove"](name, user, password, host, port):
        ret["result"] = False
        ret["comment"] = "Failed to remove database {}".format(name)
        return ret
    ret["changes"][name] = "Absent"
    ret["comment"] = "Database {} has been removed".format(name)
    return ret
from salt.exceptions import CommandExecutionError, SaltInvocationError

__docformat__ = "restructuredtext en"
# Virtual module name under which this state module is exposed.
__virtualname__ = "lxd_container"
# LXD container status codes, compared against ``container.status_code``.
# Keep in sync with: https://github.com/lxc/lxd/blob/master/shared/status.go
CONTAINER_STATUS_RUNNING = 103
CONTAINER_STATUS_FROZEN = 110
CONTAINER_STATUS_STOPPED = 102
def __virtual__():
    """
    Only load if the lxd module is available in __salt__
    """
    if "lxd.version" not in __salt__:
        return (False, "lxd module could not be loaded")
    return __virtualname__
def present(
    name,
    running=None,
    source=None,
    profiles=None,
    config=None,
    devices=None,
    architecture="x86_64",
    ephemeral=False,
    restart_on_change=False,
    remote_addr=None,
    cert=None,
    key=None,
    verify_cert=True,
):
    """
    Create the named container if it does not exist

    name
        The name of the container to be created

    running : None
        * If ``True``, ensure that the container is running
        * If ``False``, ensure that the container is stopped
        * If ``None``, do nothing with regards to the running state of the
          container

    source : None
        Can be either a string containing an image alias:

        .. code-block:: none

            "xenial/amd64"

        or an dict with type "image" with alias:

        .. code-block:: python

            {"type": "image",
             "alias": "xenial/amd64"}

        or image with "fingerprint":

        .. code-block:: python

            {"type": "image",
             "fingerprint": "SHA-256"}

        or image with "properties":

        .. code-block:: python

            {"type": "image",
             "properties": {
                "os": "ubuntu",
                "release": "14.04",
                "architecture": "x86_64"
             }}

        or none:

        .. code-block:: python

            {"type": "none"}

        or copy:

        .. code-block:: python

            {"type": "copy",
             "source": "my-old-container"}

    profiles : ['default']
        List of profiles to apply on this container

    config :
        A config dict or None (None = unset).

        Can also be a list:

        .. code-block:: python

            [{'key': 'boot.autostart', 'value': 1},
             {'key': 'security.privileged', 'value': '1'}]

    devices :
        A device dict or None (None = unset).

    architecture : 'x86_64'
        Can be one of the following:

        * unknown
        * i686
        * x86_64
        * armv7l
        * aarch64
        * ppc
        * ppc64
        * ppc64le
        * s390x

    ephemeral : False
        Destroy this container after stop?

    restart_on_change : False
        Restart the container when we detect changes on the config or
        its devices?

    remote_addr :
        An URL to a remote Server, you also have to give cert and key if you
        provide remote_addr!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert, this is by default True
        but in the most cases you want to set it off as LXD
        normally uses self-signed certificates.
    """
    if profiles is None:
        profiles = ["default"]
    if source is None:
        source = {}

    ret = {
        "name": name,
        "running": running,
        "profiles": profiles,
        "source": source,
        "config": config,
        "devices": devices,
        "architecture": architecture,
        "ephemeral": ephemeral,
        "restart_on_change": restart_on_change,
        "remote_addr": remote_addr,
        "cert": cert,
        "key": key,
        "verify_cert": verify_cert,
        "changes": {},
    }

    container = None
    try:
        container = __salt__["lxd.container_get"](
            name, remote_addr, cert, key, verify_cert, _raw=True
        )
    except CommandExecutionError as e:
        return _error(ret, str(e))
    except SaltInvocationError:
        # Container not found; we will create it below.
        pass

    if container is None:
        if __opts__["test"]:
            # Test is on, just return that we would create the container
            msg = 'Would create the container "{}"'.format(name)
            ret["changes"] = {"created": msg}
            if running is True:
                msg = msg + " and start it."
                # BUGFIX: previously ret["changes"] was rebuilt after this
                # assignment, discarding the "started" entry.
                ret["changes"]["started"] = 'Would start the container "{}"'.format(
                    name
                )
            return _unchanged(ret, msg)

        # Create the container
        try:
            __salt__["lxd.container_create"](
                name,
                source,
                profiles,
                config,
                devices,
                architecture,
                ephemeral,
                True,  # Wait
                remote_addr,
                cert,
                key,
                verify_cert,
            )
        except CommandExecutionError as e:
            return _error(ret, str(e))

        msg = 'Created the container "{}"'.format(name)
        ret["changes"] = {"created": msg}

        if running is True:
            try:
                __salt__["lxd.container_start"](
                    name, remote_addr, cert, key, verify_cert
                )
            except CommandExecutionError as e:
                return _error(ret, str(e))
            msg = msg + " and started it."
            # BUGFIX: add to the existing changes instead of replacing the
            # dict, so the "created" entry is preserved.
            ret["changes"]["started"] = 'Started the container "{}"'.format(name)
        return _success(ret, msg)

    # Container exists, lets check for differences
    new_profiles = set(map(str, profiles))
    old_profiles = set(map(str, container.profiles))

    container_changed = False
    profile_changes = []
    # Removed profiles
    for k in old_profiles.difference(new_profiles):
        if not __opts__["test"]:
            profile_changes.append('Removed profile "{}"'.format(k))
            old_profiles.discard(k)
        else:
            profile_changes.append('Would remove profile "{}"'.format(k))

    # Added profiles
    for k in new_profiles.difference(old_profiles):
        if not __opts__["test"]:
            profile_changes.append('Added profile "{}"'.format(k))
            old_profiles.add(k)
        else:
            profile_changes.append('Would add profile "{}"'.format(k))

    if profile_changes:
        container_changed = True
        ret["changes"]["profiles"] = profile_changes
        container.profiles = list(old_profiles)

    # Config and devices changes
    config, devices = __salt__["lxd.normalize_input_values"](config, devices)
    changes = __salt__["lxd.sync_config_devices"](
        container, config, devices, __opts__["test"]
    )
    if changes:
        container_changed = True
        ret["changes"].update(changes)

    is_running = container.status_code == CONTAINER_STATUS_RUNNING

    if not __opts__["test"]:
        try:
            __salt__["lxd.pylxd_save_object"](container)
        except CommandExecutionError as e:
            return _error(ret, str(e))

    if running != is_running:
        if running is True:
            if __opts__["test"]:
                # BUGFIX: record in ret["changes"]; the local "changes" dict
                # was already merged above, so later additions to it were lost.
                ret["changes"]["running"] = "Would start the container"
                return _unchanged(
                    ret,
                    'Container "{}" would get changed and started.'.format(name),
                )
            else:
                container.start(wait=True)
                ret["changes"]["running"] = "Started the container"
        elif running is False:
            if __opts__["test"]:
                # BUGFIX: message grammar ("Would stopped" -> "Would stop")
                # and record in ret["changes"] as above.
                ret["changes"]["stopped"] = "Would stop the container"
                return _unchanged(
                    ret,
                    'Container "{}" would get changed and stopped.'.format(name),
                )
            else:
                container.stop(wait=True)
                ret["changes"]["stopped"] = "Stopped the container"

    if (
        (running is True or running is None)
        and is_running
        and restart_on_change
        and container_changed
    ):
        if __opts__["test"]:
            ret["changes"]["restarted"] = "Would restart the container"
            return _unchanged(ret, 'Would restart the container "{}"'.format(name))
        else:
            container.restart(wait=True)
            ret["changes"]["restarted"] = 'Container "{}" has been restarted'.format(
                name
            )
            return _success(ret, 'Container "{}" has been restarted'.format(name))

    if not container_changed:
        return _success(ret, "No changes")

    if __opts__["test"]:
        return _unchanged(ret, 'Container "{}" would get changed.'.format(name))

    return _success(ret, "{} changes".format(len(ret["changes"].keys())))
def absent(name, stop=False, remote_addr=None, cert=None, key=None, verify_cert=True):
    """
    Ensure a LXD container is not present, destroying it if present

    name :
        The name of the container to destroy

    stop :
        stop before destroying, default: false

    remote_addr :
        An URL to a remote Server, you also have to give cert and key if you
        provide remote_addr!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert, this is by default True
        but in the most cases you want to set it off as LXD
        normally uses self-signed certificates.
    """
    ret = {
        "name": name,
        "stop": stop,
        "remote_addr": remote_addr,
        "cert": cert,
        "key": key,
        "verify_cert": verify_cert,
        "changes": {},
    }
    try:
        container = __salt__["lxd.container_get"](
            name, remote_addr, cert, key, verify_cert, _raw=True
        )
    except CommandExecutionError as e:
        return _error(ret, str(e))
    except SaltInvocationError:
        # No such container on the remote: nothing to do.
        return _success(ret, 'Container "{}" not found.'.format(name))

    if __opts__["test"]:
        msg = 'Container "{}" would get deleted.'.format(name)
        ret["changes"] = {"removed": msg}
        return _unchanged(ret, msg)

    if stop and container.status_code == CONTAINER_STATUS_RUNNING:
        container.stop(wait=True)
    container.delete(wait=True)

    msg = 'Container "{}" has been deleted.'.format(name)
    ret["changes"]["deleted"] = msg
    return _success(ret, msg)
def running(
    name, restart=False, remote_addr=None, cert=None, key=None, verify_cert=True
):
    """
    Ensure a LXD container is running and restart it if restart is True

    name :
        The name of the container to start/restart.

    restart :
        restart the container if it is already started.

    remote_addr :
        An URL to a remote Server, you also have to give cert and key if you
        provide remote_addr!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert, this is by default True
        but in the most cases you want to set it off as LXD
        normally uses self-signed certificates.
    """
    ret = {
        "name": name,
        "restart": restart,
        "remote_addr": remote_addr,
        "cert": cert,
        "key": key,
        "verify_cert": verify_cert,
        "changes": {},
    }
    try:
        container = __salt__["lxd.container_get"](
            name, remote_addr, cert, key, verify_cert, _raw=True
        )
    except CommandExecutionError as e:
        return _error(ret, str(e))
    except SaltInvocationError:
        return _error(ret, 'Container "{}" not found'.format(name))

    if container.status_code == CONTAINER_STATUS_RUNNING:
        if not restart:
            return _success(ret, 'The container "{}" is already running'.format(name))
        if __opts__["test"]:
            msg = 'Would restart the container "{}"'.format(name)
            ret["changes"]["restarted"] = msg
            return _unchanged(ret, msg)
        container.restart(wait=True)
        msg = 'Restarted the container "{}"'.format(name)
        ret["changes"]["restarted"] = msg
        return _success(ret, msg)

    if __opts__["test"]:
        msg = 'Would start the container "{}"'.format(name)
        ret["changes"]["started"] = msg
        return _unchanged(ret, msg)

    container.start(wait=True)
    msg = 'Started the container "{}"'.format(name)
    ret["changes"]["started"] = msg
    return _success(ret, msg)
def frozen(name, start=True, remote_addr=None, cert=None, key=None, verify_cert=True):
    """
    Ensure a LXD container is frozen, start and freeze it if start is true

    name :
        The name of the container to freeze

    start :
        start and freeze it

    remote_addr :
        An URL to a remote Server, you also have to give cert and key if you
        provide remote_addr!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert, this is by default True
        but in the most cases you want to set it off as LXD
        normally uses self-signed certificates.
    """
    ret = {
        "name": name,
        "start": start,
        "remote_addr": remote_addr,
        "cert": cert,
        "key": key,
        "verify_cert": verify_cert,
        "changes": {},
    }
    try:
        container = __salt__["lxd.container_get"](
            name, remote_addr, cert, key, verify_cert, _raw=True
        )
    except CommandExecutionError as e:
        return _error(ret, str(e))
    except SaltInvocationError:
        # Container not found
        return _error(ret, 'Container "{}" not found'.format(name))

    if container.status_code == CONTAINER_STATUS_FROZEN:
        # BUGFIX: typo in message ("alredy" -> "already").
        return _success(ret, 'Container "{}" is already frozen'.format(name))

    is_running = container.status_code == CONTAINER_STATUS_RUNNING
    if not is_running and not start:
        return _error(
            ret,
            'Container "{}" is not running and start is False, cannot freeze it'.format(
                name
            ),
        )
    if not is_running:
        # start must be True here (the combined False case returned above),
        # so start the container before freezing it.
        if __opts__["test"]:
            ret["changes"][
                "started"
            ] = 'Would start the container "{}" and freeze it after'.format(name)
            return _unchanged(ret, ret["changes"]["started"])
        container.start(wait=True)
        # BUGFIX: message grammar ("Start" -> "Started"), consistent with
        # running().
        ret["changes"]["started"] = 'Started the container "{}"'.format(name)

    if __opts__["test"]:
        ret["changes"]["frozen"] = 'Would freeze the container "{}"'.format(name)
        return _unchanged(ret, ret["changes"]["frozen"])

    container.freeze(wait=True)
    ret["changes"]["frozen"] = 'Froze the container "{}"'.format(name)
    return _success(ret, ret["changes"]["frozen"])
def stopped(name, kill=False, remote_addr=None, cert=None, key=None, verify_cert=True):
    """
    Ensure a LXD container is stopped, kill it if kill is true else stop it

    name :
        The name of the container to stop

    kill :
        kill if true

    remote_addr :
        An URL to a remote Server, you also have to give cert and key if you
        provide remote_addr!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert, this is by default True
        but in the most cases you want to set it off as LXD
        normally uses self-signed certificates.
    """
    ret = {
        "name": name,
        "kill": kill,
        "remote_addr": remote_addr,
        "cert": cert,
        "key": key,
        "verify_cert": verify_cert,
        "changes": {},
    }
    try:
        container = __salt__["lxd.container_get"](
            name, remote_addr, cert, key, verify_cert, _raw=True
        )
    except CommandExecutionError as e:
        return _error(ret, str(e))
    except SaltInvocationError:
        return _error(ret, 'Container "{}" not found'.format(name))

    if container.status_code == CONTAINER_STATUS_STOPPED:
        return _success(ret, 'Container "{}" is already stopped'.format(name))

    if __opts__["test"]:
        msg = 'Would stop the container "{}"'.format(name)
        ret["changes"]["stopped"] = msg
        return _unchanged(ret, msg)

    # force=kill maps the "kill" flag onto a hard stop.
    container.stop(force=kill, wait=True)
    msg = 'Stopped the container "{}"'.format(name)
    ret["changes"]["stopped"] = msg
    return _success(ret, msg)
def migrated(
    name,
    remote_addr,
    cert,
    key,
    verify_cert,
    src_remote_addr,
    stop_and_start=False,
    src_cert=None,
    src_key=None,
    src_verify_cert=None,
):
    """Ensure a container is migrated to another host
    If the container is running, it either must be shut down
    first (use stop_and_start=True) or criu must be installed
    on the source and destination machines.
    For this operation both certs need to be authenticated,
    use :mod:`lxd.authenticate <salt.states.lxd.authenticate`
    to authenticate your cert(s).
    name :
        The container to migrate
    remote_addr :
        An URL to the destination remote Server
        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock
    cert :
        PEM Formatted SSL Zertifikate.
        Examples:
            ~/.config/lxc/client.crt
    key :
        PEM Formatted SSL Key.
        Examples:
            ~/.config/lxc/client.key
    verify_cert : True
        Wherever to verify the cert, this is by default True
        but in the most cases you want to set it off as LXD
        normally uses self-signed certificates.
    src_remote_addr :
        An URL to the source remote Server
        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock
    stop_and_start:
        Stop before migrating and start after
    src_cert :
        PEM Formatted SSL Zertifikate, if None we copy "cert"
        Examples:
            ~/.config/lxc/client.crt
    src_key :
        PEM Formatted SSL Key, if None we copy "key"
        Examples:
            ~/.config/lxc/client.key
    src_verify_cert :
        Wherever to verify the cert, if None we copy "verify_cert"
    """
    ret = {
        "name": name,
        "remote_addr": remote_addr,
        "cert": cert,
        "key": key,
        "verify_cert": verify_cert,
        "src_remote_addr": src_remote_addr,
        # NOTE(review): key name "src_and_start" looks like a typo for
        # "stop_and_start"; kept as-is since callers may read this key —
        # confirm before renaming.
        "src_and_start": stop_and_start,
        "src_cert": src_cert,
        "src_key": src_key,
        "changes": {},
    }
    # First check the destination: if the container already exists there,
    # the migration is considered done.
    dest_container = None
    try:
        dest_container = __salt__["lxd.container_get"](
            name, remote_addr, cert, key, verify_cert, _raw=True
        )
    except CommandExecutionError as e:
        return _error(ret, str(e))
    except SaltInvocationError as e:
        # Destination container not found
        pass
    if dest_container is not None:
        return _success(ret, 'Container "{}" exists on the destination'.format(name))
    # Default the source-side cert verification to the destination setting.
    if src_verify_cert is None:
        src_verify_cert = verify_cert
    # Verify the container actually exists on the source before migrating.
    try:
        __salt__["lxd.container_get"](
            name, src_remote_addr, src_cert, src_key, src_verify_cert, _raw=True
        )
    except CommandExecutionError as e:
        return _error(ret, str(e))
    except SaltInvocationError as e:
        # Container not found
        return _error(ret, 'Source Container "{}" not found'.format(name))
    if __opts__["test"]:
        ret["changes"][
            "migrated"
        ] = 'Would migrate the container "{}" from "{}" to "{}"'.format(
            name, src_remote_addr, remote_addr
        )
        return _unchanged(ret, ret["changes"]["migrated"])
    try:
        __salt__["lxd.container_migrate"](
            name,
            stop_and_start,
            remote_addr,
            cert,
            key,
            verify_cert,
            src_remote_addr,
            src_cert,
            src_key,
            src_verify_cert,
        )
    except CommandExecutionError as e:
        return _error(ret, str(e))
    ret["changes"]["migrated"] = 'Migrated the container "{}" from "{}" to "{}"'.format(
        name, src_remote_addr, remote_addr
    )
    return _success(ret, ret["changes"]["migrated"])
def _success(ret, success_msg):
ret["result"] = True
ret["comment"] = success_msg
if "changes" not in ret:
ret["changes"] = {}
return ret
def _unchanged(ret, msg):
ret["result"] = None
ret["comment"] = msg
if "changes" not in ret:
ret["changes"] = {}
return ret
def _error(ret, err_msg):
ret["result"] = False
ret["comment"] = err_msg
if "changes" not in ret:
ret["changes"] = {}
return ret | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/lxd_container.py | 0.706798 | 0.187783 | lxd_container.py | pypi |
def present(name, **kwargs):
    """
    Ensure a job is present in the schedule
    name
        The unique name that is given to the scheduled job.
    seconds
        The scheduled job will be executed after the specified
        number of seconds have passed.
    minutes
        The scheduled job will be executed after the specified
        number of minutes have passed.
    hours
        The scheduled job will be executed after the specified
        number of hours have passed.
    days
        The scheduled job will be executed after the specified
        number of days have passed.
    when
        This will schedule the job at the specified time(s).
        The when parameter must be a single value or a dictionary
        with the date string(s) using the dateutil format.
        Requires python-dateutil.
    cron
        This will schedule the job at the specified time(s)
        using the crontab format.
        Requires python-croniter.
    run_on_start
        Whether the scheduled job will run when Salt minion starts, or the job will be
        skipped **once** and run at the next scheduled run. Value should be a
        boolean.
    function
        The function that should be executed by the scheduled job.
    job_args
        The arguments that will be used by the scheduled job.
    job_kwargs
        The keyword arguments that will be used by the scheduled job.
    maxrunning
        Ensure that there are no more than N copies of a particular job running.
    jid_include
        Include the job into the job cache.
    splay
        The amount of time in seconds to splay a scheduled job.
        Can be specified as a single value in seconds or as a dictionary
        range with 'start' and 'end' values.
    range
        This will schedule the command within the range specified.
        The range parameter must be a dictionary with the date strings
        using the dateutil format. Requires python-dateutil.
    once
        This will schedule a job to run once on the specified date.
    once_fmt
        The default date format is ISO 8601 but can be overridden by
        also specifying the ``once_fmt`` option.
    enabled
        Whether the scheduled job should be enabled or disabled. Value should be a boolean.
    return_job
        Whether to return information to the Salt master upon job completion.
    metadata
        Using the metadata parameter special values can be associated with
        a scheduled job. These values are not used in the execution of the job,
        but can be used to search for specific jobs later if combined with the
        return_job parameter. The metadata parameter must be specified as a
        dictionary, othewise it will be ignored.
    returner
        The returner to use to return the results of the scheduled job.
    return_config
        The alternative configuration to use for returner configuration options.
    return_kwargs
        Any individual returner configuration items to override. Should be passed
        as a dictionary.
    persist
        Whether changes to the scheduled job should be saved, defaults to True.
    skip_during_range
        This will ensure that the scheduled command does not run within the
        range specified. The range parameter must be a dictionary with the
        date strings using the dateutil format. Requires python-dateutil.
    run_after_skip_range
        Whether the scheduled job should run immediately after the skip_during_range time
        period ends.
    """
    # "comment" is accumulated as a list and joined into one string before
    # returning (error paths assign a plain string and return early).
    ret = {"name": name, "result": True, "changes": {}, "comment": []}
    # Full schedule (including disabled jobs) keyed by job name.
    current_schedule = __salt__["schedule.list"](show_all=True, return_yaml=False)
    if name in current_schedule:
        # Build the candidate item from kwargs so it can be compared with
        # what is currently configured.
        new_item = __salt__["schedule.build_schedule_item"](name, **kwargs)
        # See if the new_item is valid
        if isinstance(new_item, dict):
            if "result" in new_item and not new_item["result"]:
                ret["result"] = new_item["result"]
                ret["comment"] = new_item["comment"]
                return ret
        # The schedule.list gives us an item that is guaranteed to have an
        # 'enabled' argument. Before comparing, add 'enabled' if it's not
        # available (assume True, like schedule.list does)
        if "enabled" not in new_item:
            new_item["enabled"] = True
        if new_item == current_schedule[name]:
            ret["comment"].append("Job {} in correct state".format(name))
        else:
            if "test" in __opts__ and __opts__["test"]:
                # Test mode: pass test=True through so schedule.modify only
                # reports what it would change.
                kwargs["test"] = True
                result = __salt__["schedule.modify"](name, **kwargs)
                ret["comment"].append(result["comment"])
                ret["changes"] = result["changes"]
            else:
                result = __salt__["schedule.modify"](name, **kwargs)
                if not result["result"]:
                    ret["result"] = result["result"]
                    ret["comment"] = result["comment"]
                    return ret
                else:
                    ret["comment"].append("Modifying job {} in schedule".format(name))
                    ret["changes"] = result["changes"]
    else:
        if "test" in __opts__ and __opts__["test"]:
            # Test mode: dry-run the add.
            kwargs["test"] = True
            result = __salt__["schedule.add"](name, **kwargs)
            ret["comment"].append(result["comment"])
        else:
            result = __salt__["schedule.add"](name, **kwargs)
            if not result["result"]:
                ret["result"] = result["result"]
                ret["comment"] = result["comment"]
                return ret
            else:
                ret["comment"].append("Adding new job {} to schedule".format(name))
                ret["changes"] = result["changes"]
    # Flatten the collected comments into a single string.
    ret["comment"] = "\n".join(ret["comment"])
    return ret
def absent(name, **kwargs):
    """
    Ensure a job is absent from the schedule

    name
        The unique name that is given to the scheduled job.

    persist
        Whether changes to the scheduled job should be saved, defaults to True.
        When used with absent this will decide whether the scheduled job will be removed
        from the saved scheduled jobs and not be available when the Salt minion is
        restarted.
    """
    ret = {"name": name, "result": True, "changes": {}, "comment": []}

    current_schedule = __salt__["schedule.list"](show_all=True, return_yaml=False)
    if name not in current_schedule:
        ret["comment"].append("Job {} not present in schedule".format(name))
        ret["comment"] = "\n".join(ret["comment"])
        return ret

    if "test" in __opts__ and __opts__["test"]:
        kwargs["test"] = True
        result = __salt__["schedule.delete"](name, **kwargs)
        ret["comment"].append(result["comment"])
    else:
        result = __salt__["schedule.delete"](name, **kwargs)
        if not result["result"]:
            ret["result"] = result["result"]
            ret["comment"] = result["comment"]
            return ret
        ret["comment"].append("Removed job {} from schedule".format(name))
        ret["changes"] = result["changes"]

    ret["comment"] = "\n".join(ret["comment"])
    return ret
def enabled(name, **kwargs):
    """
    Ensure a job is enabled in the schedule

    name
        The unique name that is given to the scheduled job.

    persist
        Whether changes to the scheduled job should be saved, defaults to True.
    """
    ret = {"name": name, "result": True, "changes": {}, "comment": []}

    current_schedule = __salt__["schedule.list"](show_all=True, return_yaml=False)
    if name not in current_schedule:
        ret["comment"].append("Job {} not present in schedule".format(name))
        ret["comment"] = "\n".join(ret["comment"])
        return ret

    if "test" in __opts__ and __opts__["test"]:
        kwargs["test"] = True
        result = __salt__["schedule.enable_job"](name, **kwargs)
        ret["comment"].append(result["comment"])
    else:
        result = __salt__["schedule.enable_job"](name, **kwargs)
        if not result["result"]:
            ret["result"] = result["result"]
            ret["changes"] = result["changes"]
            ret["comment"] = result["comment"]
            return ret
        ret["comment"].append("Enabled job {} from schedule".format(name))

    ret["comment"] = "\n".join(ret["comment"])
    return ret
def disabled(name, **kwargs):
    """
    Ensure a job is disabled in the schedule

    name
        The unique name that is given to the scheduled job.

    persist
        Whether changes to the scheduled job should be saved, defaults to True.
    """
    ret = {"name": name, "result": True, "changes": {}, "comment": []}

    current_schedule = __salt__["schedule.list"](show_all=True, return_yaml=False)
    if name not in current_schedule:
        ret["comment"].append("Job {} not present in schedule".format(name))
        ret["comment"] = "\n".join(ret["comment"])
        return ret

    if "test" in __opts__ and __opts__["test"]:
        kwargs["test"] = True
        result = __salt__["schedule.disable_job"](name, **kwargs)
        ret["comment"].append(result["comment"])
    else:
        result = __salt__["schedule.disable_job"](name, **kwargs)
        if not result["result"]:
            ret["result"] = result["result"]
            ret["comment"] = result["comment"]
            return ret
        ret["comment"].append("Disabled job {} from schedule".format(name))

    ret["comment"] = "\n".join(ret["comment"])
    return ret
import logging
import xml.etree.ElementTree as ET
import salt.utils.xmlutil as xml
# Module-level logger shared by the state functions below.
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only load if the panos execution module is available.
    """
    if "panos.commit" not in __salt__:
        return (False, "panos module could not be loaded")
    return True
def _build_members(members, anycheck=False):
"""
Builds a member formatted string for XML operation.
"""
if isinstance(members, list):
# This check will strip down members to a single any statement
if anycheck and "any" in members:
return "<member>any</member>"
response = ""
for m in members:
response += "<member>{}</member>".format(m)
return response
else:
return "<member>{}</member>".format(members)
def _default_ret(name):
"""
Set the default response values.
"""
ret = {"name": name, "changes": {}, "commit": None, "result": False, "comment": ""}
return ret
def _edit_config(xpath, element):
    """
    Sends an edit request to the device.
    """
    response = __proxy__["panos.call"](
        {"type": "config", "action": "edit", "xpath": xpath, "element": element}
    )
    return _validate_response(response)
def _get_config(xpath):
    """
    Retrieves an xpath from the device.
    """
    # Unlike the mutating helpers, the raw response is returned unvalidated.
    return __proxy__["panos.call"](
        {"type": "config", "action": "get", "xpath": xpath}
    )
def _move_after(xpath, target):
    """
    Moves an xpath entry to the position immediately after the target entry.
    """
    query = {
        "type": "config",
        "action": "move",
        "xpath": xpath,
        "where": "after",
        "dst": target,
    }
    response = __proxy__["panos.call"](query)
    return _validate_response(response)
def _move_before(xpath, target):
    """
    Moves an xpath entry to the position immediately before the target entry.
    """
    query = {
        "type": "config",
        "action": "move",
        "xpath": xpath,
        "where": "before",
        "dst": target,
    }
    response = __proxy__["panos.call"](query)
    return _validate_response(response)
def _move_bottom(xpath):
    """
    Moves an xpath to the bottom of its section.
    """
    response = __proxy__["panos.call"](
        {"type": "config", "action": "move", "xpath": xpath, "where": "bottom"}
    )
    return _validate_response(response)
def _move_top(xpath):
    """
    Move the node at *xpath* to the top of its section.
    """
    request = {"type": "config", "action": "move", "xpath": xpath, "where": "top"}
    return _validate_response(__proxy__["panos.call"](request))
def _set_config(xpath, element):
    """
    Issue a config 'set' call against the device and validate the reply.
    """
    request = {"type": "config", "action": "set", "xpath": xpath, "element": element}
    return _validate_response(__proxy__["panos.call"](request))
def _validate_response(response):
"""
Validates a response from a Palo Alto device. Used to verify success of commands.
"""
if not response:
return False, "Unable to validate response from device."
elif "msg" in response:
if "line" in response["msg"]:
if response["msg"]["line"] == "already at the top":
return True, response
elif response["msg"]["line"] == "already at the bottom":
return True, response
else:
return False, response
elif response["msg"] == "command succeeded":
return True, response
else:
return False, response
elif "status" in response:
if response["status"] == "success":
return True, response
else:
return False, response
else:
return False, response
def add_config_lock(name):
    """
    Prevent other users from changing configuration until the lock is released.

    name: The name of the module function to execute.

    SLS Example:

    .. code-block:: yaml

        panos/takelock:
            panos.add_config_lock
    """
    ret = _default_ret(name)
    ret["changes"] = __salt__["panos.add_config_lock"]()
    ret["result"] = True
    return ret
def address_exists(
    name,
    addressname=None,
    vsys=1,
    ipnetmask=None,
    iprange=None,
    fqdn=None,
    description=None,
    commit=False,
):
    """
    Ensures that an address object exists in the configured state. If it does not
    exist or is not configured with the specified attributes, it will be adjusted
    to match the specified values.

    This module will only process a single address type (ip-netmask, ip-range, or
    fqdn). It will process the specified value in the following order:
    ip-netmask, ip-range, fqdn. For proper execution, only specify a single
    address type.

    name: The name of the module function to execute.

    addressname(str): The name of the address object. The name is case-sensitive
    and can have up to 31 characters, which can be letters, numbers, spaces,
    hyphens, and underscores. The name must be unique on a firewall and, on
    Panorama, unique within its device group and any ancestor or descendant
    device groups.

    vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.

    ipnetmask(str): The IPv4 or IPv6 address or IP address range using the format
    ip_address/mask or ip_address where the mask is the number of significant
    binary digits used for the network portion of the address. Ideally, for IPv6,
    you specify only the network portion, not the host portion.

    iprange(str): A range of addresses using the format ip_address-ip_address
    where both addresses can be IPv4 or both can be IPv6.

    fqdn(str): A fully qualified domain name format. The FQDN initially resolves
    at commit time. Entries are subsequently refreshed when the firewall performs
    a check every 30 minutes; all changes in the IP address for the entries are
    picked up at the refresh cycle.

    description(str): A description for the policy (up to 255 characters).

    commit(bool): If true the firewall will commit the changes, if false do not
    commit changes.

    SLS Example:

    .. code-block:: yaml

        panos/address/h-10.10.10.10:
            panos.address_exists:
              - addressname: h-10.10.10.10
              - vsys: 1
              - ipnetmask: 10.10.10.10
              - commit: False

        panos/address/10.0.0.1-10.0.0.50:
            panos.address_exists:
              - addressname: r-10.0.0.1-10.0.0.50
              - vsys: 1
              - iprange: 10.0.0.1-10.0.0.50
              - commit: False

        panos/address/foo.bar.com:
            panos.address_exists:
              - addressname: foo.bar.com
              - vsys: 1
              - fqdn: foo.bar.com
              - description: My fqdn object
              - commit: False
    """
    ret = _default_ret(name)

    if not addressname:
        # Fixed copy/paste error: this state manages addresses, not services.
        ret.update({"comment": "The address name field must be provided."})
        return ret

    # Fetch the address object as currently configured (empty if absent).
    address = __salt__["panos.get_address"](addressname, vsys)["result"]
    if address and "entry" in address:
        address = address["entry"]
    else:
        address = {}

    # Exactly one address type is processed, in this precedence order.
    if ipnetmask:
        element = "<ip-netmask>{}</ip-netmask>".format(ipnetmask)
    elif iprange:
        element = "<ip-range>{}</ip-range>".format(iprange)
    elif fqdn:
        element = "<fqdn>{}</fqdn>".format(fqdn)
    else:
        ret.update({"comment": "A valid address type must be specified."})
        return ret

    if description:
        element += "<description>{}</description>".format(description)

    full_element = "<entry name='{}'>{}</entry>".format(addressname, element)
    # Compare as dicts so attribute/element ordering differences don't matter.
    new_address = xml.to_dict(ET.fromstring(full_element), True)

    if address == new_address:
        ret.update(
            {
                "comment": "Address object already exists. No changes required.",
                "result": True,
            }
        )
        return ret

    xpath = (
        "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/address/"
        "entry[@name='{}']".format(vsys, addressname)
    )
    result, msg = _edit_config(xpath, full_element)
    if not result:
        ret.update({"comment": msg})
        return ret

    ret.update(
        {
            "changes": {"before": address, "after": new_address},
            # Fixed copy/paste error: report "Address object", not "Service
            # object", on the no-commit path.
            "comment": "Address object successfully configured.",
            "result": True,
        }
    )
    if commit is True:
        ret.update({"commit": __salt__["panos.commit"]()})
    return ret
def address_group_exists(
    name, groupname=None, vsys=1, members=None, description=None, commit=False
):
    """
    Ensures that an address group object exists in the configured state. If it
    does not exist or is not configured with the specified attributes, it will be
    adjusted to match the specified values.

    This module will enforce group membership. If a group exists and contains
    members this state does not include, those members will be removed and
    replaced with the specified members in the state.

    name: The name of the module function to execute.

    groupname(str): The name of the address group object. The name is
    case-sensitive and can have up to 31 characters, which can be letters,
    numbers, spaces, hyphens, and underscores. The name must be unique on a
    firewall and, on Panorama, unique within its device group and any ancestor
    or descendant device groups.

    vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.

    members(str, list): The members of the address group. These must be valid
    address objects or address groups on the system that already exist prior to
    the execution of this state.

    description(str): A description for the policy (up to 255 characters).

    commit(bool): If true the firewall will commit the changes, if false do not
    commit changes.

    SLS Example:

    .. code-block:: yaml

        panos/address-group/my-group:
            panos.address_group_exists:
              - groupname: my-group
              - vsys: 1
              - members:
                - my-address-object
                - my-other-address-group
              - description: A group that needs to exist
              - commit: False
    """
    ret = _default_ret(name)

    if not groupname:
        ret.update({"comment": "The group name field must be provided."})
        return ret

    # Fetch the group as currently configured (empty if it does not exist).
    existing = __salt__["panos.get_address_group"](groupname, vsys)["result"]
    existing = existing["entry"] if existing and "entry" in existing else {}

    if not members:
        ret.update({"comment": "The group members must be provided."})
        return ret

    element = "<static>{}</static>".format(_build_members(members, True))
    if description:
        element += "<description>{}</description>".format(description)

    full_element = "<entry name='{}'>{}</entry>".format(groupname, element)
    # Compare as dicts so formatting differences do not trigger edits.
    desired = xml.to_dict(ET.fromstring(full_element), True)

    if existing == desired:
        ret.update(
            {
                "comment": "Address group object already exists. No changes required.",
                "result": True,
            }
        )
        return ret

    xpath = (
        "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/address-group/"
        "entry[@name='{}']".format(vsys, groupname)
    )
    ok, msg = _edit_config(xpath, full_element)
    if not ok:
        ret.update({"comment": msg})
        return ret

    ret.update(
        {
            "changes": {"before": existing, "after": desired},
            "comment": "Address group object successfully configured.",
            "result": True,
        }
    )
    if commit is True:
        ret.update({"commit": __salt__["panos.commit"]()})
    return ret
def clone_config(name, xpath=None, newname=None, commit=False):
    """
    Clone a specific XPATH and set it to a new name.

    name: The name of the module function to execute.

    xpath(str): The XPATH of the configuration API tree to clone.

    newname(str): The new name of the XPATH clone.

    commit(bool): If true the firewall will commit the changes, if false do not commit changes.

    SLS Example:

    .. code-block:: yaml

        panos/clonerule:
            panos.clone_config:
              - xpath: /config/devices/entry/vsys/entry[@name='vsys1']/rulebase/security/rules&from=/config/devices/
                entry/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='rule1']
              - newname: rule2
              - commit: True
    """
    ret = _default_ret(name)
    # Both the source xpath and the new name are mandatory; bail out with the
    # default (failed) return if either is missing.
    if not xpath:
        return ret
    if not newname:
        return ret
    query = {"type": "config", "action": "clone", "xpath": xpath, "newname": newname}
    result, response = _validate_response(__proxy__["panos.call"](query))
    ret.update({"changes": response, "result": result})
    # Only commit when the clone itself succeeded.
    if not result:
        return ret
    if commit is True:
        ret.update({"commit": __salt__["panos.commit"](), "result": True})
    return ret
def commit_config(name):
    """
    Commits the candidate configuration to the running configuration.

    name: The name of the module function to execute.

    SLS Example:

    .. code-block:: yaml

        panos/commit:
            panos.commit_config
    """
    ret = _default_ret(name)
    ret["commit"] = __salt__["panos.commit"]()
    ret["result"] = True
    return ret
def delete_config(name, xpath=None, commit=False):
    """
    Deletes a Palo Alto XPATH to a specific value.

    Use the xpath parameter to specify the location of the object to be deleted.

    name: The name of the module function to execute.

    xpath(str): The XPATH of the configuration API tree to control.

    commit(bool): If true the firewall will commit the changes, if false do not
    commit changes.

    SLS Example:

    .. code-block:: yaml

        panos/deletegroup:
            panos.delete_config:
              - xpath: /config/devices/entry/vsys/entry[@name='vsys1']/address-group/entry[@name='test']
              - commit: True
    """
    ret = _default_ret(name)

    # xpath is mandatory; fail with the default return when missing.
    if not xpath:
        return ret

    request = {"type": "config", "action": "delete", "xpath": xpath}
    ok, response = _validate_response(__proxy__["panos.call"](request))
    ret.update({"changes": response, "result": ok})

    # Only commit when the delete succeeded.
    if ok and commit is True:
        ret.update({"commit": __salt__["panos.commit"](), "result": True})
    return ret
def download_software(name, version=None, synch=False, check=False):
    """
    Ensures that a software version is downloaded.

    name: The name of the module function to execute.

    version(str): The software version to check. If this version is not already
    downloaded, it will attempt to download the file from Palo Alto.

    synch(bool): If true, after downloading the file it will be synched to its
    peer.

    check(bool): If true, the PANOS device will first attempt to pull the most
    recent software inventory list from Palo Alto.

    SLS Example:

    .. code-block:: yaml

        panos/version8.0.0:
            panos.download_software:
              - version: 8.0.0
              - synch: False
              - check: True
    """
    ret = _default_ret(name)

    if check is True:
        __salt__["panos.check_software"]()

    def _inventory():
        # Return the device's software entry list, or None when the
        # inventory structure is missing/malformed.
        info = __salt__["panos.get_software_info"]()["result"]
        if (
            "sw-updates" not in info
            or "versions" not in info["sw-updates"]
            or "entry" not in info["sw-updates"]["versions"]
        ):
            return None
        return info["sw-updates"]["versions"]["entry"]

    def _is_downloaded(entries):
        # True when the requested version is marked downloaded on the device.
        return any(
            item["version"] == version and item["downloaded"] == "yes"
            for item in entries
        )

    entries = _inventory()
    if entries is None:
        ret.update(
            {
                "comment": "Software version is not found in the local software list.",
                "result": False,
            }
        )
        return ret

    if _is_downloaded(entries):
        ret.update(
            {"comment": "Software version is already downloaded.", "result": True}
        )
        return ret

    # Not present yet: trigger the download, then re-read the inventory to
    # confirm it landed.
    ret.update(
        {
            "changes": __salt__["panos.download_software_version"](
                version=version, synch=synch
            )
        }
    )

    entries = _inventory()
    if entries is None:
        ret.update({"result": False})
        return ret
    if _is_downloaded(entries):
        ret.update({"result": True})
    return ret
def edit_config(name, xpath=None, value=None, commit=False):
    """
    Edits a Palo Alto XPATH to a specific value. This will always overwrite the
    existing value, even if it is not changed.

    You can replace an existing object hierarchy at a specified location in the
    configuration with a new value. Use the xpath parameter to specify the
    location of the object, including the node to be replaced.

    This is the recommended state to enforce configurations on a xpath.

    name: The name of the module function to execute.

    xpath(str): The XPATH of the configuration API tree to control.

    value(str): The XML value to edit. This must be a child to the XPATH.

    commit(bool): If true the firewall will commit the changes, if false do not
    commit changes.

    SLS Example:

    .. code-block:: yaml

        panos/addressgroup:
            panos.edit_config:
              - xpath: /config/devices/entry/vsys/entry[@name='vsys1']/address-group/entry[@name='test']
              - value: <static><entry name='test'><member>abc</member><member>xyz</member></entry></static>
              - commit: True
    """
    ret = _default_ret(name)

    # Guard against missing mandatory arguments, consistent with the other
    # states in this module. Previously a None xpath raised AttributeError
    # on xpath.split() and a None value crashed ET.fromstring().
    if not xpath:
        ret.update({"comment": "The xpath field must be provided."})
        return ret
    if not value:
        ret.update({"comment": "The value field must be provided."})
        return ret

    # The last path component (minus any [@name=...] predicate) is the tag
    # the device returns the current data under; use it to unwrap the reply.
    head = xpath.split("/")[-1]
    if "[" in head:
        head = head.split("[")[0]

    current_element = __salt__["panos.get_xpath"](xpath)["result"]
    if head and current_element and head in current_element:
        current_element = current_element[head]
    else:
        current_element = {}

    # If the device already matches the desired value, no edit is needed.
    new_element = xml.to_dict(ET.fromstring(value), True)
    if current_element == new_element:
        ret.update(
            {
                "comment": "XPATH is already equal to the specified value.",
                "result": True,
            }
        )
        return ret

    result, msg = _edit_config(xpath, value)
    ret.update({"comment": msg, "result": result})
    if not result:
        return ret

    ret.update(
        {
            "changes": {"before": current_element, "after": new_element},
            "result": True,
        }
    )
    if commit is True:
        ret.update({"commit": __salt__["panos.commit"]()})
    return ret
def move_config(name, xpath=None, where=None, dst=None, commit=False):
    """
    Moves a XPATH value to a new location.

    Use the xpath parameter to specify the location of the object to be moved,
    the where parameter to specify type of move, and dst parameter to specify
    the destination path.

    name: The name of the module function to execute.

    xpath(str): The XPATH of the configuration API tree to move.

    where(str): The type of move to execute. Valid options are after, before,
    top, bottom. The after and before options will require the dst option to
    specify the destination of the action. The top action will move the XPATH
    to the top of its structure. The bottom action will move the XPATH to the
    bottom of its structure.

    dst(str): Optional. Specifies the destination to utilize for a move action.
    This is ignored for the top or bottom action.

    commit(bool): If true the firewall will commit the changes, if false do not
    commit changes. If the operation is not successful, it will not commit.

    SLS Example:

    .. code-block:: yaml

        panos/moveruletop:
            panos.move_config:
              - xpath: /config/devices/entry/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='rule1']
              - where: top
              - commit: True

        panos/moveruleafter:
            panos.move_config:
              - xpath: /config/devices/entry/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='rule1']
              - where: after
              - dst: rule2
              - commit: True
    """
    ret = _default_ret(name)

    if not xpath:
        return ret
    if not where:
        return ret

    if where == "after":
        result, msg = _move_after(xpath, dst)
    elif where == "before":
        result, msg = _move_before(xpath, dst)
    elif where == "top":
        result, msg = _move_top(xpath)
    elif where == "bottom":
        result, msg = _move_bottom(xpath)
    else:
        # Previously an unrecognized 'where' fell through with result/msg
        # unbound, raising UnboundLocalError; fail cleanly instead.
        ret.update(
            {"comment": "The where option must be after, before, top, or bottom."}
        )
        return ret

    ret.update({"result": result, "comment": msg})
    if not result:
        return ret

    if commit is True:
        ret.update({"commit": __salt__["panos.commit"](), "result": True})
    return ret
def remove_config_lock(name):
    """
    Release config lock previously held.

    name: The name of the module function to execute.

    SLS Example:

    .. code-block:: yaml

        panos/takelock:
            panos.remove_config_lock
    """
    ret = _default_ret(name)
    ret["changes"] = __salt__["panos.remove_config_lock"]()
    ret["result"] = True
    return ret
def rename_config(name, xpath=None, newname=None, commit=False):
    """
    Rename a Palo Alto XPATH to a specific value. This will always rename the
    value even if a change is not needed.

    name: The name of the module function to execute.

    xpath(str): The XPATH of the configuration API tree to control.

    newname(str): The new name of the XPATH value.

    commit(bool): If true the firewall will commit the changes, if false do not
    commit changes.

    SLS Example:

    .. code-block:: yaml

        panos/renamegroup:
            panos.rename_config:
              - xpath: /config/devices/entry/vsys/entry[@name='vsys1']/address/entry[@name='old_address']
              - newname: new_address
              - commit: True
    """
    ret = _default_ret(name)

    # Both the xpath and the new name are mandatory.
    if not xpath:
        return ret
    if not newname:
        return ret

    request = {"type": "config", "action": "rename", "xpath": xpath, "newname": newname}
    ok, response = _validate_response(__proxy__["panos.call"](request))
    ret.update({"changes": response, "result": ok})

    # Only commit when the rename succeeded.
    if ok and commit is True:
        ret.update({"commit": __salt__["panos.commit"](), "result": True})
    return ret
def security_rule_exists(
    name,
    rulename=None,
    vsys="1",
    action=None,
    disabled=None,
    sourcezone=None,
    destinationzone=None,
    source=None,
    destination=None,
    application=None,
    service=None,
    description=None,
    logsetting=None,
    logstart=None,
    logend=None,
    negatesource=None,
    negatedestination=None,
    profilegroup=None,
    datafilter=None,
    fileblock=None,
    spyware=None,
    urlfilter=None,
    virus=None,
    vulnerability=None,
    wildfire=None,
    move=None,
    movetarget=None,
    commit=False,
):
    """
    Ensures that a security rule exists on the device. Also, ensure that all
    configurations are set appropriately.

    This method will create the rule if it does not exist. If the rule does
    exist, it will ensure that the configurations are set appropriately.

    If the rule does not exist and is created, any value that is not provided
    will be provided as the default. The action, to, from, source, destination,
    application, and service fields are mandatory and must be provided.

    This will enforce the exact match of the rule. For example, if the rule is
    currently configured with the log-end option, but this option is not
    specified in the state method, it will be removed and reset to the system
    default. It is strongly recommended to specify all options to ensure proper
    operation.

    When defining the profile group settings, the device can only support either
    a profile group or individual settings. If both are specified, the profile
    group will be preferred and the individual settings are ignored. If neither
    are specified, the value will be set to system default of none.

    name: The name of the module function to execute.

    rulename(str): The name of the security rule. The name is case-sensitive and
    can have up to 31 characters, which can be letters, numbers, spaces, hyphens,
    and underscores. The name must be unique on a firewall and, on Panorama,
    unique within its device group and any ancestor or descendant device groups.

    vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.

    action(str): The action that the security rule will enforce. Valid options
    are: allow, deny, drop, reset-client, reset-server, reset-both.

    disabled(bool): Controls if the rule is disabled. Set 'True' to disable and
    'False' to enable.

    sourcezone(str, list): The source zone(s). The value 'any' will match all
    zones.

    destinationzone(str, list): The destination zone(s). The value 'any' will
    match all zones.

    source(str, list): The source address(es). The value 'any' will match all
    addresses.

    destination(str, list): The destination address(es). The value 'any' will
    match all addresses.

    application(str, list): The application(s) matched. The value 'any' will
    match all applications.

    service(str, list): The service(s) matched. The value 'any' will match all
    services. The value 'application-default' will match based upon the
    application defined ports.

    description(str): A description for the policy (up to 255 characters).

    logsetting(str): The name of a valid log forwarding profile.

    logstart(bool): Generates a traffic log entry for the start of a session
    (disabled by default).

    logend(bool): Generates a traffic log entry for the end of a session
    (enabled by default).

    negatesource(bool): Match all but the specified source addresses.

    negatedestination(bool): Match all but the specified destination addresses.

    profilegroup(str): A valid profile group name.

    datafilter(str): A valid data filter profile name. Ignored with the
    profilegroup option set.

    fileblock(str): A valid file blocking profile name. Ignored with the
    profilegroup option set.

    spyware(str): A valid spyware profile name. Ignored with the profilegroup
    option set.

    urlfilter(str): A valid URL filtering profile name. Ignored with the
    profilegroup option set.

    virus(str): A valid virus profile name. Ignored with the profilegroup option
    set.

    vulnerability(str): A valid vulnerability profile name. Ignored with the
    profilegroup option set.

    wildfire(str): A valid vulnerability profile name. Ignored with the
    profilegroup option set.

    move(str): An optional argument that ensure the rule is moved to a specific
    location. Valid options are 'top', 'bottom', 'before', or 'after'. The
    'before' and 'after' options require the use of the 'movetarget' argument
    to define the location of the move request.

    movetarget(str): An optional argument that defines the target of the move
    operation if the move argument is set to 'before' or 'after'.

    commit(bool): If true the firewall will commit the changes, if false do not
    commit changes.

    SLS Example:

    .. code-block:: yaml

        panos/rulebase/security/rule01:
            panos.security_rule_exists:
              - rulename: rule01
              - vsys: 1
              - action: allow
              - disabled: False
              - sourcezone: untrust
              - destinationzone: trust
              - source:
                - 10.10.10.0/24
                - 1.1.1.1
              - destination:
                - 2.2.2.2-2.2.2.4
              - application:
                - any
              - service:
                - tcp-25
              - description: My test security rule
              - logsetting: logprofile
              - logstart: False
              - logend: True
              - negatesource: False
              - negatedestination: False
              - profilegroup: myprofilegroup
              - move: top
              - commit: False

        panos/rulebase/security/rule01:
            panos.security_rule_exists:
              - rulename: rule01
              - vsys: 1
              - action: allow
              - disabled: False
              - sourcezone: untrust
              - destinationzone: trust
              - source:
                - 10.10.10.0/24
                - 1.1.1.1
              - destination:
                - 2.2.2.2-2.2.2.4
              - application:
                - any
              - service:
                - tcp-25
              - description: My test security rule
              - logsetting: logprofile
              - logstart: False
              - logend: False
              - datafilter: foobar
              - fileblock: foobar
              - spyware: foobar
              - urlfilter: foobar
              - virus: foobar
              - vulnerability: foobar
              - wildfire: foobar
              - move: after
              - movetarget: rule02
              - commit: False
    """
    ret = _default_ret(name)

    if not rulename:
        return ret

    # Fetch the rule as currently configured (empty dict if absent).
    rule = __salt__["panos.get_security_rule"](rulename, vsys)["result"]
    if rule and "entry" in rule:
        rule = rule["entry"]
    else:
        rule = {}

    # Build the rule element; the zone, address, application, service, and
    # action fields are mandatory.
    element = ""
    if sourcezone:
        element += "<from>{}</from>".format(_build_members(sourcezone, True))
    else:
        ret.update({"comment": "The sourcezone field must be provided."})
        return ret

    if destinationzone:
        element += "<to>{}</to>".format(_build_members(destinationzone, True))
    else:
        ret.update({"comment": "The destinationzone field must be provided."})
        return ret

    if source:
        element += "<source>{}</source>".format(_build_members(source, True))
    else:
        ret.update({"comment": "The source field must be provided."})
        # BUG FIX: this branch previously returned None (bare `return`)
        # instead of the state return dictionary.
        return ret

    if destination:
        element += "<destination>{}</destination>".format(
            _build_members(destination, True)
        )
    else:
        ret.update({"comment": "The destination field must be provided."})
        return ret

    if application:
        element += "<application>{}</application>".format(
            _build_members(application, True)
        )
    else:
        ret.update({"comment": "The application field must be provided."})
        return ret

    if service:
        element += "<service>{}</service>".format(_build_members(service, True))
    else:
        ret.update({"comment": "The service field must be provided."})
        return ret

    if action:
        element += "<action>{}</action>".format(action)
    else:
        ret.update({"comment": "The action field must be provided."})
        return ret

    # Optional boolean flags are rendered as yes/no; None means "leave at
    # system default" and emits nothing.
    if disabled is not None:
        if disabled:
            element += "<disabled>yes</disabled>"
        else:
            element += "<disabled>no</disabled>"

    if description:
        element += "<description>{}</description>".format(description)

    if logsetting:
        element += "<log-setting>{}</log-setting>".format(logsetting)

    if logstart is not None:
        if logstart:
            element += "<log-start>yes</log-start>"
        else:
            element += "<log-start>no</log-start>"

    if logend is not None:
        if logend:
            element += "<log-end>yes</log-end>"
        else:
            element += "<log-end>no</log-end>"

    if negatesource is not None:
        if negatesource:
            element += "<negate-source>yes</negate-source>"
        else:
            element += "<negate-source>no</negate-source>"

    if negatedestination is not None:
        if negatedestination:
            element += "<negate-destination>yes</negate-destination>"
        else:
            element += "<negate-destination>no</negate-destination>"

    # Build the profile settings: a profile group takes precedence over the
    # individual profile options.
    profile_string = None
    if profilegroup:
        profile_string = "<group><member>{}</member></group>".format(profilegroup)
    else:
        member_string = ""
        if datafilter:
            member_string += (
                "<data-filtering><member>{}</member></data-filtering>".format(
                    datafilter
                )
            )
        if fileblock:
            member_string += (
                "<file-blocking><member>{}</member></file-blocking>".format(fileblock)
            )
        if spyware:
            member_string += "<spyware><member>{}</member></spyware>".format(spyware)
        if urlfilter:
            member_string += (
                "<url-filtering><member>{}</member></url-filtering>".format(urlfilter)
            )
        if virus:
            member_string += "<virus><member>{}</member></virus>".format(virus)
        if vulnerability:
            member_string += (
                "<vulnerability><member>{}</member></vulnerability>".format(
                    vulnerability
                )
            )
        if wildfire:
            member_string += (
                "<wildfire-analysis><member>{}</member></wildfire-analysis>".format(
                    wildfire
                )
            )
        if member_string != "":
            profile_string = "<profiles>{}</profiles>".format(member_string)

    if profile_string:
        element += "<profile-setting>{}</profile-setting>".format(profile_string)

    full_element = "<entry name='{}'>{}</entry>".format(rulename, element)
    new_rule = xml.to_dict(ET.fromstring(full_element), True)

    config_change = False
    if rule == new_rule:
        ret.update({"comment": "Security rule already exists. No changes required."})
    else:
        config_change = True
        xpath = (
            "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/rulebase/"
            "security/rules/entry[@name='{}']".format(vsys, rulename)
        )
        result, msg = _edit_config(xpath, full_element)
        if not result:
            ret.update({"comment": msg})
            return ret
        ret.update(
            {
                "changes": {"before": rule, "after": new_rule},
                "comment": "Security rule verified successfully.",
            }
        )

    if move:
        movepath = (
            "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/rulebase/"
            "security/rules/entry[@name='{}']".format(vsys, rulename)
        )
        move_result = False
        move_msg = ""
        if move == "before" and movetarget:
            move_result, move_msg = _move_before(movepath, movetarget)
        elif move == "after":
            # NOTE(review): unlike 'before', 'after' does not verify that
            # movetarget was supplied; a missing target is passed through to
            # the device call. Kept as-is to preserve existing behavior.
            move_result, move_msg = _move_after(movepath, movetarget)
        elif move == "top":
            move_result, move_msg = _move_top(movepath)
        elif move == "bottom":
            move_result, move_msg = _move_bottom(movepath)

        if config_change:
            ret.update(
                {"changes": {"before": rule, "after": new_rule, "move": move_msg}}
            )
        else:
            ret.update({"changes": {"move": move_msg}})

        if not move_result:
            ret.update({"comment": move_msg})
            return ret

    if commit is True:
        ret.update({"commit": __salt__["panos.commit"](), "result": True})
    else:
        ret.update({"result": True})
    return ret
def service_exists(
    name,
    servicename=None,
    vsys=1,
    protocol=None,
    port=None,
    description=None,
    commit=False,
):
    """
    Ensures that a service object exists in the configured state. If it does not
    exist or is not configured with the specified attributes, it will be adjusted
    to match the specified values.

    name: The name of the module function to execute.

    servicename(str): The name of the security object. The name is
    case-sensitive and can have up to 31 characters, which can be letters,
    numbers, spaces, hyphens, and underscores. The name must be unique on a
    firewall and, on Panorama, unique within its device group and any ancestor
    or descendant device groups.

    vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.

    protocol(str): The protocol that is used by the service object. The only
    valid options are tcp and udp.

    port(str): The port number that is used by the service object. This can be
    specified as a single integer or a valid range of ports.

    description(str): A description for the policy (up to 255 characters).

    commit(bool): If true the firewall will commit the changes, if false do not
    commit changes.

    SLS Example:

    .. code-block:: yaml

        panos/service/tcp-80:
            panos.service_exists:
              - servicename: tcp-80
              - vsys: 1
              - protocol: tcp
              - port: 80
              - description: Hypertext Transfer Protocol
              - commit: False

        panos/service/udp-500-550:
            panos.service_exists:
              - servicename: udp-500-550
              - vsys: 3
              - protocol: udp
              - port: 500-550
              - commit: False
    """
    ret = _default_ret(name)

    if not servicename:
        ret.update({"comment": "The service name field must be provided."})
        return ret

    # Fetch the service object as currently configured (empty if absent).
    service = __salt__["panos.get_service"](servicename, vsys)["result"]
    if service and "entry" in service:
        service = service["entry"]
    else:
        service = {}

    # BUG FIX: the original condition used 'and' ("not protocol and protocol
    # not in [...]"), which let any non-empty invalid protocol (e.g. 'icmp')
    # slip through validation.
    if not protocol or protocol not in ["tcp", "udp"]:
        ret.update({"comment": "The protocol must be provided and must be tcp or udp."})
        return ret
    if not port:
        ret.update({"comment": "The port field must be provided."})
        return ret

    element = "<protocol><{0}><port>{1}</port></{0}></protocol>".format(protocol, port)
    if description:
        element += "<description>{}</description>".format(description)

    full_element = "<entry name='{}'>{}</entry>".format(servicename, element)
    new_service = xml.to_dict(ET.fromstring(full_element), True)

    if service == new_service:
        ret.update(
            {
                "comment": "Service object already exists. No changes required.",
                "result": True,
            }
        )
        return ret

    xpath = (
        "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/service/"
        "entry[@name='{}']".format(vsys, servicename)
    )
    result, msg = _edit_config(xpath, full_element)
    if not result:
        ret.update({"comment": msg})
        return ret

    ret.update(
        {
            "changes": {"before": service, "after": new_service},
            "comment": "Service object successfully configured.",
            "result": True,
        }
    )
    if commit is True:
        ret.update({"commit": __salt__["panos.commit"]()})
    return ret
def service_group_exists(
    name, groupname=None, vsys=1, members=None, description=None, commit=False
):
    """
    Ensures that a service group object exists in the configured state. If it does not exist or is not configured with
    the specified attributes, it will be adjusted to match the specified values.

    This module will enforce group membership. If a group exists and contains members this state does not include,
    those members will be removed and replaced with the specified members in the state.

    name: The name of the module function to execute.

    groupname(str): The name of the service group object. The name is case-sensitive and can have up to 31 characters,
    which can be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on
    Panorama, unique within its device group and any ancestor or descendant device groups.

    vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.

    members(str, list): The members of the service group. These must be valid service objects or service groups on the
    system that already exist prior to the execution of this state.

    description(str): A description for the policy (up to 255 characters).

    commit(bool): If true the firewall will commit the changes, if false do not commit changes.

    SLS Example:

    .. code-block:: yaml

        panos/service-group/my-group:
            panos.service_group_exists:
              - groupname: my-group
              - vsys: 1
              - members:
                - tcp-80
                - custom-port-group
              - description: A group that needs to exist
              - commit: False

    """
    ret = _default_ret(name)
    if not groupname:
        # The group name is mandatory; bail out with the default (failed) ret.
        ret.update({"comment": "The group name field must be provided."})
        return ret
    # Check if service group object currently exists
    group = __salt__["panos.get_service_group"](groupname, vsys)["result"]
    if group and "entry" in group:
        group = group["entry"]
    else:
        group = {}
    # Verify the arguments
    if members:
        element = "<members>{}</members>".format(_build_members(members, True))
    else:
        ret.update({"comment": "The group members must be provided."})
        return ret
    if description:
        element += "<description>{}</description>".format(description)
    full_element = "<entry name='{}'>{}</entry>".format(groupname, element)
    # Convert the candidate XML into a dict so it can be compared against the
    # firewall's current configuration for the same entry.
    new_group = xml.to_dict(ET.fromstring(full_element), True)
    if group == new_group:
        ret.update(
            {
                "comment": "Service group object already exists. No changes required.",
                "result": True,
            }
        )
        return ret
    else:
        # Push the candidate entry; _edit_config returns (success, message).
        xpath = (
            "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/service-group/"
            "entry[@name='{}']".format(vsys, groupname)
        )
        result, msg = _edit_config(xpath, full_element)
        if not result:
            ret.update({"comment": msg})
            return ret
    # The object was (re)configured; optionally commit the candidate config.
    if commit is True:
        ret.update(
            {
                "changes": {"before": group, "after": new_group},
                "commit": __salt__["panos.commit"](),
                "comment": "Service group object successfully configured.",
                "result": True,
            }
        )
    else:
        ret.update(
            {
                "changes": {"before": group, "after": new_group},
                "comment": "Service group object successfully configured.",
                "result": True,
            }
        )
    return ret
def set_config(name, xpath=None, value=None, commit=False):
    """
    Sets a Palo Alto XPATH to a specific value. This will always overwrite the existing value, even if it is not
    changed.

    You can add or create a new object at a specified location in the configuration hierarchy. Use the xpath parameter
    to specify the location of the object in the configuration

    name: The name of the module function to execute.

    xpath(str): The XPATH of the configuration API tree to control.

    value(str): The XML value to set. This must be a child to the XPATH.

    commit(bool): If true the firewall will commit the changes, if false do not commit changes.

    SLS Example:

    .. code-block:: yaml

        panos/hostname:
            panos.set_config:
              - xpath: /config/devices/entry[@name='localhost.localdomain']/deviceconfig/system
              - value: <hostname>foobar</hostname>
              - commit: True

    """
    ret = _default_ret(name)
    # _set_config performs the API edit and reports (success, message).
    result, msg = _set_config(xpath, value)
    ret.update({"comment": msg, "result": result})
    if not result:
        return ret
    if commit is True:
        # Commit the candidate configuration and record the commit job details.
        ret.update({"commit": __salt__["panos.commit"](), "result": True})
    # Fix: the final return line previously carried stray non-Python artifact
    # text fused onto it, which made the module unparseable.
    return ret
def present(name=None, data=None, ensure_data=True, **api_opts):
    """
    Ensure the CNAME with the given data is present.

    name
        CNAME of record

    data
        raw CNAME api data see: https://INFOBLOX/wapidoc

    State example:

    .. code-block:: yaml

        infoblox_cname.present:
            - name: example-ha-0.domain.com
            - data:
                name: example-ha-0.domain.com
                canonical: example.domain.com
                zone: example.com
                view: Internal
                comment: Example comment

        infoblox_cname.present:
            - name: example-ha-0.domain.com
            - data:
                name: example-ha-0.domain.com
                canonical: example.domain.com
                zone: example.com
                view: Internal
                comment: Example comment
            - api_url: https://INFOBLOX/wapi/v1.2.1
            - api_username: username
            - api_password: passwd
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    if not data:
        data = {}
    if "name" not in data:
        # Default the record name from the state name.
        data.update({"name": name})
    obj = __salt__["infoblox.get_cname"](name=name, **api_opts)
    if obj is None:
        # perhaps the user updated the name
        obj = __salt__["infoblox.get_cname"](name=data["name"], **api_opts)
        if obj:
            # warn user that the data was updated and does not match
            ret["result"] = False
            ret[
                "comment"
            ] = "** please update the name: {} to equal the updated data name {}".format(
                name, data["name"]
            )
            return ret
    if obj:
        if not ensure_data:
            ret["result"] = True
            ret[
                "comment"
            ] = "infoblox record already created (supplied fields not ensured to match)"
            return ret
        diff = __salt__["infoblox.diff_objects"](data, obj)
        if not diff:
            ret["result"] = True
            ret["comment"] = (
                "supplied fields already updated (note: removing fields might not"
                " update)"
            )
            return ret
        if diff:
            ret["changes"] = {"diff": diff}
            if __opts__["test"]:
                ret["result"] = None
                ret["comment"] = "would attempt to update infoblox record"
                return ret
            # Fix: the return value was previously bound to an unused local
            # (new_obj); the call is made for its side effect only.
            __salt__["infoblox.update_object"](obj["_ref"], data=data, **api_opts)
            ret["result"] = True
            ret["comment"] = (
                "infoblox record fields updated (note: removing fields might not"
                " update)"
            )
            return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "would attempt to create infoblox record {}".format(
            data["name"]
        )
        return ret
    # Create the record, then re-read it to report the resulting object.
    new_obj_ref = __salt__["infoblox.create_cname"](data=data, **api_opts)
    new_obj = __salt__["infoblox.get_cname"](name=name, **api_opts)
    ret["result"] = True
    ret["comment"] = "infoblox record created"
    ret["changes"] = {"old": "None", "new": {"_ref": new_obj_ref, "data": new_obj}}
    return ret
def absent(name=None, canonical=None, **api_opts):
    """
    Ensure the CNAME with the given name or canonical name is removed

    name
        name of the CNAME record to remove

    canonical
        canonical name of the CNAME record to remove
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    obj = __salt__["infoblox.get_cname"](name=name, canonical=canonical, **api_opts)
    if not obj:
        # Nothing to do; the record is already gone.
        ret["result"] = True
        ret["comment"] = "infoblox already removed"
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["changes"] = {"old": obj, "new": "absent"}
        return ret
    if __salt__["infoblox.delete_cname"](name=name, canonical=canonical, **api_opts):
        ret["result"] = True
        ret["changes"] = {"old": obj, "new": "absent"}
    # Fix: the final return line previously carried stray non-Python artifact
    # text fused onto it, which made the module unparseable.
    return ret
import salt.utils.functools
def send(
    name,
    data=None,
    preload=None,
    with_env=False,
    with_grains=False,
    with_pillar=False,
    show_changed=True,
    **kwargs
):
    """
    Fire an event on the Salt master event bus.

    .. versionadded:: 2014.7.0

    Accepts the same arguments as the :py:func:`event.send
    <salt.modules.event.send>` execution module of the same name,
    with the additional argument:

    :param show_changed: If ``True``, state will show as changed with the data
        argument as the change value. If ``False``, shows as unchanged.

    Example:

    .. code-block:: yaml

        # ...snip bunch of states above

        mycompany/mystaterun/status/update:
          event.send:
            - data:
                status: "Half-way through the state run!"

        # ...snip bunch of states below
    """
    # Report the tag/data as the change set only when requested.
    changes = {"tag": name, "data": data} if show_changed else {}
    ret = {"name": name, "changes": changes, "result": False, "comment": ""}
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Event would have been fired"
        return ret
    # Delegate the actual firing to the execution module; its boolean return
    # becomes this state's result.
    ret["result"] = __salt__["event.send"](
        name,
        data=data,
        preload=preload,
        with_env=with_env,
        with_grains=with_grains,
        with_pillar=with_pillar,
        **kwargs
    )
    ret["comment"] = "Event fired"
    return ret
def wait(name, sfun=None, data=None):
    """
    Fire an event on the Salt master event bus if called from a watch statement.

    .. versionadded:: 2014.7.0

    Example:

    .. code-block:: jinja

        # Stand up a new web server.
        apache:
          pkg:
            - installed
            - name: httpd
          service:
            - running
            - enable: True
            - name: httpd

        # Notify the load balancer to update the pool once Apache is running.
        refresh_pool:
          event:
            - wait
            - name: mycompany/loadbalancer/pool/update
            - data:
                new_web_server_ip: {{ grains['ipv4'] | first() }}
            - watch:
              - pkg: apache
    """
    # Deliberate no-op: when a watch/listen requisite fires, the state system
    # calls mod_watch (aliased to send) instead of this function.
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    return ret
# Requisite-triggered runs (watch/listen) and explicit master fires both
# delegate to send().
# Fix: the fire_master line previously carried stray non-Python artifact text
# fused onto it, which made the module unparseable.
mod_watch = salt.utils.functools.alias_function(send, "mod_watch")
fire_master = salt.utils.functools.alias_function(send, "fire_master")
import logging
log = logging.getLogger(__name__)
def __virtual__():
    """
    Make these states available only when a kernelpkg provider has been
    detected or assigned for this minion.
    """
    if "kernelpkg.upgrade" not in __salt__:
        return (False, "kernelpkg module could not be loaded")
    return True
def latest_installed(name, **kwargs):  # pylint: disable=unused-argument
    """
    Ensure that the latest version of the kernel available in the
    repositories is installed.

    .. note::
        This state only installs the kernel, but does not activate it.
        The new kernel should become active at the next reboot.
        See :py:func:`kernelpkg.needs_reboot <salt.modules.kernelpkg_linux_yum.needs_reboot>` for details on
        how to detect this condition, and :py:func:`~salt.states.kernelpkg.latest_active`
        to initiate a reboot when needed.

    name
        Arbitrary name for the state. Does not affect behavior.
    """
    installed = __salt__["kernelpkg.list_installed"]()
    upgrade = __salt__["kernelpkg.latest_available"]()
    ret = {"name": name}
    if upgrade in installed:
        # Latest available version is already on the system; nothing to do.
        ret["result"] = True
        ret["comment"] = "The latest kernel package is already installed: {}".format(
            upgrade
        )
        ret["changes"] = {}
    else:
        if __opts__["test"]:
            ret["result"] = None
            ret["changes"] = {}
            ret["comment"] = "The latest kernel package will be installed: {}".format(
                upgrade
            )
        else:
            # Install the newer kernel; activation requires a reboot.
            result = __salt__["kernelpkg.upgrade"]()
            ret["result"] = True
            ret["changes"] = result["upgrades"]
            ret[
                "comment"
            ] = "The latest kernel package has been installed, but not activated."
    return ret
def latest_active(name, at_time=None, **kwargs):  # pylint: disable=unused-argument
    """
    Initiate a reboot if the running kernel is not the latest one installed.

    .. note::
        This state does not install any patches. It only compares the running
        kernel version number to other kernel versions also installed in the
        system. If the running version is not the latest one installed, this
        state will reboot the system.

        See :py:func:`kernelpkg.upgrade <salt.modules.kernelpkg_linux_yum.upgrade>` and
        :py:func:`~salt.states.kernelpkg.latest_installed`
        for ways to install new kernel packages.

        This module does not attempt to understand or manage boot loader configurations
        it is possible to have a new kernel installed, but a boot loader configuration
        that will never activate it. For this reason, it would not be advisable to
        schedule this state to run automatically.

        Because this state function may cause the system to reboot, it may be preferable
        to move it to the very end of the state run.
        See :py:func:`~salt.states.kernelpkg.latest_wait`
        for a waitable state that can be called with the `listen` requesite.

    name
        Arbitrary name for the state. Does not affect behavior.

    at_time
        The wait time in minutes before the system will be rebooted.
    """
    active = __salt__["kernelpkg.active"]()
    latest = __salt__["kernelpkg.latest_installed"]()
    ret = {"name": name}
    if __salt__["kernelpkg.needs_reboot"]():
        # A newer kernel is installed but not running; schedule the reboot.
        ret["comment"] = "The system will be booted to activate kernel: {}".format(
            latest
        )
        if __opts__["test"]:
            ret["result"] = None
            ret["changes"] = {"kernel": {"old": active, "new": latest}}
        else:
            __salt__["system.reboot"](at_time=at_time)
            ret["result"] = True
            ret["changes"] = {"kernel": {"old": active, "new": latest}}
    else:
        ret["result"] = True
        ret["comment"] = "The latest installed kernel package is active: {}".format(
            active
        )
        ret["changes"] = {}
    return ret
def latest_wait(name, at_time=None, **kwargs):  # pylint: disable=unused-argument
    """
    Initiate a reboot if the running kernel is not the latest one installed.
    This is the waitable version of
    :py:func:`~salt.states.kernelpkg.latest_active` and will not take any
    action unless triggered by a watch or listen requisite.

    .. note::
        Because this state function may cause the system to reboot, it may be
        preferable to move it to the very end of the state run using `listen`
        or `listen_in` requisites.

    .. code-block:: yaml

        system-up-to-date:
          pkg.uptodate:
            - refresh: true

        boot-latest-kernel:
          kernelpkg.latest_wait:
            - at_time: 1
            - listen:
              - pkg: system-up-to-date

    name
        Arbitrary name for the state. Does not affect behavior.

    at_time
        The wait time in minutes before the system will be rebooted.
    """
    # Deliberate no-op: the state system routes watch/listen triggers to
    # mod_watch(), which dispatches to latest_active().
    return dict(name=name, changes={}, result=True, comment="")
def mod_watch(name, sfun, **kwargs):
    """
    Execute a kernelpkg state based on a watch or listen call.

    Only ``latest_active`` and ``latest_wait`` support being triggered this
    way; both dispatch to :py:func:`latest_active`. Any other state function
    produces a failed result.
    """
    if sfun in ("latest_active", "latest_wait"):
        return latest_active(name, **kwargs)
    else:
        # Fix: the closing brace of this dict previously carried stray
        # non-Python artifact text fused onto it, making the module
        # unparseable.
        return {
            "name": name,
            "changes": {},
            "comment": "kernelpkg.{} does not work with the watch requisite.".format(
                sfun
            ),
            "result": False,
        }
__virtualname__ = "keystone_endpoint"


def __virtual__():
    """
    Load this state only when the keystoneng execution module (backed by the
    shade library) is available.
    """
    if "keystoneng.endpoint_get" not in __salt__:
        return (
            False,
            "The keystoneng execution module failed to load: shade python module is not"
            " available",
        )
    return __virtualname__
def _common(ret, name, service_name, kwargs):
    """
    Resolve the service and look up a matching endpoint.

    Returns: tuple whose first element is a bool indicating success or failure
             and the second element is either a ret dict for salt or an object
    """
    # Default the interface from the state name unless the caller specified
    # an interface or public_url explicitly.
    if "interface" not in kwargs and "public_url" not in kwargs:
        kwargs["interface"] = name
    service = __salt__["keystoneng.service_get"](name_or_id=service_name)
    if not service:
        ret["comment"] = "Cannot find service"
        ret["result"] = False
        return (False, ret)
    # Search by everything except the mutable attributes (enabled/url).
    filters = kwargs.copy()
    filters.pop("enabled", None)
    filters.pop("url", None)
    filters["service_id"] = service.id
    kwargs["service_name_or_id"] = service.id
    endpoints = __salt__["keystoneng.endpoint_search"](filters=filters)
    if len(endpoints) > 1:
        ret["comment"] = "Multiple endpoints match criteria"
        ret["result"] = False
        # BUG FIX: this branch previously returned the bare ret dict instead
        # of the documented (bool, value) tuple, which broke the two-value
        # unpack performed by present()/absent().
        return (False, ret)
    endpoint = endpoints[0] if endpoints else None
    return (True, endpoint)
def present(name, service_name, auth=None, **kwargs):
    """
    Ensure an endpoint exists and is up-to-date

    name
        Interface name

    url
        URL of the endpoint

    service_name
        Service name or ID

    region
        The region name to assign the endpoint

    enabled
        Boolean to control if endpoint is enabled
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    kwargs = __utils__["args.clean_kwargs"](**kwargs)
    __salt__["keystoneng.setup_clouds"](auth)
    # _common returns (True, endpoint-or-None) on success and (False, ret) on
    # failure. The previous chained assignment
    # (success, val = _, endpoint = _common(...)) obscured this contract and
    # left an unused throwaway variable.
    success, val = _common(ret, name, service_name, kwargs)
    if not success:
        return val
    endpoint = val
    if not endpoint:
        if __opts__["test"] is True:
            ret["result"] = None
            ret["changes"] = kwargs
            ret["comment"] = "Endpoint will be created."
            return ret
        # NOTE(SamYaple): Endpoints are returned as a list which can contain
        # several items depending on the options passed
        endpoints = __salt__["keystoneng.endpoint_create"](**kwargs)
        if len(endpoints) == 1:
            ret["changes"] = endpoints[0]
        else:
            for i, endpoint in enumerate(endpoints):
                ret["changes"][i] = endpoint
        ret["comment"] = "Created endpoint"
        return ret
    changes = __salt__["keystoneng.compare_changes"](endpoint, **kwargs)
    if changes:
        if __opts__["test"] is True:
            ret["result"] = None
            ret["changes"] = changes
            ret["comment"] = "Endpoint will be updated."
            return ret
        kwargs["endpoint_id"] = endpoint.id
        __salt__["keystoneng.endpoint_update"](**kwargs)
        ret["changes"].update(changes)
        ret["comment"] = "Updated endpoint"
    return ret
def absent(name, service_name, auth=None, **kwargs):
    """
    Ensure an endpoint does not exists

    name
        Interface name

    url
        URL of the endpoint

    service_name
        Service name or ID

    region
        The region name to assign the endpoint
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    __salt__["keystoneng.setup_clouds"](auth)
    # _common returns (True, endpoint-or-None) on success and (False, ret) on
    # failure. The previous chained assignment obscured this contract.
    success, val = _common(ret, name, service_name, kwargs)
    if not success:
        return val
    endpoint = val
    if endpoint:
        if __opts__["test"] is True:
            ret["result"] = None
            ret["changes"] = {"id": endpoint.id}
            ret["comment"] = "Endpoint will be deleted."
            return ret
        __salt__["keystoneng.endpoint_delete"](id=endpoint.id)
        ret["changes"]["id"] = endpoint.id
        ret["comment"] = "Deleted endpoint"
    # Fix: the final return line previously carried stray non-Python artifact
    # text fused onto it, which made the module unparseable.
    return ret
from os import path
# Functions in this module that the Salt monitoring system may call as checks.
__monitor__ = [
    "status",
]
def _validate_int(name, value, limits=(), strip="%"):
"""
Validate the named integer within the supplied limits inclusive and
strip supplied unit characters
"""
comment = ""
# Must be integral
try:
if isinstance(value, str):
value = value.strip(" " + strip)
value = int(value)
except (TypeError, ValueError):
comment += "{} must be an integer ".format(name)
# Must be in range
else:
if len(limits) == 2:
if value < limits[0] or value > limits[1]:
comment += "{0} must be in the range [{1[0]}, {1[1]}] ".format(
name, limits
)
return value, comment
def _status_mount(name, ret, minimum, maximum, absolute, free, data):
    """
    Evaluate usage limits for a mount point already present in the
    ``disk.usage`` data and fill in ``ret`` via ``_check_min_max``.
    """
    mount = data[name]
    if absolute:
        # Raw kilobyte figures straight from the usage data.
        used = int(mount["used"])
        available = int(mount["available"])
    else:
        # POSIX-compliant df output reports percent used as 'capacity'.
        used = int(mount["capacity"].strip("%"))
        available = 100 - used
    # Attach the raw usage record for the monitoring system.
    ret["data"] = mount
    return _check_min_max(absolute, free, available, used, maximum, minimum, ret)
def _status_path(directory, ret, minimum, maximum, absolute, free):
    """
    Evaluate usage limits for an arbitrary directory using
    ``status.diskusage`` and fill in ``ret`` via ``_check_min_max``.
    """
    if not path.isdir(directory):
        ret["result"] = False
        ret["comment"] += "Directory {} does not exist or is not a directory".format(
            directory
        )
        return ret
    data = __salt__["status.diskusage"](directory)
    total = int(data[directory]["total"])
    avail = int(data[directory]["available"])
    if absolute:
        # Kilobyte figures.
        used = total - avail
        available = avail
    elif total == 0:
        # Avoid division by zero on empty/virtual filesystems.
        used = 0
        available = 0
    else:
        # Percentages rounded to one decimal place.
        used = round(float(total - avail) / total * 100, 1)
        available = round(float(avail) / total * 100, 1)
    ret["data"] = data
    return _check_min_max(absolute, free, available, used, maximum, minimum, ret)
def _check_min_max(absolute, free, available, used, maximum, minimum, ret):
unit = "KB" if absolute else "%"
if minimum is not None:
if free:
if available < minimum:
ret["comment"] = (
"Disk available space is below minimum"
" of {0} {2} at {1} {2}"
"".format(minimum, available, unit)
)
return ret
else:
if used < minimum:
ret[
"comment"
] = "Disk used space is below minimum of {0} {2} at {1} {2}".format(
minimum, used, unit
)
return ret
if maximum is not None:
if free:
if available > maximum:
ret["comment"] = (
"Disk available space is above maximum"
" of {0} {2} at {1} {2}"
"".format(maximum, available, unit)
)
return ret
else:
if used > maximum:
ret[
"comment"
] = "Disk used space is above maximum of {0} {2} at {1} {2}".format(
maximum, used, unit
)
return ret
ret["comment"] = "Disk used space in acceptable range"
ret["result"] = True
return ret
def status(name, maximum=None, minimum=None, absolute=False, free=False):
    """
    Return the current disk usage stats for the named mount point

    name
        Disk mount or directory for which to check used space

    maximum
        The maximum disk utilization

    minimum
        The minimum disk utilization

    absolute
        By default, the utilization is measured in percentage. Set
        the `absolute` flag to use kilobytes.

        .. versionadded:: 2016.11.0

    free
        By default, `minimum` & `maximum` refer to the amount of used space.
        Set to `True` to evaluate the free space instead.
    """
    # Monitoring state, no changes will be made so no test interface needed
    ret = {
        "name": name,
        "result": False,
        "comment": "",
        "changes": {},
        "data": {},
    }  # Data field for monitoring state
    # Validate extrema
    if maximum is not None:
        if not absolute:
            maximum, comment = _validate_int("maximum", maximum, [0, 100])
        else:
            maximum, comment = _validate_int("maximum", maximum, strip="KB")
        ret["comment"] += comment
    if minimum is not None:
        if not absolute:
            minimum, comment = _validate_int("minimum", minimum, [0, 100])
        else:
            minimum, comment = _validate_int("minimum", minimum, strip="KB")
        ret["comment"] += comment
    if minimum is not None and maximum is not None:
        if minimum >= maximum:
            ret["comment"] += "minimum must be less than maximum "
    if ret["comment"]:
        # Any validation failure aborts with result=False.
        return ret
    data = __salt__["disk.usage"]()
    # Validate name
    if name not in data:
        # Not a known mount point; fall back to checking it as a directory.
        ret["comment"] += "Disk mount {} not present. ".format(name)
        return _status_path(name, ret, minimum, maximum, absolute, free)
    else:
        # Fix: the final return line previously carried stray non-Python
        # artifact text fused onto it, which made the module unparseable.
        return _status_mount(name, ret, minimum, maximum, absolute, free, data)
def __virtual__():
    """
    Load only when the lvs execution module is available in ``__salt__``.
    """
    if "lvs.get_rules" not in __salt__:
        return (False, "lvs module could not be loaded")
    return "lvs_service"
def present(
    name,
    protocol=None,
    service_address=None,
    scheduler="wlc",
):
    """
    Ensure that the named service is present.

    name
        The LVS service name

    protocol
        The service protocol

    service_address
        The LVS service address

    scheduler
        Algorithm for allocating TCP connections and UDP datagrams to real servers.

    .. code-block:: yaml

        lvstest:
          lvs_service.present:
            - service_address: 1.1.1.1:80
            - protocol: tcp
            - scheduler: rr
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    # check service
    service_check = __salt__["lvs.check_service"](
        protocol=protocol, service_address=service_address
    )
    if service_check is True:
        # Service exists; verify that its scheduler also matches.
        service_rule_check = __salt__["lvs.check_service"](
            protocol=protocol, service_address=service_address, scheduler=scheduler
        )
        if service_rule_check is True:
            ret["comment"] = "LVS Service {} is present".format(name)
            return ret
        else:
            if __opts__["test"]:
                ret["result"] = None
                ret[
                    "comment"
                ] = "LVS Service {} is present but some options should update".format(
                    name
                )
                return ret
            else:
                # edit_service returns True on success, an error string otherwise.
                service_edit = __salt__["lvs.edit_service"](
                    protocol=protocol,
                    service_address=service_address,
                    scheduler=scheduler,
                )
                if service_edit is True:
                    ret["comment"] = "LVS Service {} has been updated".format(name)
                    ret["changes"][name] = "Update"
                    return ret
                else:
                    ret["result"] = False
                    ret["comment"] = "LVS Service {} update failed".format(name)
                    return ret
    else:
        if __opts__["test"]:
            ret[
                "comment"
            ] = "LVS Service {} is not present and needs to be created".format(name)
            ret["result"] = None
            return ret
        else:
            # add_service returns True on success, an error string otherwise.
            service_add = __salt__["lvs.add_service"](
                protocol=protocol, service_address=service_address, scheduler=scheduler
            )
            if service_add is True:
                ret["comment"] = "LVS Service {} has been created".format(name)
                ret["changes"][name] = "Present"
                return ret
            else:
                ret["comment"] = "LVS Service {} create failed({})".format(
                    name, service_add
                )
                ret["result"] = False
                return ret
def absent(name, protocol=None, service_address=None):
    """
    Ensure the LVS service is absent.

    name
        The name of the LVS service

    protocol
        The service protocol

    service_address
        The LVS service address
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    # check if service exists and remove it
    service_check = __salt__["lvs.check_service"](
        protocol=protocol, service_address=service_address
    )
    if service_check is True:
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "LVS Service {} is present and needs to be removed".format(
                name
            )
            return ret
        # delete_service returns True on success, an error string otherwise.
        service_delete = __salt__["lvs.delete_service"](
            protocol=protocol, service_address=service_address
        )
        if service_delete is True:
            ret["comment"] = "LVS Service {} has been removed".format(name)
            ret["changes"][name] = "Absent"
            return ret
        else:
            ret["comment"] = "LVS Service {} removed failed({})".format(
                name, service_delete
            )
            ret["result"] = False
            return ret
    else:
        # Fix: the final return line previously carried stray non-Python
        # artifact text fused onto it, which made the module unparseable.
        ret[
            "comment"
        ] = "LVS Service {} is not present, so it cannot be removed".format(name)
        return ret
import salt.utils.data
def __virtual__():
    """
    Load only if the win_servermanager execution module is loaded.
    """
    if "win_servermanager.install" not in __salt__:
        return (False, "win_servermanager module could not be loaded")
    return "win_servermanager"
def installed(
    name,
    features=None,
    recurse=False,
    restart=False,
    source=None,
    exclude=None,
    **kwargs
):
    """
    Install the windows feature. To install a single feature, use the ``name``
    parameter. To install multiple features, use the ``features`` parameter.

    .. note::
        Some features require reboot after un/installation. If so, until the
        server is restarted other features can not be installed!

    Args:

        name (str):
            Short name of the feature (the right column in
            win_servermanager.list_available). This can be a single feature or a
            string of features in a comma delimited list (no spaces)

            .. note::
                A list is not allowed in the name parameter of any state. Use
                the ``features`` parameter if you want to pass the features as a
                list

        features (Optional[list]):
            A list of features to install. If this is passed it will be used
            instead of the ``name`` parameter.

            .. versionadded:: 2018.3.0

        recurse (Optional[bool]):
            Install all sub-features as well. If the feature is installed but
            one of its sub-features are not installed set this will install
            additional sub-features. This argument was previously renamed from
            ``force``. To ensure backwards compatibility ``force`` will
            continue to work but please update your states to use the preferred
            ``recurse`` arg.

        source (Optional[str]):
            Path to the source files if missing from the target system. None
            means that the system will use windows update services to find the
            required files. Default is None

        restart (Optional[bool]):
            Restarts the computer when installation is complete, if required by
            the role/feature installed. Default is False

        exclude (Optional[str]):
            The name of the feature to exclude when installing the named
            feature. This can be a single feature, a string of features in a
            comma-delimited list (no spaces), or a list of features.

            .. warning::
                As there is no exclude option for the ``Add-WindowsFeature``
                or ``Install-WindowsFeature`` PowerShell commands the features
                named in ``exclude`` will be installed with other sub-features
                and will then be removed. **If the feature named in ``exclude``
                is not a sub-feature of one of the installed items it will still
                be removed.**

    Example:

        Do not use the role or feature names mentioned in the PKGMGR
        documentation. To get a list of available roles and features run the
        following command:

        .. code-block:: bash

            salt <minion_name> win_servermanager.list_available

        Use the name in the right column of the results.

        .. code-block:: yaml

            # Installs the IIS Web Server Role (Web-Server)
            IIS-WebServerRole:
              win_servermanager.installed:
                - recurse: True
                - name: Web-Server

            # Install multiple features, exclude the Web-Service
            install_multiple_features:
              win_servermanager.installed:
                - recurse: True
                - features:
                  - RemoteAccess
                  - XPS-Viewer
                  - SNMP-Service
                - exclude:
                  - Web-Server
    """
    # ``force`` is the deprecated name for ``recurse``; discard it if supplied.
    if "force" in kwargs:
        kwargs.pop("force")
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}
    # Check if features is not passed, use name. Split commas
    if features is None:
        features = name.split(",")
    # Make sure features is a list, split commas
    if not isinstance(features, list):
        features = features.split(",")
    # Determine if the feature is installed
    old = __salt__["win_servermanager.list_installed"]()
    cur_feat = []
    for feature in features:
        if feature not in old:
            ret["changes"][feature] = "Will be installed recurse={}".format(recurse)
        elif recurse:
            # Installed, but recurse may still pull in missing sub-features.
            ret["changes"][feature] = "Already installed but might install sub-features"
        else:
            cur_feat.append(feature)
    if cur_feat:
        cur_feat.insert(0, "The following features are already installed:")
        ret["comment"] = "\n- ".join(cur_feat)
    if not ret["changes"]:
        # Everything requested is already present; nothing to do.
        return ret
    if __opts__["test"]:
        ret["result"] = None
        return ret
    # Install the features
    status = __salt__["win_servermanager.install"](
        features, recurse=recurse, restart=restart, source=source, exclude=exclude
    )
    ret["result"] = status["Success"]
    # Show items failed to install
    fail_feat = []
    new_feat = []
    rem_feat = []
    for feature in status["Features"]:
        # Features that failed to install or be removed
        if not status["Features"][feature].get("Success", True):
            fail_feat.append("- {}".format(feature))
        # Features that installed
        elif "(exclude)" not in status["Features"][feature]["Message"]:
            new_feat.append("- {}".format(feature))
        # Show items that were removed because they were part of `exclude`
        elif "(exclude)" in status["Features"][feature]["Message"]:
            rem_feat.append("- {}".format(feature))
    if fail_feat:
        fail_feat.insert(0, "Failed to install the following:")
    if new_feat:
        new_feat.insert(0, "Installed the following:")
    if rem_feat:
        rem_feat.insert(0, "Removed the following (exclude):")
    ret["comment"] = "\n".join(fail_feat + new_feat + rem_feat)
    # Get the changes
    new = __salt__["win_servermanager.list_installed"]()
    ret["changes"] = salt.utils.data.compare_dicts(old, new)
    return ret
def removed(name, features=None, remove_payload=False, restart=False):
    """
    Remove the windows feature To remove a single feature, use the ``name``
    parameter. To remove multiple features, use the ``features`` parameter.

    Args:

        name (str):
            Short name of the feature (the right column in
            win_servermanager.list_available). This can be a single feature or a
            string of features in a comma-delimited list (no spaces)

            .. note::
                A list is not allowed in the name parameter of any state. Use
                the ``features`` parameter if you want to pass the features as a
                list

        features (Optional[list]):
            A list of features to remove. If this is passed it will be used
            instead of the ``name`` parameter.

            .. versionadded:: 2018.3.0

        remove_payload (Optional[bool]):
            True will cause the feature to be removed from the side-by-side
            store. To install the feature in the future you will need to
            specify the ``source``

        restart (Optional[bool]):
            Restarts the computer when uninstall is complete if required by the
            role/feature uninstall. Default is False

    .. note::
        Some features require a reboot after uninstall. If so the feature will
        not be completely uninstalled until the server is restarted.

    Example:

        Do not use the role or feature names mentioned in the PKGMGR
        documentation. To get a list of available roles and features run the
        following command:

        .. code-block:: bash

            salt <minion_name> win_servermanager.list_available

        Use the name in the right column of the results.

        .. code-block:: yaml

            # Uninstall the IIS Web Server Rol (Web-Server)
            IIS-WebserverRole:
              win_servermanager.removed:
                - name: Web-Server

            # Uninstall multiple features, reboot if required
            uninstall_multiple_features:
              win_servermanager.removed:
                - features:
                  - RemoteAccess
                  - XPX-Viewer
                  - SNMP-Service
                - restart: True
    """
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}
    # Check if features is not passed, use name. Split commas
    if features is None:
        features = name.split(",")
    # Make sure features is a list, split commas
    if not isinstance(features, list):
        features = features.split(",")
    # Determine if the feature is installed
    old = __salt__["win_servermanager.list_installed"]()
    rem_feat = []
    for feature in features:
        if feature in old:
            ret["changes"][feature] = "Will be removed"
        else:
            rem_feat.append(feature)
    if rem_feat:
        rem_feat.insert(0, "The following features are not installed:")
        ret["comment"] = "\n- ".join(rem_feat)
    if not ret["changes"]:
        # Nothing requested is installed; nothing to do.
        return ret
    if __opts__["test"]:
        ret["result"] = None
        return ret
    # Remove the features
    status = __salt__["win_servermanager.remove"](
        features, remove_payload=remove_payload, restart=restart
    )
    ret["result"] = status["Success"]
    # Some items failed to uninstall
    fail_feat = []
    rem_feat = []
    for feature in status["Features"]:
        # Use get because sometimes 'Success' isn't defined such as when the
        # feature is already uninstalled
        if not status["Features"][feature].get("Success", True):
            # Show items that failed to uninstall
            fail_feat.append("- {}".format(feature))
        else:
            # Show items that uninstalled
            rem_feat.append("- {}".format(feature))
    if fail_feat:
        fail_feat.insert(0, "Failed to remove the following:")
    if rem_feat:
        rem_feat.insert(0, "Removed the following:")
    ret["comment"] = "\n".join(fail_feat + rem_feat)
    # Get the changes
    new = __salt__["win_servermanager.list_installed"]()
    ret["changes"] = salt.utils.data.compare_dicts(old, new)
    # Fix: the final return line previously carried stray non-Python artifact
    # text fused onto it, which made the module unparseable.
    return ret
import logging
import salt.utils.data
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = "pdbedit"
def __virtual__():
    """
    Load this state module only when the pdbedit execution module is loadable.
    """
    if "pdbedit.create" not in __salt__:
        return (
            False,
            "{} state module can only be loaded when the pdbedit module is available".format(
                __virtualname__
            ),
        )
    return True
def absent(name):
    """
    Ensure user account is absent

    name : string
        username
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    # Nothing to do if the account does not exist
    if name not in __salt__["pdbedit.list"](False):
        ret["comment"] = "account {login} is absent".format(login=name)
        return ret

    res = __salt__["pdbedit.delete"](name)
    if res[name] in ["deleted"]:
        ret["changes"].update(res)
        ret["comment"] = "account {login} has been removed".format(login=name)
    elif res[name] not in ["absent"]:
        # delete reported an unexpected state; surface the failure instead
        # of returning result=False with an empty comment
        ret["result"] = False
        ret["comment"] = "failed to remove account {login}: {state}".format(
            login=name, state=res[name]
        )
    return ret
def managed(name, **kwargs):
    """
    Manage user account

    login : string
        login name
    password : string
        password
    password_hashed : boolean
        set if password is a nt hash instead of plain text
    domain : string
        users domain
    profile : string
        profile path
    script : string
        logon script
    drive : string
        home drive
    homedir : string
        home directory
    fullname : string
        full name
    account_desc : string
        account description
    machine_sid : string
        specify the machines new primary group SID or rid
    user_sid : string
        specify the users new primary group SID or rid
    account_control : string
        specify user account control properties

        .. note::
            Only the following can be set:
            - N: No password required
            - D: Account disabled
            - H: Home directory required
            - L: Automatic Locking
            - X: Password does not expire
    reset_login_hours : boolean
        reset the users allowed logon hours
    reset_bad_password_count : boolean
        reset the stored bad login counter
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    # snapshot the account before modification so updates can be diffed
    prior = __salt__["pdbedit.list"](hashes=True).get(name, {})

    # delegate the actual work to the execution module
    kwargs["login"] = name
    outcome = __salt__["pdbedit.modify"](**kwargs)
    state = outcome[name]

    # translate the module result into state changes
    if state == "created":
        ret["changes"] = outcome
    elif state == "updated":
        current = __salt__["pdbedit.list"](hashes=True)[name]
        ret["changes"][name] = salt.utils.data.compare_dicts(prior, current)
    elif state != "unchanged":
        # anything else is an error message from pdbedit
        ret["result"] = False
        ret["comment"] = state
    return ret
def present(name, **kwargs):
    """
    Ensure the user account exists; alias for pdbedit.managed
    """
    return managed(name, **kwargs)
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
import logging
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
def repo_managed(
    name,
    present=None,
    absent=None,
    prune=False,
    repo_update=False,
    namespace=None,
    flags=None,
    kvflags=None,
):
    """
    Make sure the repository is updated.

    name
        (string) Not used.

    present
        (list) List of repository to be present. It's a list of dict: [{'name': 'local_name', 'url': 'repository_url'}]

    absent
        (list) List of local name repository to be absent.

    prune
        (boolean - default: False) If True, all repository already present but not in the present list would be removed.

    repo_update
        (boolean - default: False) If True, the Helm repository is updated after a repository add or remove.

    namespace
        (string) The namespace scope for this request.

    flags
        (list) Flags in argument of the command without values. ex: ['help', '--help']

    kvflags
        (dict) Flags in argument of the command with values. ex: {'v': 2, '--v': 4}

    Example:

    .. code-block:: yaml

        helm_repository_is_managed:
          helm.repo_managed:
            - present:
              - name: local_name_1
                url: repository_url
            - absent:
              - local_name_2
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Helm repo is managed.",
    }
    # Both helm.repo_manage and helm.repo_update must be loadable; fail
    # early with an explanatory comment if either module is missing.
    if "helm.repo_manage" not in __salt__:
        ret["result"] = False
        ret["comment"] = "'helm.repo_manage' modules not available on this minion."
    elif "helm.repo_update" not in __salt__:
        ret["result"] = False
        ret["comment"] = "'helm.repo_update' modules not available on this minion."
    elif __opts__.get("test", False):
        # Test mode: report intent without touching the repositories.
        ret["result"] = None
        ret["comment"] = "Helm repo would have been managed."
    else:
        try:
            result = __salt__["helm.repo_manage"](
                present=present,
                absent=absent,
                prune=prune,
                namespace=namespace,
                flags=flags,
                kvflags=kvflags,
            )
            if result["failed"]:
                # At least one add/remove operation did not succeed.
                ret["comment"] = "Failed to add or remove some repositories."
                ret["changes"] = result
                ret["result"] = False
            elif result["added"] or result["removed"]:
                # Something changed; optionally refresh the repo indexes too.
                if repo_update:
                    result_repo_update = __salt__["helm.repo_update"](
                        namespace=namespace, flags=flags, kvflags=kvflags
                    )
                    result.update({"repo_update": result_repo_update})
                ret["comment"] = "Repositories were added or removed."
                ret["changes"] = result
        except CommandExecutionError as err:
            # NOTE(review): this handler also covers removal failures,
            # despite the wording of the message.
            ret["result"] = False
            ret["comment"] = "Failed to add some repositories: {}.".format(err)
    return ret
def repo_updated(name, namespace=None, flags=None, kvflags=None):
    """
    Ensure the local helm repositories are synced with their remotes.

    Intended to run after a repository change.

    name
        (string) Not used.

    namespace
        (string) The namespace scope for this request.

    flags
        (list) Flags in argument of the command without values. ex: ['help', '--help']

    kvflags
        (dict) Flags in argument of the command with values. ex: {'v': 2, '--v': 4}

    Example:

    .. code-block:: yaml

        helm_repository_is_updated:
          helm.repo_updated
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Helm repo is updated.",
    }
    # The helm.repo_update execution module must be available.
    if "helm.repo_update" not in __salt__:
        ret["result"] = False
        ret["comment"] = "'helm.repo_update' modules not available on this minion."
        return ret
    # Test mode: only report intent.
    if __opts__.get("test", False):
        ret["result"] = None
        ret["comment"] = "Helm repo would have been updated."
        return ret
    try:
        outcome = __salt__["helm.repo_update"](
            namespace=namespace, flags=flags, kvflags=kvflags
        )
    except CommandExecutionError as err:
        ret["result"] = False
        ret["comment"] = "Failed to update some repositories: {}.".format(err)
        return ret
    # Anything other than boolean True is an error payload from the module.
    if outcome is not True:
        ret["result"] = False
        ret["changes"] = outcome
        ret["comment"] = "Failed to sync some repositories."
    return ret
def release_present(
    name,
    chart,
    values=None,
    version=None,
    namespace=None,
    set=None,
    flags=None,
    kvflags=None,
):
    """
    Make sure the release name is present.

    name
        (string) The release name to install.

    chart
        (string) The chart to install.

    values
        (string) Absolute path to the values.yaml file.

    version
        (string) The exact chart version to install. If this is not specified, the latest version is installed.

    namespace
        (string) The namespace scope for this request.

    set
        (string or list) Set a values on the command line.

    flags
        (list) Flags in argument of the command without values. ex: ['help', '--help']

    kvflags
        (dict) Flags in argument of the command with values. ex: {'v': 2, '--v': 4}

    Example:

    .. code-block:: yaml

        helm_release_is_present:
          helm.release_present:
            - name: release_name
            - chart: repo/chart

        # In dry-run mode.
        helm_release_is_present_dry-run:
          helm.release_present:
            - name: release_name
            - chart: repo/chart
            - flags:
              - dry-run

        # With values.yaml file.
        helm_release_is_present_values:
          helm.release_present:
            - name: release_name
            - chart: repo/chart
            - kvflags:
                values: /path/to/values.yaml
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Helm release {} is present".format(name),
    }
    # All three helm execution module functions must be available.
    if "helm.status" not in __salt__:
        ret["result"] = False
        ret["comment"] = "'helm.status' modules not available on this minion."
    elif "helm.install" not in __salt__:
        ret["result"] = False
        ret["comment"] = "'helm.install' modules not available on this minion."
    elif "helm.upgrade" not in __salt__:
        ret["result"] = False
        ret["comment"] = "'helm.upgrade' modules not available on this minion."
    elif __opts__.get("test", False):
        ret["result"] = None
        ret["comment"] = "Helm release would have been installed or updated."
    else:
        # Upgrade when the release already exists (helm.status returns a
        # dict), otherwise perform a fresh install. Both module functions
        # take identical arguments, so the previously duplicated
        # install/upgrade result-handling branches are merged here.
        release_old_status = __salt__["helm.status"](release=name, namespace=namespace)
        helm_function = (
            "helm.upgrade" if isinstance(release_old_status, dict) else "helm.install"
        )
        deploy_result = __salt__[helm_function](
            release=name,
            chart=chart,
            values=values,
            version=version,
            namespace=namespace,
            set=set,
            flags=flags,
            kvflags=kvflags,
        )
        if isinstance(deploy_result, bool) and deploy_result:
            # Success: report the resulting release status (minus the bulky
            # rendered manifest) as the state's changes.
            release_cur_status = __salt__["helm.status"](
                release=name, namespace=namespace
            )
            if isinstance(release_cur_status, dict):
                release_cur_status.pop("manifest")
                ret["changes"] = release_cur_status
            else:
                ret["result"] = False
                ret["comment"] = release_cur_status
        else:
            # A non-True result is an error message from the module.
            ret["result"] = False
            ret["comment"] = deploy_result
    return ret
def release_absent(name, namespace=None, flags=None, kvflags=None):
    """
    Ensure the named helm release is not deployed.

    name
        (string) The release name to uninstall.

    namespace
        (string) The namespace scope for this request.

    flags
        (list) Flags in argument of the command without values. ex: ['help', '--help']

    kvflags
        (dict) Flags in argument of the command with values. ex: {'v': 2, '--v': 4}

    Example:

    .. code-block:: yaml

        helm_release_is_absent:
          helm.release_absent:
            - name: release_name

        # In dry-run mode.
        helm_release_is_absent_dry-run:
          helm.release_absent:
            - name: release_name
            - flags:
              - dry-run
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Helm release {} is absent.".format(name),
    }
    # Both module functions must be loadable; order matches the reported
    # error priority (uninstall first, then status).
    for required in ("helm.uninstall", "helm.status"):
        if required not in __salt__:
            ret["result"] = False
            ret["comment"] = "'{}' modules not available on this minion.".format(
                required
            )
            return ret
    # Test mode: only report intent.
    if __opts__.get("test", False):
        ret["result"] = None
        ret["comment"] = "Helm release would have been uninstalled."
        return ret
    # Only act when the release actually exists (status returns a dict).
    if isinstance(__salt__["helm.status"](release=name, namespace=namespace), dict):
        outcome = __salt__["helm.uninstall"](
            release=name, namespace=namespace, flags=flags, kvflags=kvflags
        )
        if outcome is True:
            ret["changes"] = {"absent": name}
        else:
            # Anything other than boolean True is an error message.
            ret["result"] = False
            ret["comment"] = outcome
    return ret
import copy
__virtualname__ = "redis"
def __virtual__():
    """
    Load only when the redis execution module is present in __salt__
    """
    if "redis.set_key" not in __salt__:
        return (False, "redis module could not be loaded")
    return __virtualname__
def string(name, value, expire=None, expireat=None, **connection_args):
    """
    Ensure the named redis key holds the specified value.

    name
        Redis key to manage

    value
        Data to persist under the key

    expire
        Time to live for the key, in seconds

    expireat
        UNIX timestamp at which the key expires; takes precedence over
        ``expire``
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Key already set to defined value",
    }

    # Only write when the stored value differs from the requested one.
    if __salt__["redis.get_key"](name, **connection_args) != value:
        __salt__["redis.set_key"](name, value, **connection_args)
        ret["changes"][name] = "Value updated"
        ret["comment"] = "Key updated to new value"

    # Apply expiration: an absolute timestamp wins over a relative TTL.
    if expireat:
        __salt__["redis.expireat"](name, expireat, **connection_args)
        ret["changes"]["expireat"] = f"Key expires at {expireat}"
    elif expire:
        __salt__["redis.expire"](name, expire, **connection_args)
        ret["changes"]["expire"] = f"TTL set to {expire} seconds"
    return ret
def absent(name, keys=None, **connection_args):
    """
    Ensure the given key (or keys) do not exist in redis.

    name
        Key to ensure absent from redis

    keys
        list of keys to ensure absent, name will be ignored if this is used
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Key(s) specified already absent",
    }

    # Batch mode: a non-empty ``keys`` list takes precedence over ``name``.
    if keys:
        if not isinstance(keys, list):
            ret["result"] = False
            ret["comment"] = "`keys` not formed as a list type"
            return ret
        existing = []
        for key in keys:
            if __salt__["redis.exists"](key, **connection_args):
                existing.append(key)
        if existing:
            __salt__["redis.delete"](*existing, **connection_args)
            ret["changes"]["deleted"] = existing
            ret["comment"] = "Keys deleted"
        return ret

    # Single-key mode.
    if __salt__["redis.exists"](name, **connection_args):
        __salt__["redis.delete"](name, **connection_args)
        ret["comment"] = "Key deleted"
        ret["changes"]["deleted"] = [name]
    return ret
def slaveof(
    name,
    sentinel_host=None,
    sentinel_port=None,
    sentinel_password=None,
    **connection_args
):
    """
    Set this redis instance as a slave.

    .. versionadded:: 2016.3.0

    name
        Master to make this a slave of

    sentinel_host
        Ip of the sentinel to check for the master

    sentinel_port
        Port of the sentinel to check for the master

    sentinel_password
        Password of the sentinel to check for the master
    """
    ret = {
        "name": name,
        "changes": {},
        "result": False,
        "comment": "Failed to setup slave",
    }

    kwargs = copy.copy(connection_args)
    sentinel_master = __salt__["redis.sentinel_get_master_ip"](
        name, sentinel_host, sentinel_port, sentinel_password
    )
    # If this minion owns the master's IP, it already is the master.
    if sentinel_master["master_host"] in __salt__["network.ip_addrs"]():
        ret["result"] = True
        ret["comment"] = "Minion is the master: {}".format(name)
        return ret

    first_master = __salt__["redis.get_master_ip"](**connection_args)
    if first_master == sentinel_master:
        ret["result"] = True
        ret["comment"] = "Minion already slave of master: {}".format(name)
        return ret

    if __opts__["test"] is True:
        # sentinel_get_master_ip returns {'master_host': ..., 'master_port': ...};
        # the previous code read sentinel_master["host"] here, which raised a
        # KeyError whenever the state ran in test mode.
        ret["comment"] = "Minion will be made a slave of {}: {}".format(
            name, sentinel_master["master_host"]
        )
        ret["result"] = None
        return ret

    # Reconfigure replication and verify it took effect.
    kwargs.update(**sentinel_master)
    __salt__["redis.slaveof"](**kwargs)
    current_master = __salt__["redis.get_master_ip"](**connection_args)
    if current_master != sentinel_master:
        return ret

    ret["result"] = True
    ret["changes"] = {
        "old": first_master,
        "new": current_master,
    }
    ret["comment"] = "Minion successfully connected to master: {}".format(name)
    return ret
__func_alias__ = {"set_": "set"}
def __virtual__():
    """
    Load only when the eselect execution module is available in __salt__
    """
    if "eselect.exec_action" not in __salt__:
        return (False, "eselect module could not be loaded")
    return "eselect"
def set_(name, target, module_parameter=None, action_parameter=None):
    """
    Ensure the given eselect module points at the given target

    name
        The name of the module

    target
        The target to be set for this module

    module_parameter
        additional params passed to the defined module

    action_parameter
        additional params passed to the defined action

    .. code-block:: yaml

        profile:
          eselect.set:
            - target: hardened/linux/amd64
    """
    ret = {"changes": {}, "comment": "", "name": name, "result": True}

    current = __salt__["eselect.get_current_target"](
        name, module_parameter=module_parameter, action_parameter=action_parameter
    )

    # Nothing to do when the module is already pointed at the target.
    if target == current:
        ret["comment"] = "Target '{}' is already set on '{}' module.".format(
            target, name
        )
        return ret

    # Refuse targets eselect does not offer for this module.
    if target not in __salt__["eselect.get_target_list"](
        name, action_parameter=action_parameter
    ):
        ret["result"] = False
        ret["comment"] = "Target '{}' is not available for '{}' module.".format(
            target, name
        )
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Target '{}' will be set on '{}' module.".format(target, name)
        return ret

    if __salt__["eselect.set_target"](
        name,
        target,
        module_parameter=module_parameter,
        action_parameter=action_parameter,
    ):
        ret["changes"][name] = {"old": current, "new": target}
        ret["comment"] = "Target '{}' set on '{}' module.".format(target, name)
    else:
        ret["result"] = False
        ret["comment"] = "Target '{}' failed to be set on '{}' module.".format(
            target, name
        )
    return ret
r"""
Manage Windows Local Group Policy
=================================
.. versionadded:: 2016.11.0
This state module allows you to configure local Group Policy on Windows. You
can ensure the setting of a single policy or multiple policies in one pass.
Single policies must specify the policy name, the setting, and the policy class
(Machine/User/Both). Here are some examples for setting a single policy setting.
Example single policy configuration:
.. code-block:: yaml
Ensure Account Lockout Duration:
lgpo.set:
- name: Account lockout duration
- setting: 90
- policy_class: Machine
Example using abbreviated form:
.. code-block:: yaml
Account lockout duration:
lgpo.set:
- setting: 120
- policy_class: Machine
It is also possible to set multiple policies in a single state. This is done by
setting the settings under either `computer_policy` or `user_policy`. Here are
some examples for setting multiple policy settings in a single state.
Multiple policy configuration
.. code-block:: yaml
Company Local Group Policy:
lgpo.set:
- computer_policy:
Deny log on locally:
- Guest
Account lockout duration: 120
Account lockout threshold: 10
Reset account lockout counter after: 120
Enforce password history: 24
Maximum password age: 60
Minimum password age: 1
Minimum password length: 14
Password must meet complexity requirements: Enabled
Store passwords using reversible encryption: Disabled
Configure Automatic Updates:
            Configure automatic updating: 4 - Auto download and schedule the install
Scheduled install day: 7 - Every Saturday
Scheduled install time: 17:00
Specify intranet Microsoft update service location:
Set the intranet update service for detecting updates: http://mywsus
Set the intranet statistics server: http://mywsus
- user_policy:
Do not process the legacy run list: Enabled
.. code-block:: text
server_policy:
lgpo.set:
- computer_policy:
Maximum password age: 60
Minimum password age: 1
Minimum password length: 14
Account lockout duration: 120
Account lockout threshold: 10
Reset account lockout counter after: 120
Manage auditing and security log:
- "BUILTIN\\Administrators"
Replace a process level token:
- "NT AUTHORITY\\NETWORK SERVICE"
- "NT AUTHORITY\\LOCAL SERVICE"
"Accounts: Guest account status": Disabled
"Accounts: Rename guest account": Not_4_U
"Audit: Audit the use of Backup and Restore privilege": Enabled
"Interactive logon: Do not display last user name": Enabled
"Network\\DNS Client\\Dynamic update": Disabled
"System\\Logon\\Do not display the Getting Started welcome screen at logon": Enabled
"Windows Components\\Remote Desktop Services\\Remote Desktop Session Host\\Connections\\Select RDP transport protocols":
"Select Transport Type": "Use both UDP and TCP"
"Windows Components\\Windows Update\\Allow Automatic Updates immediate installation": Enabled
"Windows Components\\Windows Update\\Allow non-administrators to receive update notifications": Disabled
"Windows Components\\Windows Update\\Always automatically restart at the scheduled time":
"The restart timer will give users this much time to save their work (minutes)": 15
"Windows Components\\Windows Update\\Automatic Updates detection frequency":
"Check for updates at the following interval (hours)": 1
"Windows Components\\Windows Update\\Configure Automatic Updates":
"Configure automatic updating": 4 - Auto download and schedule the install
"Install during automatic maintenance": False
"Scheduled install day": 7 - Every Saturday
"Scheduled install time": "17:00"
"Windows Components\\Windows Update\\Delay Restart for scheduled installations":
"Wait the following period before proceeding with a scheduled restart (minutes)": 1
"Windows Components\\Windows Update\\No auto-restart with logged on users for scheduled automatic updates installations": Disabled
"Windows Components\\Windows Update\\Re-prompt for restart with scheduled installations":
"Wait the following period before prompting again with a scheduled restart (minutes)": 30
"Windows Components\\Windows Update\\Reschedule Automatic Updates scheduled installations": Disabled
"Windows Components\\Windows Update\\Specify intranet Microsoft update service location":
"Set the intranet update service for detecting updates": http://mywsus
"Set the intranet statistics server": http://mywsus
- cumulative_rights_assignments: True
Some policy settings can't be set on their own and require that other policy
settings are set at the same time. It can be difficult to figure out what
additional settings need to be applied. The easiest way to do this is to
modify the setting manually using the Group Policy Editor (`gpedit.msc`) on
the machine. Then `get` the policy settings configured on that machine. Use
the following command:
.. code-block:: bash
salt-call --local lgpo.get machine
For example, if I want to set the Windows Update settings for a Windows
Server 2016 machine I would go into the Group Policy Editor (`gpedit.msc`)
and configure the group policy. That policy can be found at: Computer
Configuration -> Administrative Templates -> Windows Components -> Windows
Update -> Configure Automatic Updates. You have the option to "Enable" the
policy and set some configuration options. In this example, just click
"Enable" and accept the default configuration options. Click "OK" to apply
the setting.
Now run the `get` command as shown above. You will find the following in
the minion return:
.. code-block:: bash
Windows Components\Windows Update\Configure Automatic Updates:
----------
Configure automatic updating:
3 - Auto download and notify for install
Install during automatic maintenance:
False
Install updates for other Microsoft products:
False
Scheduled install day:
0 - Every day
Scheduled install time:
03:00
This shows you that to enable the "Configure Automatic Updates" policy you
also have to configure the following settings:
- Configure automatic updating
- Install during automatic maintenance
- Install updates for other Microsoft products
- Scheduled install day
- Scheduled install time
So, if you were writing a state for the above policy, it would look like
this:
.. code-block:: bash
configure_windows_update_settings:
lgpo.set:
- computer_policy:
Configure Automatic Updates:
Configure automatic updating: 3 - Auto download and notify for install
Install during automatic maintenance: False
Install updates for other Microsoft products: False
Scheduled install day: 0 - Every day
Scheduled install time: 03:00
.. note::
It is important that you put names of policies and settings exactly as
they are displayed in the return. That includes capitalization and
punctuation such as periods, dashes, etc. This rule applies to both
the setting name and the setting value.
.. warning::
From time to time Microsoft updates the Administrative templates on the
machine. This can cause the policy name to change or the list of
settings that must be applied at the same time. These settings often
change between versions of Windows as well. For example, Windows Server
2019 allows you to also specify a specific week of the month to apply
the update.
Another thing to note is the long policy name returned by the `get` function:
.. code-block:: bash
Windows Components\Windows Update\Configure Automatic Updates:
When we wrote the state for this policy we only used the final portion of
the policy name, `Configure Automatic Updates`. This usually works fine, but
if you are having problems, you may try the long policy name.
When writing the long name in a state file either wrap the name in single
quotes to make yaml see it as raw data, or escape the back slashes.
.. code-block:: bash
'Windows Components\Windows Update\Configure Automatic Updates:'
or
Windows Components\\Windows Update\\Configure Automatic Updates:
"""
import logging
import salt.utils.data
import salt.utils.dictdiffer
import salt.utils.json
import salt.utils.stringutils
import salt.utils.versions
import salt.utils.win_functions
log = logging.getLogger(__name__)
__virtualname__ = "lgpo"
__func_alias__ = {"set_": "set"}
def __virtual__():
    """
    Load this state only when the win_lgpo execution module is available
    """
    if "lgpo.set" not in __salt__:
        return False, "lgpo module could not be loaded"
    return __virtualname__
def _compare_policies(new_policy, current_policy):
"""
Helper function that returns ``True`` if the policies are the same,
otherwise ``False``
"""
# Compared dicts, lists, and strings
if isinstance(new_policy, (str, int)):
return new_policy == current_policy
elif isinstance(new_policy, list):
if isinstance(current_policy, list):
return salt.utils.data.compare_lists(new_policy, current_policy) == {}
else:
return False
elif isinstance(new_policy, dict):
if isinstance(current_policy, dict):
return salt.utils.data.compare_dicts(new_policy, current_policy) == {}
else:
return False
def _convert_to_unicode(data):
"""
Helper function that makes sure all items in the dictionary are unicode for
comparing the existing state with the desired state. This function is only
needed for Python 2 and can be removed once we've migrated to Python 3.
The data returned by the current settings sometimes has a mix of unicode and
string values (these don't matter in Py3). This causes the comparison to
say it's not in the correct state even though it is. They basically compares
apples to apples, etc.
Also, in Python 2, the utf-16 encoded strings remain utf-16 encoded (each
character separated by `/x00`) In Python 3 it returns a utf-8 string. This
will just remove all the null bytes (`/x00`), again comparing apples to
apples.
"""
if isinstance(data, str):
data = data.replace("\x00", "")
return salt.utils.stringutils.to_unicode(data)
elif isinstance(data, dict):
return {_convert_to_unicode(k): _convert_to_unicode(v) for k, v in data.items()}
elif isinstance(data, list):
return list(_convert_to_unicode(v) for v in data)
else:
return data
def set_(
    name,
    setting=None,
    policy_class=None,
    computer_policy=None,
    user_policy=None,
    cumulative_rights_assignments=True,
    adml_language="en-US",
):
    """
    Ensure the specified policy is set.

    .. warning::
        The ``setting`` argument cannot be used in conjunction with the
        ``computer_policy`` or ``user_policy`` arguments

    Args:

        name (str): The name of a single policy to configure

        setting (str, dict, list):
            The configuration setting for the single named policy. If this
            argument is used the ``computer_policy`` / ``user_policy`` arguments
            will be ignored

        policy_class (str):
            The policy class of the single named policy to configure. This can
            ``machine``, ``user``, or ``both``

        computer_policy (dict):
            A dictionary of containing the policy name and key/value pairs of a
            set of computer policies to configure. If this argument is used, the
            ``name`` / ``policy_class`` arguments will be ignored

        user_policy (dict):
            A dictionary of containing the policy name and key/value pairs of a
            set of user policies to configure. If this argument is used, the
            ``name`` / ``policy_class`` arguments will be ignored

        cumulative_rights_assignments (bool):
            If user rights assignments are being configured, determines if any
            user right assignment policies specified will be cumulative or
            explicit

        adml_language (str):
            The adml language to use for AMDX policy data/display conversions.
            Default is ``en-US``
    """
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}
    policy_classes = ["machine", "computer", "user", "both"]
    # Map each accepted policy class onto the section heading used by
    # lgpo.get_policy when reporting current settings
    class_map = {
        "computer": "Computer Configuration",
        "machine": "Computer Configuration",
        "user": "User Configuration",
    }
    # ----- argument validation: exactly one of the two calling styles -----
    # (single policy via name/setting/policy_class, or batch via the
    # computer_policy/user_policy dicts) must be used
    if not setting and not computer_policy and not user_policy:
        msg = (
            "At least one of the parameters setting, computer_policy, or "
            "user_policy must be specified."
        )
        ret["result"] = False
        ret["comment"] = msg
        return ret
    if setting and not policy_class:
        msg = (
            "A single policy setting was specified but the policy_class "
            "was not specified."
        )
        ret["result"] = False
        ret["comment"] = msg
        return ret
    if setting and (computer_policy or user_policy):
        msg = (
            "The setting and computer_policy/user_policy parameters are "
            "mutually exclusive. Please specify either a policy name and "
            "setting or a computer_policy and/or user_policy dict"
        )
        ret["result"] = False
        ret["comment"] = msg
        return ret
    if policy_class and policy_class.lower() not in policy_classes:
        # NOTE(review): the "{}" placeholder below is never filled in, so
        # this failure comment contains a literal "{}" instead of the list
        # of valid classes
        msg = "The policy_class parameter must be one of the following: {}"
        ret["result"] = False
        ret["comment"] = msg
        return ret
    # Normalize both calling styles into computer_policy/user_policy dicts
    # keyed by policy name
    if not setting:
        if computer_policy and not isinstance(computer_policy, dict):
            msg = "The computer_policy must be specified as a dict."
            ret["result"] = False
            ret["comment"] = msg
            return ret
        if user_policy and not isinstance(user_policy, dict):
            msg = "The user_policy must be specified as a dict."
            ret["result"] = False
            ret["comment"] = msg
            return ret
    else:
        user_policy = {}
        computer_policy = {}
        if policy_class.lower() == "both":
            user_policy[name] = setting
            computer_policy[name] = setting
        elif policy_class.lower() == "user":
            user_policy[name] = setting
        elif policy_class.lower() in ["machine", "computer"]:
            computer_policy[name] = setting
    pol_data = {
        "user": {"requested_policy": user_policy, "policy_lookup": {}},
        "machine": {"requested_policy": computer_policy, "policy_lookup": {}},
    }
    # ----- lookup: resolve metadata and current value for every requested
    # policy; collect unknown-policy and invalid-element-name errors -----
    current_policy = {}
    deprecation_comments = []
    for p_class, p_data in pol_data.items():
        if p_data["requested_policy"]:
            for p_name, _ in p_data["requested_policy"].items():
                lookup = __salt__["lgpo.get_policy_info"](
                    policy_name=p_name,
                    policy_class=p_class,
                    adml_language=adml_language,
                )
                if lookup["policy_found"]:
                    pol_data[p_class]["policy_lookup"][p_name] = lookup
                    # Since we found the policy, let's get the current setting
                    # as well
                    current_policy.setdefault(class_map[p_class], {})
                    current_policy[class_map[p_class]][p_name] = __salt__[
                        "lgpo.get_policy"
                    ](
                        policy_name=p_name,
                        policy_class=p_class,
                        adml_language=adml_language,
                        return_value_only=True,
                    )
                    # Validate element names
                    if isinstance(p_data["requested_policy"][p_name], dict):
                        valid_names = []
                        for element in lookup["policy_elements"]:
                            valid_names.extend(element["element_aliases"])
                        for e_name in p_data["requested_policy"][p_name]:
                            if e_name not in valid_names:
                                new_e_name = e_name.split(":")[-1].strip()
                                # If we find an invalid name, test the new
                                # format. If found, add to deprecation comments
                                # and bail
                                if new_e_name in valid_names:
                                    msg = (
                                        '"{}" is no longer valid.\n'
                                        'Please use "{}" instead.'
                                        "".format(e_name, new_e_name)
                                    )
                                    deprecation_comments.append(msg)
                                else:
                                    msg = "Invalid element name: {}".format(e_name)
                                    ret["comment"] = "\n".join(
                                        [ret["comment"], msg]
                                    ).strip()
                                # Fail for both a deprecated and an invalid name
                                ret["result"] = False
                else:
                    ret["comment"] = "\n".join(
                        [ret["comment"], lookup["message"]]
                    ).strip()
                    ret["result"] = False
    # Bail out if any lookup failed, prefixing deprecation guidance when
    # old-style element names were seen
    if not ret["result"]:
        if deprecation_comments:
            deprecation_comments.insert(
                0, "The LGPO module changed the way it gets policy element names."
            )
        deprecation_comments.append(ret["comment"])
        ret["comment"] = "\n".join(deprecation_comments).strip()
        return ret
    log.debug("pol_data == %s", pol_data)
    log.debug("current policy == %s", current_policy)
    # compare policies
    policy_changes = []
    for p_class, p_data in pol_data.items():
        requested_policy = p_data.get("requested_policy")
        if requested_policy:
            for p_name, p_setting in requested_policy.items():
                if p_name in current_policy[class_map[p_class]]:
                    # compare the requested and current policies
                    log.debug(
                        "need to compare %s from current/requested policy", p_name
                    )
                    # resolve user names in the requested policy and the current
                    # policy so that we are comparing apples to apples
                    # NOTE(review): the loop variables below shadow the state's
                    # ``name`` parameter; harmless here since ``name`` is not
                    # read again, but worth renaming
                    if p_data["policy_lookup"][p_name]["rights_assignment"]:
                        resolved_names = []
                        for name in p_data["requested_policy"][p_name]:
                            resolved_names.append(
                                salt.utils.win_functions.get_sam_name(name)
                            )
                        p_data["requested_policy"][p_name] = resolved_names
                        resolved_names = []
                        for name in current_policy[class_map[p_class]][p_name]:
                            resolved_names.append(
                                salt.utils.win_functions.get_sam_name(name)
                            )
                        current_policy[class_map[p_class]][p_name] = resolved_names
                    changes = False
                    # Round-trip both sides through JSON so that tuples/lists
                    # and key ordering differences do not cause false diffs
                    requested_policy_json = salt.utils.json.dumps(
                        p_data["requested_policy"][p_name], sort_keys=True
                    )
                    current_policy_json = salt.utils.json.dumps(
                        current_policy[class_map[p_class]][p_name], sort_keys=True
                    )
                    requested_policy_check = salt.utils.json.loads(
                        requested_policy_json
                    )
                    current_policy_check = salt.utils.json.loads(current_policy_json)
                    # Are the requested and current policies identical
                    policies_are_equal = _compare_policies(
                        requested_policy_check, current_policy_check
                    )
                    if not policies_are_equal:
                        # For cumulative rights assignments, only additions
                        # count as changes; extra existing members are kept
                        if (
                            p_data["policy_lookup"][p_name]["rights_assignment"]
                            and cumulative_rights_assignments
                        ):
                            for user in p_data["requested_policy"][p_name]:
                                if (
                                    user
                                    not in current_policy[class_map[p_class]][p_name]
                                ):
                                    user = salt.utils.win_functions.get_sam_name(user)
                                    if (
                                        user
                                        not in current_policy[class_map[p_class]][
                                            p_name
                                        ]
                                    ):
                                        changes = True
                        else:
                            changes = True
                    if changes:
                        log.debug("%s current policy != requested policy", p_name)
                        log.debug(
                            "We compared %s to %s",
                            requested_policy_json,
                            current_policy_json,
                        )
                        policy_changes.append(p_name)
                    else:
                        msg = '"{}" is already set'.format(p_name)
                        log.debug(msg)
                else:
                    # No current value was found, so it must be configured
                    policy_changes.append(p_name)
                    log.debug("policy %s is not set, we will configure it", p_name)
    # ----- apply (or report, in test mode) the pending changes -----
    if __opts__["test"]:
        if policy_changes:
            msg = "The following policies are set to change:\n{}".format(
                "\n".join(policy_changes)
            )
            ret["result"] = None
        else:
            msg = "All specified policies are properly configured"
        deprecation_comments.append(msg)
        ret["comment"] = "\n".join(deprecation_comments).strip()
    else:
        if policy_changes:
            _ret = __salt__["lgpo.set"](
                computer_policy=pol_data["machine"]["requested_policy"],
                user_policy=pol_data["user"]["requested_policy"],
                cumulative_rights_assignments=cumulative_rights_assignments,
                adml_language=adml_language,
            )
            if _ret:
                ret["result"] = _ret
                # Re-read every requested policy to build an old/new diff
                new_policy = {}
                for p_class, p_data in pol_data.items():
                    if p_data["requested_policy"]:
                        for p_name, p_setting in p_data["requested_policy"].items():
                            new_policy.setdefault(class_map[p_class], {})
                            new_policy[class_map[p_class]][p_name] = __salt__[
                                "lgpo.get_policy"
                            ](
                                policy_name=p_name,
                                policy_class=p_class,
                                adml_language=adml_language,
                                return_value_only=True,
                            )
                ret["changes"] = salt.utils.dictdiffer.deep_diff(
                    old=current_policy, new=new_policy
                )
                if ret["changes"]:
                    msg = "The following policies changed:\n{}".format(
                        "\n".join(policy_changes)
                    )
                else:
                    # lgpo.set reported success but nothing actually changed
                    msg = "Failed to set the following policies:\n{}".format(
                        "\n".join(policy_changes)
                    )
                    ret["result"] = False
            else:
                msg = (
                    "Errors occurred while attempting to configure policies: {}".format(
                        _ret
                    )
                )
                ret["result"] = False
            deprecation_comments.append(msg)
            ret["comment"] = "\n".join(deprecation_comments).strip()
        else:
            msg = "All specified policies are properly configured"
            deprecation_comments.append(msg)
            ret["comment"] = "\n".join(deprecation_comments).strip()
    return ret
import logging
import salt.utils.validate.net
log = logging.getLogger(__name__)
def present(name, ip, comment="", clean=False):  # pylint: disable=C0103
    """
    Ensures that the named host is present with the given ip

    name
        The host to assign an ip to

    ip
        The ip addr(s) to apply to the host. Can be a single IP or a list of IP
        addresses.

    comment
        A comment to include for the host entry

        .. versionadded:: 3001

    clean
        Remove any entries which don't match those configured in the ``ip``
        option. Default is ``False``.

        .. versionadded:: 2018.3.4
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    if not isinstance(ip, list):
        ip = [ip]
    all_hosts = __salt__["hosts.list_hosts"]()
    comments = []
    # Work queues built during the sweep below: (address, hostname) pairs to
    # add/remove, and (address, comment) pairs whose comment must be (re)set.
    to_add = set()
    to_remove = set()
    update_comment = set()
    # First check for IPs not currently in the hosts file
    to_add.update([(addr, name) for addr in ip if addr not in all_hosts])
    if comment:
        update_comment.update([(addr, comment) for addr in ip if addr not in all_hosts])
    # Now sweep through the hosts file and look for entries matching either the
    # IP address(es) or hostname.
    for addr, host_info in all_hosts.items():
        if addr not in ip:
            if "aliases" in host_info and name in host_info["aliases"]:
                # Found match for hostname, but the corresponding IP is not in
                # our list, so we need to remove it.
                if clean:
                    to_remove.add((addr, name))
                else:
                    # Without ``clean`` we only warn about the stray entry.
                    ret.setdefault("warnings", []).append(
                        "Host {0} present for IP address {1}. To get rid of "
                        "this warning, either run this state with 'clean' "
                        "set to True to remove {0} from {1}, or add {1} to "
                        "the 'ip' argument.".format(name, addr)
                    )
        else:
            if "aliases" in host_info and name in host_info["aliases"]:
                # Entry exists; queue a comment update if the requested
                # comment is missing or differs from the current one.
                if (
                    comment
                    and "comment" in host_info
                    and host_info["comment"] != comment
                ):
                    update_comment.add((addr, comment))
                elif comment and "comment" not in host_info:
                    update_comment.add((addr, comment))
                else:
                    # No changes needed for this IP address and hostname
                    comments.append("Host {} ({}) already present".format(name, addr))
            else:
                # IP address listed in hosts file, but hostname is not present.
                # We will need to add it.
                if salt.utils.validate.net.ip_addr(addr):
                    to_add.add((addr, name))
                    if comment:
                        update_comment.add((addr, comment))
                else:
                    ret["result"] = False
                    comments.append("Invalid IP Address for {} ({})".format(name, addr))
    # NOTE: the loops below rebind the ``name``/``comment`` parameters; this is
    # harmless because every queued tuple was built from those same values.
    # ``continue`` after a failure skips recording the change in ``changes``.
    for addr, name in to_add:
        if __opts__["test"]:
            ret["result"] = None
            comments.append("Host {} ({}) would be added".format(name, addr))
        else:
            if __salt__["hosts.add_host"](addr, name):
                comments.append("Added host {} ({})".format(name, addr))
            else:
                ret["result"] = False
                comments.append("Failed to add host {} ({})".format(name, addr))
                continue
        ret["changes"].setdefault("added", {}).setdefault(addr, []).append(name)
    for addr, comment in update_comment:
        if __opts__["test"]:
            comments.append("Comment for {} ({}) would be added".format(addr, comment))
        else:
            if __salt__["hosts.set_comment"](addr, comment):
                comments.append("Set comment for host {} ({})".format(addr, comment))
            else:
                ret["result"] = False
                comments.append(
                    "Failed to add comment for host {} ({})".format(addr, comment)
                )
                continue
        ret["changes"].setdefault("comment_added", {}).setdefault(addr, []).append(
            comment
        )
    for addr, name in to_remove:
        if __opts__["test"]:
            ret["result"] = None
            comments.append("Host {} ({}) would be removed".format(name, addr))
        else:
            if __salt__["hosts.rm_host"](addr, name):
                comments.append("Removed host {} ({})".format(name, addr))
            else:
                ret["result"] = False
                comments.append("Failed to remove host {} ({})".format(name, addr))
                continue
        ret["changes"].setdefault("removed", {}).setdefault(addr, []).append(name)
    ret["comment"] = "\n".join(comments)
    return ret
def absent(name, ip):  # pylint: disable=C0103
    """
    Ensure that the named host is absent

    name
        The host to remove

    ip
        The ip addr(s) of the host to remove
    """
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}

    # Normalize to a list so a single address and a list are handled the same.
    addresses = ip if isinstance(ip, list) else [ip]

    messages = []
    for addr in addresses:
        if not __salt__["hosts.has_pair"](addr, name):
            ret["result"] = True
            messages.append("Host {} ({}) already absent".format(name, addr))
            continue
        if __opts__["test"]:
            messages.append("Host {} ({}) needs to be removed".format(name, addr))
            continue
        if __salt__["hosts.rm_host"](addr, name):
            ret["changes"] = {"host": name}
            ret["result"] = True
            messages.append("Removed host {} ({})".format(name, addr))
        else:
            ret["result"] = False
            messages.append("Failed to remove host")
    ret["comment"] = "\n".join(messages)
    return ret
def only(name, hostnames):
    """
    Ensure that only the given hostnames are associated with the
    given IP address.

    .. versionadded:: 2016.3.0

    name
        The IP address to associate with the given hostnames.

    hostnames
        Either a single hostname or a list of hostnames to associate
        with the given IP address in the given order. Any other
        hostname associated with the IP address is removed. If no
        hostnames are specified, all hostnames associated with the
        given IP address are removed.
    """
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}
    if isinstance(hostnames, str):
        hostnames = [hostnames]

    # Compare the space-joined alias lists, old vs. requested.
    current = " ".join(__salt__["hosts.get_alias"](name))
    desired = " ".join(entry.strip() for entry in hostnames)

    if current == desired:
        ret["result"] = True
        ret["comment"] = 'IP address {} already set to "{}"'.format(name, desired)
        return ret

    if __opts__["test"]:
        ret["comment"] = 'Would change {} from "{}" to "{}"'.format(
            name, current, desired
        )
        return ret

    ret["result"] = __salt__["hosts.set_host"](name, desired)
    if not ret["result"]:
        ret["comment"] = 'hosts.set_host failed to change {} from "{}" to "{}"'.format(
            name, current, desired
        )
        return ret
    ret["comment"] = 'successfully changed {} from "{}" to "{}"'.format(
        name, current, desired
    )
    ret["changes"] = {name: {"old": current, "new": desired}}
    return ret
def __virtual__():
    """
    Only load if the lvs module is available in __salt__
    """
    if "lvs.get_rules" not in __salt__:
        return (False, "lvs module could not be loaded")
    return "lvs_server"
def present(
    name,
    protocol=None,
    service_address=None,
    server_address=None,
    packet_forward_method="dr",
    weight=1,
):
    """
    Ensure that the named service is present.

    name
        The LVS server name

    protocol
        The service protocol

    service_address
        The LVS service address

    server_address
        The real server address.

    packet_forward_method
        The LVS packet forwarding method(``dr`` for direct routing, ``tunnel`` for tunneling, ``nat`` for network access translation).

    weight
        The capacity of a server relative to the others in the pool.

    .. code-block:: yaml

        lvsrs:
          lvs_server.present:
            - protocol: tcp
            - service_address: 1.1.1.1:80
            - server_address: 192.168.0.11:8080
            - packet_forward_method: dr
            - weight: 10
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    # Is the real server already attached to the virtual service?
    # lvs.check_server returns True on success, an error string otherwise.
    server_check = __salt__["lvs.check_server"](
        protocol=protocol,
        service_address=service_address,
        server_address=server_address,
    )
    if server_check is not True:
        # Server is missing: create it (or just report in test mode).
        if __opts__["test"]:
            ret["comment"] = (
                "LVS Server {} in service {}({}) is not present and needs to be"
                " created".format(name, service_address, protocol)
            )
            ret["result"] = None
            return ret
        server_add = __salt__["lvs.add_server"](
            protocol=protocol,
            service_address=service_address,
            server_address=server_address,
            packet_forward_method=packet_forward_method,
            weight=weight,
        )
        if server_add is True:
            ret["comment"] = "LVS Server {} in service {}({}) has been created".format(
                name, service_address, protocol
            )
            ret["changes"][name] = "Present"
            return ret
        # Fixed: this message previously said "LVS Service", inconsistent with
        # every other message in this function.
        ret["comment"] = "LVS Server {} in service {}({}) create failed({})".format(
            name, service_address, protocol, server_add
        )
        ret["result"] = False
        return ret

    # Server exists: verify the forwarding method and weight also match.
    server_rule_check = __salt__["lvs.check_server"](
        protocol=protocol,
        service_address=service_address,
        server_address=server_address,
        packet_forward_method=packet_forward_method,
        weight=weight,
    )
    if server_rule_check is True:
        ret["comment"] = "LVS Server {} in service {}({}) is present".format(
            name, service_address, protocol
        )
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = (
            "LVS Server {} in service {}({}) is present but some options should"
            " update".format(name, service_address, protocol)
        )
        return ret
    server_edit = __salt__["lvs.edit_server"](
        protocol=protocol,
        service_address=service_address,
        server_address=server_address,
        packet_forward_method=packet_forward_method,
        weight=weight,
    )
    if server_edit is True:
        ret["comment"] = "LVS Server {} in service {}({}) has been updated".format(
            name, service_address, protocol
        )
        ret["changes"][name] = "Update"
        return ret
    ret["result"] = False
    ret["comment"] = "LVS Server {} in service {}({}) update failed({})".format(
        name, service_address, protocol, server_edit
    )
    return ret
def absent(name, protocol=None, service_address=None, server_address=None):
    """
    Ensure the LVS Real Server in specified service is absent.

    name
        The name of the LVS server.

    protocol
        The service protocol(only support ``tcp``, ``udp`` and ``fwmark`` service).

    service_address
        The LVS service address.

    server_address
        The LVS real server address.
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    # check if server exists and remove it
    server_check = __salt__["lvs.check_server"](
        protocol=protocol,
        service_address=service_address,
        server_address=server_address,
    )
    if server_check is not True:
        ret["comment"] = (
            "LVS Server {} in service {}({}) is not present, so it cannot be"
            " removed".format(name, service_address, protocol)
        )
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = (
            "LVS Server {} in service {}({}) is present and needs to be"
            " removed".format(name, service_address, protocol)
        )
        return ret

    server_delete = __salt__["lvs.delete_server"](
        protocol=protocol,
        service_address=service_address,
        server_address=server_address,
    )
    if server_delete is True:
        ret["comment"] = "LVS Server {} in service {}({}) has been removed".format(
            name, service_address, protocol
        )
        ret["changes"][name] = "Absent"
        return ret

    ret["comment"] = "LVS Server {} in service {}({}) removed failed({})".format(
        name, service_address, protocol, server_delete
    )
    ret["result"] = False
    return ret
import salt.utils.json
from salt.exceptions import CommandExecutionError
try:
import requests
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
def __virtual__():
    """
    Only load if glassfish module is available
    """
    if HAS_LIBS and "glassfish.enum_connector_c_pool" in __salt__:
        return True
    return (False, "glassfish module could not be loaded")
def _json_to_unicode(data):
    """
    Encode json values in unicode to match that of the API

    Strings are kept as-is; nested dicts are converted recursively; every
    other value is stringified and lowercased (e.g. ``True`` -> ``"true"``).
    """
    converted = {}
    for key, value in data.items():
        if isinstance(value, str):
            converted[key] = value
        elif isinstance(value, dict):
            converted[key] = _json_to_unicode(value)
        else:
            converted[key] = str(value).lower()
    return converted
def _is_updated(old_conf, new_conf):
    """
    Compare the API results to the current statefile data

    Returns a dict of ``{key: {"old": ..., "new": ...}}`` for every key of
    ``new_conf`` whose (lowercased, stringified) value differs from the
    server's current value in ``old_conf``.
    """
    changed = {}
    # Dirty json hacking to get parameters in the same format
    new_conf = _json_to_unicode(
        salt.utils.json.loads(salt.utils.json.dumps(new_conf, ensure_ascii=False))
    )
    old_conf = salt.utils.json.loads(
        salt.utils.json.dumps(old_conf, ensure_ascii=False)
    )
    for key, old_value in old_conf.items():
        # Only keys present in the requested config can count as changed.
        if key not in new_conf:
            continue
        oldval = str(old_value).lower()
        newval = str(new_conf[key]).lower()
        # Treat the server's null/none as equivalent to an empty string.
        if oldval in ("null", "none"):
            oldval = ""
        if newval != oldval:
            changed[key] = {"old": oldval, "new": newval}
    return changed
def _do_element_present(name, elem_type, data, server=None):
    """
    Generic function to create or update an element

    Returns a dict with ``create``/``update`` flags, the resulting
    ``changes`` mapping and ``error`` (None on success).
    """
    ret = {"changes": {}, "update": False, "create": False, "error": None}
    try:
        existing = __salt__["glassfish.enum_{}".format(elem_type)]()
    except requests.ConnectionError:
        if not __opts__["test"]:
            ret["error"] = "Can't connect to the server"
            return ret
        # In test mode an unreachable server is reported as a pending create.
        ret["changes"] = {"Name": name, "Params": data}
        ret["create"] = True
        return ret

    if not existing or name not in existing:
        # Element missing: create it (unless running in test mode).
        ret["changes"] = {"Name": name, "Params": data}
        ret["create"] = True
        if not __opts__["test"]:
            try:
                __salt__["glassfish.create_{}".format(elem_type)](
                    name, server=server, **data
                )
            except CommandExecutionError as error:
                ret["error"] = error
    elif existing and any(data):
        # Element exists and we were given parameters: diff and update.
        current = __salt__["glassfish.get_{}".format(elem_type)](name, server=server)
        diff = _is_updated(current, data)
        if diff:
            ret["update"] = True
            ret["changes"] = diff
            if not __opts__["test"]:
                try:
                    __salt__["glassfish.update_{}".format(elem_type)](
                        name, server=server, **data
                    )
                except CommandExecutionError as error:
                    ret["error"] = error
    return ret
def _do_element_absent(name, elem_type, data, server=None):
    """
    Generic function to delete an element

    name
        Name of the element to delete

    elem_type
        REST element type suffix used to build the ``glassfish.*`` call names

    data
        Extra keyword arguments forwarded to the delete call (e.g. ``cascade``)

    server
        Glassfish server to act on

    Returns a dict with a ``delete`` flag and ``error`` (None on success).
    """
    ret = {"delete": False, "error": None}
    try:
        elements = __salt__["glassfish.enum_{}".format(elem_type)]()
    except requests.ConnectionError:
        if __opts__["test"]:
            # Fixed: previously set the nonexistent "create" key here, which
            # callers never read, so test mode wrongly reported the element
            # as absent when the server was unreachable.
            ret["delete"] = True
            return ret
        ret["error"] = "Can't connect to the server"
        return ret
    if elements and name in elements:
        ret["delete"] = True
        if not __opts__["test"]:
            try:
                __salt__["glassfish.delete_{}".format(elem_type)](
                    name, server=server, **data
                )
            except CommandExecutionError as error:
                ret["error"] = error
    return ret
def connection_factory_present(
    name,
    restype="connection_factory",
    description="",
    enabled=True,
    min_size=1,
    max_size=250,
    resize_quantity=2,
    idle_timeout=300,
    wait_timeout=60,
    reconnect_on_failure=False,
    transaction_support="",
    connection_validation=False,
    server=None,
):
    """
    Ensures that the Connection Factory is present

    name
        Name of the connection factory

    restype
        Type of the connection factory, can be either ``connection_factory``,
        ``queue_connection_factory`` or ``topic_connection_factory``,
        defaults to ``connection_factory``

    description
        Description of the connection factory

    enabled
        Is the connection factory enabled? defaults to ``true``

    min_size
        Minimum and initial number of connections in the pool, defaults to ``1``

    max_size
        Maximum number of connections that can be created in the pool, defaults to ``250``

    resize_quantity
        Number of connections to be removed when idle_timeout expires, defaults to ``2``

    idle_timeout
        Maximum time a connection can remain idle in the pool, in seconds, defaults to ``300``

    wait_timeout
        Maximum time a caller can wait before timeout, in seconds, defaults to ``60``

    reconnect_on_failure
        Close all connections and reconnect on failure (or reconnect only when used), defaults to ``false``

    transaction_support
        Level of transaction support, can be either ``xa_transaction``,
        ``local_transaction`` or ``no_transaction`` (mapped to the GlassFish
        ``XATransaction``, ``LocalTransaction`` and ``NoTransaction`` levels)

    connection_validation
        Connection validation is required, defaults to ``false``
    """
    ret = {"name": name, "result": None, "comment": None, "changes": {}}
    # Manage parameters
    # A connection factory is a connector pool plus a connector resource that
    # references it; build the request payload for each half.
    pool_data = {}
    res_data = {}
    pool_name = "{}-Connection-Pool".format(name)
    if restype == "topic_connection_factory":
        pool_data["connectionDefinitionName"] = "javax.jms.TopicConnectionFactory"
    elif restype == "queue_connection_factory":
        pool_data["connectionDefinitionName"] = "javax.jms.QueueConnectionFactory"
    elif restype == "connection_factory":
        pool_data["connectionDefinitionName"] = "javax.jms.ConnectionFactory"
    else:
        ret["result"] = False
        ret["comment"] = "Invalid restype"
        return ret
    pool_data["description"] = description
    res_data["description"] = description
    res_data["enabled"] = enabled
    res_data["poolName"] = pool_name
    pool_data["steadyPoolSize"] = min_size
    pool_data["maxPoolSize"] = max_size
    pool_data["poolResizeQuantity"] = resize_quantity
    pool_data["idleTimeoutInSeconds"] = idle_timeout
    # The API expects milliseconds; the state takes seconds.
    pool_data["maxWaitTimeInMillis"] = wait_timeout * 1000
    pool_data["failAllConnections"] = reconnect_on_failure
    if transaction_support:
        if transaction_support == "xa_transaction":
            pool_data["transactionSupport"] = "XATransaction"
        elif transaction_support == "local_transaction":
            pool_data["transactionSupport"] = "LocalTransaction"
        elif transaction_support == "no_transaction":
            pool_data["transactionSupport"] = "NoTransaction"
        else:
            ret["result"] = False
            ret["comment"] = "Invalid transaction_support"
            return ret
    pool_data["isConnectionValidationRequired"] = connection_validation
    pool_ret = _do_element_present(pool_name, "connector_c_pool", pool_data, server)
    res_ret = _do_element_present(name, "connector_resource", res_data, server)
    if not pool_ret["error"] and not res_ret["error"]:
        if not __opts__["test"]:
            ret["result"] = True
        if pool_ret["create"] or res_ret["create"]:
            ret["changes"]["pool"] = pool_ret["changes"]
            ret["changes"]["resource"] = res_ret["changes"]
            if __opts__["test"]:
                ret["comment"] = "Connection factory set to be created"
            else:
                ret["comment"] = "Connection factory created"
        elif pool_ret["update"] or res_ret["update"]:
            ret["changes"]["pool"] = pool_ret["changes"]
            ret["changes"]["resource"] = res_ret["changes"]
            if __opts__["test"]:
                ret["comment"] = "Connection factory set to be updated"
            else:
                ret["comment"] = "Connection factory updated"
        else:
            ret["result"] = True
            ret["changes"] = {}
            ret["comment"] = "Connection factory is already up-to-date"
    else:
        ret["result"] = False
        ret["comment"] = "ERROR: {} // {}".format(pool_ret["error"], res_ret["error"])
    return ret
def connection_factory_absent(name, both=True, server=None):
    """
    Ensures the transaction factory is absent.

    name
        Name of the connection factory

    both
        Delete both the pool and the resource, defaults to ``true``
    """
    ret = {"name": name, "result": None, "comment": None, "changes": {}}

    # Deleting the pool (optionally cascading) also removes the resource.
    pool_ret = _do_element_absent(
        "{}-Connection-Pool".format(name),
        "connector_c_pool",
        {"cascade": both},
        server,
    )
    if pool_ret["error"]:
        ret["result"] = False
        ret["comment"] = "Error: {}".format(pool_ret["error"])
        return ret

    if not pool_ret["delete"]:
        ret["result"] = True
        ret["comment"] = "Connection Factory doesn't exist"
    elif __opts__["test"]:
        ret["comment"] = "Connection Factory set to be deleted"
    else:
        ret["result"] = True
        ret["comment"] = "Connection Factory deleted"
    return ret
def destination_present(
    name, physical, restype="queue", description="", enabled=True, server=None
):
    """
    Ensures that the JMS Destination Resource (queue or topic) is present

    name
        The JMS Queue/Topic name

    physical
        The Physical destination name

    restype
        The JMS Destination resource type, either ``queue`` or ``topic``, defaults is ``queue``

    description
        A description of the resource

    enabled
        Defaults to ``True``
    """
    ret = {"name": name, "result": None, "comment": None, "changes": {}}

    # Map the state's restype to the JMS interface / implementation classes.
    type_map = {
        "queue": ("javax.jms.Queue", "com.sun.messaging.Queue"),
        "topic": ("javax.jms.Topic", "com.sun.messaging.Topic"),
    }
    if restype not in type_map:
        ret["result"] = False
        ret["comment"] = "Invalid restype"
        return ret
    res_type, class_name = type_map[restype]
    params = {
        "resType": res_type,
        "className": class_name,
        "properties": {"Name": physical},
        "description": description,
        "enabled": enabled,
    }

    jms_ret = _do_element_present(name, "admin_object_resource", params, server)
    if jms_ret["error"]:
        ret["result"] = False
        ret["comment"] = "Error from API: {}".format(jms_ret["error"])
        return ret

    testing = __opts__["test"]
    if not testing:
        ret["result"] = True
    if jms_ret["create"]:
        if testing:
            ret["comment"] = "JMS Queue set to be created"
        else:
            ret["changes"] = jms_ret["changes"]
            ret["comment"] = "JMS queue created"
    elif jms_ret["update"]:
        if testing:
            ret["comment"] = "JMS Queue set to be updated"
        else:
            ret["changes"] = jms_ret["changes"]
            ret["comment"] = "JMS Queue updated"
    else:
        ret["result"] = True
        ret["comment"] = "JMS Queue already up-to-date"
    return ret
def destination_absent(name, server=None):
    """
    Ensures that the JMS Destination doesn't exists

    name
        Name of the JMS Destination
    """
    ret = {"name": name, "result": None, "comment": None, "changes": {}}

    jms_ret = _do_element_absent(name, "admin_object_resource", {}, server)
    if jms_ret["error"]:
        ret["result"] = False
        ret["comment"] = "Error: {}".format(jms_ret["error"])
        return ret

    if not jms_ret["delete"]:
        ret["result"] = True
        ret["comment"] = "JMS Queue doesn't exist"
    elif __opts__["test"]:
        ret["comment"] = "JMS Queue set to be deleted"
    else:
        ret["result"] = True
        ret["comment"] = "JMS Queue deleted"
    return ret
def jdbc_datasource_present(
    name,
    description="",
    enabled=True,
    restype="datasource",
    vendor="mysql",
    sql_url="",
    sql_user="",
    sql_password="",
    min_size=8,
    max_size=32,
    resize_quantity=2,
    idle_timeout=300,
    wait_timeout=60,
    non_transactional=False,
    transaction_isolation="",
    isolation_guaranteed=True,
    server=None,
):
    """
    Ensures that the JDBC Datasource exists

    name
        Name of the datasource

    description
        Description of the datasource

    enabled
        Is the datasource enabled? defaults to ``true``

    restype
        Resource type, can be ``datasource``, ``xa_datasource``,
        ``connection_pool_datasource`` or ``driver``, defaults to ``datasource``

    vendor
        SQL Server type, currently supports ``mysql``,
        ``postgresql`` and ``mssql``, defaults to ``mysql``

    sql_url
        URL of the server in jdbc form

    sql_user
        Username for the server

    sql_password
        Password for that username

    min_size
        Minimum and initial number of connections in the pool, defaults to ``8``

    max_size
        Maximum number of connections that can be created in the pool, defaults to ``32``

    resize_quantity
        Number of connections to be removed when idle_timeout expires, defaults to ``2``

    idle_timeout
        Maximum time a connection can remain idle in the pool, in seconds, defaults to ``300``

    wait_timeout
        Maximum time a caller can wait before timeout, in seconds, defaults to ``60``

    non_transactional
        Return non-transactional connections

    transaction_isolation
        Defaults to the JDBC driver default

    isolation_guaranteed
        All connections use the same isolation level
    """
    ret = {"name": name, "result": None, "comment": None, "changes": {}}
    # Manage parameters
    # The datasource is a JDBC connection pool plus a "jdbc/<name>" resource
    # that points at it; build the request payload for each half.
    res_name = "jdbc/{}".format(name)
    pool_data = {}
    pool_data_properties = {}
    res_data = {}
    # NOTE(review): an unrecognized ``restype`` leaves resType unset and later
    # raises KeyError at datasources[vendor][restype] instead of returning
    # ``result: False`` like the other states in this module — confirm intended.
    if restype == "datasource":
        pool_data["resType"] = "javax.sql.DataSource"
    elif restype == "xa_datasource":
        pool_data["resType"] = "javax.sql.XADataSource"
    elif restype == "connection_pool_datasource":
        pool_data["resType"] = "javax.sql.ConnectionPoolDataSource"
    elif restype == "driver":
        pool_data["resType"] = "javax.sql.Driver"
    # vendor -> restype -> fully-qualified JDBC driver/datasource class name
    datasources = {}
    datasources["mysql"] = {
        "driver": "com.mysql.jdbc.Driver",
        "datasource": "com.mysql.jdbc.jdbc2.optional.MysqlDataSource",
        "xa_datasource": "com.mysql.jdbc.jdbc2.optional.MysqlXADataSource",
        "connection_pool_datasource": (
            "com.mysql.jdbc.jdbc2.optional.MysqlConnectionPoolDataSource"
        ),
    }
    datasources["postgresql"] = {
        "driver": "org.postgresql.Driver",
        "datasource": "org.postgresql.ds.PGSimpleDataSource",
        "xa_datasource": "org.postgresql.xa.PGXADataSource",
        "connection_pool_datasource": "org.postgresql.ds.PGConnectionPoolDataSource",
    }
    datasources["mssql"] = {
        "driver": "com.microsoft.sqlserver.jdbc.SQLServerDriver",
        "datasource": "com.microsoft.sqlserver.jdbc.SQLServerDataSource",
        "xa_datasource": "com.microsoft.sqlserver.jdbc.SQLServerXADataSource",
        "connection_pool_datasource": (
            "com.microsoft.sqlserver.jdbc.SQLServerConnectionPoolDataSource"
        ),
    }
    # Drivers use a different attribute name than the datasource variants.
    if restype == "driver":
        pool_data["driverClassname"] = datasources[vendor]["driver"]
    else:
        pool_data["datasourceClassname"] = datasources[vendor][restype]
    pool_data_properties["url"] = sql_url
    pool_data_properties["user"] = sql_user
    pool_data_properties["password"] = sql_password
    pool_data["properties"] = pool_data_properties
    pool_data["description"] = description
    res_data["description"] = description
    res_data["poolName"] = name
    res_data["enabled"] = enabled
    pool_data["steadyPoolSize"] = min_size
    pool_data["maxPoolSize"] = max_size
    pool_data["poolResizeQuantity"] = resize_quantity
    pool_data["idleTimeoutInSeconds"] = idle_timeout
    # The API expects milliseconds; the state takes seconds.
    pool_data["maxWaitTimeInMillis"] = wait_timeout * 1000
    pool_data["nonTransactionalConnections"] = non_transactional
    pool_data["transactionIsolationLevel"] = transaction_isolation
    pool_data["isIsolationLevelGuaranteed"] = isolation_guaranteed
    pool_ret = _do_element_present(name, "jdbc_connection_pool", pool_data, server)
    res_ret = _do_element_present(res_name, "jdbc_resource", res_data, server)
    if not pool_ret["error"] and not res_ret["error"]:
        if not __opts__["test"]:
            ret["result"] = True
        if pool_ret["create"] or res_ret["create"]:
            ret["changes"]["pool"] = pool_ret["changes"]
            ret["changes"]["resource"] = res_ret["changes"]
            if __opts__["test"]:
                ret["comment"] = "JDBC Datasource set to be created"
            else:
                ret["comment"] = "JDBC Datasource created"
        elif pool_ret["update"] or res_ret["update"]:
            ret["changes"]["pool"] = pool_ret["changes"]
            ret["changes"]["resource"] = res_ret["changes"]
            if __opts__["test"]:
                ret["comment"] = "JDBC Datasource set to be updated"
            else:
                ret["comment"] = "JDBC Datasource updated"
        else:
            ret["result"] = True
            ret["changes"] = {}
            ret["comment"] = "JDBC Datasource is already up-to-date"
    else:
        ret["result"] = False
        ret["comment"] = "ERROR: {} // {}".format(pool_ret["error"], res_ret["error"])
    return ret
def jdbc_datasource_absent(name, both=True, server=None):
    """
    Ensures the JDBC Datasource doesn't exists

    name
        Name of the datasource

    both
        Delete both the pool and the resource, defaults to ``true``
    """
    ret = {"name": name, "result": None, "comment": None, "changes": {}}

    # Deleting the pool (optionally cascading) also removes the resource.
    pool_ret = _do_element_absent(
        name, "jdbc_connection_pool", {"cascade": both}, server
    )
    if pool_ret["error"]:
        ret["result"] = False
        ret["comment"] = "Error: {}".format(pool_ret["error"])
        return ret

    if not pool_ret["delete"]:
        ret["result"] = True
        ret["comment"] = "JDBC Datasource doesn't exist"
    elif __opts__["test"]:
        ret["comment"] = "JDBC Datasource set to be deleted"
    else:
        ret["result"] = True
        ret["comment"] = "JDBC Datasource deleted"
    return ret
def system_properties_present(server=None, **kwargs):
    """
    Ensures that the system properties are present

    properties
        The system properties
    """
    ret = {"name": "", "result": None, "comment": None, "changes": {}}
    # "name" is injected by the state compiler and is not a system property.
    del kwargs["name"]
    try:
        data = __salt__["glassfish.get_system_properties"](server=server)
    except requests.ConnectionError:
        if __opts__["test"]:
            ret["changes"] = kwargs
            ret["result"] = None
            return ret
        else:
            ret["error"] = "Can't connect to the server"
            return ret
    # Removed a leftover debug assignment that dumped the full server property
    # map into ``changes`` (it leaked through on update failure).
    if not data == kwargs:
        data.update(kwargs)
        if not __opts__["test"]:
            try:
                __salt__["glassfish.update_system_properties"](data, server=server)
                ret["changes"] = kwargs
                ret["result"] = True
                ret["comment"] = "System properties updated"
            except CommandExecutionError as error:
                ret["comment"] = error
                ret["result"] = False
        else:
            ret["result"] = None
            ret["changes"] = kwargs
            # Fixed: was assigned to the misspelled "coment" key, so the
            # test-mode comment never appeared in the state output.
            ret["comment"] = "System properties would have been updated"
    else:
        ret["changes"] = {}
        ret["result"] = True
        ret["comment"] = "System properties are already up-to-date"
    return ret
def system_properties_absent(name, server=None):
    """
    Ensures that the system property doesn't exists

    name
        Name of the system property
    """
    # Fixed: "name" was previously hard-coded to ""; Salt keys state results
    # on this field, so return the actual property name.
    ret = {"name": name, "result": None, "comment": None, "changes": {}}
    try:
        data = __salt__["glassfish.get_system_properties"](server=server)
    except requests.ConnectionError:
        if __opts__["test"]:
            ret["changes"] = {"Name": name}
            ret["result"] = None
            return ret
        else:
            ret["error"] = "Can't connect to the server"
            return ret
    if name in data:
        if not __opts__["test"]:
            try:
                __salt__["glassfish.delete_system_properties"](name, server=server)
                ret["result"] = True
                ret["comment"] = "System properties deleted"
            except CommandExecutionError as error:
                ret["comment"] = error
                ret["result"] = False
        else:
            ret["result"] = None
            ret["comment"] = "System properties would have been deleted"
        ret["changes"] = {"Name": name}
    else:
        ret["result"] = True
        ret["comment"] = "System properties are already absent"
    return ret
import hashlib
import logging
import re
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only load if boto is available.
    """
    if "boto_elb.exists" not in __salt__:
        return (False, "boto_elb module could not be loaded")
    return "boto_elb"
def present(
    name,
    listeners,
    availability_zones=None,
    subnets=None,
    subnet_names=None,
    security_groups=None,
    scheme="internet-facing",
    health_check=None,
    attributes=None,
    attributes_from_pillar="boto_elb_attributes",
    cnames=None,
    alarms=None,
    alarms_from_pillar="boto_elb_alarms",
    policies=None,
    policies_from_pillar="boto_elb_policies",
    backends=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
    wait_for_sync=True,
    tags=None,
    instance_ids=None,
    instance_names=None,
):
    """
    Ensure the ELB exists.

    name
        Name of the ELB.

    availability_zones
        A list of availability zones for this ELB.

    listeners
        A list of listener lists; example::

            [
                ['443', 'HTTPS', 'arn:aws:iam::1111111:server-certificate/mycert'],
                ['8443', '80', 'HTTPS', 'HTTP', 'arn:aws:iam::1111111:server-certificate/mycert']
            ]

    subnets
        A list of subnet IDs in your VPC to attach to your LoadBalancer.

    subnet_names
        A list of subnet names in your VPC to attach to your LoadBalancer.

    security_groups
        The security groups assigned to your LoadBalancer within your VPC. Must
        be passed either as a list or a comma-separated string.

        For example, a list:

        .. code-block:: yaml

            - security_groups:
              - secgroup-one
              - secgroup-two

        Or as a comma-separated string:

        .. code-block:: yaml

            - security_groups: secgroup-one,secgroup-two

    scheme
        The type of a LoadBalancer, ``internet-facing`` or ``internal``. Once
        set, can not be modified.

    health_check
        A dict defining the health check for this ELB.

    attributes
        A dict defining the attributes to set on this ELB.
        Unknown keys will be silently ignored.

        See the :mod:`salt.modules.boto_elb.set_attributes` function for
        recognized attributes.

    attributes_from_pillar
        name of pillar dict that contains attributes.   Attributes defined for this specific
        state will override those from pillar.

    cnames
        A list of cname dicts with attributes needed for the DNS add_record state.
        By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier.
        See the boto_route53 state for information about these attributes.
        Other DNS modules can be called by specifying the provider keyword.
        the cnames dict will be passed to the state as kwargs.

        See the :mod:`salt.states.boto_route53` state for information about
        these attributes.

    alarms:
        a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ELB.
        All attributes should be specified except for dimension which will be
        automatically set to this ELB.

        See the :mod:`salt.states.boto_cloudwatch_alarm` state for information
        about these attributes.

    alarms_from_pillar:
        name of pillar dict that contains alarm settings.   Alarms defined for this specific
        state will override those from pillar.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.

    wait_for_sync
        Wait for an INSYNC change status from Route53.

    tags
        dict of tags

    instance_ids
        list of instance ids.  The state will ensure that these, and ONLY these, instances
        are registered with the ELB.  This is additive with instance_names.

    instance_names
        list of instance names.  The state will ensure that these, and ONLY these, instances
        are registered with the ELB.  This is additive with instance_ids.
    """
    # load data from attributes_from_pillar and merge with attributes
    tmp = __salt__["config.option"](attributes_from_pillar, {})
    attributes = salt.utils.dictupdate.update(tmp, attributes) if attributes else tmp
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    if not isinstance(security_groups, (str, list, type(None))):
        msg = (
            "The 'security_group' parameter must be either a list or a "
            "comma-separated string."
        )
        log.error(msg)
        ret.update({"comment": msg, "result": False})
        return ret
    if isinstance(security_groups, str):
        security_groups = security_groups.split(",")
    _ret = _elb_present(
        name,
        availability_zones,
        listeners,
        subnets,
        subnet_names,
        security_groups,
        scheme,
        region,
        key,
        keyid,
        profile,
    )
    ret.update(
        {
            "changes": _ret["changes"],
            "comment": " ".join([ret["comment"], _ret["comment"]]),
        }
    )
    # Propagate the sub-state's result while preserving None ("test mode"):
    # keep our result only when the sub-state succeeded (True).
    ret["result"] = ret["result"] if _ret["result"] else _ret["result"]
    if ret["result"] is False:
        return ret
    exists = __salt__["boto_elb.exists"](name, region, key, keyid, profile)
    # In test mode a not-yet-created ELB can't have sub-resources checked.
    if not exists and __opts__["test"]:
        return ret
    if attributes:
        _ret = _attributes_present(name, attributes, region, key, keyid, profile)
        ret.update(
            {
                "changes": salt.utils.dictupdate.update(
                    ret["changes"], _ret["changes"]
                ),
                "comment": " ".join([ret["comment"], _ret["comment"]]),
            }
        )
        ret["result"] = ret["result"] if _ret["result"] else _ret["result"]
        if ret["result"] is False:
            return ret
    _ret = _health_check_present(name, health_check, region, key, keyid, profile)
    ret.update(
        {
            "changes": salt.utils.dictupdate.update(ret["changes"], _ret["changes"]),
            "comment": " ".join([ret["comment"], _ret["comment"]]),
        }
    )
    ret["result"] = ret["result"] if _ret["result"] else _ret["result"]
    if ret["result"] is False:
        return ret
    if cnames:
        lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
        if lb:
            for cname in cnames:
                _ret = None
                dns_provider = "boto_route53"
                cname.update({"record_type": "CNAME", "value": lb["dns_name"]})
                if "provider" in cname:
                    dns_provider = cname.pop("provider")
                if dns_provider == "boto_route53":
                    # Default any missing connection args from this function's
                    # own parameters via locals() lookup.
                    for p in ("profile", "key", "keyid", "region", "wait_for_sync"):
                        cname[p] = locals().get(p) if p not in cname else cname[p]
                _ret = __states__["boto_route53.present"](**cname)
                ret.update(
                    {
                        "changes": salt.utils.dictupdate.update(
                            ret["changes"], _ret["changes"]
                        ),
                        "comment": " ".join([ret["comment"], _ret["comment"]]),
                    }
                )
                ret["result"] = ret["result"] if _ret["result"] else _ret["result"]
                if ret["result"] is False:
                    return ret
    _ret = _alarms_present(
        name, alarms, alarms_from_pillar, region, key, keyid, profile
    )
    ret.update(
        {
            "changes": salt.utils.dictupdate.update(ret["changes"], _ret["changes"]),
            "comment": " ".join([ret["comment"], _ret["comment"]]),
        }
    )
    ret["result"] = ret["result"] if _ret["result"] else _ret["result"]
    if ret["result"] is False:
        return ret
    _ret = _policies_present(
        name,
        policies,
        policies_from_pillar,
        listeners,
        backends,
        region,
        key,
        keyid,
        profile,
    )
    ret.update(
        {
            "changes": salt.utils.dictupdate.update(ret["changes"], _ret["changes"]),
            "comment": " ".join([ret["comment"], _ret["comment"]]),
        }
    )
    ret["result"] = ret["result"] if _ret["result"] else _ret["result"]
    if ret["result"] is False:
        return ret
    _ret = _tags_present(name, tags, region, key, keyid, profile)
    ret.update(
        {
            "changes": salt.utils.dictupdate.update(ret["changes"], _ret["changes"]),
            "comment": " ".join([ret["comment"], _ret["comment"]]),
        }
    )
    ret["result"] = ret["result"] if _ret["result"] else _ret["result"]
    if ret["result"] is False:
        return ret
    if not instance_ids:
        instance_ids = []
    if instance_names:
        # AWS borks on adding instances in "non-running" states, so filter 'em out.
        running_states = ("pending", "rebooting", "running", "stopping", "stopped")
        for n in instance_names:
            instance_ids += __salt__["boto_ec2.find_instances"](
                name=n,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
                in_states=running_states,
            )
    # Backwards compat:  Only touch attached instances if requested (e.g. if some are defined).
    if instance_ids:
        if __opts__["test"]:
            # set_instances with test=True reports whether a change is needed.
            if __salt__["boto_elb.set_instances"](
                name, instance_ids, True, region, key, keyid, profile
            ):
                ret["comment"] += " ELB {} instances would be updated.".format(name)
                ret["result"] = None
        else:
            success = __salt__["boto_elb.set_instances"](
                name, instance_ids, False, region, key, keyid, profile
            )
            if not success:
                ret["comment"] += "Failed to set requested instances."
                ret["result"] = False
    return ret
def register_instances(
    name, instances, region=None, key=None, keyid=None, profile=None
):
    """
    Append EC2 instance(s) to an Elastic Load Balancer.

    This state only ever adds instances: removing an entry from the
    ``instances`` list does NOT deregister it from the ELB.

    name
        Name of the Elastic Load Balancer to add EC2 instances to.

    instances
        A list of EC2 instance IDs that this Elastic Load Balancer should
        distribute traffic to.

    .. versionadded:: 2015.8.0

    .. code-block:: yaml

        add-instances:
          boto_elb.register_instances:
            - name: myloadbalancer
            - instances:
              - instance-id1
              - instance-id2
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    if not __salt__["boto_elb.exists"](name, region, key, keyid, profile):
        msg = "Could not find lb {}".format(name)
        log.error(msg)
        ret.update({"comment": msg, "result": False})
        return ret
    # Collect currently-registered instance ids, skipping ones that are
    # mid-deregistration (those should be treated as absent).
    health = __salt__["boto_elb.get_instance_health"](name, region, key, keyid, profile)
    registered = []
    for entry in health:
        if entry["description"] != "Instance deregistration currently in progress.":
            registered.append(entry["instance_id"])
    additions = [iid for iid in instances if iid not in registered]
    if not additions:
        msg = "Instance/s {} already exist.".format(str(instances).strip("[]"))
        log.debug(msg)
        ret.update({"comment": msg})
        return ret
    if __opts__["test"]:
        ret["comment"] = "ELB {} is set to register : {}.".format(name, additions)
        ret["result"] = None
        return ret
    registered_ok = __salt__["boto_elb.register_instances"](
        name, instances, region, key, keyid, profile
    )
    if registered_ok:
        msg = "Load Balancer {} has been changed".format(name)
        log.info(msg)
        combined = set().union(registered, instances)
        ret.update(
            {
                "comment": msg,
                "changes": {
                    "old": "\n".join(registered),
                    "new": "\n".join(list(combined)),
                },
            }
        )
    else:
        msg = "Load balancer {} failed to add instances".format(name)
        log.error(msg)
        ret.update({"comment": msg, "result": False})
    return ret
DEFAULT_PILLAR_LISTENER_POLICY_KEY = "boto_elb_listener_policies"
def _elb_present(
    name,
    availability_zones,
    listeners,
    subnets,
    subnet_names,
    security_groups,
    scheme,
    region,
    key,
    keyid,
    profile,
):
    """
    Helper for ``present``: ensure the ELB itself exists with the requested
    listeners, security groups, and either availability zones or subnets.
    Returns a partial state dict (result/comment/changes) merged by the caller.
    """
    ret = {"result": True, "comment": "", "changes": {}}
    if not salt.utils.data.exactly_one((availability_zones, subnets, subnet_names)):
        raise SaltInvocationError(
            "Exactly one of availability_zones, subnets, "
            "subnet_names must be provided as arguments."
        )
    if not listeners:
        listeners = []
    for listener in listeners:
        # Listeners are dicts; len() counts keys here, so this is a coarse
        # pre-check before the specific required-key checks below.
        if len(listener) < 3:
            raise SaltInvocationError(
                "Listeners must have at minimum port,"
                " instance_port and protocol values in"
                " the provided list."
            )
        if "elb_port" not in listener:
            raise SaltInvocationError("elb_port is a required value for listeners.")
        if "instance_port" not in listener:
            raise SaltInvocationError(
                "instance_port is a required value for listeners."
            )
        if "elb_protocol" not in listener:
            raise SaltInvocationError("elb_protocol is a required value for listeners.")
        listener["elb_protocol"] = listener["elb_protocol"].upper()
        if listener["elb_protocol"] == "HTTPS" and "certificate" not in listener:
            raise SaltInvocationError(
                "certificate is a required value for"
                " listeners if HTTPS is set for"
                " elb_protocol."
            )
        # best attempt at principle of least surprise here:
        # only use the default pillar in cases where we don't explicitly
        # define policies OR policies_from_pillar on a listener
        policies = listener.setdefault("policies", [])
        policies_pillar = listener.get("policies_from_pillar", None)
        if not policies and policies_pillar is None:
            policies_pillar = DEFAULT_PILLAR_LISTENER_POLICY_KEY
        if policies_pillar:
            # Pillar value is expected to be keyed by protocol, e.g. {'HTTPS': [...]}.
            policies += __salt__["pillar.get"](policies_pillar, {}).get(
                listener["elb_protocol"], []
            )
    # Look up subnet ids from names if provided
    if subnet_names:
        subnets = []
        for i in subnet_names:
            r = __salt__["boto_vpc.get_resource_id"](
                "subnet", name=i, region=region, key=key, keyid=keyid, profile=profile
            )
            if "error" in r:
                ret["comment"] = "Error looking up subnet ids: {}".format(r["error"])
                ret["result"] = False
                return ret
            if "id" not in r:
                ret["comment"] = "Subnet {} does not exist.".format(i)
                ret["result"] = False
                return ret
            subnets.append(r["id"])
    _security_groups = None
    if subnets:
        # VPC ELBs need security group *ids*; resolve names via the subnets' VPC.
        vpc_id = __salt__["boto_vpc.get_subnet_association"](
            subnets, region, key, keyid, profile
        )
        vpc_id = vpc_id.get("vpc_id")
        if not vpc_id:
            ret["comment"] = "Subnets {} do not map to a valid vpc id.".format(subnets)
            ret["result"] = False
            return ret
        _security_groups = __salt__["boto_secgroup.convert_to_group_ids"](
            security_groups,
            vpc_id=vpc_id,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        if not _security_groups:
            ret[
                "comment"
            ] = "Security groups {} do not map to valid security group ids.".format(
                security_groups
            )
            ret["result"] = False
            return ret
    exists = __salt__["boto_elb.exists"](name, region, key, keyid, profile)
    if not exists:
        if __opts__["test"]:
            ret["comment"] = "ELB {} is set to be created.".format(name)
            ret["result"] = None
            return ret
        created = __salt__["boto_elb.create"](
            name=name,
            availability_zones=availability_zones,
            listeners=listeners,
            subnets=subnets,
            security_groups=_security_groups,
            scheme=scheme,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        if created:
            ret["changes"]["old"] = {"elb": None}
            ret["changes"]["new"] = {"elb": name}
            ret["comment"] = "ELB {} created.".format(name)
        else:
            ret["result"] = False
            ret["comment"] = "Failed to create {} ELB.".format(name)
    else:
        # ELB already exists: reconcile groups, listeners and zones/subnets.
        ret["comment"] = "ELB {} present.".format(name)
        _ret = _security_groups_present(
            name, _security_groups, region, key, keyid, profile
        )
        ret["changes"] = salt.utils.dictupdate.update(ret["changes"], _ret["changes"])
        ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
        if not _ret["result"]:
            ret["result"] = _ret["result"]
        if ret["result"] is False:
            return ret
        _ret = _listeners_present(name, listeners, region, key, keyid, profile)
        ret["changes"] = salt.utils.dictupdate.update(ret["changes"], _ret["changes"])
        ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
        if not _ret["result"]:
            ret["result"] = _ret["result"]
        if ret["result"] is False:
            return ret
        if availability_zones:
            _ret = _zones_present(name, availability_zones, region, key, keyid, profile)
            ret["changes"] = salt.utils.dictupdate.update(
                ret["changes"], _ret["changes"]
            )
            ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
            if not _ret["result"]:
                ret["result"] = _ret["result"]
            if ret["result"] is False:
                return ret
        elif subnets:
            _ret = _subnets_present(name, subnets, region, key, keyid, profile)
            ret["changes"] = salt.utils.dictupdate.update(
                ret["changes"], _ret["changes"]
            )
            ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
            if not _ret["result"]:
                ret["result"] = _ret["result"]
    return ret
def _listeners_present(name, listeners, region, key, keyid, profile):
    """
    Helper for ``present``: reconcile the ELB's listeners with the desired
    set, creating missing listeners and deleting unexpected ones.
    """
    ret = {"result": True, "comment": "", "changes": {}}
    lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
    if not lb:
        ret["comment"] = "{} ELB configuration could not be retrieved.".format(name)
        ret["result"] = False
        return ret
    if not listeners:
        listeners = []
    # Index desired and actual listeners by their canonical tuple so the
    # two sets can be diffed regardless of dict key ordering.
    expected_listeners_by_tuple = {}
    for l in listeners:
        l_key = __salt__["boto_elb.listener_dict_to_tuple"](l)
        expected_listeners_by_tuple[l_key] = l
    actual_listeners_by_tuple = {}
    for l in lb["listeners"]:
        l_key = __salt__["boto_elb.listener_dict_to_tuple"](l)
        actual_listeners_by_tuple[l_key] = l
    to_delete = []
    to_create = []
    for t, l in expected_listeners_by_tuple.items():
        if t not in actual_listeners_by_tuple:
            to_create.append(l)
    for t, l in actual_listeners_by_tuple.items():
        if t not in expected_listeners_by_tuple:
            to_delete.append(l)
    if __opts__["test"]:
        msg = []
        if to_create or to_delete:
            msg.append("ELB {} set to have listeners modified:".format(name))
            for listener in to_create:
                msg.append(
                    "Listener {} added.".format(
                        __salt__["boto_elb.listener_dict_to_tuple"](listener)
                    )
                )
            for listener in to_delete:
                msg.append(
                    "Listener {} deleted.".format(
                        __salt__["boto_elb.listener_dict_to_tuple"](listener)
                    )
                )
            ret["result"] = None
        else:
            msg.append("Listeners already set on ELB {}.".format(name))
        ret["comment"] = " ".join(msg)
        return ret
    if to_delete:
        ports = [l["elb_port"] for l in to_delete]
        deleted = __salt__["boto_elb.delete_listeners"](
            name, ports, region, key, keyid, profile
        )
        if deleted:
            ret["comment"] = "Deleted listeners on {} ELB.".format(name)
        else:
            ret["comment"] = "Failed to delete listeners on {} ELB.".format(name)
            ret["result"] = False
    if to_create:
        created = __salt__["boto_elb.create_listeners"](
            name, to_create, region, key, keyid, profile
        )
        if created:
            msg = "Created listeners on {0} ELB."
            ret["comment"] = " ".join([ret["comment"], msg.format(name)])
        else:
            msg = "Failed to create listeners on {0} ELB."
            ret["comment"] = " ".join([ret["comment"], msg.format(name)])
            ret["result"] = False
    if to_create or to_delete:
        # Record before/after; re-fetch so 'new' reflects the actual state.
        ret["changes"]["listeners"] = {}
        ret["changes"]["listeners"]["old"] = lb["listeners"]
        lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
        ret["changes"]["listeners"]["new"] = lb["listeners"]
    else:
        ret["comment"] = "Listeners already set on ELB {}.".format(name)
    return ret
def _security_groups_present(name, security_groups, region, key, keyid, profile):
    """
    Helper for ``present``: make the ELB's attached security groups match
    the desired set (compared as sets, so ordering is irrelevant).
    """
    ret = {"result": True, "comment": "", "changes": {}}
    lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
    if not lb:
        ret["comment"] = "{} ELB configuration could not be retrieved.".format(name)
        ret["result"] = False
        return ret
    desired = security_groups if security_groups else []
    if set(desired) == set(lb["security_groups"]):
        ret["comment"] = "security_groups already set on ELB {}.".format(name)
        return ret
    if __opts__["test"]:
        ret["comment"] = "ELB {} set to have security groups modified.".format(name)
        ret["result"] = None
        return ret
    applied = __salt__["boto_elb.apply_security_groups"](
        name, desired, region, key, keyid, profile
    )
    if applied:
        ret["comment"] = "Modified security_groups on {} ELB.".format(name)
    else:
        ret["comment"] = "Failed to modify security_groups on {} ELB.".format(name)
        ret["result"] = False
    ret["changes"]["old"] = {"security_groups": lb["security_groups"]}
    ret["changes"]["new"] = {"security_groups": desired}
    return ret
def _attributes_present(name, attributes, region, key, keyid, profile):
    """
    Helper for ``present``: ensure the requested ELB attributes are set.
    Only the attribute groups present in ``attributes`` are compared.
    """
    ret = {"result": True, "comment": "", "changes": {}}
    _attributes = __salt__["boto_elb.get_attributes"](name, region, key, keyid, profile)
    if not _attributes:
        ret["result"] = False
        ret["comment"] = "Failed to retrieve attributes for ELB {}.".format(name)
        return ret
    # attrs_to_set is only consulted for truthiness below, so duplicate
    # appends (e.g. several differing access_log fields) are harmless.
    attrs_to_set = []
    if "cross_zone_load_balancing" in attributes:
        czlb = attributes["cross_zone_load_balancing"]
        _czlb = _attributes["cross_zone_load_balancing"]
        if czlb["enabled"] != _czlb["enabled"]:
            attrs_to_set.append("cross_zone_load_balancing")
    if "connection_draining" in attributes:
        cd = attributes["connection_draining"]
        _cd = _attributes["connection_draining"]
        # NOTE(review): the desired side defaults timeout to 300 while the
        # actual side defaults to None, so an ELB reporting no timeout will
        # always be flagged as changed — confirm whether this is intended.
        if cd["enabled"] != _cd["enabled"] or cd.get("timeout", 300) != _cd.get(
            "timeout"
        ):
            attrs_to_set.append("connection_draining")
    if "connecting_settings" in attributes:
        cs = attributes["connecting_settings"]
        _cs = _attributes["connecting_settings"]
        if cs["idle_timeout"] != _cs["idle_timeout"]:
            attrs_to_set.append("connecting_settings")
    if "access_log" in attributes:
        for attr, val in attributes["access_log"].items():
            # str() both sides: values may come back typed differently
            # (e.g. bool vs str) than what the SLS provides.
            if str(_attributes["access_log"][attr]) != str(val):
                attrs_to_set.append("access_log")
        if "s3_bucket_prefix" in attributes["access_log"]:
            sbp = attributes["access_log"]["s3_bucket_prefix"]
            if sbp.startswith("/") or sbp.endswith("/"):
                raise SaltInvocationError(
                    "s3_bucket_prefix can not start or end with /."
                )
    if attrs_to_set:
        if __opts__["test"]:
            ret["comment"] = "ELB {} set to have attributes set.".format(name)
            ret["result"] = None
            return ret
        was_set = __salt__["boto_elb.set_attributes"](
            name, attributes, region, key, keyid, profile
        )
        if was_set:
            ret["changes"]["old"] = {"attributes": _attributes}
            ret["changes"]["new"] = {"attributes": attributes}
            ret["comment"] = "Set attributes on ELB {}.".format(name)
        else:
            ret["result"] = False
            ret["comment"] = "Failed to set attributes on ELB {}.".format(name)
    else:
        ret["comment"] = "Attributes already set on ELB {}.".format(name)
    return ret
def _health_check_present(name, health_check, region, key, keyid, profile):
    """
    Helper for ``present``: ensure the ELB's health check matches the
    desired settings. Values are compared as strings to tolerate type
    differences between SLS data and what boto returns.
    """
    ret = {"result": True, "comment": "", "changes": {}}
    desired = health_check if health_check else {}
    current = __salt__["boto_elb.get_health_check"](
        name, region, key, keyid, profile
    )
    if not current:
        ret["result"] = False
        ret["comment"] = "Failed to retrieve health_check for ELB {}.".format(name)
        return ret
    # Full scan (no short-circuit) so a missing attribute always raises,
    # matching the original behavior.
    mismatches = [
        attr for attr, val in desired.items() if str(current[attr]) != str(val)
    ]
    if not mismatches:
        ret["comment"] = "Health check already set on ELB {}.".format(name)
        return ret
    if __opts__["test"]:
        ret["comment"] = "ELB {} set to have health check set.".format(name)
        ret["result"] = None
        return ret
    applied = __salt__["boto_elb.set_health_check"](
        name, desired, region, key, keyid, profile
    )
    if applied:
        ret["changes"]["old"] = {"health_check": current}
        current = __salt__["boto_elb.get_health_check"](
            name, region, key, keyid, profile
        )
        ret["changes"]["new"] = {"health_check": current}
        ret["comment"] = "Set health check on ELB {}.".format(name)
    else:
        ret["result"] = False
        ret["comment"] = "Failed to set health check on ELB {}.".format(name)
    return ret
def _zones_present(name, availability_zones, region, key, keyid, profile):
    """
    Helper for ``present``: enable exactly the requested availability zones
    on the ELB, disabling any others currently enabled.
    """
    ret = {"result": True, "comment": "", "changes": {}}
    lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
    if not lb:
        ret["result"] = False
        ret["comment"] = "Failed to retrieve ELB {}.".format(name)
        return ret
    to_enable = []
    to_disable = []
    _zones = lb["availability_zones"]
    for zone in availability_zones:
        if zone not in _zones:
            to_enable.append(zone)
    for zone in _zones:
        if zone not in availability_zones:
            to_disable.append(zone)
    if to_enable or to_disable:
        if __opts__["test"]:
            ret["comment"] = "ELB {} to have availability zones set.".format(name)
            ret["result"] = None
            return ret
        if to_enable:
            enabled = __salt__["boto_elb.enable_availability_zones"](
                name, to_enable, region, key, keyid, profile
            )
            if enabled:
                ret["comment"] = "Enabled availability zones on {} ELB.".format(name)
            else:
                ret[
                    "comment"
                ] = "Failed to enable availability zones on {} ELB.".format(name)
                ret["result"] = False
        if to_disable:
            disabled = __salt__["boto_elb.disable_availability_zones"](
                name, to_disable, region, key, keyid, profile
            )
            if disabled:
                msg = "Disabled availability zones on {0} ELB."
                ret["comment"] = " ".join([ret["comment"], msg.format(name)])
            else:
                msg = "Failed to disable availability zones on {0} ELB."
                ret["comment"] = " ".join([ret["comment"], msg.format(name)])
                ret["result"] = False
        # Record before/after; re-fetch so 'new' reflects the actual state.
        ret["changes"]["old"] = {"availability_zones": lb["availability_zones"]}
        lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
        ret["changes"]["new"] = {"availability_zones": lb["availability_zones"]}
    else:
        ret["comment"] = "Availability zones already set on ELB {}.".format(name)
    return ret
def _subnets_present(name, subnets, region, key, keyid, profile):
    """
    Helper for ``present``: attach exactly the requested subnets to the
    ELB, detaching any others currently attached.
    """
    ret = {"result": True, "comment": "", "changes": {}}
    if not subnets:
        subnets = []
    lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
    if not lb:
        ret["result"] = False
        ret["comment"] = "Failed to retrieve ELB {}.".format(name)
        return ret
    to_enable = []
    to_disable = []
    _subnets = lb["subnets"]
    for subnet in subnets:
        if subnet not in _subnets:
            to_enable.append(subnet)
    for subnet in _subnets:
        if subnet not in subnets:
            to_disable.append(subnet)
    if to_enable or to_disable:
        if __opts__["test"]:
            ret["comment"] = "ELB {} to have subnets set.".format(name)
            ret["result"] = None
            return ret
        if to_enable:
            attached = __salt__["boto_elb.attach_subnets"](
                name, to_enable, region, key, keyid, profile
            )
            if attached:
                ret["comment"] = "Attached subnets on {} ELB.".format(name)
            else:
                ret["comment"] = "Failed to attach subnets on {} ELB.".format(name)
                ret["result"] = False
        if to_disable:
            detached = __salt__["boto_elb.detach_subnets"](
                name, to_disable, region, key, keyid, profile
            )
            if detached:
                ret["comment"] = " ".join(
                    [ret["comment"], "Detached subnets on {} ELB.".format(name)]
                )
            else:
                ret["comment"] = " ".join(
                    [
                        ret["comment"],
                        "Failed to detach subnets on {} ELB.".format(name),
                    ]
                )
                ret["result"] = False
        # Record before/after; re-fetch so 'new' reflects the actual state.
        ret["changes"]["old"] = {"subnets": lb["subnets"]}
        lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
        ret["changes"]["new"] = {"subnets": lb["subnets"]}
    else:
        ret["comment"] = "Subnets already set on ELB {}.".format(name)
    return ret
def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile):
    """helper method for present. ensure that cloudwatch_alarms are set"""
    # Pillar-defined alarms form the base; explicit `alarms` override them.
    current = __salt__["config.option"](alarms_from_pillar, {})
    if alarms:
        current = salt.utils.dictupdate.update(current, alarms)
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    for _, info in current.items():
        # Namespace the alarm under this ELB and pin its dimension to it.
        info["name"] = name + " " + info["name"]
        info["attributes"]["description"] = (
            name + " " + info["attributes"]["description"]
        )
        info["attributes"]["dimensions"] = {"LoadBalancerName": [name]}
        kwargs = {
            "name": info["name"],
            "attributes": info["attributes"],
            "region": region,
            "key": key,
            "keyid": keyid,
            "profile": profile,
        }
        # No test=False clause needed since the state handles that itself...
        results = __states__["boto_cloudwatch_alarm.present"](**kwargs)
        if not results.get("result"):
            ret["result"] = results["result"]
        if results.get("changes", {}) != {}:
            ret["changes"][info["name"]] = results["changes"]
        if "comment" in results:
            ret["comment"] += results["comment"]
    return ret
def _policies_present(
    name,
    policies,
    policies_from_pillar,
    listeners,
    backends,
    region,
    key,
    keyid,
    profile,
):
    """
    helper method for present. ensure that ELB policies are set

    Validates the requested policies, then reconciles them against the ELB:
    creates missing policies, assigns them to listeners/backends, and
    deletes unexpected policies (except AWS-assigned defaults).
    """
    if policies is None:
        policies = []
    pillar_policies = __salt__["config.option"](policies_from_pillar, [])
    policies = policies + pillar_policies
    if backends is None:
        backends = []

    # check for policy name uniqueness and correct type
    policy_names = set()
    for p in policies:
        if "policy_name" not in p:
            raise SaltInvocationError("policy_name is a required value for policies.")
        if "policy_type" not in p:
            raise SaltInvocationError("policy_type is a required value for policies.")
        if "policy" not in p:
            raise SaltInvocationError("policy is a required value for listeners.")
        # check for unique policy names
        if p["policy_name"] in policy_names:
            raise SaltInvocationError(
                "Policy names must be unique: policy {} is declared twice.".format(
                    p["policy_name"]
                )
            )
        policy_names.add(p["policy_name"])

    # check that listeners refer to valid policy names
    for l in listeners:
        for p in l.get("policies", []):
            if p not in policy_names:
                raise SaltInvocationError(
                    "Listener {} on ELB {} refers to undefined policy {}.".format(
                        l["elb_port"], name, p
                    )
                )

    # check that backends refer to valid policy names
    for b in backends:
        for p in b.get("policies", []):
            if p not in policy_names:
                raise SaltInvocationError(
                    "Backend {} on ELB {} refers to undefined policy {}.".format(
                        b["instance_port"], name, p
                    )
                )

    ret = {"result": True, "comment": "", "changes": {}}
    lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
    if not lb:
        ret["comment"] = "{} ELB configuration could not be retrieved.".format(name)
        ret["result"] = False
        return ret

    # Policies have two names:
    # - a short name ('name') that's only the policy name (e.g. testpolicy)
    # - a canonical name ('cname') that contains the policy type and hash
    #   (e.g. SSLNegotiationPolicy-testpolicy-14b32f668639cc8ea1391e062af98524)
    policies_by_cname = {}
    cnames_by_name = {}
    for p in policies:
        cname = _policy_cname(p)
        policies_by_cname[cname] = p
        cnames_by_name[p["policy_name"]] = cname
    expected_policy_names = policies_by_cname.keys()
    actual_policy_names = lb["policies"]

    # This is sadly a huge hack to get around the fact that AWS assigns a
    # default SSLNegotiationPolicyType policy (with the naming scheme
    # ELBSecurityPolicy-YYYY-MM) to all ELBs terminating SSL without an
    # explicit policy set. If we don't keep track of the default policies and
    # explicitly exclude them from deletion, orchestration will fail because we
    # attempt to delete the default policy that's being used by listeners that
    # were created with no explicit policy.
    default_aws_policies = set()

    expected_policies_by_listener = {}
    for l in listeners:
        expected_policies_by_listener[l["elb_port"]] = {
            cnames_by_name[p] for p in l.get("policies", [])
        }
    actual_policies_by_listener = {}
    for l in lb["listeners"]:
        listener_policies = set(l.get("policies", []))
        actual_policies_by_listener[l["elb_port"]] = listener_policies
        # Determine if any actual listener policies look like default policies,
        # so we can exclude them from deletion below (see note about this hack
        # above).
        for p in listener_policies:
            if re.match(r"^ELBSecurityPolicy-\d{4}-\d{2}$", p):
                default_aws_policies.add(p)

    expected_policies_by_backend = {}
    for b in backends:
        expected_policies_by_backend[b["instance_port"]] = {
            cnames_by_name[p] for p in b.get("policies", [])
        }
    actual_policies_by_backend = {}
    for b in lb["backends"]:
        backend_policies = set(b.get("policies", []))
        actual_policies_by_backend[b["instance_port"]] = backend_policies

    # Diff desired vs actual by canonical name.
    to_delete = []
    to_create = []
    for policy_name in expected_policy_names:
        if policy_name not in actual_policy_names:
            to_create.append(policy_name)
    for policy_name in actual_policy_names:
        if policy_name not in expected_policy_names:
            if policy_name not in default_aws_policies:
                to_delete.append(policy_name)

    # NOTE: the loop variable 'policies' below shadows the parameter, which
    # is no longer needed at this point.
    listeners_to_update = set()
    for port, policies in expected_policies_by_listener.items():
        if policies != actual_policies_by_listener.get(port, set()):
            listeners_to_update.add(port)
    for port, policies in actual_policies_by_listener.items():
        if policies != expected_policies_by_listener.get(port, set()):
            listeners_to_update.add(port)

    backends_to_update = set()
    for port, policies in expected_policies_by_backend.items():
        if policies != actual_policies_by_backend.get(port, set()):
            backends_to_update.add(port)
    for port, policies in actual_policies_by_backend.items():
        if policies != expected_policies_by_backend.get(port, set()):
            backends_to_update.add(port)

    if __opts__["test"]:
        msg = []
        if to_create or to_delete:
            msg.append("ELB {} set to have policies modified:".format(name))
            for policy in to_create:
                msg.append("Policy {} added.".format(policy))
            for policy in to_delete:
                msg.append("Policy {} deleted.".format(policy))
            ret["result"] = None
        else:
            msg.append("Policies already set on ELB {}.".format(name))
        for listener in listeners_to_update:
            msg.append("Listener {} policies updated.".format(listener))
        for backend in backends_to_update:
            msg.append("Backend {} policies updated.".format(backend))
        ret["comment"] = " ".join(msg)
        return ret

    # Create missing policies first so listener/backend assignment can
    # reference them; bail out on the first failure.
    if to_create:
        for policy_name in to_create:
            created = __salt__["boto_elb.create_policy"](
                name=name,
                policy_name=policy_name,
                policy_type=policies_by_cname[policy_name]["policy_type"],
                policy=policies_by_cname[policy_name]["policy"],
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
            if created:
                ret["changes"].setdefault(policy_name, {})["new"] = policy_name
                comment = "Policy {} was created on ELB {}".format(policy_name, name)
                ret["comment"] = " ".join([ret["comment"], comment])
                ret["result"] = True
            else:
                ret["result"] = False
                return ret

    for port in listeners_to_update:
        policy_set = __salt__["boto_elb.set_listener_policy"](
            name=name,
            port=port,
            policies=list(expected_policies_by_listener.get(port, [])),
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        if policy_set:
            policy_key = "listener_{}_policy".format(port)
            ret["changes"][policy_key] = {
                "old": list(actual_policies_by_listener.get(port, [])),
                "new": list(expected_policies_by_listener.get(port, [])),
            }
            comment = "Policy {} was created on ELB {} listener {}".format(
                expected_policies_by_listener[port], name, port
            )
            ret["comment"] = " ".join([ret["comment"], comment])
            ret["result"] = True
        else:
            ret["result"] = False
            return ret

    for port in backends_to_update:
        policy_set = __salt__["boto_elb.set_backend_policy"](
            name=name,
            port=port,
            policies=list(expected_policies_by_backend.get(port, [])),
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        if policy_set:
            policy_key = "backend_{}_policy".format(port)
            ret["changes"][policy_key] = {
                "old": list(actual_policies_by_backend.get(port, [])),
                "new": list(expected_policies_by_backend.get(port, [])),
            }
            comment = "Policy {} was created on ELB {} backend {}".format(
                expected_policies_by_backend[port], name, port
            )
            ret["comment"] = " ".join([ret["comment"], comment])
            ret["result"] = True
        else:
            ret["result"] = False
            return ret

    # Delete last, after nothing references the removed policies any more.
    if to_delete:
        for policy_name in to_delete:
            deleted = __salt__["boto_elb.delete_policy"](
                name=name,
                policy_name=policy_name,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
            if deleted:
                ret["changes"].setdefault(policy_name, {})["old"] = policy_name
                comment = "Policy {} was deleted from ELB {}".format(policy_name, name)
                ret["comment"] = " ".join([ret["comment"], comment])
                ret["result"] = True
            else:
                ret["result"] = False
                return ret
    return ret
def _policy_cname(policy_dict):
    """
    Build a canonical, unique name for an ELB policy definition.

    The name combines the policy type (with any trailing "Type" suffix
    stripped), the policy name, and an MD5 digest of the policy body, so
    that two policies with identical settings map to the same name.
    """
    ptype = policy_dict["policy_type"]
    pname = policy_dict["policy_name"]
    body = policy_dict["policy"]
    # Sort the policy items by their stringified keys so the representation
    # is stable regardless of dict ordering, then hash that representation.
    canonical = str(sorted(list(body.items()), key=lambda item: str(item[0])))
    digest = hashlib.md5(
        salt.utils.stringutils.to_bytes(str(canonical))
    ).hexdigest()
    if ptype.endswith("Type"):
        ptype = ptype[:-4]
    return "{}-{}-{}".format(ptype, pname, digest)
def absent(name, region=None, key=None, keyid=None, profile=None):
    """
    Ensure an ELB does not exist

    name
        name of the ELB
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    # Nothing to do if the ELB is already gone.
    if not __salt__["boto_elb.exists"](name, region, key, keyid, profile):
        ret["comment"] = "{} ELB does not exist.".format(name)
        return ret

    if __opts__["test"]:
        ret["comment"] = "ELB {} is set to be removed.".format(name)
        ret["result"] = None
        return ret

    if __salt__["boto_elb.delete"](name, region, key, keyid, profile):
        ret["changes"]["old"] = {"elb": name}
        ret["changes"]["new"] = {"elb": None}
        ret["comment"] = "ELB {} deleted.".format(name)
    else:
        ret["result"] = False
        ret["comment"] = "Failed to delete {} ELB.".format(name)
    return ret
def _tags_present(name, tags, region, key, keyid, profile):
    """
    Helper function to validate tags on elb.

    name
        Name of the ELB whose tags are managed.
    tags
        Dict of tags that should be present on the ELB. Tags on the ELB
        that are not in this dict are removed; differing values are
        updated. This dict is not mutated.

    Returns a state-style dict (``result``/``comment``/``changes``) for
    the caller to merge into its own return value.
    """
    ret = {"result": True, "comment": "", "changes": {}}
    if tags:
        lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
        # Work on a copy: aliasing the caller's dict and pop()ing from it
        # below would mutate the caller's ``tags`` argument.
        tags_to_add = dict(tags)
        tags_to_update = {}
        tags_to_remove = []
        if lb.get("tags"):
            for _tag in lb["tags"]:
                if _tag not in tags.keys():
                    if _tag not in tags_to_remove:
                        tags_to_remove.append(_tag)
                else:
                    if tags[_tag] != lb["tags"][_tag]:
                        tags_to_update[_tag] = tags[_tag]
                    # The tag already exists on the ELB (either up to date or
                    # queued for update above), so it must not be re-added.
                    # Popping unconditionally also lets the "already set"
                    # branch below trigger when every tag matches.
                    tags_to_add.pop(_tag)
        if tags_to_remove:
            if __opts__["test"]:
                msg = "The following tag{} set to be removed: {}.".format(
                    ("s are" if len(tags_to_remove) > 1 else " is"),
                    ", ".join(tags_to_remove),
                )
                ret["comment"] = " ".join([ret["comment"], msg])
                ret["result"] = None
            else:
                _ret = __salt__["boto_elb.delete_tags"](
                    name, tags_to_remove, region, key, keyid, profile
                )
                if not _ret:
                    ret["result"] = False
                    msg = "Error attempting to delete tag {}.".format(tags_to_remove)
                    ret["comment"] = " ".join([ret["comment"], msg])
                    return ret
                if "old" not in ret["changes"]:
                    ret["changes"] = salt.utils.dictupdate.update(
                        ret["changes"], {"old": {"tags": {}}}
                    )
                for _tag in tags_to_remove:
                    ret["changes"]["old"]["tags"][_tag] = lb["tags"][_tag]
        if tags_to_add or tags_to_update:
            if __opts__["test"]:
                if tags_to_add:
                    msg = "The following tag{} set to be added: {}.".format(
                        ("s are" if len(tags_to_add.keys()) > 1 else " is"),
                        ", ".join(tags_to_add.keys()),
                    )
                    ret["comment"] = " ".join([ret["comment"], msg])
                    ret["result"] = None
                if tags_to_update:
                    msg = "The following tag {} set to be updated: {}.".format(
                        (
                            "values are"
                            if len(tags_to_update.keys()) > 1
                            else "value is"
                        ),
                        ", ".join(tags_to_update.keys()),
                    )
                    ret["comment"] = " ".join([ret["comment"], msg])
            else:
                # Additions and updates go through the same API call.
                all_tag_changes = salt.utils.dictupdate.update(
                    tags_to_add, tags_to_update
                )
                _ret = __salt__["boto_elb.set_tags"](
                    name, all_tag_changes, region, key, keyid, profile
                )
                if not _ret:
                    ret["result"] = False
                    msg = "Error attempting to set tags."
                    ret["comment"] = " ".join([ret["comment"], msg])
                    return ret
                if "old" not in ret["changes"]:
                    ret["changes"] = salt.utils.dictupdate.update(
                        ret["changes"], {"old": {"tags": {}}}
                    )
                if "new" not in ret["changes"]:
                    ret["changes"] = salt.utils.dictupdate.update(
                        ret["changes"], {"new": {"tags": {}}}
                    )
                for tag in all_tag_changes:
                    ret["changes"]["new"]["tags"][tag] = tags[tag]
                    if "tags" in lb:
                        if lb["tags"]:
                            if tag in lb["tags"]:
                                ret["changes"]["old"]["tags"][tag] = lb["tags"][tag]
        if not tags_to_update and not tags_to_remove and not tags_to_add:
            msg = "Tags are already set."
            ret["comment"] = " ".join([ret["comment"], msg])
    return ret
def __virtual__():
    """
    Only make this state available if the monit module is available.
    """
    # monit.summary being loadable signals that the monit execution
    # module initialised successfully on this minion.
    if "monit.summary" not in __salt__:
        return (False, "monit module could not be loaded")
    return "monit"
def monitor(name):
    """
    Get the summary from module monit and try to see if service is
    being monitored. If not then monitor the service.

    name
        The name of the service as known to monit.
    """
    ret = {"result": None, "name": name, "comment": "", "changes": {}}
    result = __salt__["monit.summary"](name)

    try:
        for key, value in result.items():
            if "Running" in value[name]:
                # Fixed duplicated word in user-facing message
                # (was "is being being monitored.").
                ret["comment"] = "{} is being monitored.".format(name)
                ret["result"] = True
            else:
                if __opts__["test"]:
                    ret["comment"] = "Service {} is set to be monitored.".format(name)
                    ret["result"] = None
                    return ret
                __salt__["monit.monitor"](name)
                ret["comment"] = "{} started to be monitored.".format(name)
                ret["changes"][name] = "Running"
                ret["result"] = True
                break
    except KeyError:
        # The service name is not present in monit's summary output.
        ret["comment"] = "{} not found in configuration.".format(name)
        ret["result"] = False

    return ret
def unmonitor(name):
    """
    Get the summary from module monit and try to see if service is
    being monitored. If it is then stop monitoring the service.
    """
    ret = {"result": None, "name": name, "comment": "", "changes": {}}
    summary = __salt__["monit.summary"](name)
    try:
        for _, status in summary.items():
            if "Not monitored" in status[name]:
                ret["result"] = True
                ret["comment"] = "{} is not being monitored.".format(name)
            else:
                if __opts__["test"]:
                    ret["result"] = None
                    ret["comment"] = "Service {} is set to be unmonitored.".format(name)
                    return ret
                __salt__["monit.unmonitor"](name)
                ret["result"] = True
                ret["comment"] = "{} stopped being monitored.".format(name)
                ret["changes"][name] = "Not monitored"
                break
    except KeyError:
        # The service name was absent from monit's summary output.
        ret["result"] = False
        ret["comment"] = "{} not found in configuration.".format(name)
    return ret
import logging
import salt.utils.json
from salt.utils.versions import LooseVersion
log = logging.getLogger(__name__)
__virtualname__ = "boto3_elasticsearch"
def __virtual__():
    """
    Only load if boto3 and the required module functions are available.
    """
    required_functions = (
        "boto3_elasticsearch.describe_elasticsearch_domain",
        "boto3_elasticsearch.create_elasticsearch_domain",
        "boto3_elasticsearch.update_elasticsearch_domain_config",
        "boto3_elasticsearch.exists",
        "boto3_elasticsearch.get_upgrade_status",
        "boto3_elasticsearch.wait_for_upgrade",
        "boto3_elasticsearch.check_upgrade_eligibility",
        "boto3_elasticsearch.upgrade_elasticsearch_domain",
    )
    # Report the first missing execution-module function, if any.
    missing = [func for func in required_functions if func not in __salt__]
    if missing:
        return (
            False,
            "A required function was not found in __salt__: {}".format(missing[0]),
        )
    return __virtualname__
def _check_return_value(ret):
"""
Helper function to check if the 'result' key of the return value has been
properly set. This is to detect unexpected code-paths that would otherwise
return a 'success'-y value but not actually be successful.
:param dict ret: The returned value of a state function.
"""
if ret["result"] == "oops":
ret["result"] = False
ret["comment"].append(
"An internal error has occurred: The result value was not properly changed."
)
return ret
def present(
    name,
    elasticsearch_version=None,
    elasticsearch_cluster_config=None,
    ebs_options=None,
    access_policies=None,
    snapshot_options=None,
    vpc_options=None,
    cognito_options=None,
    encryption_at_rest_options=None,
    node_to_node_encryption_options=None,
    advanced_options=None,
    log_publishing_options=None,
    blocking=True,
    tags=None,
    region=None,
    keyid=None,
    key=None,
    profile=None,
):
    """
    Ensure an Elasticsearch Domain exists.

    :param str name: The name of the Elasticsearch domain that you are creating.
        Domain names are unique across the domains owned by an account within an
        AWS region. Domain names must start with a letter or number and can contain
        the following characters: a-z (lowercase), 0-9, and - (hyphen).
    :param str elasticsearch_version: String of format X.Y to specify version for
        the Elasticsearch domain eg. "1.5" or "2.3".
    :param dict elasticsearch_cluster_config: Dict specifying the configuration
        options for an Elasticsearch domain.
        Keys (case sensitive) in here are:

        - InstanceType (str): The instance type for an Elasticsearch cluster.
        - InstanceCount (int): The instance type for an Elasticsearch cluster.
        - DedicatedMasterEnabled (bool): Indicate whether a dedicated master
          node is enabled.
        - ZoneAwarenessEnabled (bool): Indicate whether zone awareness is enabled.
        - ZoneAwarenessConfig (dict): Specifies the zone awareness configuration
          for a domain when zone awareness is enabled.
          Keys (case sensitive) in here are:

          - AvailabilityZoneCount (int): An integer value to indicate the
            number of availability zones for a domain when zone awareness is
            enabled. This should be equal to number of subnets if VPC endpoints
            is enabled.

        - DedicatedMasterType (str): The instance type for a dedicated master node.
        - DedicatedMasterCount (int): Total number of dedicated master nodes,
          active and on standby, for the cluster.
    :param dict ebs_options: Dict specifying the options to enable or disable and
        specifying the type and size of EBS storage volumes.
        Keys (case sensitive) in here are:

        - EBSEnabled (bool): Specifies whether EBS-based storage is enabled.
        - VolumeType (str): Specifies the volume type for EBS-based storage.
        - VolumeSize (int): Integer to specify the size of an EBS volume.
        - Iops (int): Specifies the IOPD for a Provisioned IOPS EBS volume (SSD).
    :type access_policies: str or dict
    :param access_policies: Dict or JSON string with the IAM access policy.
    :param dict snapshot_options: Dict specifying the snapshot options.
        Keys (case senstive) in here are:

        - AutomatedSnapshotStartHour (int): Specifies the time, in UTC format,
          when the service takes a daily automated snapshot of the specified
          Elasticsearch domain. Default value is 0 hours.
    :param dict vpc_options: Dict with the options to specify the subnets and security
        groups for the VPC endpoint.
        Keys (case sensitive) in here are:

        - SubnetIds (list): The list of subnets for the VPC endpoint.
        - SecurityGroupIds (list): The list of security groups for the VPC endpoint.
    :param dict cognito_options: Dict with options to specify the cognito user and
        identity pools for Kibana authentication.
        Keys (case senstive) in here are:

        - Enabled (bool): Specifies the option to enable Cognito for Kibana authentication.
        - UserPoolId (str): Specifies the Cognito user pool ID for Kibana authentication.
        - IdentityPoolId (str): Specifies the Cognito identity pool ID for Kibana authentication.
        - RoleArn (str): Specifies the role ARN that provides Elasticsearch permissions
          for accessing Cognito resources.
    :param dict encryption_at_rest_options: Dict specifying the encryption at rest
        options. This option can only be used for the creation of a new Elasticsearch
        domain.
        Keys (case sensitive) in here are:

        - Enabled (bool): Specifies the option to enable Encryption At Rest.
        - KmsKeyId (str): Specifies the KMS Key ID for Encryption At Rest options.
    :param dict node_to_node_encryption_options: Dict specifying the node to node
        encryption options. This option can only be used for the creation of
        a new Elasticsearch domain.
        Keys (case sensitive) in here are:

        - Enabled (bool): Specify True to enable node-to-node encryption.
    :param dict advanced_options: Dict with option to allow references to indices
        in an HTTP request body. Must be False when configuring access to individual
        sub-resources. By default, the value is True.
        See http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide\
        /es-createupdatedomains.html#es-createdomain-configure-advanced-options
        for more information.
    :param dict log_publishing_options: Dict with options for various type of logs.
        The keys denote the type of log file and can be one of the following:

        - INDEX_SLOW_LOGS
        - SEARCH_SLOW_LOGS
        - ES_APPLICATION_LOGS

        The value assigned to each key is a dict with the following case sensitive keys:

        - CloudWatchLogsLogGroupArn (str): The ARN of the Cloudwatch log
          group to which the log needs to be published.
        - Enabled (bool): Specifies whether given log publishing option is enabled or not.
    :param bool blocking: Whether or not the state should wait for all operations
        (create/update/upgrade) to be completed. Default: ``True``
    :param dict tags: Dict of tags to ensure are present on the Elasticsearch domain.

    .. versionadded:: 3001

    Example:

    This will create an elasticsearch domain consisting of a single t2.small instance
    in the eu-west-1 region (Ireland) and will wait until the instance is available
    before returning from the state.

    .. code-block:: yaml

        Create new domain:
          boto3_elasticsearch.present:
          - name: my_domain
          - elasticsearch_version: '5.1'
          - elasticsearch_cluster_config:
              InstanceType: t2.small.elasticsearch
              InstanceCount: 1
              DedicatedMasterEnabled: False
              ZoneAwarenessEnabled: False
          - ebs_options:
              EBSEnabled: True
              VolumeType: gp2
              VolumeSize: 10
          - snapshot_options:
              AutomatedSnapshotStartHour: 3
          - vpc_options:
              SubnetIds:
              - subnet-12345678
              SecurityGroupIds:
              - sg-12345678
          - node_to_node_encryption_options:
              Enabled: False
          - region: eu-west-1
          - tags:
              foo: bar
              baz: qux
    """
    # "oops" is a sentinel; _check_return_value() flags an internal error
    # if no code path below ever sets a real result.
    ret = {"name": name, "result": "oops", "comment": [], "changes": {}}
    action = None
    current_domain = None
    # Desired configuration; filter_falsey drops empty/None entries so only
    # explicitly requested settings take part in the comparison below.
    target_conf = salt.utils.data.filter_falsey(
        {
            "DomainName": name,
            "ElasticsearchClusterConfig": elasticsearch_cluster_config,
            "EBSOptions": ebs_options,
            # Access policies may be given as a dict; normalize to JSON text.
            "AccessPolicies": (
                salt.utils.json.dumps(access_policies)
                if isinstance(access_policies, dict)
                else access_policies
            ),
            "SnapshotOptions": snapshot_options,
            "VPCOptions": vpc_options,
            "CognitoOptions": cognito_options,
            "AdvancedOptions": advanced_options,
            "LogPublishingOptions": log_publishing_options,
        },
        recurse_depth=3,
    )
    res = __salt__["boto3_elasticsearch.describe_elasticsearch_domain"](
        name, region=region, keyid=keyid, key=key, profile=profile
    )
    if not res["result"]:
        ret["result"] = False
        # "Not found" means the domain needs to be created; any other error
        # aborts with the error message in the comment.
        if "ResourceNotFoundException" in res["error"]:
            action = "create"
            config_diff = {"old": None, "new": target_conf}
        else:
            ret["comment"].append(res["error"])
    else:
        current_domain = salt.utils.data.filter_falsey(res["response"], recurse_depth=3)
        # Capture the version before it is stripped from current_domain below.
        current_domain_version = current_domain["ElasticsearchVersion"]
        # Remove some values from current_domain that cannot be updated
        for item in [
            "DomainId",
            "UpgradeProcessing",
            "Created",
            "Deleted",
            "Processing",
            "Endpoints",
            "ARN",
            "EncryptionAtRestOptions",
            "NodeToNodeEncryptionOptions",
            "ElasticsearchVersion",
            "ServiceSoftwareOptions",
        ]:
            if item in current_domain:
                del current_domain[item]
        # Further remove values from VPCOptions (if present) that are read-only
        for item in ["VPCId", "AvailabilityZones"]:
            if item in current_domain.get("VPCOptions", {}):
                del current_domain["VPCOptions"][item]
        # Some special cases
        if "CognitoOptions" in current_domain:
            if (
                "CognitoOptions" not in target_conf
                and not current_domain["CognitoOptions"]["Enabled"]
            ):
                del current_domain["CognitoOptions"]
        # NOTE(review): filter_falsey above may have removed an empty
        # "AdvancedOptions" from current_domain, in which case this lookup
        # would raise KeyError — TODO confirm against API responses.
        if (
            "AdvancedOptions" not in target_conf
            and "rest.action.multi.allow_explicit_index"
            in current_domain["AdvancedOptions"]
        ):
            del current_domain["AdvancedOptions"][
                "rest.action.multi.allow_explicit_index"
            ]
            if not current_domain["AdvancedOptions"]:
                del current_domain["AdvancedOptions"]
        # Compare current configuration with provided configuration
        config_diff = salt.utils.data.recursive_diff(current_domain, target_conf)
        if config_diff:
            action = "update"
        # Compare ElasticsearchVersion separately, as the update procedure differs.
        if elasticsearch_version and current_domain_version != elasticsearch_version:
            action = "upgrade"
    if action in ["create", "update"]:
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"].append(
                'The Elasticsearch Domain "{}" would have been {}d.'.format(
                    name, action
                )
            )
            ret["changes"] = config_diff
        else:
            # Assemble only the non-empty kwargs for the execution-module call.
            boto_kwargs = salt.utils.data.filter_falsey(
                {
                    "elasticsearch_version": elasticsearch_version,
                    "elasticsearch_cluster_config": elasticsearch_cluster_config,
                    "ebs_options": ebs_options,
                    "vpc_options": vpc_options,
                    "access_policies": access_policies,
                    "snapshot_options": snapshot_options,
                    "cognito_options": cognito_options,
                    "encryption_at_rest_options": encryption_at_rest_options,
                    "node_to_node_encryption_options": node_to_node_encryption_options,
                    "advanced_options": advanced_options,
                    "log_publishing_options": log_publishing_options,
                    "blocking": blocking,
                    "region": region,
                    "keyid": keyid,
                    "key": key,
                    "profile": profile,
                }
            )
            if action == "update":
                # Drop certain kwargs that do not apply to updates.
                for item in [
                    "elasticsearch_version",
                    "encryption_at_rest_options",
                    "node_to_node_encryption_options",
                ]:
                    if item in boto_kwargs:
                        del boto_kwargs[item]
            # Dispatch to create_elasticsearch_domain or
            # update_elasticsearch_domain_config depending on the action.
            res = __salt__[
                "boto3_elasticsearch.{}_elasticsearch_domain{}".format(
                    action, "_config" if action == "update" else ""
                )
            ](name, **boto_kwargs)
            if "error" in res:
                ret["result"] = False
                ret["comment"].append(res["error"])
            else:
                ret["result"] = True
                ret["comment"].append(
                    'Elasticsearch Domain "{}" has been {}d.'.format(name, action)
                )
                ret["changes"] = config_diff
    elif action == "upgrade":
        # Version changes go through the dedicated upgraded() state.
        res = upgraded(
            name,
            elasticsearch_version,
            region=region,
            keyid=keyid,
            key=key,
            profile=profile,
        )
        ret["result"] = res["result"]
        ret["comment"].extend(res["comment"])
        if res["changes"]:
            salt.utils.dictupdate.set_dict_key_value(
                ret, "changes:old:version", res["changes"]["old"]
            )
            salt.utils.dictupdate.set_dict_key_value(
                ret, "changes:new:version", res["changes"]["new"]
            )
    if tags is not None:
        # Tag handling is delegated to tagged(); merge its outcome into ours.
        res = tagged(
            name,
            tags=tags,
            replace=True,
            region=region,
            keyid=keyid,
            key=key,
            profile=profile,
        )
        ret["result"] = res["result"]
        ret["comment"].extend(res["comment"])
        if "old" in res["changes"]:
            salt.utils.dictupdate.update_dict_key_value(
                ret, "changes:old:tags", res["changes"]["old"]
            )
        if "new" in res["changes"]:
            salt.utils.dictupdate.update_dict_key_value(
                ret, "changes:new:tags", res["changes"]["new"]
            )
    ret = _check_return_value(ret)
    return ret
def absent(name, blocking=True, region=None, keyid=None, key=None, profile=None):
    """
    Ensure the Elasticsearch Domain specified does not exist.

    :param str name: The name of the Elasticsearch domain to be made absent.
    :param bool blocking: Whether or not the state should wait for the deletion
        to be completed. Default: ``True``

    .. versionadded:: 3001

    Example:

    .. code-block:: yaml

        Remove Elasticsearch Domain:
          boto3_elasticsearch.absent:
          - name: my_domain
          - region: eu-west-1
    """
    ret = {"name": name, "result": "oops", "comment": [], "changes": {}}
    res = __salt__["boto3_elasticsearch.exists"](
        name, region=region, keyid=keyid, key=key, profile=profile
    )
    if "error" in res:
        ret["result"] = False
        ret["comment"].append(res["error"])
    elif not res["result"]:
        # Domain is already gone; nothing to do.
        ret["result"] = True
        ret["comment"].append(
            'Elasticsearch domain "{}" is already absent.'.format(name)
        )
    elif __opts__["test"]:
        ret["result"] = None
        ret["comment"].append(
            'Elasticsearch domain "{}" would have been removed.'.format(name)
        )
        ret["changes"] = {"old": name, "new": None}
    else:
        res = __salt__["boto3_elasticsearch.delete_elasticsearch_domain"](
            domain_name=name,
            blocking=blocking,
            region=region,
            keyid=keyid,
            key=key,
            profile=profile,
        )
        if "error" in res:
            ret["result"] = False
            ret["comment"].append(
                'Error deleting Elasticsearch domain "{}": {}'.format(
                    name, res["error"]
                )
            )
        else:
            ret["result"] = True
            ret["comment"].append(
                'Elasticsearch domain "{}" has been deleted.'.format(name)
            )
            ret["changes"] = {"old": name, "new": None}
    return _check_return_value(ret)
def upgraded(
    name,
    elasticsearch_version,
    blocking=True,
    region=None,
    keyid=None,
    key=None,
    profile=None,
):
    """
    Ensures the Elasticsearch domain specified runs on the specified version of
    elasticsearch. Only upgrades are possible as downgrades require a manual snapshot
    and an S3 bucket to store them in.

    Note that this operation is blocking until the upgrade is complete.

    :param str name: The name of the Elasticsearch domain to upgrade.
    :param str elasticsearch_version: String of format X.Y to specify version for
        the Elasticsearch domain eg. "1.5" or "2.3".
    :param bool blocking: Whether or not the state should wait for the upgrade
        to be completed. Default: ``True``

    .. versionadded:: 3001

    Example:

    .. code-block:: yaml

        Upgrade Elasticsearch Domain:
          boto3_elasticsearch.upgraded:
          - name: my_domain
          - elasticsearch_version: '7.2'
          - region: eu-west-1
    """
    # "oops" sentinel: _check_return_value() flags an internal error if no
    # code path below ever sets a real result.
    ret = {"name": name, "result": "oops", "comment": [], "changes": {}}
    current_domain = None
    res = __salt__["boto3_elasticsearch.describe_elasticsearch_domain"](
        name, region=region, keyid=keyid, key=key, profile=profile
    )
    if not res["result"]:
        ret["result"] = False
        if "ResourceNotFoundException" in res["error"]:
            ret["comment"].append(
                'The Elasticsearch domain "{}" does not exist.'.format(name)
            )
        else:
            ret["comment"].append(res["error"])
    else:
        current_domain = res["response"]
        current_version = current_domain["ElasticsearchVersion"]
        if elasticsearch_version and current_version == elasticsearch_version:
            ret["result"] = True
            ret["comment"].append(
                'The Elasticsearch domain "{}" is already '
                "at the desired version {}"
                "".format(name, elasticsearch_version)
            )
        elif LooseVersion(elasticsearch_version) < LooseVersion(current_version):
            # Downgrades are not supported by the service.
            ret["result"] = False
            ret["comment"].append(
                'Elasticsearch domain "{}" cannot be downgraded '
                'to version "{}".'
                "".format(name, elasticsearch_version)
            )
    # A boolean result at this point means we are done (error or no-op).
    if isinstance(ret["result"], bool):
        return ret
    log.debug("%s :upgraded: Check upgrade in progress", __name__)
    # Check if an upgrade is already in progress
    res = __salt__["boto3_elasticsearch.get_upgrade_status"](
        name, region=region, keyid=keyid, key=key, profile=profile
    )
    if "error" in res:
        ret["result"] = False
        ret["comment"].append(
            'Error determining current upgrade status of domain "{}": {}'.format(
                name, res["error"]
            )
        )
        return ret
    if res["response"].get("StepStatus") == "IN_PROGRESS":
        if blocking:
            # An upgrade is already in progress, wait for it to complete
            res2 = __salt__["boto3_elasticsearch.wait_for_upgrade"](
                name, region=region, keyid=keyid, key=key, profile=profile
            )
            if "error" in res2:
                ret["result"] = False
                ret["comment"].append(
                    'Error waiting for upgrade of domain "{}" to complete: {}'.format(
                        name, res2["error"]
                    )
                )
            elif (
                res2["response"].get("UpgradeName", "").endswith(elasticsearch_version)
            ):
                # The in-flight upgrade was already targeting our version.
                ret["result"] = True
                ret["comment"].append(
                    'Elasticsearch Domain "{}" is already at version "{}".'.format(
                        name, elasticsearch_version
                    )
                )
        else:
            # We are not going to wait for it to complete, so bail.
            ret["result"] = True
            ret["comment"].append(
                'An upgrade of Elasticsearch domain "{}" '
                "is already underway: {}"
                "".format(name, res["response"].get("UpgradeName"))
            )
    if isinstance(ret["result"], bool):
        return ret
    log.debug("%s :upgraded: Check upgrade eligibility", __name__)
    # Check if the domain is eligible for an upgrade
    res = __salt__["boto3_elasticsearch.check_upgrade_eligibility"](
        name,
        elasticsearch_version,
        region=region,
        keyid=keyid,
        key=key,
        profile=profile,
    )
    if "error" in res:
        ret["result"] = False
        ret["comment"].append(
            'Error checking upgrade eligibility for domain "{}": {}'.format(
                name, res["error"]
            )
        )
    elif not res["response"]:
        ret["result"] = False
        ret["comment"].append(
            'The Elasticsearch Domain "{}" is not eligible to '
            "be upgraded to version {}."
            "".format(name, elasticsearch_version)
        )
    else:
        log.debug("%s :upgraded: Start the upgrade", __name__)
        # Start the upgrade
        if __opts__["test"]:
            ret["result"] = None
            # Bugfix: the domain name was previously never interpolated,
            # leaving a literal "{}" in the test-mode comment.
            ret["comment"].append(
                'The Elasticsearch version for domain "{}" would have been upgraded.'.format(
                    name
                )
            )
            ret["changes"] = {
                "old": current_domain["ElasticsearchVersion"],
                "new": elasticsearch_version,
            }
        else:
            res = __salt__["boto3_elasticsearch.upgrade_elasticsearch_domain"](
                name,
                elasticsearch_version,
                blocking=blocking,
                region=region,
                keyid=keyid,
                key=key,
                profile=profile,
            )
            if "error" in res:
                ret["result"] = False
                ret["comment"].append(
                    'Error upgrading Elasticsearch domain "{}": {}'.format(
                        name, res["error"]
                    )
                )
            else:
                ret["result"] = True
                ret["comment"].append(
                    'The Elasticsearch domain "{}" has been '
                    "upgraded to version {}."
                    "".format(name, elasticsearch_version)
                )
                ret["changes"] = {
                    "old": current_domain["ElasticsearchVersion"],
                    "new": elasticsearch_version,
                }
    ret = _check_return_value(ret)
    return ret
def latest(name, minor_only=True, region=None, keyid=None, key=None, profile=None):
    """
    Ensures the Elasticsearch domain specifies runs on the latest compatible
    version of elasticsearch, upgrading it if it is not.

    Note that this operation is blocking until the upgrade is complete.

    :param str name: The name of the Elasticsearch domain to upgrade.
    :param bool minor_only: Only upgrade to the latest minor version.

    .. versionadded:: 3001

    Example:

    The following example will ensure the elasticsearch domain ``my_domain`` is
    upgraded to the latest minor version. So if it is currently 5.1 it will be
    upgraded to 5.6.

    .. code-block:: yaml

        Upgrade Elasticsearch Domain:
          boto3_elasticsearch.latest:
          - name: my_domain
          - minor_only: True
          - region: eu-west-1
    """
    # "oops" sentinel: _check_return_value() flags an internal error if no
    # code path below ever sets a real result.
    ret = {"name": name, "result": "oops", "comment": [], "changes": {}}
    # Get current version
    res = __salt__["boto3_elasticsearch.describe_elasticsearch_domain"](
        domain_name=name, region=region, keyid=keyid, key=key, profile=profile
    )
    if "error" in res:
        ret["result"] = False
        ret["comment"].append(
            'Error getting information of Elasticsearch domain "{}": {}'.format(
                name, res["error"]
            )
        )
    else:
        current_version = res["response"]["ElasticsearchVersion"]
        # Get latest compatible version
        latest_version = None
        res = __salt__["boto3_elasticsearch.get_compatible_elasticsearch_versions"](
            domain_name=name, region=region, keyid=keyid, key=key, profile=profile
        )
        if "error" in res:
            ret["result"] = False
            ret["comment"].append(
                "Error getting compatible Elasticsearch versions "
                'for Elasticsearch domain "{}": {}'
                "".format(name, res["error"])
            )
        if isinstance(ret["result"], bool):
            return ret
        try:
            # Take the last entry of TargetVersions as the newest compatible
            # version; presumably the API returns them sorted ascending —
            # TODO confirm against the AWS API documentation.
            latest_version = res["response"][0]["TargetVersions"].pop(-1)
        except IndexError:
            # No compatible target versions returned.
            pass
        if not current_version:
            ret["result"] = True
            ret["comment"].append(
                'The Elasticsearch domain "{}" can not be upgraded.'.format(name)
            )
        elif not latest_version:
            ret["result"] = True
            ret["comment"].append(
                'The Elasticsearch domain "{}" is already at '
                'the lastest version "{}".'
                "".format(name, current_version)
            )
        else:
            a_current_version = current_version.split(".")
            a_latest_version = latest_version.split(".")
            # With minor_only, upgrade only when the target stays within the
            # same major version.
            if not (minor_only and a_current_version[0] != a_latest_version[0]):
                if __opts__["test"]:
                    ret["result"] = None
                    ret["comment"].append(
                        'Elasticsearch domain "{}" would have been updated '
                        'to version "{}".'.format(name, latest_version)
                    )
                    ret["changes"] = {"old": current_version, "new": latest_version}
                else:
                    ret = upgraded(
                        name,
                        latest_version,
                        region=region,
                        keyid=keyid,
                        key=key,
                        profile=profile,
                    )
            else:
                ret["result"] = True
                ret["comment"].append(
                    'Elasticsearch domain "{}" is already at its '
                    "latest minor version {}."
                    "".format(name, current_version)
                )
    ret = _check_return_value(ret)
    if ret["result"] and ret["changes"] and not minor_only:
        # Try and see if we can upgrade again
        # (recurse until no further compatible upgrade is available).
        res = latest(
            name,
            minor_only=minor_only,
            region=region,
            keyid=keyid,
            key=key,
            profile=profile,
        )
        if res["result"] and res["changes"]:
            ret["changes"]["new"] = res["changes"]["new"]
        ret["comment"].extend(res["comment"])
    return ret
def tagged(
    name, tags=None, replace=False, region=None, keyid=None, key=None, profile=None
):
    """
    Ensures the Elasticsearch domain has the tags provided.
    Adds tags to the domain unless ``replace`` is set to ``True``, in which
    case all existing tags will be replaced with the tags provided in ``tags``.
    (This will remove all tags if ``replace`` is ``True`` and ``tags`` is empty).

    :param str name: The Elasticsearch domain to work with.
    :param dict tags: The tags to add to/replace on the Elasticsearch domain.
    :param bool replace: Whether or not to replace (``True``) all existing tags
        on the Elasticsearch domain, or add (``False``) tags to the ES domain.

    .. versionadded:: 3001
    """
    # "oops" sentinel: _check_return_value() flags an internal error if no
    # code path below ever sets a real result.
    ret = {"name": name, "result": "oops", "comment": [], "changes": {}}
    current_tags = {}
    # Check if the domain exists
    res = __salt__["boto3_elasticsearch.exists"](
        name, region=region, keyid=keyid, key=key, profile=profile
    )
    if res["result"]:
        res = __salt__["boto3_elasticsearch.list_tags"](
            name, region=region, keyid=keyid, key=key, profile=profile
        )
        if "error" in res:
            ret["result"] = False
            ret["comment"].append(
                'Error fetching tags of Elasticsearch domain "{}": {}'.format(
                    name, res["error"]
                )
            )
        else:
            current_tags = res["response"] or {}
    else:
        ret["result"] = False
        ret["comment"].append('Elasticsearch domain "{}" does not exist.'.format(name))
    # A boolean result at this point means an error occurred above.
    if isinstance(ret["result"], bool):
        return ret
    diff_tags = salt.utils.dictdiffer.deep_diff(current_tags, tags)
    if not diff_tags:
        ret["result"] = True
        ret["comment"].append(
            'Elasticsearch domain "{}" already has the specified tags.'.format(name)
        )
    else:
        if replace:
            ret["changes"] = diff_tags
        else:
            # Build the merged view on a copy. The previous code used
            # current_tags.update(tags) inline, which returns None (so
            # changes:new was always None) and mutated current_tags in place
            # (so changes:old showed the merged result instead of the old one).
            merged_tags = dict(current_tags)
            merged_tags.update(tags)
            ret["changes"] = {"old": current_tags, "new": merged_tags}
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"].append(
                'Tags on Elasticsearch domain "{}" would have been {}ed.'.format(
                    name, "replac" if replace else "add"
                )
            )
        else:
            if replace:
                # Clear all existing tags first so only the requested set remains.
                res = __salt__["boto3_elasticsearch.remove_tags"](
                    tag_keys=current_tags.keys(),
                    domain_name=name,
                    region=region,
                    keyid=keyid,
                    key=key,
                    profile=profile,
                )
                if "error" in res:
                    ret["result"] = False
                    ret["comment"].append(
                        "Error removing current tags from Elasticsearch "
                        'domain "{}": {}'.format(name, res["error"])
                    )
                    ret["changes"] = {}
            if isinstance(ret["result"], bool):
                return ret
            res = __salt__["boto3_elasticsearch.add_tags"](
                domain_name=name,
                tags=tags,
                region=region,
                keyid=keyid,
                key=key,
                profile=profile,
            )
            if "error" in res:
                ret["result"] = False
                ret["comment"].append(
                    'Error tagging Elasticsearch domain "{}": {}'.format(
                        name, res["error"]
                    )
                )
                ret["changes"] = {}
            else:
                ret["result"] = True
                ret["comment"].append(
                    'Tags on Elasticsearch domain "{}" have been {}ed.'.format(
                        name, "replac" if replace else "add"
                    )
                )
    ret = _check_return_value(ret)
    return ret
def __virtual__():
    """
    Only load if the pushover module is available in __salt__
    """
    # The pushover execution module exposes post_message when it loads.
    if "pushover.post_message" not in __salt__:
        return (False, "pushover module could not be loaded")
    return "pushover"
def post_message(
    name,
    user=None,
    device=None,
    message=None,
    title=None,
    priority=None,
    expire=None,
    retry=None,
    sound=None,
    api_version=1,
    token=None,
):
    """
    Send a message to a PushOver channel.

    .. code-block:: yaml

        pushover-message:
          pushover.post_message:
            - user: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
            - token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
            - title: Salt Returner
            - device: phone
            - priority: -1
            - expire: 3600
            - retry: 5

    The following parameters are required:

    name
        The unique name for this event.

    user
        The user or group of users to send the message to. Must be ID of user, not name
        or email address.

    message
        The message that is to be sent to the PushOver channel.

    The following parameters are optional:

    title
        The title to use for the message.

    device
        The device for the user to send the message to.

    priority
        The priority for the message.

    expire
        The message should expire after specified amount of seconds.

    retry
        The message should be resent this many times.

    token
        The token for PushOver to use for authentication,
        if not specified in the configuration options of master or minion.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # In test mode no message is sent; just report what would happen.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "The following message is to be sent to PushOver: {}".format(
            message
        )
        return ret

    # Validate the required arguments before calling the execution module.
    if not user:
        ret["comment"] = "PushOver user is missing: {}".format(user)
        return ret
    if not message:
        ret["comment"] = "PushOver message is missing: {}".format(message)
        return ret

    sent = __salt__["pushover.post_message"](
        user=user,
        message=message,
        title=title,
        device=device,
        priority=priority,
        expire=expire,
        retry=retry,
        token=token,
    )

    if sent:
        ret["result"] = True
        ret["comment"] = "Sent message: {}".format(name)
    else:
        ret["comment"] = "Failed to send message: {}".format(name)
    return ret
import json
import logging
from salt.exceptions import SaltException
log = logging.getLogger(__name__)
def __virtual__():
    """
    Load this state module only when the zabbix execution module (and hence
    all of its third-party requirements) is available on the minion.
    """
    has_zabbix = "zabbix.run_query" in __salt__
    if not has_zabbix:
        return False, "Import zabbix or other needed modules failed."
    return True
def present(name, params, **kwargs):
    """
    Create a Zabbix Value map object, or update it if it differs from the
    defined parameters.

    :param name: Zabbix Value map name
    :param params: Definition of the Zabbix Value map
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    .. code-block:: yaml

        zabbix-valuemap-present:
            zabbix_valuemap.present:
                - name: Number mapping
                - params:
                      mappings:
                          - value: 1
                            newvalue: one
                          - value: 2
                            newvalue: two
    """
    zabbix_id_mapper = __salt__["zabbix.get_zabbix_id_mapper"]()

    dry_run = __opts__["test"]
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    # Create input params substituting functions with their results
    params["name"] = name
    input_params = __salt__["zabbix.substitute_params"](params, **kwargs)
    # json.dumps() already returns str; no extra str() wrapper is needed.
    log.info(
        "Zabbix Value map: input params: %s",
        json.dumps(input_params, indent=4),
    )

    search = {"output": "extend", "selectMappings": "extend", "filter": {"name": name}}
    # GET Value map object if exists
    valuemap_get = __salt__["zabbix.run_query"]("valuemap.get", search, **kwargs)
    log.info(
        "Zabbix Value map: valuemap.get result: %s",
        json.dumps(valuemap_get, indent=4),
    )

    # Exactly one match means the object exists; anything else is treated as
    # "absent" and triggers creation.
    existing_obj = (
        __salt__["zabbix.substitute_params"](valuemap_get[0], **kwargs)
        if valuemap_get and len(valuemap_get) == 1
        else False
    )

    if existing_obj:
        diff_params = __salt__["zabbix.compare_params"](input_params, existing_obj)
        # BUGFIX: the format string previously contained a stray "{" ("{%s"),
        # producing garbled log output.
        log.info(
            "Zabbix Value map: input params: %s",
            json.dumps(input_params, indent=4),
        )
        log.info(
            "Zabbix Value map: Object comparison result. Differences: %s",
            str(diff_params),
        )

        if diff_params:
            # The update call needs the object's ID alongside the changed
            # attributes.
            diff_params[zabbix_id_mapper["valuemap"]] = existing_obj[
                zabbix_id_mapper["valuemap"]
            ]
            log.info(
                "Zabbix Value map: update params: %s",
                json.dumps(diff_params, indent=4),
            )

            if dry_run:
                ret["result"] = True
                ret["comment"] = 'Zabbix Value map "{}" would be fixed.'.format(name)
                ret["changes"] = {
                    name: {
                        "old": (
                            'Zabbix Value map "{}" differs '
                            "in following parameters: {}".format(name, diff_params)
                        ),
                        "new": (
                            'Zabbix Value map "{}" would correspond to definition.'.format(
                                name
                            )
                        ),
                    }
                }
            else:
                valuemap_update = __salt__["zabbix.run_query"](
                    "valuemap.update", diff_params, **kwargs
                )
                log.info(
                    "Zabbix Value map: valuemap.update result: %s",
                    str(valuemap_update),
                )
                if valuemap_update:
                    ret["result"] = True
                    ret["comment"] = 'Zabbix Value map "{}" updated.'.format(name)
                    ret["changes"] = {
                        name: {
                            "old": (
                                'Zabbix Value map "{}" differed '
                                "in following parameters: {}".format(name, diff_params)
                            ),
                            "new": 'Zabbix Value map "{}" fixed.'.format(name),
                        }
                    }
                else:
                    # BUGFIX: previously the failure path returned
                    # result=False with an empty comment.
                    ret["comment"] = 'Failed to update Zabbix Value map "{}".'.format(
                        name
                    )
        else:
            ret["result"] = True
            ret[
                "comment"
            ] = 'Zabbix Value map "{}" already exists and corresponds to a definition.'.format(
                name
            )

    else:
        if dry_run:
            ret["result"] = True
            ret["comment"] = 'Zabbix Value map "{}" would be created.'.format(name)
            ret["changes"] = {
                name: {
                    "old": 'Zabbix Value map "{}" does not exist.'.format(name),
                    "new": (
                        'Zabbix Value map "{}" would be created '
                        "according definition.".format(name)
                    ),
                }
            }
        else:
            # ACTION.CREATE
            valuemap_create = __salt__["zabbix.run_query"](
                "valuemap.create", input_params, **kwargs
            )
            log.info(
                "Zabbix Value map: valuemap.create result: %s",
                str(valuemap_create),
            )
            if valuemap_create:
                ret["result"] = True
                ret["comment"] = 'Zabbix Value map "{}" created.'.format(name)
                ret["changes"] = {
                    name: {
                        "old": 'Zabbix Value map "{}" did not exist.'.format(name),
                        "new": (
                            'Zabbix Value map "{}" created according definition.'.format(
                                name
                            )
                        ),
                    }
                }
            else:
                # BUGFIX: previously the failure path returned result=False
                # with an empty comment.
                ret["comment"] = 'Failed to create Zabbix Value map "{}".'.format(name)

    return ret
def absent(name, **kwargs):
    """
    Makes the Zabbix Value map to be absent (either does not exist or delete it).

    :param name: Zabbix Value map name
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    .. code-block:: yaml

        zabbix-valuemap-absent:
            zabbix_valuemap.absent:
                - name: Value map name
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    # Resolve the value map name to its Zabbix object ID; a lookup failure
    # (SaltException) is treated the same as "not found".
    try:
        object_id = __salt__["zabbix.get_object_id_by_params"](
            "valuemap", {"filter": {"name": name}}, **kwargs
        )
    except SaltException:
        object_id = False

    # Already absent - nothing to do.
    if not object_id:
        ret["result"] = True
        ret["comment"] = 'Zabbix Value map "{}" does not exist.'.format(name)
        return ret

    # Dry run: report what would happen without touching Zabbix.
    if __opts__["test"]:
        ret["result"] = True
        ret["comment"] = 'Zabbix Value map "{}" would be deleted.'.format(name)
        ret["changes"] = {
            name: {
                "old": 'Zabbix Value map "{}" exists.'.format(name),
                "new": 'Zabbix Value map "{}" would be deleted.'.format(name),
            }
        }
        return ret

    valuemap_delete = __salt__["zabbix.run_query"](
        "valuemap.delete", [object_id], **kwargs
    )
    if valuemap_delete:
        ret["result"] = True
        ret["comment"] = 'Zabbix Value map "{}" deleted.'.format(name)
        ret["changes"] = {
            name: {
                "old": 'Zabbix Value map "{}" existed.'.format(name),
                "new": 'Zabbix Value map "{}" deleted.'.format(name),
            }
        }
    return ret
def __virtual__():
    """
    Only load if the postgres module is present
    """
    if "postgres.language_create" in __salt__:
        return True
    return (
        False,
        "Unable to load postgres module. Make sure `postgres.bins_dir` is set.",
    )
def present(
    name,
    maintenance_db,
    user=None,
    db_password=None,
    db_host=None,
    db_port=None,
    db_user=None,
):
    """
    Ensure that a named language is present in the specified
    database.

    name
        The name of the language to install

    maintenance_db
        The name of the database in which the language is to be installed

    user
        System user all operations should be performed on behalf of

    db_user
        database username if different from config or default

    db_password
        user password if any password for a specified user

    db_host
        Database host if different from config or default

    db_port
        Database port if different from config or default
    """
    # Optimistic default: if the language is already listed we return as-is.
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Language {} is already installed".format(name),
    }

    dbargs = {
        "runas": user,
        "host": db_host,
        "user": db_user,
        "port": db_port,
        "password": db_password,
    }

    installed = __salt__["postgres.language_list"](maintenance_db, **dbargs)
    if name in installed:
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Language {} is set to be installed".format(name)
        return ret

    if __salt__["postgres.language_create"](name, maintenance_db, **dbargs):
        ret["comment"] = "Language {} has been installed".format(name)
        ret["changes"][name] = "Present"
    else:
        ret["comment"] = "Failed to install language {}".format(name)
        ret["result"] = False

    return ret
def absent(
    name,
    maintenance_db,
    user=None,
    db_password=None,
    db_host=None,
    db_port=None,
    db_user=None,
):
    """
    Ensure that a named language is absent in the specified
    database.

    name
        The name of the language to remove

    maintenance_db
        The name of the database in which the language is to be installed

    user
        System user all operations should be performed on behalf of

    db_user
        database username if different from config or default

    db_password
        user password if any password for a specified user

    db_host
        Database host if different from config or default

    db_port
        Database port if different from config or default
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    dbargs = {
        "runas": user,
        "host": db_host,
        "user": db_user,
        "port": db_port,
        "password": db_password,
    }

    if not __salt__["postgres.language_exists"](name, maintenance_db, **dbargs):
        ret["comment"] = "Language {} is not present so it cannot be removed".format(
            name
        )
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Language {} is set to be removed".format(name)
        return ret

    # BUGFIX: maintenance_db was previously omitted from the remove call;
    # postgres.language_remove expects (maintenance_db, name, ...).
    if __salt__["postgres.language_remove"](maintenance_db, name, **dbargs):
        ret["comment"] = "Language {} has been removed".format(name)
        ret["changes"][name] = "Absent"
        return ret

    # BUGFIX: the failure branch previously fell through and had its comment
    # overwritten by the "not present" message, masking the actual error.
    ret["comment"] = "Failed to remove language {}".format(name)
    ret["result"] = False
    return ret
def __virtual__():
    """
    Only load if boto is available.
    """
    if "boto3_elasticache.cache_cluster_exists" not in __salt__:
        return (False, "boto3_elasticcache module could not be loaded")
    return "boto3_elasticache"
def _diff_cache_cluster(current, desired):
"""
If you need to enhance what modify_cache_cluster() considers when deciding what is to be
(or can be) updated, add it to 'modifiable' below. It's a dict mapping the param as used
in modify_cache_cluster() to that in describe_cache_clusters(). Any data fiddlery that
needs to be done to make the mappings meaningful should be done in the munging section
below as well.
This function will ONLY touch settings that are explicitly called out in 'desired' - any
settings which might have previously been changed from their 'default' values will not be
changed back simply by leaving them out of 'desired'. This is both intentional, and
much, much easier to code :)
"""
### The data formats are annoyingly (and as far as I can can tell, unnecessarily)
### different - we have to munge to a common format to compare...
if current.get("SecurityGroups") is not None:
current["SecurityGroupIds"] = [
s["SecurityGroupId"] for s in current["SecurityGroups"]
]
if current.get("CacheSecurityGroups") is not None:
current["CacheSecurityGroupNames"] = [
c["CacheSecurityGroupName"] for c in current["CacheSecurityGroups"]
]
if current.get("NotificationConfiguration") is not None:
current["NotificationTopicArn"] = current["NotificationConfiguration"][
"TopicArn"
]
current["NotificationTopicStatus"] = current["NotificationConfiguration"][
"TopicStatus"
]
if current.get("CacheParameterGroup") is not None:
current["CacheParameterGroupName"] = current["CacheParameterGroup"][
"CacheParameterGroupName"
]
modifiable = {
"AutoMinorVersionUpgrade": "AutoMinorVersionUpgrade",
"AZMode": "AZMode",
"CacheNodeType": "CacheNodeType",
"CacheNodeIdsToRemove": None,
"CacheParameterGroupName": "CacheParameterGroupName",
"CacheSecurityGroupNames": "CacheSecurityGroupNames",
"EngineVersion": "EngineVersion",
"NewAvailabilityZones": None,
"NotificationTopicArn": "NotificationTopicArn",
"NotificationTopicStatus": "NotificationTopicStatus",
"NumCacheNodes": "NumCacheNodes",
"PreferredMaintenanceWindow": "PreferredMaintenanceWindow",
"SecurityGroupIds": "SecurityGroupIds",
"SnapshotRetentionLimit": "SnapshotRetentionLimit",
"SnapshotWindow": "SnapshotWindow",
}
need_update = {}
for m, o in modifiable.items():
if m in desired:
if not o:
# Always pass these through - let AWS do the math...
need_update[m] = desired[m]
else:
if m in current:
# Equivalence testing works fine for current simple type comparisons
# This might need enhancement if more complex structures enter the picture
if current[m] != desired[m]:
need_update[m] = desired[m]
return need_update
def cache_cluster_present(
    name,
    wait=900,
    security_groups=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
    **args
):
    """
    Ensure a given cache cluster exists.

    name
        Name of the cache cluster (cache cluster id).

    wait
        Integer describing how long, in seconds, to wait for confirmation from AWS that the
        resource is in the desired state. Zero meaning to return success or failure immediately
        of course. Note that waiting for the cluster to become available is generally the
        better course, as failure to do so will often lead to subsequent failures when managing
        dependent resources.

    security_groups
        One or more VPC security groups (names and/or IDs) associated with the cache cluster.

        .. note::
            This is additive with any sec groups provided via the
            SecurityGroupIds parameter below. Use this parameter ONLY when you
            are creating a cluster in a VPC.

    CacheClusterId
        The node group (shard) identifier. This parameter is stored as a lowercase string.

        Constraints:

        - A name must contain from 1 to 20 alphanumeric characters or hyphens.
        - The first character must be a letter.
        - A name cannot end with a hyphen or contain two consecutive hyphens.

        .. note::
            In general this parameter is not needed, as 'name' is used if it's
            not provided.

    ReplicationGroupId
        The ID of the replication group to which this cache cluster should belong. If this
        parameter is specified, the cache cluster is added to the specified replication
        group as a read replica; otherwise, the cache cluster is a standalone primary that
        is not part of any replication group. If the specified replication group is
        Multi-AZ enabled and the Availability Zone is not specified, the cache cluster is
        created in Availability Zones that provide the best spread of read replicas across
        Availability Zones.

        .. note:
            This parameter is ONLY valid if the Engine parameter is redis. Due
            to current limitations on Redis (cluster mode disabled), this
            parameter is not supported on Redis (cluster mode enabled)
            replication groups.

    AZMode
        Specifies whether the nodes in this Memcached cluster are created in a single
        Availability Zone or created across multiple Availability Zones in the cluster's
        region. If the AZMode and PreferredAvailabilityZones are not specified,
        ElastiCache assumes single-az mode.

        .. note::
            This parameter is ONLY supported for Memcached cache clusters.

    PreferredAvailabilityZone
        The EC2 Availability Zone in which the cache cluster is created. All nodes
        belonging to this Memcached cache cluster are placed in the preferred Availability
        Zone. If you want to create your nodes across multiple Availability Zones, use
        PreferredAvailabilityZones.

        Default: System chosen Availability Zone.

    PreferredAvailabilityZones
        A list of the Availability Zones in which cache nodes are created. The order of
        the zones in the list is not important. The number of Availability Zones listed
        must equal the value of NumCacheNodes. If you want all the nodes in the same
        Availability Zone, use PreferredAvailabilityZone instead, or repeat the
        Availability Zone multiple times in the list.

        Default: System chosen Availability Zones.

        .. note::
            This option is ONLY supported on Memcached.

            If you are creating your cache cluster in an Amazon VPC
            (recommended) you can only locate nodes in Availability Zones that
            are associated with the subnets in the selected subnet group.

    NumCacheNodes
        The initial (integer) number of cache nodes that the cache cluster has.

        .. note::
            For clusters running Redis, this value must be 1.

            For clusters running Memcached, this value must be between 1 and 20.

    CacheNodeType
        The compute and memory capacity of the nodes in the node group (shard).
        Valid node types (and pricing for them) are exhaustively described at
        https://aws.amazon.com/elasticache/pricing/

        .. note::
            All T2 instances must be created in a VPC

            Redis backup/restore is not supported for Redis (cluster mode
            disabled) T1 and T2 instances. Backup/restore is supported on Redis
            (cluster mode enabled) T2 instances.

            Redis Append-only files (AOF) functionality is not supported for T1
            or T2 instances.

    Engine
        The name of the cache engine to be used for this cache cluster. Valid values for
        this parameter are: memcached | redis

    EngineVersion
        The version number of the cache engine to be used for this cache cluster. To view
        the supported cache engine versions, use the DescribeCacheEngineVersions operation.

        .. note::
            You can upgrade to a newer engine version but you cannot downgrade
            to an earlier engine version. If you want to use an earlier engine
            version, you must delete the existing cache cluster or replication
            group and create it anew with the earlier engine version.

    CacheParameterGroupName
        The name of the parameter group to associate with this cache cluster. If this
        argument is omitted, the default parameter group for the specified engine is used.
        You cannot use any parameter group which has cluster-enabled='yes' when creating
        a cluster.

    CacheSubnetGroupName
        The name of the Cache Subnet Group to be used for the cache cluster. Use this
        parameter ONLY when you are creating a cache cluster within a VPC.

        .. note::
            If you're going to launch your cluster in an Amazon VPC, you need
            to create a subnet group before you start creating a cluster.

    CacheSecurityGroupNames
        A list of Cache Security Group names to associate with this cache cluster. Use
        this parameter ONLY when you are creating a cache cluster outside of a VPC.

    SecurityGroupIds
        One or more VPC security groups associated with the cache cluster. Use this
        parameter ONLY when you are creating a cache cluster within a VPC.

    Tags
        A list of tags to be added to this resource. Note that due to shortcomings in the
        AWS API for Elasticache, these can only be set during resource creation - later
        modification is not (currently) supported.

    SnapshotArns
        A single-element string list containing an Amazon Resource Name (ARN) that
        uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot
        file is used to populate the node group (shard). The Amazon S3 object name in
        the ARN cannot contain any commas.

        .. note::
            This parameter is ONLY valid if the Engine parameter is redis.

    SnapshotName
        The name of a Redis snapshot from which to restore data into the new node group
        (shard). The snapshot status changes to restoring while the new node group (shard)
        is being created.

        .. note::
            This parameter is ONLY valid if the Engine parameter is redis.

    PreferredMaintenanceWindow
        Specifies the weekly time range during which maintenance on the cache cluster is
        permitted. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
        (24H Clock UTC). The minimum maintenance window is a 60 minute period.

        Valid values for ddd are: sun, mon, tue, wed, thu, fri, sat

        Example: sun:23:00-mon:01:30

    Port
        The port number on which each of the cache nodes accepts connections.

        Default: 6379

    NotificationTopicArn
        The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS)
        topic to which notifications are sent.

        .. note::
            The Amazon SNS topic owner must be the same as the cache cluster
            owner.

    AutoMinorVersionUpgrade
        This (boolean) parameter is currently disabled.

    SnapshotRetentionLimit
        The number of days for which ElastiCache retains automatic snapshots before
        deleting them.

        Default: 0 (i.e., automatic backups are disabled for this cache cluster).

        .. note::
            This parameter is ONLY valid if the Engine parameter is redis.

    SnapshotWindow
        The daily time range (in UTC) during which ElastiCache begins taking a daily
        snapshot of your node group (shard). If you do not specify this parameter,
        ElastiCache automatically chooses an appropriate time range.

        Example: 05:00-09:00

        .. note::
            This parameter is ONLY valid if the Engine parameter is redis.

    AuthToken
        The password used to access a password protected server.

        Password constraints:

        - Must be only printable ASCII characters.
        - Must be at least 16 characters and no more than 128 characters in length.
        - Cannot contain any of the following characters: '/', '"', or "@".

    CacheNodeIdsToRemove
        A list of cache node IDs to be removed. A node ID is a numeric identifier (0001, 0002,
        etc.). This parameter is only valid when NumCacheNodes is less than the existing number of
        cache nodes. The number of cache node IDs supplied in this parameter must match the
        difference between the existing number of cache nodes in the cluster or pending cache nodes,
        whichever is greater, and the value of NumCacheNodes in the request.

    NewAvailabilityZones
        The list of Availability Zones where the new Memcached cache nodes are created.
        This parameter is only valid when NumCacheNodes in the request is greater than the sum of
        the number of active cache nodes and the number of cache nodes pending creation (which may
        be zero). The number of Availability Zones supplied in this list must match the cache nodes
        being added in this request.

        Note: This option is only supported on Memcached clusters.

    NotificationTopicStatus
        The status of the SNS notification topic. Notifications are sent only if the status is active.

        Valid values: active | inactive

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # Drop salt-internal kwargs (requisites etc., all prefixed with "_") that
    # the state compiler injects; everything left is forwarded to boto3 calls.
    args = {k: v for k, v in args.items() if not k.startswith("_")}
    current = __salt__["boto3_elasticache.describe_cache_clusters"](
        name, region=region, key=key, keyid=keyid, profile=profile
    )
    if current:
        # Cluster already exists: skip creation and go straight to the
        # diff/modify pass below.
        check_update = True
    else:
        check_update = False
        # These parameters are accepted by modify_cache_cluster() but not by
        # create_cache_cluster(), so exclude them from the create call and
        # schedule a follow-up modify pass instead.
        only_on_modify = [
            "CacheNodeIdsToRemove",
            "NewAvailabilityZones",
            "NotificationTopicStatus",
        ]
        create_args = {}
        for k, v in args.items():
            if k in only_on_modify:
                check_update = True
            else:
                create_args[k] = v
        if __opts__["test"]:
            ret["comment"] = "Cache cluster {} would be created.".format(name)
            ret["result"] = None
            return ret
        created = __salt__["boto3_elasticache.create_cache_cluster"](
            name,
            wait=wait,
            security_groups=security_groups,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
            **create_args
        )
        if created:
            # Re-describe to capture the cluster's actual post-create state.
            new = __salt__["boto3_elasticache.describe_cache_clusters"](
                name, region=region, key=key, keyid=keyid, profile=profile
            )
            ret["comment"] = "Cache cluster {} was created.".format(name)
            ret["changes"]["old"] = None
            ret["changes"]["new"] = new[0]
        else:
            ret["result"] = False
            ret["comment"] = "Failed to create {} cache cluster.".format(name)
    if check_update:
        # Refresh this in case we're updating from 'only_on_modify' above...
        updated = __salt__["boto3_elasticache.describe_cache_clusters"](
            name, region=region, key=key, keyid=keyid, profile=profile
        )
        # NOTE(review): this indexes updated["CacheClusters"][0], while the
        # other describe_cache_clusters() results in this function are indexed
        # as plain lists (new[0]).  One of the two shapes looks wrong - verify
        # against the boto3_elasticache execution module's actual return type.
        need_update = _diff_cache_cluster(updated["CacheClusters"][0], args)
        if need_update:
            if __opts__["test"]:
                ret["comment"] = "Cache cluster {} would be modified.".format(name)
                ret["result"] = None
                return ret
            modified = __salt__["boto3_elasticache.modify_cache_cluster"](
                name,
                wait=wait,
                security_groups=security_groups,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
                **need_update
            )
            if modified:
                new = __salt__["boto3_elasticache.describe_cache_clusters"](
                    name, region=region, key=key, keyid=keyid, profile=profile
                )
                if ret["comment"]:  # 'create' just ran...
                    ret["comment"] += " ... and then immediately modified."
                else:
                    ret["comment"] = "Cache cluster {} was modified.".format(name)
                ret["changes"]["old"] = current
                ret["changes"]["new"] = new[0]
            else:
                ret["result"] = False
                ret["comment"] = "Failed to modify cache cluster {}.".format(name)
        else:
            ret["comment"] = "Cache cluster {} is in the desired state.".format(name)
    return ret
def cache_cluster_absent(
    name, wait=600, region=None, key=None, keyid=None, profile=None, **args
):
    """
    Ensure a given cache cluster is deleted.

    name
        Name of the cache cluster.

    wait
        Integer describing how long, in seconds, to wait for confirmation from AWS that the
        resource is in the desired state. Zero meaning to return success or failure immediately
        of course. Note that waiting for the cluster to become available is generally the
        better course, as failure to do so will often lead to subsequent failures when managing
        dependent resources.

    CacheClusterId
        The node group (shard) identifier.

        Note: In general this parameter is not needed, as 'name' is used if it's not provided.

    FinalSnapshotIdentifier
        The user-supplied name of a final cache cluster snapshot. This is the unique name
        that identifies the snapshot. ElastiCache creates the snapshot, and then deletes the
        cache cluster immediately afterward.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # Drop salt-internal kwargs (all prefixed with "_") before forwarding.
    args = {k: v for k, v in args.items() if not k.startswith("_")}

    # Already gone - nothing to do.
    if not __salt__["boto3_elasticache.cache_cluster_exists"](
        name, region=region, key=key, keyid=keyid, profile=profile
    ):
        ret["comment"] = "Cache cluster {} already absent.".format(name)
        return ret

    if __opts__["test"]:
        ret["comment"] = "Cache cluster {} would be removed.".format(name)
        ret["result"] = None
        return ret

    deleted = __salt__["boto3_elasticache.delete_cache_cluster"](
        name,
        wait=wait,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
        **args
    )
    if deleted:
        ret["changes"]["old"] = name
        ret["changes"]["new"] = None
    else:
        ret["result"] = False
        ret["comment"] = "Failed to delete {} cache cluster.".format(name)
    return ret
def _diff_replication_group(current, desired):
"""
If you need to enhance what modify_replication_group() considers when deciding what is to be
(or can be) updated, add it to 'modifiable' below. It's a dict mapping the param as used
in modify_replication_group() to that in describe_replication_groups(). Any data fiddlery
that needs to be done to make the mappings meaningful should be done in the munging section
below as well.
This function will ONLY touch settings that are explicitly called out in 'desired' - any
settings which might have previously been changed from their 'default' values will not be
changed back simply by leaving them out of 'desired'. This is both intentional, and
much, much easier to code :)
"""
if current.get("AutomaticFailover") is not None:
current["AutomaticFailoverEnabled"] = (
True if current["AutomaticFailover"] in ("enabled", "enabling") else False
)
modifiable = {
# Amazingly, the AWS API provides NO WAY to query the current state of most repl group
# settings! All we can do is send a modify op with the desired value, just in case it's
# different. And THEN, we can't determine if it's been changed! Stupid? YOU BET!
"AutomaticFailoverEnabled": "AutomaticFailoverEnabled",
"AutoMinorVersionUpgrade": None,
"CacheNodeType": None,
"CacheParameterGroupName": None,
"CacheSecurityGroupNames": None,
"EngineVersion": None,
"NotificationTopicArn": None,
"NotificationTopicStatus": None,
"PreferredMaintenanceWindow": None,
"PrimaryClusterId": None,
"ReplicationGroupDescription": "Description",
"SecurityGroupIds": None,
"SnapshotRetentionLimit": "SnapshotRetentionLimit",
"SnapshottingClusterId": "SnapshottingClusterId",
"SnapshotWindow": "SnapshotWindow",
}
need_update = {}
for m, o in modifiable.items():
if m in desired:
if not o:
# Always pass these through - let AWS do the math...
need_update[m] = desired[m]
else:
if m in current:
# Equivalence testing works fine for current simple type comparisons
# This might need enhancement if more complex structures enter the picture
if current[m] != desired[m]:
need_update[m] = desired[m]
return need_update
def replication_group_present(
    name,
    wait=900,
    security_groups=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
    **args
):
    """
    Ensure a replication group exists and is in the given state.

    name
        Name of replication group

    wait
        Integer describing how long, in seconds, to wait for confirmation from AWS that the
        resource is in the desired state. Zero meaning to return success or failure immediately
        of course. Note that waiting for the cluster to become available is generally the
        better course, as failure to do so will often lead to subsequent failures when managing
        dependent resources.

    security_groups
        One or more VPC security groups (names and/or IDs) associated with the cache cluster.

        .. note::
            This is additive with any sec groups provided via the
            SecurityGroupIds parameter below. Use this parameter ONLY when you
            are creating a cluster in a VPC.

    ReplicationGroupId
        The replication group identifier. This parameter is stored as a lowercase string.
        Constraints:

        - A name must contain from 1 to 20 alphanumeric characters or hyphens.
        - The first character must be a letter.
        - A name cannot end with a hyphen or contain two consecutive hyphens.

        .. note::
            In general this parameter is not needed, as 'name' is used if it's
            not provided.

    ReplicationGroupDescription
        A user-created description for the replication group.

    PrimaryClusterId
        The identifier of the cache cluster that serves as the primary for this replication group.
        This cache cluster must already exist and have a status of available. This parameter is
        not required if NumCacheClusters, NumNodeGroups, or ReplicasPerNodeGroup is specified.

    AutomaticFailoverEnabled
        Specifies whether a read-only replica is automatically promoted to read/write primary if
        the existing primary fails. If true, Multi-AZ is enabled for this replication group. If
        false, Multi-AZ is disabled for this replication group.
        Default: False

        .. note::
            AutomaticFailoverEnabled must be enabled for Redis (cluster mode
            enabled) replication groups.
            ElastiCache Multi-AZ replication groups is not supported on:

            - Redis versions earlier than 2.8.6.
            - Redis (cluster mode disabled): T1 and T2 node types.
            - Redis (cluster mode enabled): T2 node types.

    NumCacheClusters
        The number of clusters this replication group initially has. This parameter is not used
        if there is more than one node group (shard). You should use ReplicasPerNodeGroup instead.
        If Multi-AZ is enabled , the value of this parameter must be at least 2. The maximum
        permitted value for NumCacheClusters is 6 (primary plus 5 replicas).

    PreferredCacheClusterAZs
        A list of EC2 Availability Zones in which the replication group's cache clusters are
        created. The order of the Availability Zones in the list is the order in which clusters
        are allocated. The primary cluster is created in the first AZ in the list. This parameter
        is not used if there is more than one node group (shard). You should use
        NodeGroupConfiguration instead. The number of Availability Zones listed must equal the
        value of NumCacheClusters.
        Default: System chosen Availability Zones.

        .. note::
            If you are creating your replication group in an Amazon VPC
            (recommended), you can only locate cache clusters in Availability
            Zones associated with the subnets in the selected subnet group.

    NumNodeGroups
        An optional parameter that specifies the number of node groups (shards)
        for this Redis (cluster mode enabled) replication group. For Redis
        (cluster mode disabled) either omit this parameter or set it to 1.
        Default: 1

    ReplicasPerNodeGroup
        An optional parameter that specifies the number of replica nodes in
        each node group (shard). Valid values are: 0 to 5

    NodeGroupConfiguration
        A list of node group (shard) configuration options. Each node group (shard) configuration
        has the following: Slots, PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount.
        If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode enabled)
        replication group, you can use this parameter to configure one node group (shard) or you
        can omit this parameter. For fiddly details of the expected data layout of this param, see
        http://boto3.readthedocs.io/en/latest/reference/services/elasticache.html?#ElastiCache.Client.create_replication_group

    CacheNodeType
        The compute and memory capacity of the nodes in the node group (shard).
        See https://aws.amazon.com/elasticache/pricing/ for current sizing, prices, and constraints.

        .. note:
            All T2 instances are created in an Amazon Virtual Private Cloud
            (Amazon VPC). Backup/restore is not supported for Redis (cluster
            mode disabled) T1 and T2 instances. Backup/restore is supported on
            Redis (cluster mode enabled) T2 instances. Redis Append-only files
            (AOF) functionality is not supported for T1 or T2 instances.

    Engine
        The name of the cache engine to be used for the cache clusters in this replication group.

    EngineVersion
        The version number of the cache engine to be used for the cache clusters in this replication
        group. To view the supported cache engine versions, use the DescribeCacheEngineVersions
        operation.

        .. note::
            You can upgrade to a newer engine version but you cannot downgrade
            to an earlier engine version. If you want to use an earlier engine
            version, you must delete the existing cache cluster or replication
            group and create it anew with the earlier engine version.

    CacheParameterGroupName
        The name of the parameter group to associate with this replication group. If this argument
        is omitted, the default cache parameter group for the specified engine is used.

        .. note::
            If you are running Redis version 3.2.4 or later, only one node
            group (shard), and want to use a default parameter group, we
            recommend that you specify the parameter group by name.
            To create a Redis (cluster mode disabled) replication group, use
            CacheParameterGroupName=default.redis3.2
            To create a Redis (cluster mode enabled) replication group, use
            CacheParameterGroupName=default.redis3.2.cluster.on

    CacheSubnetGroupName
        The name of the cache subnet group to be used for the replication group.

        .. note::
            If you're going to launch your cluster in an Amazon VPC, you need
            to create a s group before you start creating a cluster. For more
            information, see Subnets and Subnet Groups.

    CacheSecurityGroupNames
        A list of cache security group names to associate with this replication group.

    SecurityGroupIds
        One or more Amazon VPC security groups associated with this replication group. Use this
        parameter only when you are creating a replication group in an VPC.

    Tags
        A list of tags to be added to this resource. Note that due to shortcomings in the
        AWS API for Elasticache, these can only be set during resource creation - later
        modification is not (currently) supported.

    SnapshotArns
        A list of ARNs that uniquely identify the Redis RDB snapshot files stored in Amazon S3.
        These snapshot files are used to populate the replication group. The Amazon S3 object name
        in the ARN cannot contain any commas. The list must match the number of node groups (shards)
        in the replication group, which means you cannot repartition.

        .. note::
            This parameter is only valid if the Engine parameter is redis.

    SnapshotName
        The name of a snapshot from which to restore data into the new replication group. The
        snapshot status changes to restoring while the new replication group is being created.
        Note: This parameter is only valid if the Engine parameter is redis.

    PreferredMaintenanceWindow
        Specifies the weekly time range during which maintenance on the cluster is performed. It is
        specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum
        maintenance window is a 60 minute period.
        Valid values for ddd are: sun, mon, tue, wed, thu, fri, sat
        Example: sun:23:00-mon:01:30

    Port
        The port number on which each member of the replication group accepts connections.

    NotificationTopicArn
        The ARN of an SNS topic to which notifications are sent.

        .. note::
            The SNS topic owner must be the same as the cache cluster owner.

    AutoMinorVersionUpgrade
        This parameter is currently disabled.

    SnapshotRetentionLimit
        The number of days for which ElastiCache will retain automatic snapshots before deleting
        them.
        Default: 0 (that is, automatic backups are disabled for this cache cluster).

        .. note::
            This parameter is only valid if the Engine parameter is redis.

    SnapshotWindow
        The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of
        your node group (shard). If you do not specify this parameter, ElastiCache automatically
        chooses an appropriate time range.
        Example: 05:00-09:00

        .. note::
            This parameter is only valid if the Engine parameter is redis.

    AuthToken
        The password used to access a password protected server.
        Password constraints:

        - Must be only printable ASCII characters.
        - Must be at least 16 characters and no more than 128 characters in length.
        - Cannot contain any of the following characters: '/', '"', or "@".

    SnapshottingClusterId
        The cache cluster ID that is used as the daily snapshot source for the replication group.

    NotificationTopicStatus
        The status of the SNS notification topic. Notifications are sent only if the status is active.
        Valid values: active | inactive

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # Drop the salt-internal keyword args (__pub_*, etc.) before passing through.
    args = {k: v for k, v in args.items() if not k.startswith("_")}
    current = __salt__["boto3_elasticache.describe_replication_groups"](
        name, region=region, key=key, keyid=keyid, profile=profile
    )
    if current:
        check_update = True
    else:
        check_update = False
        # These settings are only accepted by modify_replication_group(); if
        # requested at create time, hold them back and apply them with a
        # follow-up modify pass below.
        only_on_modify = ["SnapshottingClusterId", "NotificationTopicStatus"]
        create_args = {}
        for k, v in args.items():
            if k in only_on_modify:
                check_update = True
            else:
                create_args[k] = v
        if __opts__["test"]:
            ret["comment"] = "Replication group {} would be created.".format(name)
            ret["result"] = None
            return ret
        created = __salt__["boto3_elasticache.create_replication_group"](
            name,
            wait=wait,
            security_groups=security_groups,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
            **create_args
        )
        if not created:
            ret["result"] = False
            ret["comment"] = "Failed to create {} replication group.".format(name)
            # Bail out now: the describe/modify pass below would otherwise
            # IndexError on (or try to modify) a group that was never created.
            return ret
        new = __salt__["boto3_elasticache.describe_replication_groups"](
            name, region=region, key=key, keyid=keyid, profile=profile
        )
        ret["comment"] = "Replication group {} was created.".format(name)
        ret["changes"]["old"] = None
        ret["changes"]["new"] = new[0]
    if check_update:
        # Refresh this in case we're updating from 'only_on_modify' above...
        updated = __salt__["boto3_elasticache.describe_replication_groups"](
            name, region=region, key=key, keyid=keyid, profile=profile
        )[0]
        need_update = _diff_replication_group(updated, args)
        if need_update:
            if __opts__["test"]:
                ret["comment"] = "Replication group {} would be modified.".format(name)
                ret["result"] = None
                return ret
            modified = __salt__["boto3_elasticache.modify_replication_group"](
                name,
                wait=wait,
                security_groups=security_groups,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
                **need_update
            )
            if modified:
                new = __salt__["boto3_elasticache.describe_replication_groups"](
                    name, region=region, key=key, keyid=keyid, profile=profile
                )
                if ret["comment"]:  # 'create' just ran...
                    ret["comment"] += " ... and then immediately modified."
                else:
                    ret["comment"] = "Replication group {} was modified.".format(name)
                ret["changes"]["old"] = current[0] if current else None
                ret["changes"]["new"] = new[0]
            else:
                ret["result"] = False
                ret["comment"] = "Failed to modify replication group {}.".format(name)
        else:
            ret["comment"] = "Replication group {} is in the desired state.".format(
                name
            )
    return ret
def replication_group_absent(
    name, wait=600, region=None, key=None, keyid=None, profile=None, **args
):
    """
    Ensure a given replication group is deleted.

    name
        Name of the replication group.

    wait
        Integer describing how long, in seconds, to wait for confirmation from AWS that the
        resource is in the desired state. Zero meaning to return success or failure immediately
        of course. Note that waiting for the cluster to become available is generally the
        better course, as failure to do so will often lead to subsequent failures when managing
        dependent resources.

    ReplicationGroupId
        The replication group identifier.
        Note: In general this parameter is not needed, as 'name' is used if it's not provided.

    RetainPrimaryCluster
        If set to true, all of the read replicas are deleted, but the primary node is retained.

    FinalSnapshotIdentifier
        The name of a final node group (shard) snapshot. ElastiCache creates the snapshot from
        the primary node in the cluster, rather than one of the replicas; this is to ensure that
        it captures the freshest data. After the final snapshot is taken, the replication group is
        immediately deleted.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # Strip salt-internal (__pub_*, etc.) kwargs before passing them to boto.
    args = {k: v for k, v in args.items() if not k.startswith("_")}
    exists = __salt__["boto3_elasticache.replication_group_exists"](
        name, region=region, key=key, keyid=keyid, profile=profile
    )
    if not exists:
        ret["comment"] = "Replication group {} already absent.".format(name)
        return ret
    if __opts__["test"]:
        ret["comment"] = "Replication group {} would be removed.".format(name)
        ret["result"] = None
        return ret
    deleted = __salt__["boto3_elasticache.delete_replication_group"](
        name,
        wait=wait,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
        **args
    )
    if deleted:
        ret["changes"] = {"old": name, "new": None}
    else:
        ret["result"] = False
        ret["comment"] = "Failed to delete {} replication group.".format(name)
    return ret
def _diff_cache_subnet_group(current, desired):
"""
If you need to enhance what modify_cache_subnet_group() considers when deciding what is to be
(or can be) updated, add it to 'modifiable' below. It's a dict mapping the param as used
in modify_cache_subnet_group() to that in describe_cache_subnet_group(). Any data fiddlery that
needs to be done to make the mappings meaningful should be done in the munging section
below as well.
This function will ONLY touch settings that are explicitly called out in 'desired' - any
settings which might have previously been changed from their 'default' values will not be
changed back simply by leaving them out of 'desired'. This is both intentional, and
much, much easier to code :)
"""
modifiable = {
"CacheSubnetGroupDescription": "CacheSubnetGroupDescription",
"SubnetIds": "SubnetIds",
}
need_update = {}
for m, o in modifiable.items():
if m in desired:
if not o:
# Always pass these through - let AWS do the math...
need_update[m] = desired[m]
else:
if m in current:
# Equivalence testing works fine for current simple type comparisons
# This might need enhancement if more complex structures enter the picture
if current[m] != desired[m]:
need_update[m] = desired[m]
return need_update
def cache_subnet_group_present(
    name, subnets=None, region=None, key=None, keyid=None, profile=None, **args
):
    """
    Ensure cache subnet group exists.

    name
        A name for the cache subnet group. This value is stored as a lowercase string.
        Constraints: Must contain no more than 255 alphanumeric characters or hyphens.

    subnets
        A list of VPC subnets (IDs, Names, or a mix) for the cache subnet group.

    CacheSubnetGroupName
        A name for the cache subnet group. This value is stored as a lowercase string.
        Constraints: Must contain no more than 255 alphanumeric characters or hyphens.
        Note: In general this parameter is not needed, as 'name' is used if it's not provided.

    CacheSubnetGroupDescription
        A description for the cache subnet group.

    SubnetIds
        A list of VPC subnet IDs for the cache subnet group. This is ADDITIVE with 'subnets' above.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # Drop salt-internal (__pub_*, etc.) kwargs before passing them through.
    args = {k: v for k, v in args.items() if not k.startswith("_")}
    current = __salt__["boto3_elasticache.describe_cache_subnet_groups"](
        name, region=region, key=key, keyid=keyid, profile=profile
    )
    if current:
        check_update = True
    else:
        check_update = False
        if __opts__["test"]:
            ret["comment"] = "Cache subnet group {} would be created.".format(name)
            ret["result"] = None
            return ret
        created = __salt__["boto3_elasticache.create_cache_subnet_group"](
            name,
            subnets=subnets,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
            **args
        )
        if not created:
            ret["result"] = False
            ret["comment"] = "Failed to create {} cache subnet group.".format(name)
            return ret
        new = __salt__["boto3_elasticache.describe_cache_subnet_groups"](
            name, region=region, key=key, keyid=keyid, profile=profile
        )
        ret["comment"] = "Cache subnet group {} was created.".format(name)
        ret["changes"]["old"] = None
        ret["changes"]["new"] = new[0]
    if check_update:
        # describe_cache_subnet_groups() returns a list of group dicts; diff
        # against the existing group itself, not the enclosing list (passing
        # the list made the membership tests below always fail, so changes
        # were never detected).
        need_update = _diff_cache_subnet_group(current[0], args)
        if need_update:
            if __opts__["test"]:
                ret["comment"] = "Cache subnet group {} would be modified.".format(name)
                ret["result"] = None
                return ret
            modified = __salt__["boto3_elasticache.modify_cache_subnet_group"](
                name,
                subnets=subnets,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
                **need_update
            )
            if modified:
                new = __salt__["boto3_elasticache.describe_cache_subnet_groups"](
                    name, region=region, key=key, keyid=keyid, profile=profile
                )
                ret["comment"] = "Cache subnet group {} was modified.".format(name)
                # Was current["CacheSubetGroups"][0] - a typo'd string index
                # into a list, which raised at runtime. 'current' is already
                # the list of group dicts.
                ret["changes"]["old"] = current[0]
                ret["changes"]["new"] = new[0]
            else:
                ret["result"] = False
                ret["comment"] = "Failed to modify cache subnet group {}.".format(name)
        else:
            ret["comment"] = "Cache subnet group {} is in the desired state.".format(
                name
            )
    return ret
def cache_subnet_group_absent(
    name, region=None, key=None, keyid=None, profile=None, **args
):
    """
    Ensure a given cache subnet group is deleted.

    name
        Name of the cache subnet group.

    CacheSubnetGroupName
        A name for the cache subnet group.
        Note: In general this parameter is not needed, as 'name' is used if it's not provided.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # Drop salt-internal (__pub_*, etc.) kwargs before passing them through.
    args = {k: v for k, v in args.items() if not k.startswith("_")}
    exists = __salt__["boto3_elasticache.cache_subnet_group_exists"](
        name, region=region, key=key, keyid=keyid, profile=profile
    )
    if not exists:
        ret["comment"] = "Cache subnet group {} already absent.".format(name)
        return ret
    if __opts__["test"]:
        ret["comment"] = "Cache subnet group {} would be removed.".format(name)
        ret["result"] = None
        return ret
    deleted = __salt__["boto3_elasticache.delete_cache_subnet_group"](
        name, region=region, key=key, keyid=keyid, profile=profile, **args
    )
    if deleted:
        ret["changes"]["old"] = name
        ret["changes"]["new"] = None
    else:
        ret["result"] = False
        # Message typo fixed (was "cache_subnet group").
        ret["comment"] = "Failed to delete {} cache subnet group.".format(name)
    return ret
def __virtual__():
    """
    Only load if the Traffic Server module is available in __salt__
    """
    if "trafficserver.set_config" not in __salt__:
        return (False, "trafficserver module could not be loaded")
    return "trafficserver"
def bounce_cluster(name):
    """
    Bounce all Traffic Server nodes in the cluster. Bouncing Traffic Server
    shuts down and immediately restarts Traffic Server, node-by-node.

    .. code-block:: yaml

        bounce_ats_cluster:
          trafficserver.bounce_cluster
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    if __opts__["test"]:
        ret["comment"] = "Bouncing cluster"
    else:
        __salt__["trafficserver.bounce_cluster"]()
        ret["result"] = True
        ret["comment"] = "Bounced cluster"
    return ret
def bounce_local(name, drain=False):
    """
    Bounce Traffic Server on the local node. Bouncing Traffic Server shuts down
    and immediately restarts the Traffic Server node.

    With ``drain`` set, this modifies the behavior of traffic_line -b and
    traffic_line -L such that traffic_server is not shut down until the number
    of active client connections drops to the number given by the
    proxy.config.restart.active_client_threshold configuration variable.

    .. code-block:: yaml

        bounce_ats_local:
          trafficserver.bounce_local

        bounce_ats_local:
          trafficserver.bounce_local
            - drain: True
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    if __opts__["test"]:
        ret["comment"] = "Bouncing local node"
        return ret
    if drain:
        __salt__["trafficserver.bounce_local"](drain=True)
        ret["comment"] = "Bounced local node with drain option"
    else:
        __salt__["trafficserver.bounce_local"]()
        ret["comment"] = "Bounced local node"
    ret["result"] = True
    return ret
def clear_cluster(name):
    """
    Clears accumulated statistics on all nodes in the cluster.

    .. code-block:: yaml

        clear_ats_cluster:
          trafficserver.clear_cluster
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    if __opts__["test"]:
        ret["comment"] = "Clearing cluster statistics"
    else:
        __salt__["trafficserver.clear_cluster"]()
        ret["result"] = True
        ret["comment"] = "Cleared cluster statistics"
    return ret
def clear_node(name):
    """
    Clears accumulated statistics on the local node.

    .. code-block:: yaml

        clear_ats_node:
          trafficserver.clear_node
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    if __opts__["test"]:
        ret["comment"] = "Clearing local node statistics"
    else:
        __salt__["trafficserver.clear_node"]()
        ret["result"] = True
        ret["comment"] = "Cleared local node statistics"
    return ret
def restart_cluster(name):
    """
    Restart the traffic_manager process and the traffic_server process on all
    the nodes in a cluster.

    .. code-block:: bash

        restart_ats_cluster:
          trafficserver.restart_cluster
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    if __opts__["test"]:
        ret["comment"] = "Restarting cluster"
    else:
        __salt__["trafficserver.restart_cluster"]()
        ret["result"] = True
        ret["comment"] = "Restarted cluster"
    return ret
def restart_local(name, drain=False):
    """
    Restart the traffic_manager and traffic_server processes on the local node.

    With ``drain`` set, this modifies the behavior of traffic_line -b and
    traffic_line -L such that traffic_server is not shut down until the number
    of active client connections drops to the number given by the
    proxy.config.restart.active_client_threshold configuration variable.

    .. code-block:: yaml

        restart_ats_local:
          trafficserver.restart_local

        restart_ats_local_drain:
          trafficserver.restart_local
            - drain: True
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    if __opts__["test"]:
        ret["comment"] = "Restarting local node"
        return ret
    if drain:
        __salt__["trafficserver.restart_local"](drain=True)
        ret["comment"] = "Restarted local node with drain option"
    else:
        __salt__["trafficserver.restart_local"]()
        ret["comment"] = "Restarted local node"
    ret["result"] = True
    return ret
def config(name, value):
    """
    Set Traffic Server configuration variable values.

    .. code-block:: yaml

        proxy.config.proxy_name:
          trafficserver.config:
            - value: cdn.site.domain.tld

        OR

        traffic_server_setting:
          trafficserver.config:
            - name: proxy.config.proxy_name
            - value: cdn.site.domain.tld
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    if __opts__["test"]:
        ret["comment"] = "Configuring {} to {}".format(name, value)
    else:
        __salt__["trafficserver.set_config"](name, value)
        ret["result"] = True
        ret["comment"] = "Configured {} to {}".format(name, value)
    return ret
def shutdown(name):
    """
    Shut down Traffic Server on the local node.

    .. code-block:: yaml

        shutdown_ats:
          trafficserver.shutdown
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    if __opts__["test"]:
        ret["comment"] = "Shutting down local node"
    else:
        __salt__["trafficserver.shutdown"]()
        ret["result"] = True
        ret["comment"] = "Shutdown local node"
    return ret
def startup(name):
    """
    Start Traffic Server on the local node.

    .. code-block:: yaml

        startup_ats:
          trafficserver.startup
    """
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}
    if __opts__["test"]:
        ret["comment"] = "Starting up local node"
        return ret
    __salt__["trafficserver.startup"]()
    ret["result"] = True
    # Report past tense on success; previously this repeated the test-mode
    # message "Starting up local node", unlike every sibling state here.
    ret["comment"] = "Started up local node"
    return ret
def refresh(name):
    """
    Initiate a Traffic Server configuration file reread. Use this command to
    update the running configuration after any configuration file modification.

    The timestamp of the last reconfiguration event (in seconds since epoch) is
    published in the proxy.node.config.reconfigure_time metric.

    .. code-block:: yaml

        refresh_ats:
          trafficserver.refresh
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    if __opts__["test"]:
        ret["comment"] = "Refreshing local node configuration"
    else:
        __salt__["trafficserver.refresh"]()
        ret["result"] = True
        ret["comment"] = "Refreshed local node configuration"
    return ret
def zero_cluster(name):
    """
    Reset performance statistics to zero across the cluster.

    .. code-block:: yaml

        zero_ats_cluster:
          trafficserver.zero_cluster
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    if __opts__["test"]:
        ret["comment"] = "Zeroing cluster statistics"
    else:
        __salt__["trafficserver.zero_cluster"]()
        ret["result"] = True
        ret["comment"] = "Zeroed cluster statistics"
    return ret
def zero_node(name):
    """
    Reset performance statistics to zero on the local node.

    .. code-block:: yaml

        zero_ats_node:
          trafficserver.zero_node
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    if __opts__["test"]:
        ret["comment"] = "Zeroing local node statistics"
    else:
        __salt__["trafficserver.zero_node"]()
        ret["result"] = True
        ret["comment"] = "Zeroed local node statistics"
    return ret
def offline(name, path):
    """
    Mark a cache storage device as offline. The storage is identified by a path
    which must match exactly a path specified in storage.config. This removes
    the storage from the cache and redirects requests that would have used this
    storage to other storage. This has exactly the same effect as a disk
    failure for that storage. This does not persist across restarts of the
    traffic_server process.

    .. code-block:: yaml

        offline_ats_path:
          trafficserver.offline:
            - path: /path/to/cache
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    if __opts__["test"]:
        ret["comment"] = "Setting {} to offline".format(path)
    else:
        __salt__["trafficserver.offline"](path)
        ret["result"] = True
        ret["comment"] = "Set {} as offline".format(path)
    return ret
import logging
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only load if boto is available.
    """
    if "boto_elasticache.exists" not in __salt__:
        return (False, "boto_elasticache module could not be loaded")
    return "boto_elasticache"
def cache_cluster_present(*args, **kwargs):
    # Backwards-compatible alias for present().
    return present(*args, **kwargs)
def present(
    name,
    engine=None,
    cache_node_type=None,
    num_cache_nodes=None,
    preferred_availability_zone=None,
    port=None,
    cache_parameter_group_name=None,
    cache_security_group_names=None,
    replication_group_id=None,
    auto_minor_version_upgrade=True,
    security_group_ids=None,
    cache_subnet_group_name=None,
    engine_version=None,
    notification_topic_arn=None,
    preferred_maintenance_window=None,
    wait=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Ensure the cache cluster exists.
    name
        Name of the cache cluster (cache cluster id).
    engine
        The name of the cache engine to be used for this cache cluster. Valid
        values are memcached or redis.
    cache_node_type
        The compute and memory capacity of the nodes in the cache cluster.
        cache.t1.micro, cache.m1.small, etc. See: https://boto.readthedocs.io/en/latest/ref/elasticache.html#boto.elasticache.layer1.ElastiCacheConnection.create_cache_cluster
    num_cache_nodes
        The number of cache nodes that the cache cluster will have.
    preferred_availability_zone
        The EC2 Availability Zone in which the cache cluster will be created.
        All cache nodes belonging to a cache cluster are placed in the
        preferred availability zone.
    port
        The port number on which each of the cache nodes will accept
        connections.
    cache_parameter_group_name
        The name of the cache parameter group to associate with this cache
        cluster. If this argument is omitted, the default cache parameter group
        for the specified engine will be used.
    cache_security_group_names
        A list of cache security group names to associate with this cache
        cluster. Use this parameter only when you are creating a cluster
        outside of a VPC.
    replication_group_id
        The replication group to which this cache cluster should belong. If
        this parameter is specified, the cache cluster will be added to the
        specified replication group as a read replica; otherwise, the cache
        cluster will be a standalone primary that is not part of any
        replication group.
    auto_minor_version_upgrade
        Determines whether minor engine upgrades will be applied automatically
        to the cache cluster during the maintenance window. A value of True
        allows these upgrades to occur; False disables automatic upgrades.
    security_group_ids
        One or more VPC security groups associated with the cache cluster. Use
        this parameter only when you are creating a cluster in a VPC.
    cache_subnet_group_name
        The name of the cache subnet group to be used for the cache cluster.
        Use this parameter only when you are creating a cluster in a VPC.
    engine_version
        The version number of the cache engine to be used for this cluster.
    notification_topic_arn
        The Amazon Resource Name (ARN) of the Amazon Simple Notification
        Service (SNS) topic to which notifications will be sent. The Amazon SNS
        topic owner must be the same as the cache cluster owner.
    preferred_maintenance_window
        The weekly time range (in UTC) during which system maintenance can
        occur. Example: sun:05:00-sun:09:00
    wait
        Boolean. Wait for confirmation from boto that the cluster is in the
        available state.
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # Inside a VPC, classic-style security group *names* can't be used -
    # translate them into VPC security group IDs (looked up via the subnet
    # group's VPC) and merge with any IDs given directly.
    if cache_security_group_names and cache_subnet_group_name:
        _subnet_group = __salt__["boto_elasticache.get_cache_subnet_group"](
            cache_subnet_group_name, region, key, keyid, profile
        )
        vpc_id = _subnet_group["vpc_id"]
        if not security_group_ids:
            security_group_ids = []
        _security_group_ids = __salt__["boto_secgroup.convert_to_group_ids"](
            groups=cache_security_group_names,
            vpc_id=vpc_id,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        security_group_ids.extend(_security_group_ids)
        # Names have been converted to IDs; don't also pass them to create().
        cache_security_group_names = None
    config = __salt__["boto_elasticache.get_config"](name, region, key, keyid, profile)
    # get_config() distinguishes "lookup failed" (None) from "cluster does not
    # exist" (empty) - handle the two cases separately below.
    if config is None:
        msg = "Failed to retrieve cache cluster info from AWS."
        ret["comment"] = msg
        ret["result"] = None
        return ret
    elif not config:
        if __opts__["test"]:
            msg = "Cache cluster {} is set to be created.".format(name)
            ret["comment"] = msg
            ret["result"] = None
            return ret
        created = __salt__["boto_elasticache.create"](
            name=name,
            num_cache_nodes=num_cache_nodes,
            cache_node_type=cache_node_type,
            engine=engine,
            replication_group_id=replication_group_id,
            engine_version=engine_version,
            cache_parameter_group_name=cache_parameter_group_name,
            cache_subnet_group_name=cache_subnet_group_name,
            cache_security_group_names=cache_security_group_names,
            security_group_ids=security_group_ids,
            preferred_availability_zone=preferred_availability_zone,
            preferred_maintenance_window=preferred_maintenance_window,
            port=port,
            notification_topic_arn=notification_topic_arn,
            auto_minor_version_upgrade=auto_minor_version_upgrade,
            wait=wait,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        if created:
            ret["changes"]["old"] = None
            # Re-describe so 'changes.new' reflects what AWS actually built.
            config = __salt__["boto_elasticache.get_config"](
                name, region, key, keyid, profile
            )
            ret["changes"]["new"] = config
        else:
            ret["result"] = False
            ret["comment"] = "Failed to create {} cache cluster.".format(name)
            return ret
    # TODO: support modification of existing elasticache clusters
    else:
        ret["comment"] = "Cache cluster {} is present.".format(name)
    return ret
def subnet_group_present(
    name,
    subnet_ids=None,
    subnet_names=None,
    description=None,
    tags=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Ensure an ElastiCache subnet group exists.

    .. versionadded:: 2015.8.0

    name
        The name for the ElastiCache subnet group. This value is stored as a lowercase string.

    subnet_ids
        A list of VPC subnet IDs for the cache subnet group. Exclusive with subnet_names.

    subnet_names
        A list of VPC subnet names for the cache subnet group. Exclusive with subnet_ids.

    description
        Subnet group description.

    tags
        A list of tags.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    # Nothing to do when the subnet group is already there.
    if __salt__["boto_elasticache.subnet_group_exists"](
        name=name, tags=tags, region=region, key=key, keyid=keyid, profile=profile
    ):
        ret["comment"] = "Subnet group present."
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Subnet group {} is set to be created.".format(name)
        return ret

    created = __salt__["boto_elasticache.create_subnet_group"](
        name=name,
        subnet_ids=subnet_ids,
        subnet_names=subnet_names,
        description=description,
        tags=tags,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if not created:
        ret["result"] = False
        ret["comment"] = "Failed to create {} subnet group.".format(name)
        return ret

    ret["changes"] = {"old": None, "new": name}
    ret["comment"] = "Subnet group {} created.".format(name)
    return ret
def cache_cluster_absent(*args, **kwargs):
    """
    Ensure the named cache cluster is deleted.

    Thin alias that forwards all arguments to :py:func:`absent`.
    """
    return absent(*args, **kwargs)
def absent(name, wait=True, region=None, key=None, keyid=None, profile=None):
    """
    Ensure the named elasticache cluster is deleted.

    name
        Name of the cache cluster.

    wait
        Boolean. Wait for confirmation from boto that the cluster is in the
        deleting state.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    # A cluster that is already gone requires no action.
    if not __salt__["boto_elasticache.exists"](name, region, key, keyid, profile):
        ret["comment"] = "{} does not exist in {}.".format(name, region)
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Cache cluster {} is set to be removed.".format(name)
        return ret

    if __salt__["boto_elasticache.delete"](name, wait, region, key, keyid, profile):
        ret["changes"] = {"old": name, "new": None}
    else:
        ret["result"] = False
        ret["comment"] = "Failed to delete {} cache cluster.".format(name)
    return ret
def replication_group_present(*args, **kwargs):
    """
    Ensure a replication group exists.

    Thin alias that forwards all arguments to :py:func:`creategroup`.
    """
    return creategroup(*args, **kwargs)
def creategroup(
    name,
    primary_cluster_id,
    replication_group_description,
    wait=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Ensure a replication group is created.

    name
        Name of replication group

    wait
        Waits for the group to be available

    primary_cluster_id
        Name of the master cache node

    replication_group_description
        Description for the group

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": None, "comment": "", "changes": {}}
    is_present = __salt__["boto_elasticache.group_exists"](
        name, region, key, keyid, profile
    )
    if not is_present:
        if __opts__["test"]:
            ret["comment"] = "Replication {} is set to be created.".format(name)
            ret["result"] = None
            # Bug fix: return here so that test mode never actually creates
            # the group (previously execution fell through to the create call).
            return ret
        created = __salt__["boto_elasticache.create_replication_group"](
            name,
            primary_cluster_id,
            replication_group_description,
            wait,
            region,
            key,
            keyid,
            profile,
        )
        if created:
            config = __salt__["boto_elasticache.describe_replication_group"](
                name, region, key, keyid, profile
            )
            ret["changes"]["old"] = None
            ret["changes"]["new"] = config
            ret["result"] = True
        else:
            ret["result"] = False
            ret["comment"] = "Failed to create {} replication group.".format(name)
    else:
        ret["comment"] = "{} replication group exists .".format(name)
        ret["result"] = True
    return ret
def subnet_group_absent(
    name, tags=None, region=None, key=None, keyid=None, profile=None
):
    """
    Ensure the named ElastiCache subnet group does not exist.

    name
        Name of the subnet group.

    tags
        A list of tags.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    # Already absent: report success without touching anything.
    if not __salt__["boto_elasticache.subnet_group_exists"](
        name=name, tags=tags, region=region, key=key, keyid=keyid, profile=profile
    ):
        ret["result"] = True
        ret["comment"] = "{} ElastiCache subnet group does not exist.".format(name)
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "ElastiCache subnet group {} is set to be removed.".format(
            name
        )
        return ret

    if not __salt__["boto_elasticache.delete_subnet_group"](
        name, region, key, keyid, profile
    ):
        ret["result"] = False
        ret["comment"] = "Failed to delete {} ElastiCache subnet group.".format(name)
        return ret

    ret["changes"] = {"old": name, "new": None}
    ret["comment"] = "ElastiCache subnet group {} deleted.".format(name)
    return ret
def replication_group_absent(
    name, tags=None, region=None, key=None, keyid=None, profile=None
):
    """
    Ensure the named ElastiCache replication group does not exist.

    name
        Name of the replication group.

    tags
        A list of tags.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    exists = __salt__["boto_elasticache.group_exists"](
        name=name, region=region, key=key, keyid=keyid, profile=profile
    )
    if not exists:
        ret["result"] = True
        ret["comment"] = "{} ElastiCache replication group does not exist.".format(name)
        log.info(ret["comment"])
        return ret
    if __opts__["test"]:
        ret[
            "comment"
        ] = "ElastiCache replication group {} is set to be removed.".format(name)
        # Bug fix: test mode must report pending changes with result None,
        # matching the convention of the other states in this module
        # (previously this incorrectly reported True).
        ret["result"] = None
        return ret
    deleted = __salt__["boto_elasticache.delete_replication_group"](
        name, region, key, keyid, profile
    )
    if not deleted:
        ret["result"] = False
        # Bug fix: set the comment *before* logging it; previously the stale
        # (empty) comment was logged, so the failure never reached the log.
        ret["comment"] = "Failed to delete {} ElastiCache replication group.".format(
            name
        )
        log.error(ret["comment"])
        return ret
    ret["changes"]["old"] = name
    ret["changes"]["new"] = None
    ret["comment"] = "ElastiCache replication group {} deleted.".format(name)
    log.info(ret["comment"])
    return ret
import logging
log = logging.getLogger(__name__)
def additions_installed(name, reboot=False, upgrade_os=False):
    """
    Ensure that the VirtualBox Guest Additions are installed. Uses the CD,
    connected by VirtualBox.

    name
        The name has no functional value and is only used as a tracking
        reference.

    reboot : False
        Restart OS to complete installation.

    upgrade_os : False
        Upgrade OS (to ensure the latests version of kernel and developer tools
        installed).
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    installed_version = __salt__["vbox_guest.additions_version"]()
    if installed_version:
        # Additions are already present; report success with no changes.
        ret["result"] = True
        ret["comment"] = "System already in the correct state"
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "The state of VirtualBox Guest Additions will be changed."
        ret["changes"] = {"old": installed_version, "new": True}
        return ret

    installed_now = __salt__["vbox_guest.additions_install"](
        reboot=reboot, upgrade_os=upgrade_os
    )
    ret["result"] = bool(installed_now)
    ret["comment"] = "The state of VirtualBox Guest Additions was changed!"
    ret["changes"] = {"old": installed_version, "new": installed_now}
    return ret
def additions_removed(name, force=False):
    """
    Ensure that the VirtualBox Guest Additions are removed. Uses the CD,
    connected by VirtualBox.

    To connect VirtualBox Guest Additions via VirtualBox graphical interface
    press 'Host+D' ('Host' is usually 'Right Ctrl').

    name
        The name has no functional value and is only used as a tracking
        reference.

    force
        Force VirtualBox Guest Additions removing.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    installed_version = __salt__["vbox_guest.additions_version"]()
    if not installed_version:
        # Nothing installed, so nothing to remove.
        ret["result"] = True
        ret["comment"] = "System already in the correct state"
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "The state of VirtualBox Guest Additions will be changed."
        ret["changes"] = {"old": installed_version, "new": True}
        return ret

    removal_result = __salt__["vbox_guest.additions_remove"](force=force)
    ret["result"] = bool(removal_result)
    ret["comment"] = "The state of VirtualBox Guest Additions was changed!"
    ret["changes"] = {"old": installed_version, "new": removal_result}
    return ret
def grant_access_to_shared_folders_to(name, users=None):
    """
    Grant access to auto-mounted shared folders to the users.

    User is specified by its name. To grant access for several users use
    argument `users`.

    name
        Name of the user to grant access to auto-mounted shared folders to.

    users
        List of names of users to grant access to auto-mounted shared folders to.
        If specified, `name` will not be taken into account.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}
    current_state = __salt__["vbox_guest.list_shared_folders_users"]()
    if users is None:
        # Fall back to the single user given as the state name.
        users = [name]
    # NOTE(review): this is an order-sensitive list comparison; presumably the
    # module returns users in a stable order -- confirm if spurious changes
    # are ever reported.
    if current_state == users:
        ret["result"] = True
        ret["comment"] = "System already in the correct state"
        return ret
    if __opts__["test"]:
        ret["comment"] = (
            "List of users who have access to auto-mounted "
            "shared folders will be changed"
        )
        ret["changes"] = {
            "old": current_state,
            "new": users,
        }
        ret["result"] = None
        return ret
    new_state = __salt__["vbox_guest.grant_access_to_shared_folders_to"](
        name=name, users=users
    )
    ret[
        "comment"
    ] = "List of users who have access to auto-mounted shared folders was changed"
    ret["changes"] = {
        "old": current_state,
        "new": new_state,
    }
    ret["result"] = True
    return ret
import difflib
import logging
import os.path
import re
import salt.utils.args
import salt.utils.files
import salt.utils.stringutils
from salt.modules.augeas_cfg import METHOD_MAP
log = logging.getLogger(__name__)
def __virtual__():
    """Load this state module only when the augeas execution module is usable."""
    if "augeas.execute" not in __salt__:
        return (False, "augeas module could not be loaded")
    return "augeas"
def _workout_filename(filename):
"""
Recursively workout the file name from an augeas change
"""
if os.path.isfile(filename) or filename == "/":
if filename == "/":
filename = None
return filename
else:
return _workout_filename(os.path.dirname(filename))
def _check_filepath(changes):
    """
    Ensure all changes are fully qualified and affect only one file.
    This ensures that the diff output works and a state change is not
    incorrectly reported.

    :param changes: list of augeas command strings,
        e.g. ``set /files/etc/hosts/1/ipaddr 127.0.0.1``
    :return: nearest existing parent path of the single target file
        (via ``_workout_filename``), or ``None``
    :raises ValueError: on an unsupported command, a path missing the
        ``/files`` prefix, a malformed command line, or changes that
        touch more than one file
    """
    filename = None
    for change_ in changes:
        try:
            # Each change is "<command> <arguments>"; the command must be one
            # of the operations the augeas execution module supports.
            cmd, arg = change_.split(" ", 1)
            if cmd not in METHOD_MAP:
                error = "Command {} is not supported (yet)".format(cmd)
                raise ValueError(error)
            method = METHOD_MAP[cmd]
            parts = salt.utils.args.shlex_split(arg)
            # For these methods the augeas path is the first argument; for the
            # remaining ones (e.g. insert) it is the third argument.
            if method in ["set", "setm", "move", "remove"]:
                filename_ = parts[0]
            else:
                _, _, filename_ = parts
            if not filename_.startswith("/files"):
                error = (
                    "Changes should be prefixed with "
                    "/files if no context is provided,"
                    " change: {}".format(change_)
                )
                raise ValueError(error)
            # Strip the /files prefix and any trailing slash to recover the
            # real filesystem path the augeas path maps to.
            filename_ = re.sub("^/files|/$", "", filename_)
            if filename is not None:
                if filename != filename_:
                    error = (
                        "Changes should be made to one "
                        "file at a time, detected changes "
                        "to {} and {}".format(filename, filename_)
                    )
                    raise ValueError(error)
            filename = filename_
        except (ValueError, IndexError) as err:
            log.error(err)
            # "error" is bound only when one of the messages above was raised;
            # otherwise the failure came from split/unpacking a malformed line.
            if "error" not in locals():
                error = (
                    "Invalid formatted command, see debug log for details: {}".format(
                        change_
                    )
                )
            else:
                error = str(err)
            raise ValueError(error)
    # Reduce to the nearest existing parent so the diff logic can read it.
    filename = _workout_filename(filename)
    return filename
def change(name, context=None, changes=None, lens=None, load_path=None, **kwargs):
    """
    .. versionadded:: 2014.7.0

    This state replaces :py:func:`~salt.states.augeas.setvalue`.

    Issue changes to Augeas, optionally for a specific context, with a
    specific lens.

    name
        State name

    context
        A file path, prefixed by ``/files``. Should resolve to an actual file
        (not an arbitrary augeas path). This is used to avoid duplicating the
        file name for each item in the changes list (for example, ``set bind 0.0.0.0``
        in the example below operates on the file specified by ``context``). If
        ``context`` is not specified, a file path prefixed by ``/files`` should be
        included with the ``set`` command.

        The file path is examined to determine if the
        specified changes are already present.

        .. code-block:: yaml

            redis-conf:
              augeas.change:
                - context: /files/etc/redis/redis.conf
                - changes:
                  - set bind 0.0.0.0
                  - set maxmemory 1G

    changes
        List of changes that are issued to Augeas. Available commands are
        ``set``, ``setm``, ``mv``/``move``, ``ins``/``insert``, and
        ``rm``/``remove``.

    lens
        The lens to use, needs to be suffixed with `.lns`, e.g.: `Nginx.lns`.
        See the `list of stock lenses <http://augeas.net/stock_lenses.html>`_
        shipped with Augeas.

        .. versionadded:: 2016.3.0

    load_path
        A list of directories that modules should be searched in. This is in
        addition to the standard load path and the directories in
        AUGEAS_LENS_LIB.

    Usage examples:

    Set the ``bind`` parameter in ``/etc/redis/redis.conf``:

    .. code-block:: yaml

        redis-conf:
          augeas.change:
            - changes:
              - set /files/etc/redis/redis.conf/bind 0.0.0.0

    .. note::

        Use the ``context`` parameter to specify the file you want to
        manipulate. This way you don't have to include this in the changes
        every time:

        .. code-block:: yaml

            redis-conf:
              augeas.change:
                - context: /files/etc/redis/redis.conf
                - changes:
                  - set bind 0.0.0.0
                  - set databases 4
                  - set maxmemory 1G

    Augeas is aware of a lot of common configuration files and their syntax.
    It knows the difference between for example ini and yaml files, but also
    files with very specific syntax, like the hosts file. This is done with
    *lenses*, which provide mappings between the Augeas tree and the file.

    There are many `preconfigured lenses`_ that come with Augeas by default,
    and they specify the common locations for configuration files. So most
    of the time Augeas will know how to manipulate a file. In the event that
    you need to manipulate a file that Augeas doesn't know about, you can
    specify the lens to use like this:

    .. code-block:: yaml

        redis-conf:
          augeas.change:
            - lens: redis.lns
            - context: /files/etc/redis/redis.conf
            - changes:
              - set bind 0.0.0.0

    .. note::

        Even though Augeas knows that ``/etc/redis/redis.conf`` is a Redis
        configuration file and knows how to parse it, it is recommended to
        specify the lens anyway. This is because by default, Augeas loads all
        known lenses and their associated file paths. All these files are
        parsed when Augeas is loaded, which can take some time. When specifying
        a lens, Augeas is loaded with only that lens, which speeds things up
        quite a bit.

    .. _preconfigured lenses: http://augeas.net/stock_lenses.html

    A more complex example, this adds an entry to the services file for Zabbix,
    and removes an obsolete service:

    .. code-block:: yaml

        zabbix-service:
          augeas.change:
            - lens: services.lns
            - context: /files/etc/services
            - changes:
              - ins service-name after service-name[last()]
              - set service-name[last()] "zabbix-agent"
              - set "service-name[. = 'zabbix-agent']/port" 10050
              - set "service-name[. = 'zabbix-agent']/protocol" tcp
              - set "service-name[. = 'zabbix-agent']/#comment" "Zabbix Agent service"
              - rm "service-name[. = 'im-obsolete']"
            - unless: grep "zabbix-agent" /etc/services

    .. warning::

        Don't forget the ``unless`` here, otherwise it will fail on next runs
        because the service is already defined. Additionally you have to quote
        lines containing ``service-name[. = 'zabbix-agent']`` otherwise
        :mod:`augeas_cfg <salt.modules.augeas_cfg>` execute will fail because
        it will receive more parameters than expected.

    .. note::

        Order is important when defining a service with Augeas, in this case
        it's ``port``, ``protocol`` and ``#comment``. For more info about
        the lens check `services lens documentation`_.

        .. _services lens documentation:
            http://augeas.net/docs/references/lenses/files/services-aug.html#Services.record
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    if not changes or not isinstance(changes, list):
        ret["comment"] = "'changes' must be specified as a list"
        return ret

    if load_path is not None:
        if not isinstance(load_path, list):
            ret["comment"] = "'load_path' must be specified as a list"
            return ret
        else:
            # augeas.execute expects a colon-separated search path string.
            load_path = ":".join(load_path)

    filename = None
    if context is None:
        try:
            # Derive (and validate) the single target file from the changes.
            filename = _check_filepath(changes)
        except ValueError as err:
            ret["comment"] = "Error: {}".format(err)
            return ret
    else:
        filename = re.sub("^/files|/$", "", context)

    if __opts__["test"]:
        ret["result"] = True
        ret["comment"] = "Executing commands"
        if context:
            ret["comment"] += ' in file "{}":\n'.format(context)
        ret["comment"] += "\n".join(changes)
        return ret

    # Snapshot the file before executing so a diff can be reported afterwards.
    old_file = []
    if filename is not None and os.path.isfile(filename):
        with salt.utils.files.fopen(filename, "r") as file_:
            old_file = [salt.utils.stringutils.to_unicode(x) for x in file_.readlines()]

    result = __salt__["augeas.execute"](
        context=context, lens=lens, commands=changes, load_path=load_path
    )
    ret["result"] = result["retval"]

    if ret["result"] is False:
        ret["comment"] = "Error: {}".format(result["error"])
        return ret

    if filename is not None and os.path.isfile(filename):
        with salt.utils.files.fopen(filename, "r") as file_:
            new_file = [salt.utils.stringutils.to_unicode(x) for x in file_.readlines()]
        diff = "".join(difflib.unified_diff(old_file, new_file, n=0))
        if diff:
            ret["comment"] = "Changes have been saved"
            ret["changes"] = {"diff": diff}
        else:
            ret["comment"] = "No changes made"
    else:
        # Target file unknown or unreadable; report the raw commands instead.
        ret["comment"] = "Changes have been saved"
        ret["changes"] = {"updates": changes}

    return ret
import logging
log = logging.getLogger(__name__)
def mapped(
    name,
    device,
    keyfile=None,
    opts=None,
    config="/etc/crypttab",
    persist=True,
    immediate=False,
    match_on="name",
):
    """
    Verify that a device is mapped

    name
        The name under which the device is to be mapped

    device
        The device name, typically the device node, such as ``/dev/sdb1``
        or ``UUID=066e0200-2867-4ebe-b9e6-f30026ca2314``.

    keyfile
        Either ``None`` if the password is to be entered manually on boot, or
        an absolute path to a keyfile. If the password is to be asked
        interactively, the mapping cannot be performed with ``immediate=True``.

    opts
        A list object of options or a comma delimited list

    config
        Set an alternative location for the crypttab, if the map is persistent,
        Default is ``/etc/crypttab``

    persist
        Set if the map should be saved in the crypttab, Default is ``True``

    immediate
        Set if the device mapping should be executed immediately. Requires that
        the keyfile not be ``None``, because the password cannot be asked
        interactively. Note that options are not passed through on the initial
        mapping. Default is ``False``.

    match_on
        A name or list of crypttab properties on which this state should be applied.
        Default is ``name``, meaning that the line is matched only by the name
        parameter. If the desired configuration requires two devices mapped to
        the same name, supply a list of parameters to match on.
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    # If neither option is set, we've been asked to do nothing.
    if not immediate and not persist:
        ret["result"] = False
        ret[
            "comment"
        ] = "Either persist or immediate must be set, otherwise this state does nothing"
        return ret

    if immediate and (keyfile is None or keyfile == "none" or keyfile == "-"):
        # Interactive passwords cannot be supplied by an immediate mapping.
        ret["result"] = False
        ret["changes"][
            "cryptsetup"
        ] = "Device cannot be mapped immediately without a keyfile"
    elif immediate:
        # Get the active crypt mounts. If ours is listed already, no action is necessary.
        active = __salt__["cryptdev.active"]()
        if name not in active.keys():
            # Open the map using cryptsetup. This does not pass any options.
            if opts:
                log.warning("Ignore cryptdev configuration when mapping immediately")
            if __opts__["test"]:
                ret["result"] = None
                # Bug fix: this was stored under the misspelled "commment"
                # key, so the message never showed up in the state output.
                ret["comment"] = "Device would be mapped immediately"
            else:
                cryptsetup_result = __salt__["cryptdev.open"](name, device, keyfile)
                if cryptsetup_result:
                    ret["changes"]["cryptsetup"] = "Device mapped using cryptsetup"
                else:
                    ret["changes"][
                        "cryptsetup"
                    ] = "Device failed to map using cryptsetup"
                    ret["result"] = False

    if persist and not __opts__["test"]:
        crypttab_result = __salt__["cryptdev.set_crypttab"](
            name,
            device,
            password=keyfile,
            options=opts,
            config=config,
            match_on=match_on,
        )
        if crypttab_result:
            if crypttab_result == "new":
                ret["changes"]["crypttab"] = "Entry added in {}".format(config)
            if crypttab_result == "change":
                ret["changes"]["crypttab"] = "Existing entry in {} changed".format(
                    config
                )
        else:
            ret["changes"]["crypttab"] = "Unable to set entry in {}".format(config)
            ret["result"] = False

    return ret
def unmapped(name, config="/etc/crypttab", persist=True, immediate=False):
    """
    Ensure that a device is unmapped

    name
        The name to ensure is not mapped

    config
        Set an alternative location for the crypttab, if the map is persistent,
        Default is ``/etc/crypttab``

    persist
        Set if the map should be removed from the crypttab. Default is ``True``

    immediate
        Set if the device should be unmapped immediately. Default is ``False``.
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    if immediate:
        # Get the active crypt mounts. If ours is not listed already, no action is necessary.
        active = __salt__["cryptdev.active"]()
        if name in active.keys():
            # Close the map using cryptsetup.
            if __opts__["test"]:
                ret["result"] = None
                # Bug fix: this was stored under the misspelled "commment"
                # key, so the message never showed up in the state output.
                ret["comment"] = "Device would be unmapped immediately"
            else:
                cryptsetup_result = __salt__["cryptdev.close"](name)
                if cryptsetup_result:
                    ret["changes"]["cryptsetup"] = "Device unmapped using cryptsetup"
                else:
                    ret["changes"][
                        "cryptsetup"
                    ] = "Device failed to unmap using cryptsetup"
                    ret["result"] = False

    if persist and not __opts__["test"]:
        crypttab_result = __salt__["cryptdev.rm_crypttab"](name, config=config)
        if crypttab_result:
            if crypttab_result == "change":
                ret["changes"]["crypttab"] = "Entry removed from {}".format(config)
        else:
            ret["changes"]["crypttab"] = "Unable to remove entry in {}".format(config)
            ret["result"] = False

    return ret
__virtualname__ = "keystone_service"
def __virtual__():
    """Load only when the keystoneng execution module (shade) is available."""
    if "keystoneng.service_get" not in __salt__:
        return (
            False,
            "The keystoneng execution module failed to load: shade python module is not"
            " available",
        )
    return __virtualname__
def present(name, auth=None, **kwargs):
    """
    Ensure a service exists and is up-to-date

    name
        Name of the group

    type
        Service type

    enabled
        Boolean to control if service is enabled

    description
        An arbitrary description of the service
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    kwargs = __utils__["args.clean_kwargs"](**kwargs)
    __salt__["keystoneng.setup_clouds"](auth)

    service = __salt__["keystoneng.service_get"](name=name)

    if service is None:
        # The service does not exist yet: create it (or just report in test mode).
        if __opts__["test"] is True:
            ret["result"] = None
            ret["changes"] = kwargs
            ret["comment"] = "Service will be created."
            return ret
        kwargs["name"] = name
        created = __salt__["keystoneng.service_create"](**kwargs)
        ret["changes"] = created
        ret["comment"] = "Created service"
        return ret

    changes = __salt__["keystoneng.compare_changes"](service, **kwargs)
    if changes:
        if __opts__["test"] is True:
            ret["result"] = None
            ret["changes"] = changes
            ret["comment"] = "Service will be updated."
            return ret
        # keystoneng.service_update expects the service object itself as "name".
        kwargs["name"] = service
        __salt__["keystoneng.service_update"](**kwargs)
        ret["changes"].update(changes)
        ret["comment"] = "Updated service"
    return ret
def absent(name, auth=None):
    """
    Ensure service does not exist

    name
        Name of the service

    auth
        Auth information passed to keystoneng.setup_clouds to build the
        connection.
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    __salt__["keystoneng.setup_clouds"](auth)

    service = __salt__["keystoneng.service_get"](name=name)
    if service:
        if __opts__["test"] is True:
            ret["result"] = None
            ret["changes"] = {"id": service.id}
            ret["comment"] = "Service will be deleted."
            return ret

        # keystoneng.service_delete accepts the service object directly.
        __salt__["keystoneng.service_delete"](name=service)
        ret["changes"]["id"] = service.id
        ret["comment"] = "Deleted service"

    return ret
import copy
import difflib
import logging
import salt.utils.hashutils
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only load if boto is available.
    """
    if "boto_s3.get_object_metadata" in __salt__:
        return "boto_s3"
    return (False, "boto_s3 module could not be loaded")
# Keys for `extra_args` that we support.
# Currently, this excludes the `ACL` and `Grant*` keys.
# Most keys are stored and returned by AWS as object metadata:
STORED_EXTRA_ARGS = frozenset(
    [
        "CacheControl",
        "ContentDisposition",
        "ContentEncoding",
        "ContentLanguage",
        "ContentType",
        "Expires",
        "Metadata",
        "ServerSideEncryption",
        "SSECustomerAlgorithm",
        "SSECustomerKeyMD5",
        "SSEKMSKeyId",
        "StorageClass",
        "WebsiteRedirectLocation",
    ]
)
# However, some keys are only specified on upload,
# but won't be stored/returned by AWS as metadata:
UPLOAD_ONLY_EXTRA_ARGS = frozenset(
    [
        # AWS doesn't store customer provided keys,
        # can use SSECustomerKeyMD5 to check for correct key
        "SSECustomerKey",
        "RequestPayer",
    ]
)
# Some extra args must also be passed along to retrieve metadata,
# namely SSE-C (customer-provided encryption) and RequestPayer args.
GET_METADATA_EXTRA_ARGS = frozenset(
    ["SSECustomerAlgorithm", "SSECustomerKey", "SSECustomerKeyMD5", "RequestPayer"]
)
def object_present(
    name,
    source=None,
    hash_type=None,
    extra_args=None,
    extra_args_from_pillar="boto_s3_object_extra_args",
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Ensure object exists in S3.

    name
        The name of the state definition.
        This will be used to determine the location of the object in S3,
        by splitting on the first slash and using the first part
        as the bucket name and the remainder as the S3 key.

    source
        The source file to upload to S3,
        currently this only supports files hosted on the minion's local
        file system (starting with /).

    hash_type
        Hash algorithm to use to check that the object contents are correct.
        Defaults to the value of the `hash_type` config option.

    extra_args
        A dictionary of extra arguments to use when uploading the file.
        Note that these are only enforced if new objects are uploaded,
        and not modified on existing objects.
        The supported args are those in the ALLOWED_UPLOAD_ARGS list at
        http://boto3.readthedocs.io/en/latest/reference/customizations/s3.html.
        However, Note that the 'ACL', 'GrantFullControl', 'GrantRead',
        'GrantReadACP', and 'GrantWriteACL' keys are currently not supported.

    extra_args_from_pillar
        Name of pillar dict that contains extra arguments.
        Extra arguments defined for this specific state will be
        merged over those from the pillar.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    """
    ret = {
        "name": name,
        "comment": "",
        "changes": {},
    }

    if extra_args is None:
        extra_args = {}
    # Pillar-provided args form the base; state-level args win on conflict.
    combined_extra_args = copy.deepcopy(
        __salt__["config.option"](extra_args_from_pillar, {})
    )
    __utils__["dictupdate.update"](combined_extra_args, extra_args)
    if combined_extra_args:
        supported_args = STORED_EXTRA_ARGS | UPLOAD_ONLY_EXTRA_ARGS
        combined_extra_args_keys = frozenset(combined_extra_args.keys())
        extra_keys = combined_extra_args_keys - supported_args
        if extra_keys:
            msg = "extra_args keys {} are not supported".format(extra_keys)
            # NOTE(review): returns a bare {"error": ...} dict rather than the
            # standard state return; kept as-is for backwards compatibility.
            return {"error": msg}

    # Get the hash of the local file
    if not hash_type:
        hash_type = __opts__["hash_type"]
    try:
        digest = salt.utils.hashutils.get_hash(source, form=hash_type)
    except OSError as e:
        ret["result"] = False
        ret["comment"] = "Could not read local file {}: {}".format(
            source,
            e,
        )
        return ret
    except ValueError as e:
        # Invalid hash type exception from get_hash
        ret["result"] = False
        ret["comment"] = "Could not hash local file {}: {}".format(
            source,
            e,
        )
        return ret

    HASH_METADATA_KEY = "salt_managed_content_hash"
    combined_extra_args.setdefault("Metadata", {})
    if HASH_METADATA_KEY in combined_extra_args["Metadata"]:
        # Be lenient, silently allow hash metadata key if digest value matches
        if combined_extra_args["Metadata"][HASH_METADATA_KEY] != digest:
            ret["result"] = False
            ret["comment"] = (
                "Salt uses the {} metadata key internally,"
                "do not pass it to the boto_s3.object_present state.".format(
                    HASH_METADATA_KEY
                )
            )
            return ret
    combined_extra_args["Metadata"][HASH_METADATA_KEY] = digest
    # Remove upload-only keys from full set of extra_args
    # to create desired dict for comparisons
    desired_metadata = {
        k: v for k, v in combined_extra_args.items() if k not in UPLOAD_ONLY_EXTRA_ARGS
    }

    # Some args (SSE-C, RequestPayer) must also be passed to get_metadata
    metadata_extra_args = {
        k: v for k, v in combined_extra_args.items() if k in GET_METADATA_EXTRA_ARGS
    }
    r = __salt__["boto_s3.get_object_metadata"](
        name,
        extra_args=metadata_extra_args,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if "error" in r:
        ret["result"] = False
        ret["comment"] = "Failed to check if S3 object exists: {}.".format(
            r["error"],
        )
        return ret

    if r["result"]:
        # Check if content and metadata match
        # A hash of the content is injected into the metadata,
        # so we can combine both checks into one
        # Only check metadata keys specified by the user,
        # ignore other fields that have been set
        s3_metadata = {
            k: r["result"][k]
            for k in STORED_EXTRA_ARGS
            if k in desired_metadata and k in r["result"]
        }
        if s3_metadata == desired_metadata:
            ret["result"] = True
            ret["comment"] = "S3 object {} is present.".format(name)
            return ret
        action = "update"
    else:
        s3_metadata = None
        action = "create"

    def _yaml_safe_dump(attrs):
        """
        Safely dump YAML using a readable flow style
        """
        dumper_name = "IndentedSafeOrderedDumper"
        dumper = __utils__["yaml.get_dumper"](dumper_name)
        return __utils__["yaml.dump"](attrs, default_flow_style=False, Dumper=dumper)

    changes_diff = "".join(
        difflib.unified_diff(
            _yaml_safe_dump(s3_metadata).splitlines(True),
            _yaml_safe_dump(desired_metadata).splitlines(True),
        )
    )

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "S3 object {} set to be {}d.".format(name, action)
        ret["comment"] += "\nChanges:\n{}".format(changes_diff)
        ret["changes"] = {"diff": changes_diff}
        return ret

    r = __salt__["boto_s3.upload_file"](
        source,
        name,
        extra_args=combined_extra_args,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if "error" in r:
        ret["result"] = False
        ret["comment"] = "Failed to {} S3 object: {}.".format(
            action,
            r["error"],
        )
        return ret

    ret["result"] = True
    ret["comment"] = "S3 object {} {}d.".format(name, action)
    ret["comment"] += "\nChanges:\n{}".format(changes_diff)
    ret["changes"] = {"diff": changes_diff}
    return ret
__virtualname__ = "sysrc"
def __virtual__():
    """
    Only load if sysrc executable exists
    """
    if not __salt__["cmd.has_exec"]("sysrc"):
        return (False, "Command not found: sysrc")
    return True
def managed(name, value, **kwargs):
    """
    Ensure a sysrc variable is set to a specific value.

    name
        The variable name to set

    value
        Value to set the variable to

    file
        (optional) The rc file to add the variable to.

    jail
        (option) the name or JID of the jail to set the value in.

    Example:

    .. code-block:: yaml

        syslogd:
          sysrc.managed:
            - name: syslogd_flags
            - value: -ss
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # Check the current state; sysrc.get returns per-rc-file mappings.
    current_state = __salt__["sysrc.get"](name=name, **kwargs)
    if current_state is not None:
        for rcdict in current_state.values():
            if rcdict[name] == value:
                ret["result"] = True
                ret["comment"] = "{} is already set to the desired value.".format(name)
                return ret

    if __opts__["test"] is True:
        # When test=true return none
        ret["result"] = None
        ret["comment"] = 'The value of "{}" will be changed!'.format(name)
        ret["changes"] = {
            "old": current_state,
            "new": name + " = " + value + " will be set.",
        }
        return ret

    new_state = __salt__["sysrc.set"](name=name, value=value, **kwargs)
    ret["result"] = True
    ret["comment"] = 'The value of "{}" was changed!'.format(name)
    ret["changes"] = {"old": current_state, "new": new_state}
    return ret
def absent(name, **kwargs):
"""
Ensure a sysrc variable is absent.
name
The variable name to set
file
(optional) The rc file to add the variable to.
jail
(option) the name or JID of the jail to set the value in.
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
# Check the current state
current_state = __salt__["sysrc.get"](name=name, **kwargs)
if current_state is None:
ret["result"] = True
ret["comment"] = '"{}" is already absent.'.format(name)
return ret
if __opts__["test"] is True:
ret["comment"] = '"{}" will be removed!'.format(name)
ret["changes"] = {
"old": current_state,
"new": '"{}" will be removed.'.format(name),
}
# When test=true return none
ret["result"] = None
return ret
new_state = __salt__["sysrc.remove"](name=name, **kwargs)
ret["comment"] = '"{}" was removed!'.format(name)
ret["changes"] = {"old": current_state, "new": new_state}
ret["result"] = True
return ret | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/sysrc.py | 0.509276 | 0.278453 | sysrc.py | pypi |
def present(name=None, start_addr=None, end_addr=None, data=None, **api_opts):
"""
Ensure range record is present.
infoblox_range.present:
start_addr: '129.97.150.160',
end_addr: '129.97.150.170',
Verbose state example:
.. code-block:: yaml
infoblox_range.present:
data: {
'always_update_dns': False,
'authority': False,
'comment': 'range of IP addresses used for salt.. was used for ghost images deployment',
'ddns_generate_hostname': True,
'deny_all_clients': False,
'deny_bootp': False,
'disable': False,
'email_list': [],
'enable_ddns': False,
'enable_dhcp_thresholds': False,
'enable_email_warnings': False,
'enable_ifmap_publishing': False,
'enable_snmp_warnings': False,
'end_addr': '129.97.150.169',
'exclude': [],
'extattrs': {},
'fingerprint_filter_rules': [],
'high_water_mark': 95,
'high_water_mark_reset': 85,
'ignore_dhcp_option_list_request': False,
'lease_scavenge_time': -1,
'logic_filter_rules': [],
'low_water_mark': 0,
'low_water_mark_reset': 10,
'mac_filter_rules': [],
'member': {'_struct': 'dhcpmember',
'ipv4addr': '129.97.128.9',
'name': 'cn-dhcp-mc.example.ca'},
'ms_options': [],
'nac_filter_rules': [],
'name': 'ghost-range',
'network': '129.97.150.0/24',
'network_view': 'default',
'option_filter_rules': [],
'options': [{'name': 'dhcp-lease-time',
'num': 51,
'use_option': False,
'value': '43200',
'vendor_class': 'DHCP'}],
'recycle_leases': True,
'relay_agent_filter_rules': [],
'server_association_type': 'MEMBER',
'start_addr': '129.97.150.160',
'update_dns_on_lease_renewal': False,
'use_authority': False,
'use_bootfile': False,
'use_bootserver': False,
'use_ddns_domainname': False,
'use_ddns_generate_hostname': True,
'use_deny_bootp': False,
'use_email_list': False,
'use_enable_ddns': False,
'use_enable_dhcp_thresholds': False,
'use_enable_ifmap_publishing': False,
'use_ignore_dhcp_option_list_request': False,
'use_known_clients': False,
'use_lease_scavenge_time': False,
'use_nextserver': False,
'use_options': False,
'use_recycle_leases': False,
'use_unknown_clients': False,
'use_update_dns_on_lease_renewal': False
}
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not data:
data = {}
if "name" not in data:
data.update({"name": name})
if "start_addr" not in data:
data.update({"start_addr": start_addr})
if "end_addr" not in data:
data.update({"end_addr": end_addr})
obj = __salt__["infoblox.get_ipv4_range"](
data["start_addr"], data["end_addr"], **api_opts
)
if obj is None:
obj = __salt__["infoblox.get_ipv4_range"](
start_addr=data["start_addr"], end_addr=None, **api_opts
)
if obj is None:
obj = __salt__["infoblox.get_ipv4_range"](
start_addr=None, end_addr=data["end_addr"], **api_opts
)
if obj:
diff = __salt__["infoblox.diff_objects"](data, obj)
if not diff:
ret["result"] = True
ret["comment"] = "supplied fields in correct state"
return ret
if diff:
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "would attempt to update record"
return ret
new_obj = __salt__["infoblox.update_object"](
obj["_ref"], data=data, **api_opts
)
ret["result"] = True
ret["comment"] = "record fields updated"
ret["changes"] = {"diff": diff}
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "would attempt to create record {}".format(name)
return ret
new_obj_ref = __salt__["infoblox.create_ipv4_range"](data, **api_opts)
new_obj = __salt__["infoblox.get_ipv4_range"](
data["start_addr"], data["end_addr"], **api_opts
)
ret["result"] = True
ret["comment"] = "record created"
ret["changes"] = {"old": "None", "new": {"_ref": new_obj_ref, "data": new_obj}}
return ret
def absent(name=None, start_addr=None, end_addr=None, data=None, **api_opts):
"""
Ensure the range is removed
Supplying the end of the range is optional.
State example:
.. code-block:: yaml
infoblox_range.absent:
- name: 'vlan10'
infoblox_range.absent:
- name:
- start_addr: 127.0.1.20
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not data:
data = {}
if "name" not in data:
data.update({"name": name})
if "start_addr" not in data:
data.update({"start_addr": start_addr})
if "end_addr" not in data:
data.update({"end_addr": end_addr})
obj = __salt__["infoblox.get_ipv4_range"](
data["start_addr"], data["end_addr"], **api_opts
)
if obj is None:
obj = __salt__["infoblox.get_ipv4_range"](
start_addr=data["start_addr"], end_addr=None, **api_opts
)
if obj is None:
obj = __salt__["infoblox.get_ipv4_range"](
start_addr=None, end_addr=data["end_addr"], **api_opts
)
if not obj:
ret["result"] = True
ret["comment"] = "already deleted"
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "would attempt to delete range"
return ret
if __salt__["infoblox.delete_object"](objref=obj["_ref"]):
ret["result"] = True
ret["changes"] = {
"old": "Found {} - {}".format(start_addr, end_addr),
"new": "Removed",
}
return ret | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/infoblox_range.py | 0.529263 | 0.157622 | infoblox_range.py | pypi |
import logging
log = logging.getLogger(__name__)
def present(name, save=False, **kwargs):
"""
Ensure beacon is configured with the included beacon data.
name
The name of the beacon to ensure is configured.
save
True/False, if True the beacons.conf file be updated too. Default is False.
Example:
.. code-block:: yaml
ps_beacon:
beacon.present:
- name: ps
- save: True
- enable: False
- services:
salt-master: running
apache2: stopped
"""
ret = {"name": name, "result": True, "changes": {}, "comment": []}
current_beacons = __salt__["beacons.list"](return_yaml=False, **kwargs)
beacon_data = [{k: v} for k, v in kwargs.items()]
if name in current_beacons:
if beacon_data == current_beacons[name]:
ret["comment"].append("Job {} in correct state".format(name))
else:
if __opts__.get("test"):
kwargs["test"] = True
result = __salt__["beacons.modify"](name, beacon_data, **kwargs)
ret["comment"].append(result["comment"])
ret["changes"] = result["changes"]
else:
result = __salt__["beacons.modify"](name, beacon_data, **kwargs)
if not result["result"]:
ret["result"] = result["result"]
ret["comment"] = result["comment"]
return ret
else:
if "changes" in result:
ret["comment"].append("Modifying {} in beacons".format(name))
ret["changes"] = result["changes"]
else:
ret["comment"].append(result["comment"])
else:
if __opts__.get("test"):
kwargs["test"] = True
result = __salt__["beacons.add"](name, beacon_data, **kwargs)
ret["comment"].append(result["comment"])
else:
result = __salt__["beacons.add"](name, beacon_data, **kwargs)
if not result["result"]:
ret["result"] = result["result"]
ret["comment"] = result["comment"]
return ret
else:
ret["comment"].append("Adding {} to beacons".format(name))
if save:
if __opts__.get("test"):
ret["comment"].append("Beacon {} would be saved".format(name))
else:
__salt__["beacons.save"]()
ret["comment"].append("Beacon {} saved".format(name))
ret["comment"] = "\n".join(ret["comment"])
return ret
def absent(name, save=False, **kwargs):
"""
Ensure beacon is absent.
name
The name of the beacon that is ensured absent.
save
True/False, if True the beacons.conf file be updated too. Default is False.
Example:
.. code-block:: yaml
remove_beacon:
beacon.absent:
- name: ps
- save: True
"""
ret = {"name": name, "result": True, "changes": {}, "comment": []}
current_beacons = __salt__["beacons.list"](return_yaml=False, **kwargs)
if name in current_beacons:
if __opts__.get("test"):
kwargs["test"] = True
result = __salt__["beacons.delete"](name, **kwargs)
ret["comment"].append(result["comment"])
else:
result = __salt__["beacons.delete"](name, **kwargs)
if not result["result"]:
ret["result"] = result["result"]
ret["comment"] = result["comment"]
return ret
else:
ret["comment"].append("Removed {} from beacons".format(name))
else:
ret["comment"].append("{} not configured in beacons".format(name))
if save:
if __opts__.get("test"):
ret["comment"].append("Beacon {} would be saved".format(name))
else:
__salt__["beacons.save"]()
ret["comment"].append("Beacon {} saved".format(name))
ret["comment"] = "\n".join(ret["comment"])
return ret
def enabled(name, **kwargs):
"""
Enable a beacon.
name
The name of the beacon to enable.
Example:
.. code-block:: yaml
enable_beacon:
beacon.enabled:
- name: ps
"""
ret = {"name": name, "result": True, "changes": {}, "comment": []}
current_beacons = __salt__["beacons.list"](return_yaml=False, **kwargs)
if name in current_beacons:
if __opts__.get("test"):
kwargs["test"] = True
result = __salt__["beacons.enable_beacon"](name, **kwargs)
ret["comment"].append(result["comment"])
else:
result = __salt__["beacons.enable_beacon"](name, **kwargs)
if not result["result"]:
ret["result"] = result["result"]
ret["comment"] = result["comment"]
return ret
else:
ret["comment"].append("Enabled {} from beacons".format(name))
else:
ret["comment"].append("{} not a configured beacon".format(name))
ret["comment"] = "\n".join(ret["comment"])
return ret
def disabled(name, **kwargs):
"""
Disable a beacon.
name
The name of the beacon to disable.
Example:
.. code-block:: yaml
disable_beacon:
beacon.disabled:
- name: psp
"""
ret = {"name": name, "result": True, "changes": {}, "comment": []}
current_beacons = __salt__["beacons.list"](return_yaml=False, **kwargs)
if name in current_beacons:
if __opts__.get("test"):
kwargs["test"] = True
result = __salt__["beacons.disable_beacon"](name, **kwargs)
ret["comment"].append(result["comment"])
else:
result = __salt__["beacons.disable_beacon"](name, **kwargs)
if not result["result"]:
ret["result"] = result["result"]
ret["comment"] = result["comment"]
return ret
else:
ret["comment"].append("Disabled beacon {}.".format(name))
else:
ret["comment"].append("Job {} is not configured.".format(name))
ret["comment"] = "\n".join(ret["comment"])
return ret | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/beacon.py | 0.631822 | 0.169612 | beacon.py | pypi |
import logging
import re
import time
__monitor__ = [
"query",
]
log = logging.getLogger(__name__)
def query(
name,
match=None,
match_type="string",
status=None,
status_type="string",
wait_for=None,
**kwargs
):
"""
Perform an HTTP query and statefully return the result
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
name
The name of the query.
match
Specifies a pattern to look for in the return text. By default, this will
perform a string comparison of looking for the value of match in the return
text.
match_type
Specifies the type of pattern matching to use on match. Default is ``string``, but
can also be set to ``pcre`` to use regular expression matching if a more
complex pattern matching is required.
.. note::
Despite the name of ``match_type`` for this argument, this setting
actually uses Python's ``re.search()`` function rather than Python's
``re.match()`` function.
status
The status code for a URL for which to be checked. Can be used instead of
or in addition to the ``match`` setting. This can be passed as an individual status code
or a list of status codes.
status_type
Specifies the type of pattern matching to use for status. Default is ``string``, but
can also be set to ``pcre`` to use regular expression matching if a more
complex pattern matching is required. Additionally, if a list of strings representing
statuses is given, the type ``list`` can be used.
.. versionadded:: 3000
.. note::
Despite the name of ``match_type`` for this argument, this setting
actually uses Python's ``re.search()`` function rather than Python's
``re.match()`` function.
If both ``match`` and ``status`` options are set, both settings will be checked.
However, note that if only one option is ``True`` and the other is ``False``,
then ``False`` will be returned. If this case is reached, the comments in the
return data will contain troubleshooting information.
For more information about the ``http.query`` state, refer to the
:ref:`HTTP Tutorial <tutorial-http>`.
.. code-block:: yaml
query_example:
http.query:
- name: 'http://example.com/'
- status: 200
query_example2:
http.query:
- name: 'http://example.com/'
- status:
- 200
- 201
- status_type: list
"""
# Monitoring state, but changes may be made over HTTP
ret = {
"name": name,
"result": None,
"comment": "",
"changes": {},
"data": {},
} # Data field for monitoring state
if match is None and status is None:
ret["result"] = False
ret[
"comment"
] += " Either match text (match) or a status code (status) is required."
return ret
if "decode" not in kwargs:
kwargs["decode"] = False
kwargs["text"] = True
kwargs["status"] = True
if __opts__["test"]:
kwargs["test"] = True
if wait_for:
data = __salt__["http.wait_for_successful_query"](
name, wait_for=wait_for, **kwargs
)
else:
data = __salt__["http.query"](name, **kwargs)
if match is not None:
if match_type == "string":
if str(match) in data.get("text", ""):
ret["result"] = True
ret["comment"] += ' Match text "{}" was found.'.format(match)
else:
ret["result"] = False
ret["comment"] += ' Match text "{}" was not found.'.format(match)
elif match_type == "pcre":
if re.search(str(match), str(data.get("text", ""))):
ret["result"] = True
ret["comment"] += ' Match pattern "{}" was found.'.format(match)
else:
ret["result"] = False
ret["comment"] += ' Match pattern "{}" was not found.'.format(match)
if status is not None:
# Deals with case of status_type as a list of strings representing statuses
if status_type == "list":
for stat in status:
if str(data.get("status", "")) == str(stat):
ret["comment"] += " Status {} was found.".format(stat)
if ret["result"] is None:
ret["result"] = True
if ret["result"] is not True:
ret["comment"] += " Statuses {} were not found.".format(status)
ret["result"] = False
# Deals with the case of status_type representing a regex
elif status_type == "pcre":
if re.search(str(status), str(data.get("status", ""))):
ret["comment"] += ' Status pattern "{}" was found.'.format(status)
if ret["result"] is None:
ret["result"] = True
else:
ret["comment"] += ' Status pattern "{}" was not found.'.format(status)
ret["result"] = False
# Deals with the case of status_type as a single string representing a status
elif status_type == "string":
if str(data.get("status", "")) == str(status):
ret["comment"] += " Status {} was found.".format(status)
if ret["result"] is None:
ret["result"] = True
else:
ret["comment"] += " Status {} was not found.".format(status)
ret["result"] = False
# cleanup spaces in comment
ret["comment"] = ret["comment"].strip()
if __opts__["test"] is True:
ret["result"] = None
ret["comment"] += " (TEST MODE"
if "test_url" in kwargs:
ret["comment"] += ", TEST URL WAS: {}".format(kwargs["test_url"])
ret["comment"] += ")"
ret["data"] = data
return ret
def wait_for_successful_query(name, wait_for=300, **kwargs):
"""
Like query but, repeat and wait until match/match_type or status is fulfilled. State returns result from last
query state in case of success or if no successful query was made within wait_for timeout.
name
The name of the query.
wait_for
Total time to wait for requests that succeed.
request_interval
Optional interval to delay requests by N seconds to reduce the number of requests sent.
.. note::
All other arguments are passed to the http.query state.
"""
starttime = time.time()
while True:
caught_exception = None
ret = None
try:
ret = query(name, **kwargs)
if ret["result"]:
return ret
except Exception as exc: # pylint: disable=broad-except
caught_exception = exc
if time.time() > starttime + wait_for:
if not ret and caught_exception:
# workaround pylint bug https://www.logilab.org/ticket/3207
raise caught_exception # pylint: disable=E0702
return ret
elif "request_interval" in kwargs:
# Space requests out by delaying for an interval
log.debug("delaying query for %s seconds.", kwargs["request_interval"])
time.sleep(kwargs["request_interval"]) | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/http.py | 0.667581 | 0.427755 | http.py | pypi |
def __virtual__():
"""
Load only on minions that have the win_snmp module.
"""
if "win_snmp.get_agent_settings" in __salt__:
return True
return (False, "win_snmp module could not be loaded")
def agent_settings(name, contact, location, services=None):
"""
Manage the SNMP sysContact, sysLocation, and sysServices settings.
:param str contact: The SNMP contact.
:param str location: The SNMP location.
:param str services: A list of selected services.
Example of usage:
.. code-block:: yaml
snmp-agent-settings:
win_snmp.agent_settings:
- contact: Test Contact
- location: Test Location
- services:
- Physical
- Internet
"""
ret = {"name": name, "changes": {}, "comment": "", "result": None}
ret_settings = {"changes": dict(), "failures": dict()}
if not services:
services = ["None"]
# Filter services for unique items, and sort them for comparison purposes.
services = sorted(set(services))
settings = {"contact": contact, "location": location, "services": services}
current_settings = __salt__["win_snmp.get_agent_settings"]()
for setting in settings:
if str(settings[setting]) != str(current_settings[setting]):
ret_settings["changes"][setting] = {
"old": current_settings[setting],
"new": settings[setting],
}
if not ret_settings["changes"]:
ret["comment"] = "Agent settings already contain the provided values."
ret["result"] = True
return ret
elif __opts__["test"]:
ret["comment"] = "Agent settings will be changed."
ret["changes"] = ret_settings
return ret
__salt__["win_snmp.set_agent_settings"](**settings)
new_settings = __salt__["win_snmp.get_agent_settings"]()
for setting in settings:
if settings[setting] != new_settings[setting]:
ret_settings["failures"][setting] = {
"old": current_settings[setting],
"new": new_settings[setting],
}
ret_settings["changes"].pop(setting, None)
if ret_settings["failures"]:
ret["comment"] = "Some agent settings failed to change."
ret["changes"] = ret_settings
ret["result"] = False
else:
ret["comment"] = "Set agent settings to contain the provided values."
ret["changes"] = ret_settings["changes"]
ret["result"] = True
return ret
def auth_traps_enabled(name, status=True):
"""
Manage the sending of authentication traps.
:param bool status: The enabled status.
Example of usage:
.. code-block:: yaml
snmp-auth-traps:
win_snmp.auth_traps_enabled:
- status: True
"""
ret = {"name": name, "changes": {}, "comment": "", "result": None}
vname = "EnableAuthenticationTraps"
current_status = __salt__["win_snmp.get_auth_traps_enabled"]()
if status == current_status:
ret["comment"] = "{} already contains the provided value.".format(vname)
ret["result"] = True
elif __opts__["test"]:
ret["comment"] = "{} will be changed.".format(vname)
ret["changes"] = {"old": current_status, "new": status}
else:
ret["comment"] = "Set {} to contain the provided value.".format(vname)
ret["changes"] = {"old": current_status, "new": status}
ret["result"] = __salt__["win_snmp.set_auth_traps_enabled"](status=status)
return ret
def community_names(name, communities=None):
"""
Manage the SNMP accepted community names and their permissions.
:param str communities: A dictionary of SNMP communities and permissions.
Example of usage:
.. code-block:: yaml
snmp-community-names:
win_snmp.community_names:
- communities:
TestCommunity: Read Only
OtherCommunity: Read Write
"""
ret = {"name": name, "changes": dict(), "comment": "", "result": None}
ret_communities = {"changes": dict(), "failures": dict()}
if not communities:
communities = dict()
current_communities = __salt__["win_snmp.get_community_names"]()
# Note any existing communities that should be removed.
for current_vname in current_communities:
if current_vname not in communities:
ret_communities["changes"][current_vname] = {
"old": current_communities[current_vname],
"new": None,
}
# Note any new communities or existing communities that should be changed.
for vname in communities:
current_vdata = None
if vname in current_communities:
current_vdata = current_communities[vname]
if communities[vname] != current_vdata:
ret_communities["changes"][vname] = {
"old": current_vdata,
"new": communities[vname],
}
if not ret_communities["changes"]:
ret["comment"] = "Communities already contain the provided values."
ret["result"] = True
return ret
elif __opts__["test"]:
ret["comment"] = "Communities will be changed."
ret["changes"] = ret_communities
return ret
__salt__["win_snmp.set_community_names"](communities=communities)
new_communities = __salt__["win_snmp.get_community_names"]()
# Verify that any communities that needed to be removed were removed.
for new_vname in new_communities:
if new_vname not in communities:
ret_communities["failures"][new_vname] = {
"old": current_communities[new_vname],
"new": new_communities[new_vname],
}
ret_communities["changes"].pop(new_vname, None)
# Verify that any new communities or existing communities that
# needed to be changed were changed.
for vname in communities:
new_vdata = None
if vname in new_communities:
new_vdata = new_communities[vname]
if communities[vname] != new_vdata:
ret_communities["failures"][vname] = {
"old": current_communities[vname],
"new": new_vdata,
}
ret_communities["changes"].pop(vname, None)
if ret_communities["failures"]:
ret["comment"] = "Some communities failed to change."
ret["changes"] = ret_communities
ret["result"] = False
else:
ret["comment"] = "Set communities to contain the provided values."
ret["changes"] = ret_communities["changes"]
ret["result"] = True
return ret | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/win_snmp.py | 0.527803 | 0.189727 | win_snmp.py | pypi |
import logging
import os
import salt.utils.json
log = logging.getLogger(__name__)
def __virtual__():
"""
Only load if boto is available.
"""
if "boto_elasticsearch_domain.exists" in __salt__:
return "boto_elasticsearch_domain"
return (False, "boto_elasticsearch_domain module could not be loaded")
def _compare_json(current, desired):
return __utils__["boto3.json_objs_equal"](current, desired)
def present(
name,
DomainName,
ElasticsearchClusterConfig=None,
EBSOptions=None,
AccessPolicies=None,
SnapshotOptions=None,
AdvancedOptions=None,
Tags=None,
region=None,
key=None,
keyid=None,
profile=None,
ElasticsearchVersion="1.5",
):
"""
Ensure domain exists.
name
The name of the state definition
DomainName
Name of the domain.
ElasticsearchClusterConfig
Configuration options for an Elasticsearch domain. Specifies the
instance type and number of instances in the domain cluster.
InstanceType (string) --
The instance type for an Elasticsearch cluster.
InstanceCount (integer) --
The number of instances in the specified domain cluster.
DedicatedMasterEnabled (boolean) --
A boolean value to indicate whether a dedicated master node is enabled.
See About Dedicated Master Nodes for more information.
ZoneAwarenessEnabled (boolean) --
A boolean value to indicate whether zone awareness is enabled. See About
Zone Awareness for more information.
DedicatedMasterType (string) --
The instance type for a dedicated master node.
DedicatedMasterCount (integer) --
Total number of dedicated master nodes, active and on standby, for the
cluster.
EBSOptions
Options to enable, disable and specify the type and size of EBS storage
volumes.
EBSEnabled (boolean) --
Specifies whether EBS-based storage is enabled.
VolumeType (string) --
Specifies the volume type for EBS-based storage.
VolumeSize (integer) --
Integer to specify the size of an EBS volume.
Iops (integer) --
Specifies the IOPD for a Provisioned IOPS EBS volume (SSD).
AccessPolicies
IAM access policy
SnapshotOptions
Option to set time, in UTC format, of the daily automated snapshot.
Default value is 0 hours.
AutomatedSnapshotStartHour (integer) --
Specifies the time, in UTC format, when the service takes a daily
automated snapshot of the specified Elasticsearch domain. Default value
is 0 hours.
AdvancedOptions
Option to allow references to indices in an HTTP request body. Must be
false when configuring access to individual sub-resources. By default,
the value is true .
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
ElasticsearchVersion
String of format X.Y to specify version for the Elasticsearch domain eg.
"1.5" or "2.3".
"""
ret = {"name": DomainName, "result": True, "comment": "", "changes": {}}
if ElasticsearchClusterConfig is None:
ElasticsearchClusterConfig = {
"DedicatedMasterEnabled": False,
"InstanceCount": 1,
"InstanceType": "m3.medium.elasticsearch",
"ZoneAwarenessEnabled": False,
}
if EBSOptions is None:
EBSOptions = {
"EBSEnabled": False,
}
if SnapshotOptions is None:
SnapshotOptions = {"AutomatedSnapshotStartHour": 0}
if AdvancedOptions is None:
AdvancedOptions = {"rest.action.multi.allow_explicit_index": "true"}
if Tags is None:
Tags = {}
if AccessPolicies is not None and isinstance(AccessPolicies, str):
try:
AccessPolicies = salt.utils.json.loads(AccessPolicies)
except ValueError as e:
ret["result"] = False
ret["comment"] = "Failed to create domain: {}.".format(e.message)
return ret
r = __salt__["boto_elasticsearch_domain.exists"](
DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile
)
if "error" in r:
ret["result"] = False
ret["comment"] = "Failed to create domain: {}.".format(r["error"]["message"])
return ret
if not r.get("exists"):
if __opts__["test"]:
ret["comment"] = "Domain {} is set to be created.".format(DomainName)
ret["result"] = None
return ret
r = __salt__["boto_elasticsearch_domain.create"](
DomainName=DomainName,
ElasticsearchClusterConfig=ElasticsearchClusterConfig,
EBSOptions=EBSOptions,
AccessPolicies=AccessPolicies,
SnapshotOptions=SnapshotOptions,
AdvancedOptions=AdvancedOptions,
ElasticsearchVersion=str(ElasticsearchVersion),
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if not r.get("created"):
ret["result"] = False
ret["comment"] = "Failed to create domain: {}.".format(
r["error"]["message"]
)
return ret
_describe = __salt__["boto_elasticsearch_domain.describe"](
DomainName, region=region, key=key, keyid=keyid, profile=profile
)
ret["changes"]["old"] = {"domain": None}
ret["changes"]["new"] = _describe
ret["comment"] = "Domain {} created.".format(DomainName)
return ret
ret["comment"] = os.linesep.join(
[ret["comment"], "Domain {} is present.".format(DomainName)]
)
ret["changes"] = {}
# domain exists, ensure config matches
_status = __salt__["boto_elasticsearch_domain.status"](
DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile
)["domain"]
if _status.get("ElasticsearchVersion") != str(ElasticsearchVersion):
ret["result"] = False
ret[
"comment"
] = "Failed to update domain: version cannot be modified from {} to {}.".format(
_status.get("ElasticsearchVersion"),
str(ElasticsearchVersion),
)
return ret
_describe = __salt__["boto_elasticsearch_domain.describe"](
DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile
)["domain"]
_describe["AccessPolicies"] = salt.utils.json.loads(_describe["AccessPolicies"])
# When EBSEnabled is false, describe returns extra values that can't be set
if not _describe.get("EBSOptions", {}).get("EBSEnabled"):
opts = _describe.get("EBSOptions", {})
opts.pop("VolumeSize", None)
opts.pop("VolumeType", None)
comm_args = {}
need_update = False
es_opts = {
"ElasticsearchClusterConfig": ElasticsearchClusterConfig,
"EBSOptions": EBSOptions,
"AccessPolicies": AccessPolicies,
"SnapshotOptions": SnapshotOptions,
"AdvancedOptions": AdvancedOptions,
}
for k, v in es_opts.items():
if not _compare_json(v, _describe[k]):
need_update = True
comm_args[k] = v
ret["changes"].setdefault("new", {})[k] = v
ret["changes"].setdefault("old", {})[k] = _describe[k]
if need_update:
if __opts__["test"]:
msg = "Domain {} set to be modified.".format(DomainName)
ret["comment"] = msg
ret["result"] = None
return ret
ret["comment"] = os.linesep.join([ret["comment"], "Domain to be modified"])
r = __salt__["boto_elasticsearch_domain.update"](
DomainName=DomainName,
region=region,
key=key,
keyid=keyid,
profile=profile,
**comm_args
)
if not r.get("updated"):
ret["result"] = False
ret["comment"] = "Failed to update domain: {}.".format(r["error"])
ret["changes"] = {}
return ret
return ret
def absent(name, DomainName, region=None, key=None, keyid=None, profile=None):
"""
Ensure domain with passed properties is absent.
name
The name of the state definition.
DomainName
Name of the domain.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
"""
ret = {"name": DomainName, "result": True, "comment": "", "changes": {}}
r = __salt__["boto_elasticsearch_domain.exists"](
DomainName, region=region, key=key, keyid=keyid, profile=profile
)
if "error" in r:
ret["result"] = False
ret["comment"] = "Failed to delete domain: {}.".format(r["error"]["message"])
return ret
if r and not r["exists"]:
ret["comment"] = "Domain {} does not exist.".format(DomainName)
return ret
if __opts__["test"]:
ret["comment"] = "Domain {} is set to be removed.".format(DomainName)
ret["result"] = None
return ret
r = __salt__["boto_elasticsearch_domain.delete"](
DomainName, region=region, key=key, keyid=keyid, profile=profile
)
if not r["deleted"]:
ret["result"] = False
ret["comment"] = "Failed to delete domain: {}.".format(r["error"]["message"])
return ret
ret["changes"]["old"] = {"domain": DomainName}
ret["changes"]["new"] = {"domain": None}
ret["comment"] = "Domain {} deleted.".format(DomainName)
return ret | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/boto_elasticsearch_domain.py | 0.525612 | 0.239149 | boto_elasticsearch_domain.py | pypi |
import logging
import salt.utils.args
import salt.utils.dockermod
from salt.exceptions import CommandExecutionError
# Enable proper logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "docker_image"
__virtual_aliases__ = ("moby_image",)
def __virtual__():
"""
Only load if the docker execution module is available
"""
if "docker.version" in __salt__:
return __virtualname__
return (False, __salt__.missing_fun_string("docker.version"))
def present(
    name,
    tag=None,
    build=None,
    load=None,
    force=False,
    insecure_registry=False,
    client_timeout=salt.utils.dockermod.CLIENT_TIMEOUT,
    dockerfile=None,
    sls=None,
    base="opensuse/python",
    saltenv="base",
    pillarenv=None,
    pillar=None,
    **kwargs
):
    """
    .. versionchanged:: 2018.3.0
        The ``tag`` argument has been added. It is now required unless pulling
        from a registry.

    Ensure that an image is present. The image can either be pulled from a
    Docker registry, built from a Dockerfile, loaded from a saved image, or
    built by running SLS files against a base image.

    If none of the ``build``, ``load``, or ``sls`` arguments are used, then Salt
    will pull from the :ref:`configured registries <docker-authentication>`. If
    the specified image already exists, it will not be pulled unless ``force``
    is set to ``True``. Here is an example of a state that will pull an image
    from the Docker Hub:

    .. code-block:: yaml

        myuser/myimage:
          docker_image.present:
            - tag: mytag

    tag
        Tag name for the image. Required when using ``build``, ``load``, or
        ``sls`` to create the image, but optional if pulling from a repository.

        .. versionadded:: 2018.3.0

    build
        Path to directory on the Minion containing a Dockerfile

        .. code-block:: yaml

            myuser/myimage:
              docker_image.present:
                - build: /home/myuser/docker/myimage
                - tag: mytag

            myuser/myimage:
              docker_image.present:
                - build: /home/myuser/docker/myimage
                - tag: mytag
                - dockerfile: Dockerfile.alternative

        The image will be built using :py:func:`docker.build
        <salt.modules.dockermod.build>` and the specified image name and tag
        will be applied to it.

        .. versionadded:: 2016.11.0

        .. versionchanged:: 2018.3.0
            The ``tag`` must be manually specified using the ``tag`` argument.

    load
        Loads a tar archive created with :py:func:`docker.save
        <salt.modules.dockermod.save>` (or the ``docker save`` Docker CLI
        command), and assigns it the specified repo and tag.

        .. code-block:: yaml

            myuser/myimage:
              docker_image.present:
                - load: salt://path/to/image.tar
                - tag: mytag

        .. versionchanged:: 2018.3.0
            The ``tag`` must be manually specified using the ``tag`` argument.

    force : False
        Set this parameter to ``True`` to force Salt to pull/build/load the
        image even if it is already present.

    client_timeout
        Timeout in seconds for the Docker client. This is not a timeout for
        the state, but for receiving a response from the API.

    dockerfile
        Allows for an alternative Dockerfile to be specified. Path to alternative
        Dockerfile is relative to the build path for the Docker container.

        .. versionadded:: 2016.11.0

    sls
        Allow for building of image with :py:func:`docker.sls_build
        <salt.modules.dockermod.sls_build>` by specifying the SLS files with
        which to build. This can be a list or comma-separated string.

        .. code-block:: yaml

            myuser/myimage:
              docker_image.present:
                - tag: latest
                - sls:
                    - webapp1
                    - webapp2
                - base: centos
                - saltenv: base

        .. versionadded:: 2017.7.0

        .. versionchanged:: 2018.3.0
            The ``tag`` must be manually specified using the ``tag`` argument.

    base
        Base image with which to start :py:func:`docker.sls_build
        <salt.modules.dockermod.sls_build>`

        .. versionadded:: 2017.7.0

    saltenv
        Specify the environment from which to retrieve the SLS indicated by the
        `mods` parameter.

        .. versionadded:: 2017.7.0

        .. versionchanged:: 2018.3.0
            Now uses the effective saltenv if not explicitly passed. In earlier
            versions, ``base`` was assumed as a default.

    pillarenv
        Specify a Pillar environment to be used when applying states. This
        can also be set in the minion config file using the
        :conf_minion:`pillarenv` option. When neither the
        :conf_minion:`pillarenv` minion config option nor this CLI argument is
        used, all Pillar environments will be merged together.

        .. versionadded:: 2018.3.0

    pillar
        Custom Pillar values, passed as a dictionary of key-value pairs

        .. note::
            Values passed this way will override Pillar values set via
            ``pillar_roots`` or an external Pillar source.

        .. versionadded:: 2018.3.0
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}
    # The image name must be a string for the ':'.join below.
    if not isinstance(name, str):
        name = str(name)
    # At most one of the args that result in an image being built can be used
    num_build_args = len([x for x in (build, load, sls) if x is not None])
    if num_build_args > 1:
        ret["comment"] = "Only one of 'build', 'load', or 'sls' is permitted."
        return ret
    elif num_build_args == 1:
        # If building, we need the tag to be specified
        if not tag:
            ret["comment"] = (
                "The 'tag' argument is required if any one of 'build', "
                "'load', or 'sls' is used."
            )
            return ret
        if not isinstance(tag, str):
            tag = str(tag)
        full_image = ":".join((name, tag))
    else:
        # Pulling from a registry: the tag, if given, becomes part of the
        # image name passed to docker.pull.
        if tag:
            name = "{}:{}".format(name, tag)
        full_image = name
    # Determine whether the image already exists locally. A 404 from the
    # Docker API just means it is not present; any other error aborts.
    try:
        image_info = __salt__["docker.inspect_image"](full_image)
    except CommandExecutionError as exc:
        msg = exc.__str__()
        if "404" in msg:
            # Image not present
            image_info = None
        else:
            ret["comment"] = msg
            return ret
    if image_info is not None:
        # Specified image is present
        if not force:
            ret["result"] = True
            ret["comment"] = "Image {} already present".format(full_image)
            return ret
    # Describe the action for the comments below.
    if build or sls:
        action = "built"
    elif load:
        action = "loaded"
    else:
        action = "pulled"
    if __opts__["test"]:
        ret["result"] = None
        if (image_info is not None and force) or image_info is None:
            ret["comment"] = "Image {} will be {}".format(full_image, action)
        return ret
    if build:
        # Get the functions default value and args
        argspec = salt.utils.args.get_function_argspec(__salt__["docker.build"])
        # Map any if existing args from kwargs into the build_args dictionary
        build_args = dict(list(zip(argspec.args, argspec.defaults)))
        for k in build_args:
            if k in kwargs.get("kwargs", {}):
                build_args[k] = kwargs.get("kwargs", {}).get(k)
        try:
            # map values passed from the state to the build args
            build_args["path"] = build
            build_args["repository"] = name
            build_args["tag"] = tag
            build_args["dockerfile"] = dockerfile
            image_update = __salt__["docker.build"](**build_args)
        except Exception as exc:  # pylint: disable=broad-except
            ret["comment"] = "Encountered error building {} as {}: {}".format(
                build, full_image, exc
            )
            return ret
        # NOTE(review): this assumes docker.build returns a 12-character
        # short ID in image_update["Id"] — confirm against the execution
        # module before relying on this comparison.
        if image_info is None or image_update["Id"] != image_info["Id"][:12]:
            ret["changes"] = image_update
    elif sls:
        # Forward only the sls_build kwargs that were actually supplied.
        _locals = locals()
        sls_build_kwargs = {
            k: _locals[k]
            for k in ("saltenv", "pillarenv", "pillar")
            if _locals[k] is not None
        }
        try:
            image_update = __salt__["docker.sls_build"](
                repository=name, tag=tag, base=base, mods=sls, **sls_build_kwargs
            )
        except Exception as exc:  # pylint: disable=broad-except
            ret[
                "comment"
            ] = "Encountered error using SLS {} for building {}: {}".format(
                sls, full_image, exc
            )
            return ret
        if image_info is None or image_update["Id"] != image_info["Id"][:12]:
            ret["changes"] = image_update
    elif load:
        try:
            image_update = __salt__["docker.load"](path=load, repository=name, tag=tag)
        except Exception as exc:  # pylint: disable=broad-except
            ret["comment"] = "Encountered error loading {} as {}: {}".format(
                load, full_image, exc
            )
            return ret
        if image_info is None or image_update.get("Layers", []):
            ret["changes"] = image_update
    else:
        try:
            image_update = __salt__["docker.pull"](
                name, insecure_registry=insecure_registry, client_timeout=client_timeout
            )
        except Exception as exc:  # pylint: disable=broad-except
            ret["comment"] = "Encountered error pulling {}: {}".format(full_image, exc)
            return ret
        # NOTE(review): relies on docker.pull reporting pre-existing layers
        # under Layers/Already_Pulled and new ones under Layers/Pulled —
        # verify against the execution module's return format.
        if (
            image_info is not None
            and image_info["Id"][:12]
            == image_update.get("Layers", {}).get("Already_Pulled", [None])[0]
        ):
            # Image was pulled again (because of force) but was also
            # already there. No new image was available on the registry.
            pass
        elif image_info is None or image_update.get("Layers", {}).get("Pulled"):
            # Only add to the changes dict if layers were pulled
            ret["changes"] = image_update
    # Final sanity check: the image must be inspectable after the action.
    # A 404 here is deliberately not treated as an error (error stays False).
    error = False
    try:
        __salt__["docker.inspect_image"](full_image)
    except CommandExecutionError as exc:
        msg = exc.__str__()
        if "404" not in msg:
            error = "Failed to inspect image '{}' after it was {}: {}".format(
                full_image, action, msg
            )
    if error:
        ret["comment"] = error
    else:
        ret["result"] = True
        if not ret["changes"]:
            ret["comment"] = "Image '{}' was {}, but there were no changes".format(
                name, action
            )
        else:
            ret["comment"] = "Image '{}' was {}".format(full_image, action)
    return ret
def absent(name=None, images=None, force=False):
    """
    Ensure that an image is absent from the Minion. Image names can be
    specified either using ``repo:tag`` notation, or just the repo name (in
    which case a tag of ``latest`` is assumed).

    images
        Run this state on more than one image at a time. Rather than
        executing the state separately on each image, the following form
        performs all deletions in a single run:

        .. code-block:: yaml

            remove_images:
              docker_image.absent:
                - images:
                  - busybox
                  - centos:6
                  - nginx

    force : False
        Salt will fail to remove any images currently in use by a container.
        Set this option to true to remove the image even if it is already
        present.

        .. note::
            This option can also be overridden by Pillar data. If the Minion
            has a pillar variable named ``docker.running.force`` which is
            set to ``True``, it will turn on this option. For more granular
            control, setting a pillar variable named
            ``docker.force.image_name`` will affect only the named image.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    if not name and not images:
        ret["comment"] = "One of 'name' and 'images' must be provided"
        return ret
    requested = images if images is not None else [name]

    # Keep only the images that actually exist locally; docker.resolve_tag
    # returns False for images that are not present.
    resolved = []
    for item in requested:
        tag = __salt__["docker.resolve_tag"](item)
        if tag is not False:
            resolved.append(tag)

    if not resolved:
        ret["result"] = True
        if len(requested) == 1:
            ret["comment"] = "Image {} is not present".format(name)
        else:
            ret["comment"] = "All specified images are not present"
        return ret

    if __opts__["test"]:
        ret["result"] = None
        if len(resolved) == 1:
            ret["comment"] = "Image {} will be removed".format(resolved[0])
        else:
            ret["comment"] = "The following images will be removed: {}".format(
                ", ".join(resolved)
            )
        return ret

    rmi_result = __salt__["docker.rmi"](*resolved, force=force)
    remaining = __salt__["docker.list_tags"]()
    still_present = [tag for tag in resolved if tag in remaining]

    if still_present:
        if [tag for tag in resolved if tag not in remaining]:
            # Partial success: record what was removed but report failures.
            ret["changes"] = rmi_result
            ret["comment"] = "The following image(s) failed to be removed: {}".format(
                ", ".join(still_present)
            )
        else:
            ret["comment"] = "None of the specified images were removed"
            if "Errors" in rmi_result:
                ret["comment"] += ". The following errors were encountered: {}".format(
                    "; ".join(rmi_result["Errors"])
                )
    else:
        ret["changes"] = rmi_result
        if len(resolved) == 1:
            ret["comment"] = "Image {} was removed".format(resolved[0])
        else:
            ret["comment"] = "The following images were removed: {}".format(
                ", ".join(resolved)
            )
        ret["result"] = True
    return ret
def mod_watch(name, sfun=None, **kwargs):
    """
    The docker_image watcher, called to invoke the watch command.

    .. note::
        This state exists to support special handling of the ``watch``
        :ref:`requisite <requisites>`. It should not be called directly.

        Parameters for this function should be set by the state being
        triggered.
    """
    if sfun != "present":
        return {
            "name": name,
            "changes": {},
            "result": False,
            "comment": "watch requisite is not implemented for {}".format(sfun),
        }
    # A firing watch requisite forces the image to be refreshed.
    kwargs["force"] = True
    return present(name, **kwargs)
import logging
import os
import salt.utils.path
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = "acl"
def __virtual__():
    """
    Load this state module only when both the getfacl and setfacl
    binaries can be found on the minion's PATH.
    """
    if all(salt.utils.path.which(cmd) for cmd in ("getfacl", "setfacl")):
        return __virtualname__
    return (
        False,
        "The linux_acl state cannot be loaded: the getfacl or setfacl binary is not in"
        " the path.",
    )
def present(name, acl_type, acl_name="", perms="", recurse=False, force=False):
    """
    Ensure a Linux ACL is present

    name
        The acl path

    acl_type
        The type of the acl is used for it can be 'user' or 'group'

    acl_name
        The user or group

    perms
        Set the permissions eg.: rwx

    recurse
        Set the permissions recursive in the path

    force
        Wipe out old permissions and ensure only the new permissions are set
    """
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}

    # Octal weight of each permission letter, used to turn an "rwx" string
    # into the numeric value reported by acl.getfacl.
    _octal = {"r": 4, "w": 2, "x": 1, "-": 0}
    # Reverse mapping used to render a stored octal value back into an
    # "rwx"-style string. Read is the 4 bit, write the 2 bit and execute
    # the 1 bit; the bit tests below must select them in that order.
    # (Previously this mapped 1 -> "r" and 4 -> "x", so asymmetric values
    # such as octal 4 rendered as "--x" instead of "r--".)
    _octal_lookup = {0: "-", 1: "x", 2: "w", 4: "r"}

    if not os.path.exists(name):
        ret["comment"] = "{} does not exist".format(name)
        ret["result"] = False
        return ret

    __current_perms = __salt__["acl.getfacl"](name, recursive=recurse)

    # Split a default ACL type ("d:user" / "default:user") into the plain
    # type and select the matching section of the getfacl output.
    if acl_type.startswith(("d:", "default:")):
        _acl_type = ":".join(acl_type.split(":")[1:])
        _current_perms = __current_perms[name].get("defaults", {})
        _default = True
    else:
        _acl_type = acl_type
        _current_perms = __current_perms[name]
        _default = False

    # The getfacl execution module lists default with empty names as being
    # applied to the user/group that owns the file, e.g.,
    # default:group::rwx would be listed as default:group:root:rwx
    # In this case, if acl_name is empty, we really want to search for root
    # but still uses '' for other
    # We search through the dictionary getfacl returns for the owner of the
    # file if acl_name is empty.
    if acl_name == "":
        _search_name = __current_perms[name].get("comment").get(_acl_type, "")
    else:
        _search_name = acl_name

    if _current_perms.get(_acl_type, None) or _default:
        # Locate the existing ACL entry for the requested name, if any.
        try:
            user = [
                i
                for i in _current_perms[_acl_type]
                if next(iter(i.keys())) == _search_name
            ].pop()
        except (AttributeError, IndexError, StopIteration, KeyError):
            user = None

        if user:
            octal_sum = sum(_octal.get(i, i) for i in perms)
            need_refresh = False
            # If recursive check all paths retrieved via acl.getfacl
            if recurse:
                for path in __current_perms:
                    acl_found = False
                    if _default:
                        # Recursive default acls only apply to directories
                        if not os.path.isdir(path):
                            continue
                        _current_perms_path = __current_perms[path].get("defaults", {})
                    else:
                        _current_perms_path = __current_perms[path]
                    for user_acl in _current_perms_path.get(_acl_type, []):
                        if (
                            _search_name in user_acl
                            and user_acl[_search_name]["octal"] == octal_sum
                        ):
                            acl_found = True
                    if not acl_found:
                        need_refresh = True
                        break
            # Check the permissions from the already located file
            elif user[_search_name]["octal"] == sum(_octal.get(i, i) for i in perms):
                need_refresh = False
            # If they don't match then refresh
            else:
                need_refresh = True

            if not need_refresh:
                ret["comment"] = "Permissions are in the desired state"
            else:
                _num = user[_search_name]["octal"]
                # Render the currently applied octal value as "rwx" text for
                # the changes output, most-significant (read) bit first.
                new_perms = "{}{}{}".format(
                    _octal_lookup[_num & 4],
                    _octal_lookup[_num & 2],
                    _octal_lookup[_num & 1],
                )
                changes = {
                    "new": {"acl_name": acl_name, "acl_type": acl_type, "perms": perms},
                    "old": {
                        "acl_name": acl_name,
                        "acl_type": acl_type,
                        "perms": new_perms,
                    },
                }
                if __opts__["test"]:
                    ret.update(
                        {
                            "comment": (
                                "Updated permissions will be applied for "
                                "{}: {} -> {}".format(acl_name, new_perms, perms)
                            ),
                            "result": None,
                            "changes": changes,
                        }
                    )
                    return ret
                try:
                    if force:
                        __salt__["acl.wipefacls"](
                            name, recursive=recurse, raise_err=True
                        )
                    __salt__["acl.modfacl"](
                        acl_type,
                        acl_name,
                        perms,
                        name,
                        recursive=recurse,
                        raise_err=True,
                    )
                    ret.update(
                        {
                            "comment": "Updated permissions for {}".format(acl_name),
                            "result": True,
                            "changes": changes,
                        }
                    )
                except CommandExecutionError as exc:
                    ret.update(
                        {
                            "comment": "Error updating permissions for {}: {}".format(
                                acl_name, exc.strerror
                            ),
                            "result": False,
                        }
                    )
        else:
            # No existing entry for this name: apply a brand-new ACL.
            changes = {
                "new": {"acl_name": acl_name, "acl_type": acl_type, "perms": perms}
            }
            if __opts__["test"]:
                ret.update(
                    {
                        "comment": "New permissions will be applied for {}: {}".format(
                            acl_name, perms
                        ),
                        "result": None,
                        "changes": changes,
                    }
                )
                return ret
            try:
                if force:
                    __salt__["acl.wipefacls"](name, recursive=recurse, raise_err=True)
                __salt__["acl.modfacl"](
                    acl_type, acl_name, perms, name, recursive=recurse, raise_err=True
                )
                ret.update(
                    {
                        "comment": "Applied new permissions for {}".format(acl_name),
                        "result": True,
                        "changes": changes,
                    }
                )
            except CommandExecutionError as exc:
                ret.update(
                    {
                        "comment": "Error updating permissions for {}: {}".format(
                            acl_name, exc.strerror
                        ),
                        "result": False,
                    }
                )
    else:
        ret["comment"] = "ACL Type does not exist"
        ret["result"] = False

    return ret
def absent(name, acl_type, acl_name="", perms="", recurse=False):
    """
    Ensure a Linux ACL does not exist

    name
        The acl path

    acl_type
        The type of the acl is used for, it can be 'user' or 'group'

    acl_name
        The user or group

    perms
        Remove the permissions eg.: rwx

    recurse
        Set the permissions recursive in the path
    """
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}
    if not os.path.exists(name):
        ret["comment"] = "{} does not exist".format(name)
        ret["result"] = False
        return ret
    __current_perms = __salt__["acl.getfacl"](name, recursive=recurse)
    # Split a default ACL type ("d:user" / "default:user") into the plain
    # type and select the matching section of the getfacl output.
    if acl_type.startswith(("d:", "default:")):
        _acl_type = ":".join(acl_type.split(":")[1:])
        _current_perms = __current_perms[name].get("defaults", {})
        _default = True
    else:
        _acl_type = acl_type
        _current_perms = __current_perms[name]
        _default = False
    # The getfacl execution module lists default with empty names as being
    # applied to the user/group that owns the file, e.g.,
    # default:group::rwx would be listed as default:group:root:rwx
    # In this case, if acl_name is empty, we really want to search for root
    # but still uses '' for other
    # We search through the dictionary getfacl returns for the owner of the
    # file if acl_name is empty.
    if acl_name == "":
        _search_name = __current_perms[name].get("comment").get(_acl_type, "")
    else:
        _search_name = acl_name
    if _current_perms.get(_acl_type, None) or _default:
        # Locate the ACL entry for the requested name on the primary path.
        try:
            user = [
                i
                for i in _current_perms[_acl_type]
                if next(iter(i.keys())) == _search_name
            ].pop()
        except (AttributeError, IndexError, StopIteration, KeyError):
            user = None
        # Scan every path in the getfacl output (all sub-paths when recurse
        # is set) for a matching entry; any hit means removal is needed.
        need_refresh = False
        for path in __current_perms:
            acl_found = False
            for user_acl in __current_perms[path].get(_acl_type, []):
                if _search_name in user_acl:
                    acl_found = True
                    break
            if acl_found:
                need_refresh = True
                break
        if user or need_refresh:
            ret["comment"] = "Removing permissions"
            if __opts__["test"]:
                ret["result"] = None
                return ret
            # NOTE(review): perms is passed positionally to acl.delfacl here,
            # while list_absent() omits it — confirm against the execution
            # module's signature.
            __salt__["acl.delfacl"](acl_type, acl_name, perms, name, recursive=recurse)
        else:
            ret["comment"] = "Permissions are in the desired state"
    else:
        ret["comment"] = "ACL Type does not exist"
        ret["result"] = False
    return ret
def list_present(name, acl_type, acl_names=None, perms="", recurse=False, force=False):
    """
    Ensure a Linux ACL list is present

    Takes a list of acl names and add them to the given path

    name
        The acl path

    acl_type
        The type of the acl is used for it can be 'user' or 'group'

    acl_names
        The list of users or groups

    perms
        Set the permissions eg.: rwx

    recurse
        Set the permissions recursive in the path

    force
        Wipe out old permissions and ensure only the new permissions are set
    """
    if acl_names is None:
        acl_names = []

    ret = {"name": name, "result": True, "changes": {}, "comment": ""}

    # Octal weight of each permission letter (r=4, w=2, x=1).
    _octal = {"r": 4, "w": 2, "x": 1, "-": 0}
    _octal_perms = sum(_octal.get(i, i) for i in perms)

    if not os.path.exists(name):
        ret["comment"] = "{} does not exist".format(name)
        ret["result"] = False
        return ret

    __current_perms = __salt__["acl.getfacl"](name)

    # Split a default ACL type ("d:user" / "default:user") into the plain
    # type and select the matching section of the getfacl output.
    if acl_type.startswith(("d:", "default:")):
        _acl_type = ":".join(acl_type.split(":")[1:])
        _current_perms = __current_perms[name].get("defaults", {})
        _default = True
    else:
        _acl_type = acl_type
        _current_perms = __current_perms[name]
        _default = False

    _origin_group = _current_perms.get("comment", {}).get("group", None)
    _origin_owner = _current_perms.get("comment", {}).get("owner", None)

    _current_acl_types = []
    diff_perms = False
    # Collect the names that already carry an ACL entry of this type. Use
    # the normalized _acl_type so default ("d:"-prefixed) types resolve,
    # and keep the names as str so they compare equal to acl_names below.
    # (Previously this indexed with the raw acl_type — a KeyError for
    # default types — and encoded each name to bytes, which made the set
    # comparison against the str acl_names always report a difference.)
    for key in _current_perms.get(_acl_type, []):
        for current_acl_name in key.keys():
            _current_acl_types.append(current_acl_name)
            # NOTE(review): only the last entry's octal value survives this
            # loop, so diff_perms reflects a single entry, not all of them.
            diff_perms = _octal_perms == key[current_acl_name]["octal"]
    # The file's owning user/group shows up as an entry of its own type;
    # drop it so only explicitly managed names are compared.
    if acl_type == "user":
        try:
            _current_acl_types.remove(_origin_owner)
        except ValueError:
            pass
    else:
        try:
            _current_acl_types.remove(_origin_group)
        except ValueError:
            pass
    diff_acls = set(_current_acl_types) ^ set(acl_names)
    if not diff_acls and diff_perms and not force:
        ret = {
            "name": name,
            "result": True,
            "changes": {},
            "comment": "Permissions and {}s are in the desired state".format(
                acl_type
            ),
        }
        return ret
    # The getfacl execution module lists default with empty names as being
    # applied to the user/group that owns the file, e.g.,
    # default:group::rwx would be listed as default:group:root:rwx
    # In this case, if acl_names is empty, we really want to search for root
    # but still uses '' for other
    # We search through the dictionary getfacl returns for the owner of the
    # file if acl_names is empty.
    # NOTE(review): acl_names is always a list here, so this == "" branch
    # can never match; kept for parity with present()/absent().
    if acl_names == "":
        _search_names = __current_perms[name].get("comment").get(_acl_type, "")
    else:
        _search_names = acl_names

    if _current_perms.get(_acl_type, None) or _default:
        # Gather the existing entries that match any of the requested names.
        try:
            users = {}
            for i in _current_perms[_acl_type]:
                if i and next(iter(i.keys())) in _search_names:
                    users.update(i)
        except (AttributeError, KeyError):
            users = None

        if users:
            changes = {}
            for count, search_name in enumerate(_search_names):
                if search_name in users:
                    if users[search_name]["octal"] == sum(
                        _octal.get(i, i) for i in perms
                    ):
                        ret["comment"] = "Permissions are in the desired state"
                    else:
                        changes.update(
                            {
                                "new": {
                                    "acl_name": ", ".join(acl_names),
                                    "acl_type": acl_type,
                                    "perms": _octal_perms,
                                },
                                "old": {
                                    "acl_name": ", ".join(acl_names),
                                    "acl_type": acl_type,
                                    "perms": str(users[search_name]["octal"]),
                                },
                            }
                        )
                        if __opts__["test"]:
                            ret.update(
                                {
                                    "comment": (
                                        "Updated permissions will be applied for "
                                        "{}: {} -> {}".format(
                                            acl_names,
                                            str(users[search_name]["octal"]),
                                            perms,
                                        )
                                    ),
                                    "result": None,
                                    "changes": changes,
                                }
                            )
                            return ret
                        try:
                            if force:
                                __salt__["acl.wipefacls"](
                                    name, recursive=recurse, raise_err=True
                                )
                            for acl_name in acl_names:
                                __salt__["acl.modfacl"](
                                    acl_type,
                                    acl_name,
                                    perms,
                                    name,
                                    recursive=recurse,
                                    raise_err=True,
                                )
                            ret.update(
                                {
                                    "comment": "Updated permissions for {}".format(
                                        acl_names
                                    ),
                                    "result": True,
                                    "changes": changes,
                                }
                            )
                        except CommandExecutionError as exc:
                            ret.update(
                                {
                                    "comment": (
                                        "Error updating permissions for {}: {}".format(
                                            acl_names, exc.strerror
                                        )
                                    ),
                                    "result": False,
                                }
                            )
                else:
                    # This requested name has no entry yet: apply new ACLs.
                    changes = {
                        "new": {
                            "acl_name": ", ".join(acl_names),
                            "acl_type": acl_type,
                            "perms": perms,
                        }
                    }
                    if __opts__["test"]:
                        ret.update(
                            {
                                "comment": (
                                    "New permissions will be applied for {}: {}".format(
                                        acl_names, perms
                                    )
                                ),
                                "result": None,
                                "changes": changes,
                            }
                        )
                        return ret
                    try:
                        if force:
                            __salt__["acl.wipefacls"](
                                name, recursive=recurse, raise_err=True
                            )
                        for acl_name in acl_names:
                            __salt__["acl.modfacl"](
                                acl_type,
                                acl_name,
                                perms,
                                name,
                                recursive=recurse,
                                raise_err=True,
                            )
                        ret.update(
                            {
                                "comment": "Applied new permissions for {}".format(
                                    ", ".join(acl_names)
                                ),
                                "result": True,
                                "changes": changes,
                            }
                        )
                    except CommandExecutionError as exc:
                        ret.update(
                            {
                                "comment": (
                                    "Error updating permissions for {}: {}".format(
                                        acl_names, exc.strerror
                                    )
                                ),
                                "result": False,
                            }
                        )
        else:
            # None of the requested names have entries yet: apply new ACLs.
            changes = {
                "new": {
                    "acl_name": ", ".join(acl_names),
                    "acl_type": acl_type,
                    "perms": perms,
                }
            }
            if __opts__["test"]:
                ret.update(
                    {
                        "comment": "New permissions will be applied for {}: {}".format(
                            acl_names, perms
                        ),
                        "result": None,
                        "changes": changes,
                    }
                )
                return ret
            try:
                if force:
                    __salt__["acl.wipefacls"](name, recursive=recurse, raise_err=True)
                for acl_name in acl_names:
                    __salt__["acl.modfacl"](
                        acl_type,
                        acl_name,
                        perms,
                        name,
                        recursive=recurse,
                        raise_err=True,
                    )
                ret.update(
                    {
                        "comment": "Applied new permissions for {}".format(
                            ", ".join(acl_names)
                        ),
                        "result": True,
                        "changes": changes,
                    }
                )
            except CommandExecutionError as exc:
                ret.update(
                    {
                        "comment": "Error updating permissions for {}: {}".format(
                            acl_names, exc.strerror
                        ),
                        "result": False,
                    }
                )
    else:
        ret["comment"] = "ACL Type does not exist"
        ret["result"] = False
    return ret
def list_absent(name, acl_type, acl_names=None, recurse=False):
    """
    Ensure a Linux ACL list does not exist

    Takes a list of acl names and remove them from the given path

    name
        The acl path

    acl_type
        The type of the acl is used for, it can be 'user' or 'group'

    acl_names
        The list of users or groups

    recurse
        Set the permissions recursive in the path
    """
    if acl_names is None:
        acl_names = []
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}
    if not os.path.exists(name):
        ret["comment"] = "{} does not exist".format(name)
        ret["result"] = False
        return ret
    # NOTE(review): getfacl is called without recursive= here even though a
    # recurse parameter exists — confirm whether sub-paths should be scanned.
    __current_perms = __salt__["acl.getfacl"](name)
    # Split a default ACL type ("d:user" / "default:user") into the plain
    # type and select the matching section of the getfacl output.
    if acl_type.startswith(("d:", "default:")):
        _acl_type = ":".join(acl_type.split(":")[1:])
        _current_perms = __current_perms[name].get("defaults", {})
        _default = True
    else:
        _acl_type = acl_type
        _current_perms = __current_perms[name]
        _default = False
    # The getfacl execution module lists default with empty names as being
    # applied to the user/group that owns the file, e.g.,
    # default:group::rwx would be listed as default:group:root:rwx
    # In this case, if acl_names is empty, we really want to search for root
    # but still uses '' for other
    # We search through the dictionary getfacl returns for the owner of the
    # file if acl_names is empty.
    if not acl_names:
        _search_names = set(__current_perms[name].get("comment").get(_acl_type, ""))
    else:
        _search_names = set(acl_names)
    if _current_perms.get(_acl_type, None) or _default:
        # Gather the existing entries that match any of the requested names.
        try:
            users = {}
            for i in _current_perms[_acl_type]:
                if i and next(iter(i.keys())) in _search_names:
                    users.update(i)
        except (AttributeError, KeyError):
            users = None
        if users:
            ret["comment"] = "Removing permissions"
            if __opts__["test"]:
                ret["result"] = None
                return ret
            # NOTE(review): unlike absent(), no perms argument is passed to
            # acl.delfacl here — confirm which call matches the execution
            # module's signature.
            for acl_name in acl_names:
                __salt__["acl.delfacl"](acl_type, acl_name, name, recursive=recurse)
        else:
            ret["comment"] = "Permissions are in the desired state"
    else:
        ret["comment"] = "ACL Type does not exist"
        ret["result"] = False
    return ret
import logging
# Set up logging
log = logging.getLogger(__name__)
def __virtual__():
    """
    Expose the ethtool state only when the ethtool execution module is
    loaded on this minion.
    """
    if "ethtool.show_driver" not in __salt__:
        return (False, "ethtool module could not be loaded")
    return "ethtool"
def coalesce(name, **kwargs):
    """
    Manage interrupt coalescing settings of a network device

    name
        Interface name to apply coalescing settings

    .. code-block:: yaml

        eth0:
          ethtool.coalesce:
            - name: eth0
            - adaptive_rx: on
            - adaptive_tx: on
            - rx_usecs: 24
            - tx_usecs: 48
            - sample_interval: 0
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Network device {} coalescing settings are up to date.".format(name),
    }
    kwargs.setdefault("test", __opts__.get("test", False))

    pending = {}
    summary = []
    try:
        current = __salt__["ethtool.show_coalesce"](name)
        if not isinstance(current, dict):
            ret["result"] = False
            ret["comment"] = "Device {} coalescing settings are not supported".format(
                name
            )
            return ret

        # Collect only the requested settings that differ from the device.
        for option, wanted in kwargs.items():
            if option in current and wanted != current[option]:
                pending[option] = wanted
                summary.append("{}: {}".format(option, wanted))

        # Dry run: report what would change, but touch nothing.
        if kwargs["test"]:
            if pending:
                ret["result"] = None
                ret[
                    "comment"
                ] = "Device {} coalescing settings are set to be updated:\n{}".format(
                    name, "\n".join(summary)
                )
            return ret

        if pending:
            ret["comment"] = "Device {} coalescing settings updated.".format(name)
            ret["changes"]["ethtool_coalesce"] = "\n".join(summary)
    except AttributeError as error:
        ret["result"] = False
        ret["comment"] = str(error)
        return ret

    # Apply the collected settings in one call.
    if pending:
        try:
            __salt__["ethtool.set_coalesce"](name, **pending)
        except AttributeError as error:
            ret["result"] = False
            ret["comment"] = str(error)
            return ret
    return ret
def ring(name, **kwargs):
    """
    Manage rx/tx ring parameters of a network device

    The special value ``max`` selects the preset hardware maximum for a
    parameter.

    name
        Interface name to apply ring parameters

    .. code-block:: yaml

        eth0:
          ethtool.ring:
            - name: eth0
            - rx: 1024
            - rx_mini: 0
            - rx_jumbo: 0
            - tx: max
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Network device {} ring parameters are up to date.".format(name),
    }
    kwargs.setdefault("test", __opts__.get("test", False))

    pending = {}
    summary = []
    try:
        current = __salt__["ethtool.show_ring"](name)
        if not isinstance(current, dict):
            ret["result"] = False
            ret["comment"] = "Device {} ring parameters are not supported".format(name)
            return ret

        # Collect only the requested parameters that differ from the device.
        for option, wanted in kwargs.items():
            if option not in current:
                continue
            if wanted == "max":
                # Resolve 'max' to the hardware limit reported alongside
                # the current value (e.g. rx -> rx_max).
                wanted = current["{}_max".format(option)]
            if wanted != current[option]:
                pending[option] = wanted
                summary.append("{}: {}".format(option, wanted))

        # Dry run: report what would change, but touch nothing.
        if kwargs["test"]:
            if pending:
                ret["result"] = None
                ret[
                    "comment"
                ] = "Device {} ring parameters are set to be updated:\n{}".format(
                    name, "\n".join(summary)
                )
            return ret

        if pending:
            ret["comment"] = "Device {} ring parameters updated.".format(name)
            ret["changes"]["ethtool_ring"] = "\n".join(summary)
    except AttributeError as error:
        ret["result"] = False
        ret["comment"] = str(error)
        return ret

    # Apply the collected parameters in one call.
    if pending:
        try:
            __salt__["ethtool.set_ring"](name, **pending)
        except AttributeError as error:
            ret["result"] = False
            ret["comment"] = str(error)
            return ret
    return ret
def offload(name, **kwargs):
    """
    Manage protocol offload and other features of network device

    name
        Interface name to apply offload settings

    .. code-block:: yaml

        eth0:
          ethtool.offload:
            - name: eth0
            - tcp_segmentation_offload: on
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Network device {} offload settings are up to date.".format(name),
    }
    apply_offload = False
    if "test" not in kwargs:
        kwargs["test"] = __opts__.get("test", False)

    # Build offload settings
    try:
        old = __salt__["ethtool.show_offload"](name)
        if not isinstance(old, dict):
            ret["result"] = False
            ret["comment"] = "Device {} offload settings are not supported".format(name)
            return ret

        new = {}
        diff = []
        # Retrieve changes to be made
        for key, value in kwargs.items():
            # Normalize the requested value to the "on"/"off" strings used
            # by ethtool.show_offload. Strings need explicit handling: the
            # previous bare truthiness test mapped the string "off" to "on".
            if isinstance(value, str):
                value = "on" if value.strip().lower() in ("on", "true", "yes", "1") else "off"
            else:
                value = "on" if value else "off"
            if key in old and value != old[key]:
                new.update({key: value})
                diff.append("{}: {}".format(key, value))

        # Dry run
        if kwargs["test"]:
            if not new:
                return ret
            ret["result"] = None
            ret[
                "comment"
            ] = "Device {} offload settings are set to be updated:\n{}".format(
                name, "\n".join(diff)
            )
            return ret

        # Prepare return output
        if new:
            apply_offload = True
            ret["comment"] = "Device {} offload settings updated.".format(name)
            ret["changes"]["ethtool_offload"] = "\n".join(diff)
    except AttributeError as error:
        ret["result"] = False
        ret["comment"] = str(error)
        return ret

    # Apply offload settings
    if apply_offload:
        try:
            __salt__["ethtool.set_offload"](name, **new)
        except AttributeError as error:
            ret["result"] = False
            ret["comment"] = str(error)
            return ret
    return ret
import logging
import salt.utils.dictdiffer
import salt.utils.dictupdate as dictupdate
from salt.utils.odict import OrderedDict
log = logging.getLogger(__name__)
def __virtual__():
    """
    Load this state module only when the boto_iam execution module is
    available.
    """
    if "boto_iam.role_exists" not in __salt__:
        return (False, "boto_iam module could not be loaded")
    return "boto_iam_role"
def present(
    name,
    policy_document=None,
    policy_document_from_pillars=None,
    path=None,
    policies=None,
    policies_from_pillars=None,
    managed_policies=None,
    create_instance_profile=True,
    region=None,
    key=None,
    keyid=None,
    profile=None,
    delete_policies=True,
):
    """
    Ensure the IAM role exists.

    name
        Name of the IAM role.

    policy_document
        The policy that grants an entity permission to assume the role.
        (See https://boto.readthedocs.io/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role)

    policy_document_from_pillars
        A pillar key that contains a role policy document. The statements
        defined here will be appended with the policy document statements
        defined in the policy_document argument.

        .. versionadded:: 2017.7.0

    path
        The path to the role/instance profile.
        (See https://boto.readthedocs.io/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role)

    policies
        A dict of IAM role policies.

    policies_from_pillars
        A list of pillars that contain role policy dicts. Policies in the
        pillars will be merged in the order defined in the list and key
        conflicts will be handled by later defined keys overriding earlier
        defined keys. The policies defined here will be merged with the
        policies defined in the policies argument. If keys conflict, the keys
        in the policies argument will override the keys defined in
        policies_from_pillars.

    managed_policies
        A list of (AWS or Customer) managed policies to be attached to the role.

    create_instance_profile
        A boolean of whether or not to create an instance profile and associate
        it with this role.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.

    delete_policies
        Deletes existing policies that are not in the given list of policies. Default
        value is ``True``. If ``False`` is specified, existing policies will not be deleted
        allowing manual modifications on the IAM role to be persistent.

        .. versionadded:: 2015.8.0
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # Build up _policy_document: pillar-sourced statements come first, then the
    # statements from the explicit policy_document argument are appended.
    _policy_document = {}
    if policy_document_from_pillars:
        from_pillars = __salt__["pillar.get"](policy_document_from_pillars)
        if from_pillars:
            _policy_document["Version"] = from_pillars["Version"]
            _policy_document.setdefault("Statement", [])
            _policy_document["Statement"].extend(from_pillars["Statement"])
    if policy_document:
        # NOTE: this overwrites any Version taken from the pillar document.
        _policy_document["Version"] = policy_document["Version"]
        _policy_document.setdefault("Statement", [])
        _policy_document["Statement"].extend(policy_document["Statement"])
    _ret = _role_present(name, _policy_document, path, region, key, keyid, profile)
    # Build up _policies: pillar policies are merged in list order, then the
    # explicit ``policies`` argument wins on key conflicts.
    if not policies:
        policies = {}
    if not policies_from_pillars:
        policies_from_pillars = []
    if not managed_policies:
        managed_policies = []
    _policies = {}
    for policy in policies_from_pillars:
        _policy = __salt__["pillar.get"](policy)
        _policies.update(_policy)
    _policies.update(policies)
    ret["changes"] = _ret["changes"]
    ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
    if not _ret["result"]:
        ret["result"] = _ret["result"]
        # A ``None`` result (test mode) is carried through without aborting.
        if ret["result"] is False:
            return ret
    if create_instance_profile:
        _ret = _instance_profile_present(name, region, key, keyid, profile)
        ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
        ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
        if not _ret["result"]:
            ret["result"] = _ret["result"]
            if ret["result"] is False:
                return ret
        _ret = _instance_profile_associated(name, region, key, keyid, profile)
        ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
        ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
        if not _ret["result"]:
            ret["result"] = _ret["result"]
            if ret["result"] is False:
                return ret
    _ret = _policies_present(
        name, _policies, region, key, keyid, profile, delete_policies
    )
    ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
    ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
    if not _ret["result"]:
        ret["result"] = _ret["result"]
    _ret = _policies_attached(name, managed_policies, region, key, keyid, profile)
    ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
    ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
    if not _ret["result"]:
        ret["result"] = _ret["result"]
    return ret
def _role_present(
    name,
    policy_document=None,
    path=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Ensure the IAM role ``name`` exists and its assume-role policy document
    matches ``policy_document``.  Returns a partial state result dict
    (result/comment/changes) that ``present`` merges into its own return.
    """
    ret = {"result": True, "comment": "", "changes": {}}
    role = __salt__["boto_iam.describe_role"](name, region, key, keyid, profile)
    if not role:
        if __opts__["test"]:
            ret["comment"] = "IAM role {} is set to be created.".format(name)
            ret["result"] = None
            return ret
        created = __salt__["boto_iam.create_role"](
            name, policy_document, path, region, key, keyid, profile
        )
        if created:
            ret["changes"]["old"] = {"role": None}
            ret["changes"]["new"] = {"role": name}
            ret["comment"] = "IAM role {} created.".format(name)
        else:
            ret["result"] = False
            ret["comment"] = "Failed to create {} IAM role.".format(name)
    else:
        ret["comment"] = "{} role present.".format(name)
        # With no explicit document, compare against the module's default
        # assume-role policy so drift from it is still corrected.
        if not policy_document:
            _policy_document = __salt__["boto_iam.build_policy"](
                region, key, keyid, profile
            )
        else:
            _policy_document = policy_document
        # Sort list sub-items on both sides so ordering differences alone do
        # not register as a change (see _sort_policy).
        if salt.utils.dictdiffer.deep_diff(
            _sort_policy(role["assume_role_policy_document"]),
            _sort_policy(_policy_document),
        ):
            if __opts__["test"]:
                msg = "Assume role policy document to be updated."
                ret["comment"] = "{} {}".format(ret["comment"], msg)
                ret["result"] = None
                return ret
            updated = __salt__["boto_iam.update_assume_role_policy"](
                name, _policy_document, region, key, keyid, profile
            )
            if updated:
                msg = "Assume role policy document updated."
                ret["comment"] = "{} {}".format(ret["comment"], msg)
                ret["changes"]["old"] = {
                    "policy_document": role["assume_role_policy_document"]
                }
                ret["changes"]["new"] = {"policy_document": _policy_document}
            else:
                ret["result"] = False
                msg = "Failed to update assume role policy."
                ret["comment"] = "{} {}".format(ret["comment"], msg)
    return ret
def _instance_profile_present(name, region=None, key=None, keyid=None, profile=None):
    """
    Ensure an IAM instance profile named ``name`` exists, creating it when
    missing.  Returns a partial state result dict (result/comment/changes).
    """
    ret = {"result": True, "comment": "", "changes": {}}
    if __salt__["boto_iam.instance_profile_exists"](name, region, key, keyid, profile):
        # Nothing to do; keep the default success result.
        return ret
    if __opts__["test"]:
        ret["comment"] = "Instance profile {} is set to be created.".format(name)
        ret["result"] = None
        return ret
    if __salt__["boto_iam.create_instance_profile"](name, region, key, keyid, profile):
        ret["changes"]["old"] = {"instance_profile": None}
        ret["changes"]["new"] = {"instance_profile": name}
        ret["comment"] = "Instance profile {} created.".format(name)
    else:
        ret["result"] = False
        ret["comment"] = "Failed to create {} instance profile.".format(name)
    return ret
def _instance_profile_associated(name, region=None, key=None, keyid=None, profile=None):
    """
    Ensure the instance profile named ``name`` is associated with the IAM
    role of the same name.  Returns a partial state result dict.
    """
    ret = {"result": True, "comment": "", "changes": {}}
    already_associated = __salt__["boto_iam.profile_associated"](
        name, name, region, key, keyid, profile
    )
    if already_associated:
        return ret
    if __opts__["test"]:
        ret["comment"] = "Instance profile {} is set to be associated.".format(name)
        ret["result"] = None
        return ret
    if __salt__["boto_iam.associate_profile_to_role"](
        name, name, region, key, keyid, profile
    ):
        ret["changes"]["old"] = {"profile_associated": None}
        ret["changes"]["new"] = {"profile_associated": True}
        ret["comment"] = "Instance profile {} associated.".format(name)
    else:
        ret["result"] = False
        ret[
            "comment"
        ] = "Failed to associate {0} instance profile with {0} role.".format(name)
    return ret
def _sort_policy(doc):
"""
List-type sub-items in policies don't happen to be order-sensitive, but
compare operations will render them unequal, leading to non-idempotent
state runs. We'll sort any list-type subitems before comparison to reduce
the likelihood of false negatives.
"""
if isinstance(doc, list):
return sorted(_sort_policy(i) for i in doc)
elif isinstance(doc, (dict, OrderedDict)):
return {k: _sort_policy(v) for k, v in doc.items()}
return doc
def _policies_present(
    name,
    policies=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
    delete_policies=True,
):
    """
    Ensure the inline policies on IAM role ``name`` match ``policies``
    (a dict of policy name -> policy document).  When ``delete_policies`` is
    True, inline policies not present in ``policies`` are removed.  Returns a
    partial state result dict (result/comment/changes).
    """
    ret = {"result": True, "comment": "", "changes": {}}
    policies_to_create = {}
    policies_to_delete = []
    # Any policy that is missing or whose document differs gets (re)created.
    for policy_name, policy in policies.items():
        _policy = __salt__["boto_iam.get_role_policy"](
            name, policy_name, region, key, keyid, profile
        )
        if _policy != policy:
            policies_to_create[policy_name] = policy
    _list = __salt__["boto_iam.list_role_policies"](name, region, key, keyid, profile)
    for policy_name in _list:
        if delete_policies and policy_name not in policies:
            policies_to_delete.append(policy_name)
    if policies_to_create or policies_to_delete:
        _to_modify = list(policies_to_delete)
        _to_modify.extend(policies_to_create)
        if __opts__["test"]:
            ret["comment"] = "{} policies to be modified on role {}.".format(
                ", ".join(_to_modify), name
            )
            ret["result"] = None
            return ret
        ret["changes"]["old"] = {"policies": _list}
        for policy_name, policy in policies_to_create.items():
            policy_set = __salt__["boto_iam.create_role_policy"](
                name, policy_name, policy, region, key, keyid, profile
            )
            if not policy_set:
                # On failure, record the current state before bailing out.
                _list = __salt__["boto_iam.list_role_policies"](
                    name, region, key, keyid, profile
                )
                ret["changes"]["new"] = {"policies": _list}
                ret["result"] = False
                ret["comment"] = "Failed to add policy {} to role {}".format(
                    policy_name, name
                )
                return ret
        for policy_name in policies_to_delete:
            policy_unset = __salt__["boto_iam.delete_role_policy"](
                name, policy_name, region, key, keyid, profile
            )
            if not policy_unset:
                _list = __salt__["boto_iam.list_role_policies"](
                    name, region, key, keyid, profile
                )
                ret["changes"]["new"] = {"policies": _list}
                ret["result"] = False
                ret["comment"] = "Failed to remove policy {} from role {}".format(
                    policy_name, name
                )
                return ret
        _list = __salt__["boto_iam.list_role_policies"](
            name, region, key, keyid, profile
        )
        ret["changes"]["new"] = {"policies": _list}
        ret["comment"] = "{} policies modified on role {}.".format(
            ", ".join(_list), name
        )
    return ret
def _policies_attached(
    name, managed_policies=None, region=None, key=None, keyid=None, profile=None
):
    """
    Ensure exactly the managed policies in ``managed_policies`` (names or
    ARNs) are attached to IAM role ``name``; any other attached managed
    policy is detached.  Returns a partial state result dict.
    """
    ret = {"result": True, "comment": "", "changes": {}}
    policies_to_attach = []
    policies_to_detach = []
    for policy in managed_policies or []:
        entities = __salt__["boto_iam.list_entities_for_policy"](
            policy,
            entity_filter="Role",
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        found = False
        for roledict in entities.get("policy_roles", []):
            if name == roledict.get("role_name"):
                found = True
                break
        if not found:
            policies_to_attach.append(policy)
    _list = __salt__["boto_iam.list_attached_role_policies"](
        name, region=region, key=key, keyid=keyid, profile=profile
    )
    oldpolicies = [x.get("policy_arn") for x in _list]
    for policy_data in _list:
        # ``managed_policies`` entries may be names or ARNs, so an attached
        # policy is kept if either identifier matches.
        if (
            policy_data.get("policy_name") not in managed_policies
            and policy_data.get("policy_arn") not in managed_policies
        ):
            policies_to_detach.append(policy_data.get("policy_arn"))
    if policies_to_attach or policies_to_detach:
        _to_modify = list(policies_to_detach)
        _to_modify.extend(policies_to_attach)
        if __opts__["test"]:
            ret["comment"] = "{} policies to be modified on role {}.".format(
                ", ".join(_to_modify), name
            )
            ret["result"] = None
            return ret
        ret["changes"]["old"] = {"managed_policies": oldpolicies}
        for policy_name in policies_to_attach:
            policy_set = __salt__["boto_iam.attach_role_policy"](
                policy_name,
                role_name=name,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
            if not policy_set:
                # On failure, capture the current attachment state and abort.
                _list = __salt__["boto_iam.list_attached_role_policies"](
                    name, region=region, key=key, keyid=keyid, profile=profile
                )
                newpolicies = [x.get("policy_arn") for x in _list]
                ret["changes"]["new"] = {"managed_policies": newpolicies}
                ret["result"] = False
                ret["comment"] = "Failed to add policy {} to role {}".format(
                    policy_name, name
                )
                return ret
        for policy_name in policies_to_detach:
            policy_unset = __salt__["boto_iam.detach_role_policy"](
                policy_name,
                role_name=name,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
            if not policy_unset:
                _list = __salt__["boto_iam.list_attached_role_policies"](
                    name, region=region, key=key, keyid=keyid, profile=profile
                )
                newpolicies = [x.get("policy_arn") for x in _list]
                ret["changes"]["new"] = {"managed_policies": newpolicies}
                ret["result"] = False
                ret["comment"] = "Failed to remove policy {} from role {}".format(
                    policy_name, name
                )
                return ret
        _list = __salt__["boto_iam.list_attached_role_policies"](
            name, region=region, key=key, keyid=keyid, profile=profile
        )
        newpolicies = [x.get("policy_arn") for x in _list]
        log.debug(newpolicies)
        ret["changes"]["new"] = {"managed_policies": newpolicies}
        ret["comment"] = "{} policies modified on role {}.".format(
            ", ".join(newpolicies), name
        )
    return ret
def absent(name, region=None, key=None, keyid=None, profile=None):
    """
    Ensure the IAM role is deleted.

    name
        Name of the IAM role.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # Teardown order matters: inline policies and managed-policy attachments
    # must go first, then the profile association, the profile itself, and
    # finally the role.  A hard failure at any step aborts the run.
    _ret = _policies_absent(name, region, key, keyid, profile)
    ret["changes"] = _ret["changes"]
    ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
    if not _ret["result"]:
        ret["result"] = _ret["result"]
        if ret["result"] is False:
            return ret
    _ret = _policies_detached(name, region, key, keyid, profile)
    ret["changes"] = _ret["changes"]
    ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
    if not _ret["result"]:
        ret["result"] = _ret["result"]
        if ret["result"] is False:
            return ret
    _ret = _instance_profile_disassociated(name, region, key, keyid, profile)
    ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
    ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
    if not _ret["result"]:
        ret["result"] = _ret["result"]
        if ret["result"] is False:
            return ret
    _ret = _instance_profile_absent(name, region, key, keyid, profile)
    ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
    ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
    if not _ret["result"]:
        ret["result"] = _ret["result"]
        if ret["result"] is False:
            return ret
    _ret = _role_absent(name, region, key, keyid, profile)
    ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
    ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
    if not _ret["result"]:
        ret["result"] = _ret["result"]
    return ret
def _role_absent(name, region=None, key=None, keyid=None, profile=None):
    """
    Delete the IAM role ``name`` if it exists.  Returns a partial state
    result dict (result/comment/changes).
    """
    ret = {"result": True, "comment": "", "changes": {}}
    if not __salt__["boto_iam.role_exists"](name, region, key, keyid, profile):
        ret["comment"] = "{} role does not exist.".format(name)
        return ret
    if __opts__["test"]:
        ret["comment"] = "IAM role {} is set to be removed.".format(name)
        ret["result"] = None
        return ret
    if __salt__["boto_iam.delete_role"](name, region, key, keyid, profile):
        ret["changes"]["old"] = {"role": name}
        ret["changes"]["new"] = {"role": None}
        ret["comment"] = "IAM role {} removed.".format(name)
    else:
        ret["result"] = False
        ret["comment"] = "Failed to delete {} iam role.".format(name)
    return ret
def _instance_profile_absent(name, region=None, key=None, keyid=None, profile=None):
    """
    Delete the instance profile ``name`` if it exists.  Returns a partial
    state result dict (result/comment/changes).
    """
    ret = {"result": True, "comment": "", "changes": {}}
    if not __salt__["boto_iam.instance_profile_exists"](
        name, region, key, keyid, profile
    ):
        ret["comment"] = "{} instance profile does not exist.".format(name)
        return ret
    if __opts__["test"]:
        ret["comment"] = "Instance profile {} is set to be removed.".format(name)
        ret["result"] = None
        return ret
    if __salt__["boto_iam.delete_instance_profile"](name, region, key, keyid, profile):
        ret["changes"]["old"] = {"instance_profile": name}
        ret["changes"]["new"] = {"instance_profile": None}
        ret["comment"] = "Instance profile {} removed.".format(name)
    else:
        ret["result"] = False
        ret["comment"] = "Failed to delete {} instance profile.".format(name)
    return ret
def _policies_absent(name, region=None, key=None, keyid=None, profile=None):
    """
    Remove every inline policy from the IAM role ``name``.  Returns a partial
    state result dict (result/comment/changes) that ``absent`` merges into
    its own return.
    """
    ret = {"result": True, "comment": "", "changes": {}}
    _list = __salt__["boto_iam.list_role_policies"](name, region, key, keyid, profile)
    if not _list:
        ret["comment"] = "No policies in role {}.".format(name)
        return ret
    if __opts__["test"]:
        ret["comment"] = "{} policies to be removed from role {}.".format(
            ", ".join(_list), name
        )
        ret["result"] = None
        return ret
    ret["changes"]["old"] = {"policies": _list}
    for policy_name in _list:
        policy_unset = __salt__["boto_iam.delete_role_policy"](
            name, policy_name, region, key, keyid, profile
        )
        if not policy_unset:
            # On failure, capture the current state before bailing out.
            _list = __salt__["boto_iam.list_role_policies"](
                name, region, key, keyid, profile
            )
            ret["changes"]["new"] = {"policies": _list}
            ret["result"] = False
            # Bug fix: this loop *removes* policies, so report a removal
            # failure (the message previously said "Failed to add policy").
            ret["comment"] = "Failed to remove policy {} from role {}".format(
                policy_name, name
            )
            return ret
    _list = __salt__["boto_iam.list_role_policies"](name, region, key, keyid, profile)
    ret["changes"]["new"] = {"policies": _list}
    ret["comment"] = "{} policies removed from role {}.".format(", ".join(_list), name)
    return ret
def _policies_detached(name, region=None, key=None, keyid=None, profile=None):
    """
    Detach every managed policy from the IAM role ``name``.  Returns a
    partial state result dict (result/comment/changes).
    """
    ret = {"result": True, "comment": "", "changes": {}}
    _list = __salt__["boto_iam.list_attached_role_policies"](
        role_name=name, region=region, key=key, keyid=keyid, profile=profile
    )
    oldpolicies = [x.get("policy_arn") for x in _list]
    if not _list:
        ret["comment"] = "No attached policies in role {}.".format(name)
        return ret
    if __opts__["test"]:
        ret["comment"] = "{} policies to be detached from role {}.".format(
            ", ".join(oldpolicies), name
        )
        ret["result"] = None
        return ret
    ret["changes"]["old"] = {"managed_policies": oldpolicies}
    for policy_arn in oldpolicies:
        policy_unset = __salt__["boto_iam.detach_role_policy"](
            policy_arn, name, region=region, key=key, keyid=keyid, profile=profile
        )
        if not policy_unset:
            # On failure, capture the current attachment state and abort.
            _list = __salt__["boto_iam.list_attached_role_policies"](
                name, region=region, key=key, keyid=keyid, profile=profile
            )
            newpolicies = [x.get("policy_arn") for x in _list]
            ret["changes"]["new"] = {"managed_policies": newpolicies}
            ret["result"] = False
            ret["comment"] = "Failed to detach {} from role {}".format(policy_arn, name)
            return ret
    _list = __salt__["boto_iam.list_attached_role_policies"](
        name, region=region, key=key, keyid=keyid, profile=profile
    )
    newpolicies = [x.get("policy_arn") for x in _list]
    ret["changes"]["new"] = {"managed_policies": newpolicies}
    ret["comment"] = "{} policies detached from role {}.".format(
        ", ".join(newpolicies), name
    )
    return ret
def _instance_profile_disassociated(
    name, region=None, key=None, keyid=None, profile=None
):
    """
    Disassociate the instance profile named ``name`` from the IAM role of
    the same name, if currently associated.  Returns a partial state result
    dict (result/comment/changes).
    """
    ret = {"result": True, "comment": "", "changes": {}}
    if not __salt__["boto_iam.profile_associated"](
        name, name, region, key, keyid, profile
    ):
        return ret
    if __opts__["test"]:
        ret["comment"] = "Instance profile {} is set to be disassociated.".format(name)
        ret["result"] = None
        return ret
    disassociated = __salt__["boto_iam.disassociate_profile_from_role"](
        name, name, region, key, keyid, profile
    )
    if disassociated:
        ret["changes"]["old"] = {"profile_associated": True}
        ret["changes"]["new"] = {"profile_associated": False}
        ret["comment"] = "Instance profile {} disassociated.".format(name)
    else:
        ret["result"] = False
        ret[
            "comment"
        ] = "Failed to disassociate {0} instance profile from {0} role.".format(
            name
        )
    return ret
def __virtual__():
    """Load this state module only when the libcloud_dns module is present."""
    if "libcloud_dns.list_zones" not in __salt__:
        return (False, "libcloud_dns module could not be loaded")
    return True
def state_result(result, message, name, changes=None):
    """
    Assemble the standard state return dictionary.

    :param result: Outcome of the state run (``True``/``False``).
    :param message: Human-readable comment describing the outcome.
    :param name: Name of the state / resource.
    :param changes: Mapping of changes made; defaults to an empty dict.
    """
    return {
        "result": result,
        "comment": message,
        "name": name,
        "changes": {} if changes is None else changes,
    }
def zone_present(domain, type, profile):
    """
    Ensures a zone is present.

    :param domain: Zone name, i.e. the domain name
    :type domain: ``str``

    :param type: Zone type (master / slave), defaults to master
    :type type: ``str``

    :param profile: The profile key
    :type profile: ``str``
    """
    zones = __salt__["libcloud_dns.list_zones"](profile)
    if not type:
        type = "master"
    matching_zone = [z for z in zones if z["domain"] == domain]
    if len(matching_zone) > 0:
        return state_result(True, "Zone already exists", domain)
    else:
        # The provider's create result is returned as the state's changes.
        result = __salt__["libcloud_dns.create_zone"](domain, profile, type)
        return state_result(True, "Created new zone", domain, result)
def zone_absent(domain, profile):
    """
    Ensures a zone is absent.

    :param domain: Zone name, i.e. the domain name
    :type domain: ``str``

    :param profile: The profile key
    :type profile: ``str``
    """
    zones = __salt__["libcloud_dns.list_zones"](profile)
    matching_zone = [z for z in zones if z["domain"] == domain]
    if len(matching_zone) == 0:
        return state_result(True, "Zone already absent", domain)
    else:
        # Only the first matching zone is deleted; domains are expected to
        # be unique per profile.
        result = __salt__["libcloud_dns.delete_zone"](matching_zone[0]["id"], profile)
        return state_result(result, "Deleted zone", domain)
def record_present(name, zone, type, data, profile):
    """
    Ensures a record is present.

    :param name: Record name without the domain name (e.g. www).
                 Note: If you want to create a record for a base domain
                 name, you should specify empty string ('') for this
                 argument.
    :type name: ``str``

    :param zone: Zone where the requested record is created, the domain name
    :type zone: ``str``

    :param type: DNS record type (A, AAAA, ...).
    :type type: ``str``

    :param data: Data for the record (depends on the record type).
    :type data: ``str``

    :param profile: The profile key
    :type profile: ``str``
    """
    zones = __salt__["libcloud_dns.list_zones"](profile)
    try:
        matching_zone = [z for z in zones if z["domain"] == zone][0]
    except IndexError:
        return state_result(False, "Could not locate zone", name)
    records = __salt__["libcloud_dns.list_records"](matching_zone["id"], profile)
    # A record only counts as existing when name, type AND data all match;
    # otherwise a new record is created alongside any partial matches.
    matching_records = [
        record
        for record in records
        if record["name"] == name and record["type"] == type and record["data"] == data
    ]
    if len(matching_records) == 0:
        result = __salt__["libcloud_dns.create_record"](
            name, matching_zone["id"], type, data, profile
        )
        return state_result(True, "Created new record", name, result)
    else:
        return state_result(True, "Record already exists", name)
def record_absent(name, zone, type, data, profile):
    """
    Ensures a record is absent.

    :param name: Record name without the domain name (e.g. www).
                 Note: If you want to create a record for a base domain
                 name, you should specify empty string ('') for this
                 argument.
    :type name: ``str``

    :param zone: Zone where the requested record is located, the domain name
    :type zone: ``str``

    :param type: DNS record type (A, AAAA, ...).
    :type type: ``str``

    :param data: Data for the record (depends on the record type).
    :type data: ``str``

    :param profile: The profile key
    :type profile: ``str``
    """
    zones = __salt__["libcloud_dns.list_zones"](profile)
    try:
        matching_zone = [z for z in zones if z["domain"] == zone][0]
    except IndexError:
        return state_result(False, "Zone could not be found", name)
    records = __salt__["libcloud_dns.list_records"](matching_zone["id"], profile)
    matching_records = [
        record
        for record in records
        if record["name"] == name and record["type"] == type and record["data"] == data
    ]
    if len(matching_records) > 0:
        # Every record matching name+type+data is removed; the state only
        # succeeds if all individual deletions succeed.
        result = []
        for record in matching_records:
            result.append(
                __salt__["libcloud_dns.delete_record"](
                    matching_zone["id"], record["id"], profile
                )
            )
        return state_result(all(result), "Removed {} records".format(len(result)), name)
    else:
        return state_result(True, "Records already absent", name)
def __virtual__():
    """
    Only load if the influxdb module is available
    """
    if "influxdb.db_exists" not in __salt__:
        return (False, "influxdb module could not be loaded")
    return "influxdb_database"
def present(name, **client_args):
    """
    Ensure that given database is present.

    name
        Name of the database to create.
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Database {} is already present".format(name),
    }

    db_missing = not __salt__["influxdb.db_exists"](name, **client_args)
    if db_missing:
        if __opts__["test"]:
            # Dry run: report the pending creation without touching InfluxDB.
            ret["result"] = None
            ret["comment"] = "Database {} is absent and will be created".format(name)
        elif __salt__["influxdb.create_db"](name, **client_args):
            ret["comment"] = "Database {} has been created".format(name)
            ret["changes"][name] = "Present"
        else:
            ret["comment"] = "Failed to create database {}".format(name)
            ret["result"] = False
    return ret
def absent(name, **client_args):
    """
    Ensure that given database is absent.

    name
        Name of the database to remove.
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Database {} is not present".format(name),
    }

    db_found = __salt__["influxdb.db_exists"](name, **client_args)
    if db_found:
        if __opts__["test"]:
            # Dry run: report the pending removal without touching InfluxDB.
            ret["result"] = None
            ret["comment"] = "Database {} is present and needs to be removed".format(
                name
            )
        elif __salt__["influxdb.drop_db"](name, **client_args):
            ret["comment"] = "Database {} has been removed".format(name)
            ret["changes"][name] = "Absent"
        else:
            ret["comment"] = "Failed to remove database {}".format(name)
            ret["result"] = False
    return ret
import salt.utils.path
from salt.exceptions import CommandExecutionError, CommandNotFoundError
def __virtual__():
    """
    Only work when cabal-install is installed.
    """
    have_cabal = salt.utils.path.which("cabal") is not None
    have_ghc_pkg = salt.utils.path.which("ghc-pkg") is not None
    if have_cabal and have_ghc_pkg:
        return True
    return (False, "cabal or ghc-pkg commands not found")
def _parse_pkg_string(pkg):
"""
Parse pkg string and return a tuple of package name, separator, and
package version.
Cabal support install package with following format:
* foo-1.0
* foo < 1.2
* foo > 1.3
For the sake of simplicity only the first form is supported,
support for other forms can be added later.
"""
pkg_name, separator, pkg_ver = pkg.partition("-")
return (pkg_name.strip(), separator, pkg_ver.strip())
def installed(name, pkgs=None, user=None, install_global=False, env=None):
    """
    Verify that the given package is installed and is at the correct version
    (if specified).

    .. code-block:: yaml

        ShellCheck-0.3.5:
          cabal:
            - installed:

    name
        The package to install

    pkgs
        A list of packages to install; when given, ``name`` is ignored for
        package selection.

    user
        The user to run cabal install with

    install_global
        Install package globally instead of locally

    env
        A list of environment variables to be set prior to execution. The
        format is the same as the :py:func:`cmd.run <salt.states.cmd.run>`.
        state function.
    """
    ret = {"name": name, "result": None, "comment": "", "changes": {}}

    # Refresh the cabal package index first so version checks are current.
    try:
        call = __salt__["cabal.update"](user=user, env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret["result"] = False
        ret["comment"] = "Could not run cabal update {}".format(err)
        return ret

    if pkgs is not None:
        pkg_list = pkgs
    else:
        pkg_list = [name]

    try:
        installed_pkgs = __salt__["cabal.list"](user=user, installed=True, env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret["result"] = False
        ret["comment"] = "Error looking up '{}': {}".format(name, err)
        return ret

    pkgs_satisfied = []
    pkgs_to_install = []

    # Partition requested packages into already-satisfied and to-install.
    for pkg in pkg_list:
        pkg_name, _, pkg_ver = _parse_pkg_string(pkg)

        if pkg_name not in installed_pkgs:
            pkgs_to_install.append(pkg)
        else:
            if pkg_ver:  # version is specified
                if installed_pkgs[pkg_name] != pkg_ver:
                    pkgs_to_install.append(pkg)
                else:
                    pkgs_satisfied.append(pkg)
            else:
                pkgs_satisfied.append(pkg)

    if __opts__["test"]:
        ret["result"] = None

        comment_msg = []
        if pkgs_to_install:
            comment_msg.append(
                "Packages(s) '{}' are set to be installed".format(
                    ", ".join(pkgs_to_install)
                )
            )

        if pkgs_satisfied:
            comment_msg.append(
                "Packages(s) '{}' satisfied by {}".format(
                    ", ".join(pkg_list), ", ".join(pkgs_satisfied)
                )
            )

        ret["comment"] = ". ".join(comment_msg)
        return ret

    if not pkgs_to_install:
        ret["result"] = True
        ret["comment"] = "Packages(s) '{}' satisfied by {}".format(
            ", ".join(pkg_list), ", ".join(pkgs_satisfied)
        )
        return ret

    try:
        call = __salt__["cabal.install"](
            pkgs=pkg_list, user=user, install_global=install_global, env=env
        )
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret["result"] = False
        ret["comment"] = "Error installing '{}': {}".format(", ".join(pkg_list), err)
        return ret

    if call and isinstance(call, dict):
        ret["result"] = True
        ret["changes"] = {"old": [], "new": pkgs_to_install}
        ret["comment"] = "Packages(s) '{}' successfully installed".format(
            ", ".join(pkgs_to_install)
        )
    else:
        ret["result"] = False
        ret["comment"] = "Could not install packages(s) '{}'".format(
            ", ".join(pkg_list)
        )

    return ret
def removed(name, user=None, env=None):
    """
    Verify that given package is not installed.

    name
        The package to uninstall

    user
        The user to run ``ghc-pkg unregister`` with

    env
        A list of environment variables to be set prior to execution (same
        format as in :py:func:`cmd.run <salt.states.cmd.run>`).
    """
    ret = {"name": name, "result": None, "comment": "", "changes": {}}

    try:
        installed_pkgs = __salt__["cabal.list"](user=user, installed=True, env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret["result"] = False
        ret["comment"] = "Error looking up '{}': {}".format(name, err)
        # Bug fix: bail out here.  Previously execution fell through to the
        # membership check below and raised a NameError because
        # ``installed_pkgs`` was never assigned.
        return ret

    if name not in installed_pkgs:
        ret["result"] = True
        ret["comment"] = "Package '{}' is not installed".format(name)
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Package '{}' is set to be removed".format(name)
        return ret

    if __salt__["cabal.uninstall"](pkg=name, user=user, env=env):
        ret["result"] = True
        ret["changes"][name] = "Removed"
        ret["comment"] = "Package '{}' was successfully removed".format(name)
    else:
        ret["result"] = False
        ret["comment"] = "Error removing package '{}'".format(name)

    return ret
import logging
from functools import wraps
log = logging.getLogger()
def resultdecorator(function):
    """
    Decorator for junos state functions: after the wrapped function runs,
    copy ``ret["changes"]["out"]`` into ``ret["result"]`` so the state
    outcome mirrors the junos execution-module's own success flag.
    """

    @wraps(function)
    def _inner(*args, **kwargs):
        state_ret = function(*args, **kwargs)
        state_ret["result"] = state_ret["changes"]["out"]
        return state_ret

    return _inner
@resultdecorator
def rpc(name, dest=None, format="xml", args=None, **kwargs):
    """
    Executes the given rpc. The returned data can be stored in a file
    by specifying the destination path with dest as an argument

    .. code-block:: yaml

        get-interface-information:
          junos.rpc:
            - dest: /home/user/rpc.log
            - interface_name: lo0

        fetch interface information with terse:
          junos.rpc:
            - name: get-interface-information
            - terse: True

    Parameters:
      Required
        * name:
          The rpc to be executed. (default = None)
      Optional
        * dest:
          Destination file where the rpc output is stored. (default = None)
          Note that the file will be stored on the proxy minion. To push the
          files to the master use the salt's following execution module:
          :py:func:`cp.push <salt.modules.cp.push>`
        * format:
          The format in which the rpc reply must be stored in file specified
          in the dest (used only when dest is specified) (default = xml)
        * kwargs: keyworded arguments taken by rpc call like-
            * timeout: 30
              Set NETCONF RPC timeout. Can be used for commands which
              take a while to execute. (default= 30 seconds)
            * filter:
              Only to be used with 'get-config' rpc to get specific
              configuration.
            * terse:
              Amount of information you want.
            * interface_name:
              Name of the interface whose information you want.
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    # Forward ``args`` only when the caller supplied it, so the execution
    # module's own default handling of a missing ``args`` is preserved.
    if args is None:
        ret["changes"] = __salt__["junos.rpc"](name, dest=dest, format=format, **kwargs)
    else:
        ret["changes"] = __salt__["junos.rpc"](
            name, dest=dest, format=format, args=args, **kwargs
        )
    return ret
@resultdecorator
def set_hostname(name, **kwargs):
    """
    Changes the hostname of the device.

    .. code-block:: yaml

        device_name:
          junos.set_hostname:
            - comment: "Host-name set via saltstack."

    Parameters:
      Required
        * name: The name to be set. (default = None)
      Optional
        * kwargs: Keyworded arguments which can be provided like-
            * timeout:
              Set NETCONF RPC timeout. Can be used for commands
              which take a while to execute. (default = 30 seconds)
            * comment:
              Provide a comment to the commit. (default = None)
            * confirm:
              Provide time in minutes for commit confirmation. If this
              option is specified, the commit will be rollbacked in the
              given time unless the commit is confirmed.
    """
    # Delegate to the junos execution module; the decorator derives the
    # state's result from the module's changes["out"] flag.
    changes = __salt__["junos.set_hostname"](name, **kwargs)
    return {"name": name, "changes": changes, "result": True, "comment": ""}
@resultdecorator
def commit(name, **kwargs):
    """
    Commit the changes loaded into the candidate configuration.

    .. code-block:: yaml

        commit the changes:
          junos.commit:
            - confirm: 10

    kwargs
        Optional keyword arguments, for example:

        timeout
            NETCONF RPC timeout in seconds for long-running commands.
            (default = 30)
        comment
            Comment attached to the commit. (default = None)
        confirm
            Commit-confirm window in minutes; the commit is rolled back
            unless it is confirmed within that time.
        sync
            On dual control plane systems, copy the candidate configuration
            to the other control plane, check syntax, and commit on both
            Routing Engines. (default = False)
        force_sync
            On dual control plane systems, force the candidate configuration
            to be copied to the other control plane.
        full
            When True, require all daemons to check and evaluate the new
            configuration.
        detail
            When True, return commit detail.
    """
    # `name` is only the state ID; the commit itself takes no target.
    changes = __salt__["junos.commit"](**kwargs)
    return {"name": name, "changes": changes, "result": True, "comment": ""}
@resultdecorator
def rollback(name, d_id, **kwargs):
    """
    Roll back the committed changes.

    .. code-block:: yaml

        rollback the changes:
          junos.rollback:
            - id: 5

    d_id
        The rollback id value [0-49]. (default = 0)
        (This parameter cannot be named ``id`` because that conflicts with
        the state compiler's internal id.)

    kwargs
        Optional keyword arguments, for example:

        timeout
            NETCONF RPC timeout in seconds for long-running commands.
            (default = 30)
        comment
            Comment attached to the commit. (default = None)
        confirm
            Commit-confirm window in minutes; the commit is rolled back
            unless it is confirmed within that time.
        diffs_file
            Path to the file where any diffs will be written.
            (default = None)
    """
    changes = __salt__["junos.rollback"](d_id=d_id, **kwargs)
    return {"name": name, "changes": changes, "result": True, "comment": ""}
@resultdecorator
def diff(name, d_id=0, **kwargs):
    """
    .. versionchanged:: 3001

    Get the difference between the candidate and the current configuration.

    .. code-block:: yaml

        get the diff:
          junos.diff:
            - d_id: 10

    d_id
        The rollback diff id (d_id) value [0-49]. (default = 0)
    """
    # The execution module still takes the argument as ``id``.
    changes = __salt__["junos.diff"](id=d_id, **kwargs)
    return {"name": name, "changes": changes, "result": True, "comment": ""}
@resultdecorator
def cli(name, **kwargs):
    """
    Execute CLI commands on the device and return the output.

    .. code-block:: yaml

        show version:
          junos.cli:
            - format: xml

        get software version of device:
          junos.cli:
            - name: show version
            - format: text
            - dest: /home/user/show_version.log

    name (required)
        The command to execute on the Junos CLI.

    kwargs
        Optional keyword arguments, for example:

        format
            Output format, ``text`` or ``xml``. (default = 'text')
        timeout
            NETCONF RPC timeout in seconds for long-running commands.
            (default = 30)
        dest
            Destination file in which the CLI output is stored.
            (default = None)
    """
    changes = __salt__["junos.cli"](name, **kwargs)
    return {"name": name, "changes": changes, "result": True, "comment": ""}
@resultdecorator
def shutdown(name, **kwargs):
    """
    Shut down the device.

    .. code-block:: yaml

        shut the device:
          junos.shutdown:
            - in_min: 10

    kwargs
        Optional keyword arguments:

        reboot
            Reboot instead of shutting down. (default = False)
        at
            Time at which to reboot. (Only used with reboot=yes.)
        in_min
            Delay in minutes before shutdown.
    """
    changes = __salt__["junos.shutdown"](**kwargs)
    return {"name": name, "changes": changes, "result": True, "comment": ""}
@resultdecorator
def install_config(name, **kwargs):
    """
    Load and commit the configuration provided.

    .. code-block:: yaml

        Install the mentioned config:
          junos.install_config:
            - name: salt://configs/interface.set
            - timeout: 100
            - diffs_file: '/var/log/diff'

    .. code-block:: yaml

        Install the mentioned config:
          junos.install_config:
            - path: salt://configs/interface.set
            - timeout: 100
            - template_vars:
                interface_name: lo0
                description: Creating interface via SaltStack.

    name
        Path where the configuration/template file is present. A ``*.conf``
        extension is treated as text format, ``*.xml`` as XML format and
        ``*.set`` as Junos OS ``set`` commands.

    template_vars
        Dictionary of data for the jinja variables present in the jinja
        template.

    timeout : 30
        NETCONF RPC timeout in seconds for long-running commands.

    overwrite : False
        Set to ``True`` to have this file completely replace the
        configuration file (sets action to override).

        .. note:: This option cannot be used if **format** is "set".

    merge : False
        If ``True``, set the load-config action to merge; the default
        load-config action is 'replace' for xml/json/text config.

    comment
        Comment attached to the commit. (default = None)

    confirm
        Commit-confirm window in minutes; the commit is rolled back unless
        it is confirmed within that time.

    diffs_file
        Path to the file where the diff (difference between the old and the
        committed configuration) will be stored.

        .. note::
            The file will be stored on the proxy minion. To push the files
            to the master use :py:func:`cp.push <salt.modules.cp.push>`.
    """
    changes = __salt__["junos.install_config"](name, **kwargs)
    return {"name": name, "changes": changes, "result": True, "comment": ""}
@resultdecorator
def zeroize(name):
    """
    Reset the device to default factory settings.

    .. code-block:: yaml

        reset my device:
          junos.zeroize

    name
        State ID; can be anything (the device itself is zeroized).
    """
    changes = __salt__["junos.zeroize"]()
    return {"name": name, "changes": changes, "result": True, "comment": ""}
@resultdecorator
def install_os(name, **kwargs):
    """
    Install the given image on the device. After the installation completes
    the device is rebooted if ``reboot=True`` is passed as a keyword
    argument.

    .. code-block:: yaml

        salt://images/junos_image.tgz:
          junos.install_os:
            - timeout: 100
            - reboot: True

    name (required)
        Path on the proxy minion where the image file is present.

    kwargs
        Optional keyword arguments, for example:

        timeout
            NETCONF RPC timeout in seconds for long-running RPCs.
            (default = 30)
        reboot
            Whether to reboot after installation. (default = False)
        no_copy
            When True the software package will not be SCP'd to the device.
            (default = False)
    """
    changes = __salt__["junos.install_os"](name, **kwargs)
    return {"name": name, "changes": changes, "result": True, "comment": ""}
@resultdecorator
def file_copy(name, dest=None, **kwargs):
    """
    Copy a file from the local device to the junos device.

    .. code-block:: yaml

        /home/m2/info.txt:
          junos.file_copy:
            - dest: info_copy.txt

    name (required)
        The source path where the file is kept.

    dest (required)
        The destination path where the file will be copied.
    """
    changes = __salt__["junos.file_copy"](name, dest, **kwargs)
    return {"name": name, "changes": changes, "result": True, "comment": ""}
@resultdecorator
def lock(name):
    """
    Attempt an exclusive lock on the candidate configuration. This is a
    non-blocking call.

    .. note::
        Any user who takes the lock must also unlock the configuration.
        Ensure :py:func:`unlock <salt.states.junos.unlock>` is called in
        the same orchestration run in which the lock is taken.

    .. code-block:: yaml

        lock the config:
          junos.lock
    """
    changes = __salt__["junos.lock"]()
    return {"name": name, "changes": changes, "result": True, "comment": ""}
@resultdecorator
def unlock(name):
    """
    Unlock the candidate configuration.

    .. code-block:: yaml

        unlock the config:
          junos.unlock
    """
    changes = __salt__["junos.unlock"]()
    return {"name": name, "changes": changes, "result": True, "comment": ""}
@resultdecorator
def load(name, **kwargs):
    """
    Load the configuration provided onto the junos device.

    .. code-block:: yaml

        Install the mentioned config:
          junos.load:
            - name: salt://configs/interface.set

    .. code-block:: yaml

        Install the mentioned config:
          junos.load:
            - name: salt://configs/interface.set
            - template_vars:
                interface_name: lo0
                description: Creating interface via SaltStack.

    Sample template:

    .. code-block:: bash

        set interfaces {{ interface_name }} unit 0

    name
        Path where the configuration/template file is present. A ``*.conf``
        extension is treated as text format, ``*.xml`` as XML format and
        ``*.set`` as Junos OS ``set`` commands.

    overwrite : False
        Set to ``True`` to have this file completely replace the
        configuration file.

        .. note:: This option cannot be used if **format** is "set".

    merge : False
        If ``True``, set the load-config action to merge; the default
        load-config action is 'replace' for xml/json/text config.

    update : False
        Compare a complete loaded configuration against the candidate
        configuration. For each hierarchy level or configuration object
        that differs between the two, the loaded version replaces the
        candidate version. On commit, only system processes affected by
        the changed elements parse the new configuration. Supported from
        PyEZ 2.1. (default = False)

    template_vars
        Variables passed to the template engine in addition to those in
        __pillar__, __opts__, __grains__, etc. Reference them in templates
        as ``{{ template_vars["var_name"] }}``.
    """
    changes = __salt__["junos.load"](name, **kwargs)
    return {"name": name, "changes": changes, "result": True, "comment": ""}
@resultdecorator
def commit_check(name):
    """
    Perform a commit check on the configuration.

    .. code-block:: yaml

        perform commit check:
          junos.commit_check
    """
    changes = __salt__["junos.commit_check"]()
    return {"name": name, "changes": changes, "result": True, "comment": ""}
@resultdecorator
def get_table(name, table, table_file, **kwargs):
    """
    .. versionadded:: 3001

    Retrieve data from a Junos device using Tables/Views.

    .. code-block:: yaml

        get route details:
          junos.get_table:
            - table: RouteTable
            - table_file: routes.yml

        get interface details:
          junos.get_table:
            - table: EthPortTable
            - table_file: ethport.yml
            - table_args:
                interface_name: ge-0/0/0

    name (required)
        Task definition.

    table (required)
        Name of the PyEZ Table.

    table_file (required)
        YAML file that defines the table given in ``table``.

    kwargs
        Optional keyword arguments:

        path
            Location of the YAML file; defaults to the op directory in
            jnpr.junos.op.
        target
            FPC target, if the command must run on an FPC.
        key
            Overwrite the key provided in the YAML file.
        key_items
            Select only the given key items.
        filters
            Select only the given filter for the dictionary from columns.
        template_args
            Key/value pairs used to render the Jinja template command.
    """
    changes = __salt__["junos.get_table"](table, table_file, **kwargs)
    return {"name": name, "changes": changes, "result": True, "comment": ""}
import copy
import logging
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only load this state module when the boto_elbv2 execution module is
    available (i.e. the boto3 library is installed).
    """
    if "boto_elbv2.target_group_exists" not in __salt__:
        return (False, "The boto_elbv2 module cannot be loaded: boto3 library not found")
    return "boto_elbv2"
def create_target_group(
    name,
    protocol,
    port,
    vpc_id,
    region=None,
    key=None,
    keyid=None,
    profile=None,
    health_check_protocol="HTTP",
    health_check_port="traffic-port",
    health_check_path="/",
    health_check_interval_seconds=30,
    health_check_timeout_seconds=5,
    healthy_threshold_count=5,
    unhealthy_threshold_count=2,
    **kwargs
):
    """
    .. versionadded:: 2017.11.0

    Create the target group if it is not already present.

    name
        (string) - The name of the target group.
    protocol
        (string) - The protocol to use for routing traffic to the targets.
    port
        (int) - The port on which the targets receive traffic. This port is
        used unless a port override is specified when registering the
        traffic.
    vpc_id
        (string) - The identifier of the virtual private cloud (VPC).
    health_check_protocol
        (string) - The protocol the load balancer uses when performing
        health checks on targets. The default is the HTTP protocol.
    health_check_port
        (string) - The port the load balancer uses when performing health
        checks on targets. The default is 'traffic-port', which indicates
        the port on which each target receives traffic from the load
        balancer.
    health_check_path
        (string) - The ping path that is the destination on the targets for
        health checks. The default is /.
    health_check_interval_seconds
        (integer) - The approximate amount of time, in seconds, between
        health checks of an individual target. The default is 30 seconds.
    health_check_timeout_seconds
        (integer) - The amount of time, in seconds, during which no
        response from a target means a failed health check. The default is
        5 seconds.
    healthy_threshold_count
        (integer) - The number of consecutive health check successes
        required before considering an unhealthy target healthy. The
        default is 5.
    unhealthy_threshold_count
        (integer) - The number of consecutive health check failures
        required before considering a target unhealthy. The default is 2.

    returns
        (bool) - True on success, False on failure.

    CLI Example:

    .. code-block:: yaml

        create-target:
          boto_elb2.create_targets_group:
            - name: myALB
            - protocol: https
            - port: 443
            - vpc_id: myVPC
    """
    ret = {"name": name, "result": None, "comment": "", "changes": {}}
    # Nothing to do if the target group already exists.
    if __salt__["boto_elbv2.target_group_exists"](name, region, key, keyid, profile):
        ret["result"] = True
        ret["comment"] = "Target Group {} already exists".format(name)
        return ret
    # Dry run: report the pending creation without touching AWS.
    if __opts__["test"]:
        ret["comment"] = "Target Group {} will be created".format(name)
        return ret
    created = __salt__["boto_elbv2.create_target_group"](
        name,
        protocol,
        port,
        vpc_id,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
        health_check_protocol=health_check_protocol,
        health_check_port=health_check_port,
        health_check_path=health_check_path,
        health_check_interval_seconds=health_check_interval_seconds,
        health_check_timeout_seconds=health_check_timeout_seconds,
        healthy_threshold_count=healthy_threshold_count,
        unhealthy_threshold_count=unhealthy_threshold_count,
        **kwargs
    )
    # The execution module may return a non-empty string on failure, which
    # is truthy, so compare against True explicitly.
    if created is True:
        ret["result"] = True
        ret["changes"]["target_group"] = name
        ret["comment"] = "Target Group {} created".format(name)
    else:
        ret["result"] = False
        ret["comment"] = "Target Group {} creation failed".format(name)
    return ret
def delete_target_group(name, region=None, key=None, keyid=None, profile=None):
    """
    Delete the target group.

    name
        (string) - The Amazon Resource Name (ARN) of the resource.

    returns
        (bool) - True on success, False on failure.

    CLI Example:

    .. code-block:: bash

        check-target:
          boto_elb2.delete_targets_group:
            - name: myALB
            - protocol: https
            - port: 443
            - vpc_id: myVPC
    """
    ret = {"name": name, "result": None, "comment": "", "changes": {}}
    exists = __salt__["boto_elbv2.target_group_exists"](
        name, region, key, keyid, profile
    )
    # Already absent: nothing to do.
    if not exists:
        ret["result"] = True
        ret["comment"] = "Target Group {} does not exists".format(name)
        return ret
    # Dry run: report the pending deletion without touching AWS.
    if __opts__["test"]:
        ret["comment"] = "Target Group {} will be deleted".format(name)
        return ret
    deleted = __salt__["boto_elbv2.delete_target_group"](
        name, region=region, key=key, keyid=keyid, profile=profile
    )
    if deleted:
        ret["result"] = True
        ret["changes"]["target_group"] = name
        ret["comment"] = "Target Group {} deleted".format(name)
    else:
        ret["result"] = False
        ret["comment"] = "Target Group {} deletion failed".format(name)
    return ret
def targets_registered(
    name, targets, region=None, key=None, keyid=None, profile=None, **kwargs
):
    """
    .. versionadded:: 2017.7.0
    Add targets to an Application Load Balancer target group. This state will not remove targets.
    name
        The ARN of the Application Load Balancer Target Group to add targets to.
    targets
        A list of target IDs or a string of a single target that this target group should
        distribute traffic to.
    .. code-block:: yaml
        add-targets:
          boto_elb.targets_registered:
            - name: arn:myloadbalancer
            - targets:
              - instance-id1
              - instance-id2
    """
    ret = {"name": name, "result": None, "comment": "", "changes": {}}
    if __salt__["boto_elbv2.target_group_exists"](name, region, key, keyid, profile):
        # Current per-target health, used both for idempotence checks and
        # as the "old" side of the changes dict.
        health = __salt__["boto_elbv2.describe_target_health"](
            name, region=region, key=key, keyid=keyid, profile=profile
        )
        failure = False
        changes = False
        # Simulated post-change health, only used in test mode.
        newhealth_mock = copy.copy(health)
        # Accept a single target given as a bare string.
        if isinstance(targets, str):
            targets = [targets]
        for target in targets:
            # A target that is present and not draining is already
            # registered; nothing to do for it.
            if target in health and health.get(target) != "draining":
                ret["comment"] = ret[
                    "comment"
                ] + "Target/s {} already registered and is {}.\n".format(
                    target, health[target]
                )
                ret["result"] = True
            else:
                if __opts__["test"]:
                    changes = True
                    newhealth_mock.update({target: "initial"})
                else:
                    # NOTE(review): the full ``targets`` list is passed here,
                    # not just ``target`` — registration appears idempotent,
                    # but confirm this re-registration of all targets per
                    # missing target is intended.
                    state = __salt__["boto_elbv2.register_targets"](
                        name,
                        targets,
                        region=region,
                        key=key,
                        keyid=keyid,
                        profile=profile,
                    )
                    if state:
                        changes = True
                        ret["result"] = True
                    else:
                        ret["comment"] = "Target Group {} failed to add targets".format(
                            name
                        )
                        failure = True
        # Any single registration failure marks the whole state failed.
        if failure:
            ret["result"] = False
        if changes:
            ret["changes"]["old"] = health
            if __opts__["test"]:
                ret["comment"] = "Target Group {} would be changed".format(name)
                ret["result"] = None
                ret["changes"]["new"] = newhealth_mock
            else:
                # Re-read health after registration to report real new state.
                ret["comment"] = "Target Group {} has been changed".format(name)
                newhealth = __salt__["boto_elbv2.describe_target_health"](
                    name, region=region, key=key, keyid=keyid, profile=profile
                )
                ret["changes"]["new"] = newhealth
        return ret
    else:
        ret["comment"] = "Could not find target group {}".format(name)
    return ret
def targets_deregistered(
    name, targets, region=None, key=None, keyid=None, profile=None, **kwargs
):
    """
    Remove targets to an Application Load Balancer target group.
    name
        The ARN of the Application Load Balancer Target Group to remove targets from.
    targets
        A list of target IDs or a string of a single target registered to the target group to be removed
    .. versionadded:: 2017.7.0
    .. code-block:: yaml
        remove-targets:
          boto_elb.targets_deregistered:
            - name: arn:myloadbalancer
            - targets:
              - instance-id1
              - instance-id2
    """
    ret = {"name": name, "result": None, "comment": "", "changes": {}}
    if __salt__["boto_elbv2.target_group_exists"](name, region, key, keyid, profile):
        # Current per-target health, used both for idempotence checks and
        # as the "old" side of the changes dict.
        health = __salt__["boto_elbv2.describe_target_health"](
            name, region=region, key=key, keyid=keyid, profile=profile
        )
        failure = False
        changes = False
        # Simulated post-change health, only used in test mode.
        newhealth_mock = copy.copy(health)
        # Accept a single target given as a bare string.
        if isinstance(targets, str):
            targets = [targets]
        for target in targets:
            # A target that is absent or already draining counts as
            # deregistered; nothing to do for it.
            if target not in health or health.get(target) == "draining":
                ret["comment"] = ret[
                    "comment"
                ] + "Target/s {} already deregistered\n".format(target)
                ret["result"] = True
            else:
                if __opts__["test"]:
                    changes = True
                    newhealth_mock.update({target: "draining"})
                else:
                    # NOTE(review): the full ``targets`` list is passed here,
                    # not just ``target`` — confirm deregistering the whole
                    # list per still-registered target is intended.
                    state = __salt__["boto_elbv2.deregister_targets"](
                        name,
                        targets,
                        region=region,
                        key=key,
                        keyid=keyid,
                        profile=profile,
                    )
                    if state:
                        changes = True
                        ret["result"] = True
                    else:
                        ret[
                            "comment"
                        ] = "Target Group {} failed to remove targets".format(name)
                        failure = True
        # Any single deregistration failure marks the whole state failed.
        if failure:
            ret["result"] = False
        if changes:
            ret["changes"]["old"] = health
            if __opts__["test"]:
                ret["comment"] = "Target Group {} would be changed".format(name)
                ret["result"] = None
                ret["changes"]["new"] = newhealth_mock
            else:
                # Re-read health after deregistration to report real new state.
                ret["comment"] = "Target Group {} has been changed".format(name)
                newhealth = __salt__["boto_elbv2.describe_target_health"](
                    name, region=region, key=key, keyid=keyid, profile=profile
                )
                ret["changes"]["new"] = newhealth
        return ret
    else:
        ret["comment"] = "Could not find target group {}".format(name)
    return ret
import copy
import salt.utils.json
from salt.utils.dictdiffer import DictDiffer
def __virtual__():
    """Only load this state module when the grafana4 execution module is
    available."""
    if "grafana4.get_dashboard" not in __salt__:
        return (False, "grafana4 module could not be loaded")
    return True
# Pillar keys holding default (inherited-by-everything) definitions. A
# missing default pillar is silently ignored, unlike user-supplied keys,
# which produce a warning in the state return.
_DEFAULT_DASHBOARD_PILLAR = "grafana_dashboards:default"
_DEFAULT_PANEL_PILLAR = "grafana_panels:default"
_DEFAULT_ROW_PILLAR = "grafana_rows:default"
# Pillar key listing row titles that should be pinned to the top of a
# dashboard (matched case-insensitively).
_PINNED_ROWS_PILLAR = "grafana_pinned_rows"
def present(
    name,
    base_dashboards_from_pillar=None,
    base_panels_from_pillar=None,
    base_rows_from_pillar=None,
    dashboard=None,
    orgname=None,
    profile="grafana",
):
    """
    Ensure the grafana dashboard exists and is managed.
    name
        Name of the grafana dashboard.
    base_dashboards_from_pillar
        A pillar key that contains a list of dashboards to inherit from
    base_panels_from_pillar
        A pillar key that contains a list of panels to inherit from
    base_rows_from_pillar
        A pillar key that contains a list of rows to inherit from
    dashboard
        A dict that defines a dashboard that should be managed.
    orgname
        Name of the organization in which the dashboard should be present.
    profile
        Configuration profile used to connect to the Grafana instance.
        Default is 'grafana'.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    dashboard = dashboard or {}
    # A profile given as a string names a config option holding the
    # connection details.
    if isinstance(profile, str):
        profile = __salt__["config.option"](profile)
    # Add pillar keys for default configuration
    base_dashboards_from_pillar = [_DEFAULT_DASHBOARD_PILLAR] + (
        base_dashboards_from_pillar or []
    )
    base_panels_from_pillar = [_DEFAULT_PANEL_PILLAR] + (base_panels_from_pillar or [])
    base_rows_from_pillar = [_DEFAULT_ROW_PILLAR] + (base_rows_from_pillar or [])
    # Build out all dashboard fields
    new_dashboard = _inherited_dashboard(dashboard, base_dashboards_from_pillar, ret)
    if "title" not in new_dashboard:
        new_dashboard["title"] = name
    # Resolve inheritance for every row, then for every panel in each row.
    rows = new_dashboard.get("rows", [])
    for i, row in enumerate(rows):
        rows[i] = _inherited_row(row, base_rows_from_pillar, ret)
    for row in rows:
        panels = row.get("panels", [])
        for i, panel in enumerate(panels):
            panels[i] = _inherited_panel(panel, base_panels_from_pillar, ret)
    # Normalize layout, IDs and annotations on the fully-merged dashboard.
    _auto_adjust_panel_spans(new_dashboard)
    _ensure_panel_ids(new_dashboard)
    _ensure_annotations(new_dashboard)
    # Create dashboard if it does not exist
    old_dashboard = __salt__["grafana4.get_dashboard"](name, orgname, profile)
    if not old_dashboard:
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Dashboard {} is set to be created.".format(name)
            return ret
        response = __salt__["grafana4.create_update_dashboard"](
            dashboard=new_dashboard, overwrite=True, profile=profile
        )
        if response.get("status") == "success":
            ret["comment"] = "Dashboard {} created.".format(name)
            ret["changes"]["new"] = "Dashboard {} created.".format(name)
        else:
            ret["result"] = False
            ret["comment"] = "Failed to create dashboard {}, response={}".format(
                name, response
            )
        return ret
    # Add unmanaged rows to the dashboard. They appear at the top if they are
    # marked as pinned. They appear at the bottom otherwise.
    managed_row_titles = [row.get("title") for row in new_dashboard.get("rows", [])]
    new_rows = new_dashboard.get("rows", [])
    for old_row in old_dashboard.get("rows", []):
        if old_row.get("title") not in managed_row_titles:
            new_rows.append(copy.deepcopy(old_row))
    _ensure_pinned_rows(new_dashboard)
    # Re-number IDs now that unmanaged rows have been appended.
    _ensure_panel_ids(new_dashboard)
    # Update dashboard if it differs
    dashboard_diff = DictDiffer(_cleaned(new_dashboard), _cleaned(old_dashboard))
    updated_needed = (
        dashboard_diff.changed() or dashboard_diff.added() or dashboard_diff.removed()
    )
    if updated_needed:
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Dashboard {} is set to be updated, changes={}".format(
                name,
                salt.utils.json.dumps(
                    _dashboard_diff(_cleaned(new_dashboard), _cleaned(old_dashboard)),
                    indent=4,
                ),
            )
            return ret
        response = __salt__["grafana4.create_update_dashboard"](
            dashboard=new_dashboard, overwrite=True, profile=profile
        )
        if response.get("status") == "success":
            # Re-read the stored dashboard so the reported diff reflects
            # what the server actually persisted.
            updated_dashboard = __salt__["grafana4.get_dashboard"](
                name, orgname, profile
            )
            dashboard_diff = DictDiffer(
                _cleaned(updated_dashboard), _cleaned(old_dashboard)
            )
            ret["comment"] = "Dashboard {} updated.".format(name)
            ret["changes"] = _dashboard_diff(
                _cleaned(new_dashboard), _cleaned(old_dashboard)
            )
        else:
            ret["result"] = False
            ret["comment"] = "Failed to update dashboard {}, response={}".format(
                name, response
            )
        return ret
    ret["comment"] = "Dashboard present"
    return ret
def absent(name, orgname=None, profile="grafana"):
    """
    Ensure the named grafana dashboard is absent.

    name
        Name of the grafana dashboard.
    orgname
        Name of the organization in which the dashboard should be present.
    profile
        Configuration profile used to connect to the Grafana instance.
        Default is 'grafana'.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # A profile given as a string names a config option holding the
    # connection details.
    if isinstance(profile, str):
        profile = __salt__["config.option"](profile)
    existing = __salt__["grafana4.get_dashboard"](name, orgname, profile)
    if not existing:
        ret["comment"] = "Dashboard absent"
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Dashboard {} is set to be deleted.".format(name)
        return ret
    __salt__["grafana4.delete_dashboard"](name, profile=profile)
    ret["comment"] = "Dashboard {} deleted.".format(name)
    ret["changes"]["new"] = "Dashboard {} deleted.".format(name)
    return ret
# Fields stripped by _cleaned() before dashboards are compared: these are
# set or rewritten by the Grafana server and would otherwise cause
# spurious diffs between the desired and stored dashboards.
_IGNORED_DASHBOARD_FIELDS = [
    "id",
    "uid",
    "originalTitle",
    "version",
]
# No row-level fields are currently ignored; kept for symmetry with the
# other _IGNORED_* lists used by _cleaned().
_IGNORED_ROW_FIELDS = []
# Panel-level fields ignored during comparison.
_IGNORED_PANEL_FIELDS = [
    "grid",
    "mode",
    "tooltip",
]
# Target-level fields ignored during comparison.
_IGNORED_TARGET_FIELDS = [
    "textEditor",
]
def _cleaned(_dashboard):
    """Return a deep copy of the dashboard with server-managed fields
    removed, suitable for comparing desired vs. stored dashboards."""
    cleaned = copy.deepcopy(_dashboard)
    for field in _IGNORED_DASHBOARD_FIELDS:
        cleaned.pop(field, None)
    for row in cleaned.get("rows", []):
        for field in _IGNORED_ROW_FIELDS:
            row.pop(field, None)
        panels = row.get("panels", [])
        for index, panel in enumerate(panels):
            for field in _IGNORED_PANEL_FIELDS:
                panel.pop(field, None)
            for target in panel.get("targets", []):
                for field in _IGNORED_TARGET_FIELDS:
                    target.pop(field, None)
            # Drop empty/None values from the panel as well.
            row["panels"][index] = _stripped(panel)
    return cleaned
def _inherited_dashboard(dashboard, base_dashboards_from_pillar, ret):
    """Merge the dashboard with its pillar-defined parents.

    Earlier parents have lower precedence and the dashboard itself wins;
    tags are unioned across all sources. A missing non-default pillar key
    is reported once in ``ret['warnings']``.
    """
    sources = []
    for pillar_key in base_dashboards_from_pillar:
        parent = __salt__["pillar.get"](pillar_key)
        if parent:
            sources.append(parent)
        elif pillar_key != _DEFAULT_DASHBOARD_PILLAR:
            warnings = ret.setdefault("warnings", [])
            warning_message = 'Cannot find dashboard pillar "{}".'.format(pillar_key)
            if warning_message not in warnings:
                warnings.append(warning_message)
    sources.append(dashboard)
    merged = {}
    all_tags = set()
    for source in sources:
        all_tags.update(source.get("tags", []))
        merged.update(source)
    merged["tags"] = list(all_tags)
    return merged
def _inherited_row(row, base_rows_from_pillar, ret):
    """Merge the row with its pillar-defined parents.

    Earlier parents have lower precedence and the row itself wins. A
    missing non-default pillar key is reported once in ``ret['warnings']``.
    """
    sources = []
    for pillar_key in base_rows_from_pillar:
        parent = __salt__["pillar.get"](pillar_key)
        if parent:
            sources.append(parent)
        elif pillar_key != _DEFAULT_ROW_PILLAR:
            warnings = ret.setdefault("warnings", [])
            warning_message = 'Cannot find row pillar "{}".'.format(pillar_key)
            if warning_message not in warnings:
                warnings.append(warning_message)
    sources.append(row)
    merged = {}
    for source in sources:
        merged.update(source)
    return merged
def _inherited_panel(panel, base_panels_from_pillar, ret):
    """Merge the panel with its pillar-defined parents.

    Earlier parents have lower precedence and the panel itself wins. A
    missing non-default pillar key is reported once in ``ret['warnings']``.
    """
    sources = []
    for pillar_key in base_panels_from_pillar:
        parent = __salt__["pillar.get"](pillar_key)
        if parent:
            sources.append(parent)
        elif pillar_key != _DEFAULT_PANEL_PILLAR:
            warnings = ret.setdefault("warnings", [])
            warning_message = 'Cannot find panel pillar "{}".'.format(pillar_key)
            if warning_message not in warnings:
                warnings.append(warning_message)
    sources.append(panel)
    merged = {}
    for source in sources:
        merged.update(source)
    return merged
# Grafana lays panels out on a 12-unit grid; a "level" (horizontal band)
# is full at 12 span units.
_FULL_LEVEL_SPAN = 12
# Span assumed for panels without an explicit "span" while grouping them
# into levels.
_DEFAULT_PANEL_SPAN = 2.5
def _auto_adjust_panel_spans(dashboard):
    """Scale unspecified panel spans so each level fills the full width.

    Panels are first grouped into levels (groups that fit inside the
    12-unit grid, counting unspecified panels at the default span); within
    each level the panels without an explicit ``span`` then share the
    remaining width equally. The dashboard is modified in place.
    """
    for row in dashboard.get("rows", []):
        # Group panels into the levels Grafana would lay them out on.
        levels = [[]]
        for panel in row.get("panels", []):
            level = levels[-1]
            used = sum(p.get("span", _DEFAULT_PANEL_SPAN) for p in level)
            needed = panel.get("span", _DEFAULT_PANEL_SPAN)
            if used + needed > _FULL_LEVEL_SPAN:
                # This panel would overflow the level; start a new one.
                levels.append([panel])
            else:
                level.append(panel)
        # Distribute leftover width among panels without an explicit span.
        for level in levels:
            fixed = [p for p in level if "span" in p]
            flexible = [p for p in level if "span" not in p]
            if not flexible:
                continue
            remaining = _FULL_LEVEL_SPAN - sum(p["span"] for p in fixed)
            share = float(remaining) / len(flexible)
            for p in flexible:
                p["span"] = share
def _ensure_pinned_rows(dashboard):
    """Move rows whose titles appear in the ``grafana_pinned_rows`` pillar
    to the top of the dashboard, keeping relative order within each group.

    Titles are matched case-insensitively. The dashboard is modified in
    place; it is left untouched when no pinned titles are configured or it
    has no rows.
    """
    pinned_row_titles = __salt__["pillar.get"](_PINNED_ROWS_PILLAR)
    if not pinned_row_titles:
        return
    rows = dashboard.get("rows")
    if not rows:
        return
    # Compare titles case-insensitively against the pinned list.
    pinned_titles_lower = {title.lower() for title in pinned_row_titles}
    # Bug fix: the previous implementation deleted rows from the list while
    # iterating it with enumerate (which skips the element following each
    # deletion) and then rebound the local ``rows`` name instead of writing
    # the reordered list back, so pinned rows were removed from the
    # dashboard and never re-added.
    pinned_rows = [
        row for row in rows if row.get("title", "").lower() in pinned_titles_lower
    ]
    other_rows = [
        row for row in rows if row.get("title", "").lower() not in pinned_titles_lower
    ]
    dashboard["rows"] = pinned_rows + other_rows
def _ensure_panel_ids(dashboard):
"""Assign panels auto-incrementing IDs."""
panel_id = 1
for row in dashboard.get("rows", []):
for panel in row.get("panels", []):
panel["id"] = panel_id
panel_id += 1
def _ensure_annotations(dashboard):
"""Explode annotation_tags into annotations."""
if "annotation_tags" not in dashboard:
return
tags = dashboard["annotation_tags"]
annotations = {
"enable": True,
"list": [],
}
for tag in tags:
annotations["list"].append(
{
"datasource": "graphite",
"enable": False,
"iconColor": "#C0C6BE",
"iconSize": 13,
"lineColor": "rgba(255, 96, 96, 0.592157)",
"name": tag,
"showLine": True,
"tags": tag,
}
)
del dashboard["annotation_tags"]
dashboard["annotations"] = annotations
def _dashboard_diff(_new_dashboard, _old_dashboard):
    """Return a dictionary of changes between dashboards.

    The result has three sections -- ``dashboard`` (top-level key diff),
    ``rows`` (keyed by row title) and ``panels`` (keyed by panel id) --
    each holding ``added``/``removed``/``changed`` entries with empty
    values stripped via :func:`_stripped`.  Inputs are deep-copied, so
    neither dashboard is mutated.
    """
    diff = {}
    # Dashboard diff
    new_dashboard = copy.deepcopy(_new_dashboard)
    old_dashboard = copy.deepcopy(_old_dashboard)
    dashboard_diff = DictDiffer(new_dashboard, old_dashboard)
    diff["dashboard"] = _stripped(
        {
            "changed": list(dashboard_diff.changed()) or None,
            "added": list(dashboard_diff.added()) or None,
            "removed": list(dashboard_diff.removed()) or None,
        }
    )
    # Row diff
    # Rows carry no stable id, so they are matched by title; untitled rows
    # are ignored by this section of the diff.
    new_rows = new_dashboard.get("rows", [])
    old_rows = old_dashboard.get("rows", [])
    new_rows_by_title = {}
    old_rows_by_title = {}
    for row in new_rows:
        if "title" in row:
            new_rows_by_title[row["title"]] = row
    for row in old_rows:
        if "title" in row:
            old_rows_by_title[row["title"]] = row
    rows_diff = DictDiffer(new_rows_by_title, old_rows_by_title)
    diff["rows"] = _stripped(
        {
            "added": list(rows_diff.added()) or None,
            "removed": list(rows_diff.removed()) or None,
        }
    )
    for changed_row_title in rows_diff.changed():
        old_row = old_rows_by_title[changed_row_title]
        new_row = new_rows_by_title[changed_row_title]
        row_diff = DictDiffer(new_row, old_row)
        diff["rows"].setdefault("changed", {})
        diff["rows"]["changed"][changed_row_title] = _stripped(
            {
                "changed": list(row_diff.changed()) or None,
                "added": list(row_diff.added()) or None,
                "removed": list(row_diff.removed()) or None,
            }
        )
    # Panel diff
    # Panels are matched by their "id" field (auto-assigned elsewhere).
    old_panels_by_id = {}
    new_panels_by_id = {}
    for row in old_dashboard.get("rows", []):
        for panel in row.get("panels", []):
            if "id" in panel:
                old_panels_by_id[panel["id"]] = panel
    for row in new_dashboard.get("rows", []):
        for panel in row.get("panels", []):
            if "id" in panel:
                new_panels_by_id[panel["id"]] = panel
    panels_diff = DictDiffer(new_panels_by_id, old_panels_by_id)
    diff["panels"] = _stripped(
        {
            "added": list(panels_diff.added()) or None,
            "removed": list(panels_diff.removed()) or None,
        }
    )
    for changed_panel_id in panels_diff.changed():
        old_panel = old_panels_by_id[changed_panel_id]
        new_panel = new_panels_by_id[changed_panel_id]
        # NOTE(review): this rebinds ``panels_diff`` inside the loop that
        # was built from it.  It works because the loop's iterable was
        # obtained once at loop entry, but renaming the per-panel differ
        # to ``panel_diff`` (matching ``row_diff`` above) would be safer.
        panels_diff = DictDiffer(new_panel, old_panel)
        diff["panels"].setdefault("changed", {})
        diff["panels"]["changed"][changed_panel_id] = _stripped(
            {
                "changed": list(panels_diff.changed()) or None,
                "added": list(panels_diff.added()) or None,
                "removed": list(panels_diff.removed()) or None,
            }
        )
    return diff
def _stripped(d):
"""Strip falsey entries."""
ret = {}
for k, v in d.items():
if v:
ret[k] = v
return ret | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/grafana4_dashboard.py | 0.60778 | 0.191422 | grafana4_dashboard.py | pypi |
def __virtual__():
    """
    Only make this state available if the selinux module is available.
    """
    if "selinux.getenforce" not in __salt__:
        return (False, "selinux module could not be loaded")
    return "selinux"
def _refine_mode(mode):
"""
Return a mode value that is predictable
"""
mode = str(mode).lower()
if any([mode.startswith("e"), mode == "1", mode == "on"]):
return "Enforcing"
if any([mode.startswith("p"), mode == "0", mode == "off"]):
return "Permissive"
if any([mode.startswith("d")]):
return "Disabled"
return "unknown"
def _refine_value(value):
"""
Return a yes/no value, or None if the input is invalid
"""
value = str(value).lower()
if value in ("1", "on", "yes", "true"):
return "on"
if value in ("0", "off", "no", "false"):
return "off"
return None
def _refine_module_state(module_state):
"""
Return a predictable value, or allow us to error out
.. versionadded:: 2016.3.0
"""
module_state = str(module_state).lower()
if module_state in ("1", "on", "yes", "true", "enabled"):
return "enabled"
if module_state in ("0", "off", "no", "false", "disabled"):
return "disabled"
return "unknown"
def mode(name):
    """
    Verifies the mode SELinux is running in, can be set to enforcing,
    permissive, or disabled

    .. note::
        A change to or from disabled mode requires a system reboot. You will
        need to perform this yourself.

    name
        The mode to run SELinux in, permissive, enforcing, or disabled.
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    tmode = _refine_mode(name)
    if tmode == "unknown":
        ret["comment"] = "{} is not an accepted mode".format(name)
        return ret
    # Either the current mode in memory or a non-matching config value
    # will trigger setenforce
    mode = __salt__["selinux.getenforce"]()
    config = __salt__["selinux.getconfig"]()
    # Just making sure the oldmode reflects the thing that didn't match tmode
    if mode == tmode and mode != config and tmode != config:
        mode = config
    if mode == tmode:
        ret["result"] = True
        ret["comment"] = "SELinux is already in {} mode".format(tmode)
        return ret
    # The mode needs to change...
    if __opts__["test"]:
        ret["comment"] = "SELinux mode is set to be changed to {}".format(tmode)
        ret["result"] = None
        ret["changes"] = {"old": mode, "new": tmode}
        return ret
    oldmode, mode = mode, __salt__["selinux.setenforce"](tmode)
    # Switching to Disabled requires a reboot (see docstring note), so
    # success there is judged by the persisted config value rather than
    # the (unchanged) runtime mode.
    if mode == tmode or (
        tmode == "Disabled" and __salt__["selinux.getconfig"]() == tmode
    ):
        ret["result"] = True
        ret["comment"] = "SELinux has been set to {} mode".format(tmode)
        ret["changes"] = {"old": oldmode, "new": mode}
        return ret
    ret["comment"] = "Failed to set SELinux to {} mode".format(tmode)
    return ret
def boolean(name, value, persist=False):
    """
    Set up an SELinux boolean

    name
        The name of the boolean to set
    value
        The value to set on the boolean
    persist
        Defaults to False, set persist to true to make the boolean apply on a
        reboot
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    bools = __salt__["selinux.list_sebool"]()
    if name not in bools:
        ret["comment"] = "Boolean {} is not available".format(name)
        ret["result"] = False
        return ret
    rvalue = _refine_value(value)
    if rvalue is None:
        ret["comment"] = "{} is not a valid value for the boolean".format(value)
        ret["result"] = False
        return ret
    # "State" is the current value, "Default" the persisted one, as
    # reported by selinux.list_sebool.
    state = bools[name]["State"] == rvalue
    default = bools[name]["Default"] == rvalue
    if persist:
        # With persist, both the current and the persisted value must match.
        if state and default:
            ret["comment"] = "Boolean is in the correct state"
            return ret
    else:
        if state:
            ret["comment"] = "Boolean is in the correct state"
            return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Boolean {} is set to be changed to {}".format(name, rvalue)
        return ret
    ret["result"] = __salt__["selinux.setsebool"](name, rvalue, persist)
    if ret["result"]:
        ret["comment"] = "Boolean {} has been set to {}".format(name, rvalue)
        ret["changes"].update({"State": {"old": bools[name]["State"], "new": rvalue}})
        # Only report a Default change when it actually differed beforehand.
        if persist and not default:
            ret["changes"].update(
                {"Default": {"old": bools[name]["Default"], "new": rvalue}}
            )
        return ret
    ret["comment"] = "Failed to set the boolean {} to {}".format(name, rvalue)
    return ret
def module(name, module_state="Enabled", version="any", **opts):
    """
    Enable/Disable and optionally force a specific version for an SELinux module

    name
        The name of the module to control
    module_state
        Should the module be enabled or disabled?
    version
        Defaults to no preference, set to a specified value if required.
        Currently can only alert if the version is incorrect.
    install
        Setting to True installs module
    source
        Points to module source file, used only when install is True
    remove
        Setting to True removes module

    .. versionadded:: 2016.3.0
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    if opts.get("install", False) and opts.get("remove", False):
        ret["result"] = False
        ret["comment"] = "Cannot install and remove at the same time"
        return ret
    if opts.get("install", False):
        module_path = opts.get("source", name)
        ret = module_install(module_path)
        if not ret["result"]:
            return ret
    elif opts.get("remove", False):
        return module_remove(name)
    modules = __salt__["selinux.list_semod"]()
    if name not in modules:
        ret["comment"] = "Module {} is not available".format(name)
        ret["result"] = False
        return ret
    rmodule_state = _refine_module_state(module_state)
    if rmodule_state == "unknown":
        # Fixed: the second placeholder previously interpolated the
        # ``module`` function object itself instead of the module name.
        ret["comment"] = "{} is not a valid state for the {} module.".format(
            module_state, name
        )
        ret["result"] = False
        return ret
    if version != "any":
        installed_version = modules[name]["Version"]
        if not installed_version == version:
            ret["comment"] = (
                "Module version is {} and does not match "
                "the desired version of {} or you are "
                "using semodule >= 2.4".format(installed_version, version)
            )
            ret["result"] = False
            return ret
    current_module_state = _refine_module_state(modules[name]["Enabled"])
    if rmodule_state == current_module_state:
        ret["comment"] = "Module {} is in the desired state".format(name)
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Module {} is set to be toggled to {}".format(
            name, module_state
        )
        return ret
    if __salt__["selinux.setsemod"](name, rmodule_state):
        ret["comment"] = "Module {} has been set to {}".format(name, module_state)
        return ret
    ret["result"] = False
    ret["comment"] = "Failed to set the Module {} to {}".format(name, module_state)
    return ret
def module_install(name):
    """
    Installs custom SELinux module from given file

    name
        Path to file with module to install

    .. versionadded:: 2016.11.6
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    installed = __salt__["selinux.install_semod"](name)
    if not installed:
        ret["result"] = False
        ret["comment"] = "Failed to install module {}".format(name)
        return ret
    ret["comment"] = "Module {} has been installed".format(name)
    return ret
def module_remove(name):
    """
    Removes SELinux module

    name
        The name of the module to remove

    .. versionadded:: 2016.11.6
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    available = __salt__["selinux.list_semod"]()
    if name not in available:
        ret["result"] = False
        ret["comment"] = "Module {} is not available".format(name)
        return ret
    if not __salt__["selinux.remove_semod"](name):
        ret["result"] = False
        ret["comment"] = "Failed to remove module {}".format(name)
        return ret
    ret["comment"] = "Module {} has been removed".format(name)
    return ret
def fcontext_policy_present(
    name, sel_type, filetype="a", sel_user=None, sel_level=None
):
    """
    .. versionadded:: 2017.7.0

    Makes sure a SELinux policy for a given filespec (name), filetype
    and SELinux context type is present.

    name
        filespec of the file or directory. Regex syntax is allowed.
    sel_type
        SELinux context type. There are many.
    filetype
        The SELinux filetype specification. Use one of [a, f, d, c, b,
        s, l, p]. See also `man semanage-fcontext`. Defaults to 'a'
        (all files).
    sel_user
        The SELinux user.
    sel_level
        The SELinux MLS range.
    """
    ret = {"name": name, "result": False, "changes": {}, "comment": ""}
    new_state = {}
    old_state = {}
    # Human-readable form of the filetype id, used in changes/comments.
    filetype_str = __salt__["selinux.filetype_id_to_string"](filetype)
    current_state = __salt__["selinux.fcontext_get_policy"](
        name=name,
        filetype=filetype,
        sel_type=sel_type,
        sel_user=sel_user,
        sel_level=sel_level,
    )
    if not current_state:
        # No rule for this filespec/filetype yet: add a brand new one.
        new_state = {name: {"filetype": filetype_str, "sel_type": sel_type}}
        if __opts__["test"]:
            ret.update({"result": None})
        else:
            add_ret = __salt__["selinux.fcontext_add_policy"](
                name=name,
                filetype=filetype,
                sel_type=sel_type,
                sel_user=sel_user,
                sel_level=sel_level,
            )
            if add_ret["retcode"] != 0:
                ret.update({"comment": "Error adding new rule: {}".format(add_ret)})
            else:
                ret.update({"result": True})
    else:
        # A rule exists; only act when the recorded sel_type differs.
        if current_state["sel_type"] != sel_type:
            old_state.update({name: {"sel_type": current_state["sel_type"]}})
            new_state.update({name: {"sel_type": sel_type}})
        else:
            ret.update(
                {
                    "result": True,
                    "comment": 'SELinux policy for "{}" already present '.format(name)
                    + 'with specified filetype "{}" and sel_type "{}".'.format(
                        filetype_str, sel_type
                    ),
                }
            )
            return ret
        # Removal of current rule is not necessary, since adding a new rule for the same
        # filespec and the same filetype automatically overwrites
        if __opts__["test"]:
            ret.update({"result": None})
        else:
            change_ret = __salt__["selinux.fcontext_add_policy"](
                name=name,
                filetype=filetype,
                sel_type=sel_type,
                sel_user=sel_user,
                sel_level=sel_level,
            )
            if change_ret["retcode"] != 0:
                ret.update({"comment": "Error adding new rule: {}".format(change_ret)})
            else:
                ret.update({"result": True})
    # Only report changes when the rule was (or would be) actually touched.
    if ret["result"] and (new_state or old_state):
        ret["changes"].update({"old": old_state, "new": new_state})
    return ret
def fcontext_policy_absent(
    name, filetype="a", sel_type=None, sel_user=None, sel_level=None
):
    """
    .. versionadded:: 2017.7.0

    Makes sure an SELinux file context policy for a given filespec
    (name), filetype and SELinux context type is absent.

    name
        filespec of the file or directory. Regex syntax is allowed.
    filetype
        The SELinux filetype specification. Use one of [a, f, d, c, b,
        s, l, p]. See also `man semanage-fcontext`. Defaults to 'a'
        (all files).
    sel_type
        The SELinux context type. There are many.
    sel_user
        The SELinux user.
    sel_level
        The SELinux MLS range.
    """
    ret = {"name": name, "result": False, "changes": {}, "comment": ""}
    new_state = {}
    old_state = {}
    current_state = __salt__["selinux.fcontext_get_policy"](
        name=name,
        filetype=filetype,
        sel_type=sel_type,
        sel_user=sel_user,
        sel_level=sel_level,
    )
    if not current_state:
        ret.update(
            {
                "result": True,
                "comment": 'SELinux policy for "{}" already absent '.format(name)
                + 'with specified filetype "{}" and sel_type "{}".'.format(
                    filetype, sel_type
                ),
            }
        )
        return ret
    else:
        old_state.update({name: current_state})
    # Changes are recorded up front; "new" stays empty because the rule
    # is being deleted.
    ret["changes"].update({"old": old_state, "new": new_state})
    if __opts__["test"]:
        ret.update({"result": None})
    else:
        remove_ret = __salt__["selinux.fcontext_delete_policy"](
            name=name,
            filetype=filetype,
            # Fall back to the currently-recorded sel_type when none given.
            sel_type=sel_type or current_state["sel_type"],
            sel_user=sel_user,
            sel_level=sel_level,
        )
        if remove_ret["retcode"] != 0:
            ret.update({"comment": "Error removing policy: {}".format(remove_ret)})
        else:
            ret.update({"result": True})
    return ret
def fcontext_policy_applied(name, recursive=False):
    """
    .. versionadded:: 2017.7.0

    Checks and makes sure the SELinux policies for a given filespec are
    applied.

    name
        The filespec to check/apply the policy for.
    recursive
        Passed through to the ``selinux`` execution module calls.
        Defaults to ``False``.
    """
    ret = {"name": name, "result": False, "changes": {}, "comment": ""}
    # An empty string from the check means the filesystem already matches
    # the configured policy.
    changes_text = __salt__["selinux.fcontext_policy_is_applied"](name, recursive)
    if changes_text == "":
        ret.update(
            {
                "result": True,
                "comment": (
                    'SElinux policies are already applied for filespec "{}"'.format(
                        name
                    )
                ),
            }
        )
        return ret
    if __opts__["test"]:
        ret.update({"result": None})
    else:
        apply_ret = __salt__["selinux.fcontext_apply_policy"](name, recursive)
        if apply_ret["retcode"] != 0:
            ret.update({"comment": apply_ret})
        else:
            ret.update({"result": True})
            ret.update({"changes": apply_ret.get("changes")})
    return ret
def port_policy_present(name, sel_type, protocol=None, port=None, sel_range=None):
    """
    .. versionadded:: 2019.2.0

    Makes sure an SELinux port policy for a given port, protocol and SELinux context type is present.

    name
        The protocol and port spec. Can be formatted as ``(tcp|udp)/(port|port-range)``.
    sel_type
        The SELinux Type.
    protocol
        The protocol for the port, ``tcp`` or ``udp``. Required if name is not formatted.
    port
        The port or port range. Required if name is not formatted.
    sel_range
        The SELinux MLS/MCS Security Range.
    """
    ret = {"name": name, "result": False, "changes": {}, "comment": ""}
    old_state = __salt__["selinux.port_get_policy"](
        name=name,
        sel_type=sel_type,
        protocol=protocol,
        port=port,
    )
    if old_state:
        ret.update(
            {
                "result": True,
                "comment": 'SELinux policy for "{}" already present '.format(name)
                + 'with specified sel_type "{}", protocol "{}" and port "{}".'.format(
                    sel_type, protocol, port
                ),
            }
        )
        return ret
    if __opts__["test"]:
        ret.update({"result": None})
    else:
        add_ret = __salt__["selinux.port_add_policy"](
            name=name,
            sel_type=sel_type,
            protocol=protocol,
            port=port,
            sel_range=sel_range,
        )
        if add_ret["retcode"] != 0:
            ret.update({"comment": "Error adding new policy: {}".format(add_ret)})
        else:
            ret.update({"result": True})
    # Re-query so "new" reflects what the system actually reports.
    # NOTE(review): in test mode nothing was added, so "new" here still
    # shows the unchanged (absent) state.
    new_state = __salt__["selinux.port_get_policy"](
        name=name,
        sel_type=sel_type,
        protocol=protocol,
        port=port,
    )
    ret["changes"].update({"old": old_state, "new": new_state})
    return ret
def port_policy_absent(name, sel_type=None, protocol=None, port=None):
    """
    .. versionadded:: 2019.2.0

    Makes sure an SELinux port policy for a given port, protocol and SELinux context type is absent.

    name
        The protocol and port spec. Can be formatted as ``(tcp|udp)/(port|port-range)``.
    sel_type
        The SELinux Type. Optional; can be used in determining if policy is present,
        ignored by ``semanage port --delete``.
    protocol
        The protocol for the port, ``tcp`` or ``udp``. Required if name is not formatted.
    port
        The port or port range. Required if name is not formatted.
    """
    ret = {"name": name, "result": False, "changes": {}, "comment": ""}
    old_state = __salt__["selinux.port_get_policy"](
        name=name,
        sel_type=sel_type,
        protocol=protocol,
        port=port,
    )
    if not old_state:
        ret.update(
            {
                "result": True,
                "comment": 'SELinux policy for "{}" already absent '.format(name)
                + 'with specified sel_type "{}", protocol "{}" and port "{}".'.format(
                    sel_type, protocol, port
                ),
            }
        )
        return ret
    if __opts__["test"]:
        ret.update({"result": None})
    else:
        # sel_type is deliberately not passed: it is ignored by
        # ``semanage port --delete`` (see docstring).
        delete_ret = __salt__["selinux.port_delete_policy"](
            name=name,
            protocol=protocol,
            port=port,
        )
        if delete_ret["retcode"] != 0:
            ret.update({"comment": "Error deleting policy: {}".format(delete_ret)})
        else:
            ret.update({"result": True})
    # Re-query so "new" reflects what the system actually reports.
    new_state = __salt__["selinux.port_get_policy"](
        name=name,
        sel_type=sel_type,
        protocol=protocol,
        port=port,
    )
    ret["changes"].update({"old": old_state, "new": new_state})
    return ret
def __virtual__():
    """
    Only load if the layman module is available in __salt__
    """
    if "layman.add" not in __salt__:
        return (False, "layman module could not be loaded")
    return "layman"
def present(name):
    """
    Verify that the overlay is present

    name
        The name of the overlay to add
    """
    ret = {"changes": {}, "comment": "", "name": name, "result": True}
    # Already installed locally -- nothing to do.
    if name in __salt__["layman.list_local"]():
        ret["comment"] = "Overlay {} already present".format(name)
        return ret
    if __opts__["test"]:
        ret["comment"] = "Overlay {} is set to be added".format(name)
        ret["result"] = None
        return ret
    # The overlay must exist in the remote list before it can be added.
    if name not in __salt__["layman.list_all"]():
        ret["comment"] = "Overlay {} not found".format(name)
        ret["result"] = False
        return ret
    added = __salt__["layman.add"](name)
    if len(added) < 1:
        ret["comment"] = "Overlay {} failed to add".format(name)
        ret["result"] = False
        return ret
    ret["changes"]["added"] = added
    ret["comment"] = "Overlay {} added.".format(name)
    return ret
def absent(name):
    """
    Verify that the overlay is absent

    name
        The name of the overlay to delete
    """
    ret = {"changes": {}, "comment": "", "name": name, "result": True}
    # Not installed locally -- nothing to do.
    if name not in __salt__["layman.list_local"]():
        ret["comment"] = "Overlay {} already absent".format(name)
        return ret
    if __opts__["test"]:
        ret["comment"] = "Overlay {} is set to be deleted".format(name)
        ret["result"] = None
        return ret
    deleted = __salt__["layman.delete"](name)
    if len(deleted) < 1:
        ret["comment"] = "Overlay {} failed to delete".format(name)
        ret["result"] = False
        return ret
    ret["changes"]["deleted"] = deleted
    ret["comment"] = "Overlay {} deleted.".format(name)
    return ret
import os.path
import sys
import salt.utils.files
import salt.utils.stringutils
def __virtual__():
    """
    Only load if the mysql module is available in __salt__
    """
    if "mysql.query" not in __salt__:
        return (False, "mysql module could not be loaded")
    return True
def _get_mysql_error():
    """
    Look in module context for a MySQL error. Eventually we should make a less
    ugly way of doing this.
    """
    # Reach into the loaded execution-module namespace (located via any
    # known __salt__ function) and pop a pending "mysql.error" entry from
    # its shared __context__, if one is set.
    loader_module = sys.modules[__salt__["test.ping"].__module__]
    return loader_module.__context__.pop("mysql.error", None)
def run_file(
    name,
    database,
    query_file=None,
    output=None,
    grain=None,
    key=None,
    overwrite=True,
    saltenv=None,
    check_db_exists=True,
    client_flags=None,
    **connection_args
):
    """
    Execute an arbitrary query on the specified database

    .. versionadded:: 2017.7.0

    name
        Used only as an ID
    database
        The name of the database to execute the query_file on
    query_file
        The file of mysql commands to run. Effectively required; it only
        defaults to ``None`` so it can follow the positional parameters.
    output
        grain: output in a grain
        other: the file to store results
        None: output to the result comment (default)
    grain:
        grain to store the output (need output=grain)
    key:
        the specified grain will be treated as a dictionary, the result
        of this state will be stored under the specified key.
    overwrite:
        The file or grain will be overwritten if it already exists (default)
    saltenv:
        The saltenv to pull the query_file from
    check_db_exists:
        The state run will check that the specified database exists (default=True)
        before running any queries
    client_flags:
        A list of client flags to pass to the MySQL connection.
        https://dev.mysql.com/doc/internals/en/capability-flags.html
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Database {} is already present".format(database),
    }
    if client_flags is None:
        client_flags = []
    connection_args["client_flags"] = client_flags
    if not isinstance(client_flags, list):
        ret["comment"] = "Error: client_flags must be a list."
        ret["result"] = False
        return ret
    # Fail cleanly when no query file was supplied; previously this fell
    # through to ``None.startswith(...)`` and raised an AttributeError.
    if query_file is None:
        ret["comment"] = "Error: query_file must be specified."
        ret["result"] = False
        return ret
    # Remote sources are cached to the minion first.
    if any(
        [
            query_file.startswith(proto)
            for proto in ["http://", "https://", "salt://", "s3://", "swift://"]
        ]
    ):
        query_file = __salt__["cp.cache_file"](query_file, saltenv=saltenv or __env__)
    if not os.path.exists(query_file):
        ret["comment"] = "File {} does not exist".format(query_file)
        ret["result"] = False
        return ret
    # check if database exists
    if check_db_exists and not __salt__["mysql.db_exists"](database, **connection_args):
        err = _get_mysql_error()
        if err is not None:
            ret["comment"] = err
            ret["result"] = False
            return ret
        ret["result"] = None
        ret["comment"] = "Database {} is not present".format(database)
        return ret
    # Check if execution needed
    if output == "grain":
        if grain is not None and key is None:
            if not overwrite and grain in __salt__["grains.ls"]():
                ret["comment"] = "No execution needed. Grain " + grain + " already set"
                return ret
            elif __opts__["test"]:
                ret["result"] = None
                ret["comment"] = (
                    "Query would execute, storing result in " + "grain: " + grain
                )
                return ret
        elif grain is not None:
            if grain in __salt__["grains.ls"]():
                grain_value = __salt__["grains.get"](grain)
            else:
                grain_value = {}
            if not overwrite and key in grain_value:
                ret["comment"] = (
                    "No execution needed. Grain " + grain + ":" + key + " already set"
                )
                return ret
            elif __opts__["test"]:
                ret["result"] = None
                ret["comment"] = (
                    "Query would execute, storing result in "
                    + "grain: "
                    + grain
                    + ":"
                    + key
                )
                return ret
        else:
            ret["result"] = False
            ret["comment"] = (
                "Error: output type 'grain' needs the grain " + "parameter\n"
            )
            return ret
    elif output is not None:
        if not overwrite and os.path.isfile(output):
            ret["comment"] = "No execution needed. File " + output + " already set"
            return ret
        elif __opts__["test"]:
            ret["result"] = None
            ret["comment"] = (
                "Query would execute, storing result in " + "file: " + output
            )
            return ret
    elif __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Query would execute, not storing result"
        return ret
    # The database is present, execute the query
    query_result = __salt__["mysql.file_query"](database, query_file, **connection_args)
    if query_result is False:
        ret["result"] = False
        return ret
    # Re-map positional result rows into {column: value} dicts.
    mapped_results = []
    if "results" in query_result:
        for res in query_result["results"]:
            mapped_line = {}
            for idx, col in enumerate(query_result["columns"]):
                mapped_line[col] = res[idx]
            mapped_results.append(mapped_line)
        query_result["results"] = mapped_results
    ret["comment"] = str(query_result)
    if output == "grain":
        if grain is not None and key is None:
            __salt__["grains.setval"](grain, query_result)
            ret["changes"]["query"] = "Executed. Output into grain: " + grain
        elif grain is not None:
            if grain in __salt__["grains.ls"]():
                grain_value = __salt__["grains.get"](grain)
            else:
                grain_value = {}
            grain_value[key] = query_result
            __salt__["grains.setval"](grain, grain_value)
            ret["changes"]["query"] = (
                "Executed. Output into grain: " + grain + ":" + key
            )
    elif output is not None:
        ret["changes"]["query"] = "Executed. Output into " + output
        with salt.utils.files.fopen(output, "w") as output_file:
            if "results" in query_result:
                for res in query_result["results"]:
                    for col, val in res.items():
                        output_file.write(
                            salt.utils.stringutils.to_str(col + ":" + val + "\n")
                        )
            else:
                output_file.write(salt.utils.stringutils.to_str(query_result))
    else:
        ret["changes"]["query"] = "Executed"
    return ret
def run(
    name,
    database,
    query,
    output=None,
    grain=None,
    key=None,
    overwrite=True,
    check_db_exists=True,
    client_flags=None,
    **connection_args
):
    """
    Execute an arbitrary query on the specified database

    name
        Used only as an ID
    database
        The name of the database to execute the query on
    query
        The query to execute
    output
        grain: output in a grain
        other: the file to store results
        None: output to the result comment (default)
    grain:
        grain to store the output (need output=grain)
    key:
        the specified grain will be treated as a dictionary, the result
        of this state will be stored under the specified key.
    overwrite:
        The file or grain will be overwritten if it already exists (default)
    check_db_exists:
        The state run will check that the specified database exists (default=True)
        before running any queries
    client_flags:
        A list of client flags to pass to the MySQL connection.
        https://dev.mysql.com/doc/internals/en/capability-flags.html
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Database {} is already present".format(database),
    }
    if client_flags is None:
        client_flags = []
    connection_args["client_flags"] = client_flags
    if not isinstance(client_flags, list):
        ret["comment"] = "Error: client_flags must be a list."
        ret["result"] = False
        return ret
    # check if database exists
    if check_db_exists and not __salt__["mysql.db_exists"](database, **connection_args):
        err = _get_mysql_error()
        if err is not None:
            ret["comment"] = err
            ret["result"] = False
            return ret
        ret["result"] = None
        # Fixed: report the database that was checked, not the state ID
        # (matches the equivalent branch in run_file).
        ret["comment"] = "Database {} is not present".format(database)
        return ret
    # Check if execution needed
    if output == "grain":
        if grain is not None and key is None:
            if not overwrite and grain in __salt__["grains.ls"]():
                ret["comment"] = "No execution needed. Grain " + grain + " already set"
                return ret
            elif __opts__["test"]:
                ret["result"] = None
                ret["comment"] = (
                    "Query would execute, storing result in " + "grain: " + grain
                )
                return ret
        elif grain is not None:
            if grain in __salt__["grains.ls"]():
                grain_value = __salt__["grains.get"](grain)
            else:
                grain_value = {}
            if not overwrite and key in grain_value:
                ret["comment"] = (
                    "No execution needed. Grain " + grain + ":" + key + " already set"
                )
                return ret
            elif __opts__["test"]:
                ret["result"] = None
                ret["comment"] = (
                    "Query would execute, storing result in "
                    + "grain: "
                    + grain
                    + ":"
                    + key
                )
                return ret
        else:
            ret["result"] = False
            ret["comment"] = (
                "Error: output type 'grain' needs the grain " + "parameter\n"
            )
            return ret
    elif output is not None:
        if not overwrite and os.path.isfile(output):
            ret["comment"] = "No execution needed. File " + output + " already set"
            return ret
        elif __opts__["test"]:
            ret["result"] = None
            ret["comment"] = (
                "Query would execute, storing result in " + "file: " + output
            )
            return ret
    elif __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Query would execute, not storing result"
        return ret
    # The database is present, execute the query
    query_result = __salt__["mysql.query"](database, query, **connection_args)
    # Re-map positional result rows into {column: value} dicts.
    mapped_results = []
    if "results" in query_result:
        for res in query_result["results"]:
            mapped_line = {}
            for idx, col in enumerate(query_result["columns"]):
                mapped_line[col] = res[idx]
            mapped_results.append(mapped_line)
        query_result["results"] = mapped_results
    ret["comment"] = str(query_result)
    if output == "grain":
        if grain is not None and key is None:
            __salt__["grains.setval"](grain, query_result)
            ret["changes"]["query"] = "Executed. Output into grain: " + grain
        elif grain is not None:
            if grain in __salt__["grains.ls"]():
                grain_value = __salt__["grains.get"](grain)
            else:
                grain_value = {}
            grain_value[key] = query_result
            __salt__["grains.setval"](grain, grain_value)
            ret["changes"]["query"] = (
                "Executed. Output into grain: " + grain + ":" + key
            )
    elif output is not None:
        ret["changes"]["query"] = "Executed. Output into " + output
        with salt.utils.files.fopen(output, "w") as output_file:
            if "results" in query_result:
                for res in query_result["results"]:
                    for col, val in res.items():
                        output_file.write(
                            salt.utils.stringutils.to_str(col + ":" + val + "\n")
                        )
            else:
                if isinstance(query_result, str):
                    output_file.write(salt.utils.stringutils.to_str(query_result))
                else:
                    for col, val in query_result.items():
                        output_file.write(
                            salt.utils.stringutils.to_str("{}:{}\n".format(col, val))
                        )
    else:
        ret["changes"]["query"] = "Executed"
    return ret
import logging
import salt.utils.json
log = logging.getLogger(__name__)
def index_absent(name):
    """
    Ensure that the named index is absent.

    name
        Name of the index to remove
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    try:
        index = __salt__["elasticsearch.index_get"](index=name)
        if not index or name not in index:
            ret["comment"] = "Index {} is already absent".format(name)
            return ret
        if __opts__["test"]:
            ret["comment"] = "Index {} will be removed".format(name)
            ret["changes"]["old"] = index[name]
            ret["result"] = None
            return ret
        ret["result"] = __salt__["elasticsearch.index_delete"](index=name)
        if ret["result"]:
            ret["comment"] = "Successfully removed index {}".format(name)
            ret["changes"]["old"] = index[name]
        else:
            ret["comment"] = "Failed to remove index {} for unknown reasons".format(
                name
            )
    except Exception as err:  # pylint: disable=broad-except
        ret["result"] = False
        ret["comment"] = str(err)
    return ret
def index_present(name, definition=None):
    """
    Ensure that the named index is present.

    name
        Name of the index to add
    definition
        Optional dict for creation parameters as per https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html

    **Example:**

    .. code-block:: yaml

        # Default settings
        mytestindex:
          elasticsearch_index.present

        # Extra settings
        mytestindex2:
          elasticsearch_index.present:
            - definition:
                settings:
                  index:
                    number_of_shards: 10
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    try:
        index_exists = __salt__["elasticsearch.index_exists"](index=name)
        if not index_exists:
            if __opts__["test"]:
                ret["comment"] = "Index {} does not exist and will be created".format(
                    name
                )
                ret["changes"] = {"new": definition}
                ret["result"] = None
            else:
                output = __salt__["elasticsearch.index_create"](
                    index=name, body=definition
                )
                if output:
                    ret["comment"] = "Successfully created index {}".format(name)
                    # Record the index as Elasticsearch reports it back,
                    # not merely the submitted definition.
                    ret["changes"] = {
                        "new": __salt__["elasticsearch.index_get"](index=name)[name]
                    }
                else:
                    ret["result"] = False
                    ret["comment"] = "Cannot create index {}, {}".format(name, output)
        else:
            ret["comment"] = "Index {} is already present".format(name)
    except Exception as err:  # pylint: disable=broad-except
        ret["result"] = False
        ret["comment"] = str(err)
    return ret
def alias_absent(name, index):
    """
    Ensure that the index alias is absent.

    name
        Name of the index alias to remove
    index
        Name of the index for the alias
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    try:
        alias = __salt__["elasticsearch.alias_get"](aliases=name, indices=index)
        # The nested .get() chain distinguishes "alias exists (possibly with
        # an empty body)" from "alias missing" via the None default.
        if (
            alias
            and alias.get(index, {}).get("aliases", {}).get(name, None) is not None
        ):
            if __opts__["test"]:
                ret["comment"] = "Alias {} for index {} will be removed".format(
                    name, index
                )
                ret["changes"]["old"] = (
                    alias.get(index, {}).get("aliases", {}).get(name, {})
                )
                ret["result"] = None
            else:
                ret["result"] = __salt__["elasticsearch.alias_delete"](
                    aliases=name, indices=index
                )
                if ret["result"]:
                    ret[
                        "comment"
                    ] = "Successfully removed alias {} for index {}".format(name, index)
                    ret["changes"]["old"] = (
                        alias.get(index, {}).get("aliases", {}).get(name, {})
                    )
                else:
                    ret[
                        "comment"
                    ] = "Failed to remove alias {} for index {} for unknown reasons".format(
                        name, index
                    )
        else:
            ret["comment"] = "Alias {} for index {} is already absent".format(
                name, index
            )
    except Exception as err:  # pylint: disable=broad-except
        ret["result"] = False
        ret["comment"] = str(err)
    return ret
def alias_present(name, index, definition=None):
    """
    Ensure that the named index alias is present.

    name
        Name of the alias

    index
        Name of the index

    definition
        Optional dict for filters as per
        https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html

    **Example:**

    .. code-block:: yaml

        mytestalias:
          elasticsearch.alias_present:
            - index: testindex
            - definition:
                filter:
                  term:
                    user: kimchy
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    try:
        alias = __salt__["elasticsearch.alias_get"](aliases=name, indices=index)
        old = {}
        if alias:
            old = alias.get(index, {}).get("aliases", {}).get(name, {})
        if not definition:
            definition = {}
        # Diff of stored vs. desired config drives the decision below.
        ret["changes"] = __utils__["dictdiffer.deep_diff"](old, definition)
        # "not definition" keeps the create path reachable when no filter is
        # requested (deep_diff of two empty dicts is empty, but a missing
        # alias must still be created).
        if ret["changes"] or not definition:
            if __opts__["test"]:
                if not old:
                    ret[
                        "comment"
                    ] = "Alias {} for index {} does not exist and will be created".format(
                        name, index
                    )
                else:
                    ret["comment"] = (
                        "Alias {} for index {} exists with wrong configuration and will"
                        " be overridden".format(name, index)
                    )
                ret["result"] = None
            else:
                # alias_create both creates and replaces an alias definition.
                output = __salt__["elasticsearch.alias_create"](
                    alias=name, indices=index, body=definition
                )
                if output:
                    if not old:
                        ret[
                            "comment"
                        ] = "Successfully created alias {} for index {}".format(
                            name, index
                        )
                    else:
                        ret[
                            "comment"
                        ] = "Successfully replaced alias {} for index {}".format(
                            name, index
                        )
                else:
                    ret["result"] = False
                    ret["comment"] = "Cannot create alias {} for index {}, {}".format(
                        name, index, output
                    )
        else:
            ret["comment"] = "Alias {} for index {} is already present".format(
                name, index
            )
    except Exception as err:  # pylint: disable=broad-except
        # Surface any Elasticsearch/transport error as a failed state.
        ret["result"] = False
        ret["comment"] = str(err)
    return ret
def index_template_absent(name):
    """
    Ensure that the named index template is absent.

    name
        Name of the index template to remove
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    try:
        index_template = __salt__["elasticsearch.index_template_get"](name=name)
        # Guard on the key as well: the get call may return a mapping that
        # does not contain the requested template name.
        if index_template and name in index_template:
            if __opts__["test"]:
                # Dry run: report the pending removal only.
                ret["comment"] = "Index template {} will be removed".format(name)
                ret["changes"]["old"] = index_template[name]
                ret["result"] = None
            else:
                ret["result"] = __salt__["elasticsearch.index_template_delete"](
                    name=name
                )
                if ret["result"]:
                    ret["comment"] = "Successfully removed index template {}".format(
                        name
                    )
                    ret["changes"]["old"] = index_template[name]
                else:
                    ret[
                        "comment"
                    ] = "Failed to remove index template {} for unknown reasons".format(
                        name
                    )
        else:
            ret["comment"] = "Index template {} is already absent".format(name)
    except Exception as err:  # pylint: disable=broad-except
        # Surface any Elasticsearch/transport error as a failed state.
        ret["result"] = False
        ret["comment"] = str(err)
    return ret
def index_template_present(name, definition, check_definition=False):
    """
    Ensure that the named index template is present.

    name
        Name of the index template to add

    definition
        Required dict for creation parameters as per
        https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html

    check_definition
        If the template already exists and the definition is up to date

    **Example:**

    .. code-block:: yaml

        mytestindex2_template:
          elasticsearch.index_template_present:
            - definition:
                template: logstash-*
                order: 1
                settings:
                  number_of_shards: 1
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    try:
        index_template_exists = __salt__["elasticsearch.index_template_exists"](
            name=name
        )
        if not index_template_exists:
            if __opts__["test"]:
                # Dry run: report the pending creation only.
                ret[
                    "comment"
                ] = "Index template {} does not exist and will be created".format(name)
                ret["changes"] = {"new": definition}
                ret["result"] = None
            else:
                output = __salt__["elasticsearch.index_template_create"](
                    name=name, body=definition
                )
                if output:
                    ret["comment"] = "Successfully created index template {}".format(
                        name
                    )
                    # Read the template back so the reported change reflects
                    # what Elasticsearch actually stored.
                    ret["changes"] = {
                        "new": __salt__["elasticsearch.index_template_get"](name=name)[
                            name
                        ]
                    }
                else:
                    ret["result"] = False
                    ret["comment"] = "Cannot create index template {}, {}".format(
                        name, output
                    )
        else:
            if check_definition:
                # Accept the desired definition as either a JSON string or a
                # dict.
                if isinstance(definition, str):
                    definition_parsed = salt.utils.json.loads(definition)
                else:
                    definition_parsed = definition
                current_template = __salt__["elasticsearch.index_template_get"](
                    name=name
                )[name]
                # Prune empty keys to avoid false-positive diffs. Use .get()
                # so a template response missing one of these keys does not
                # raise KeyError.
                for key in ("mappings", "aliases", "settings"):
                    if current_template.get(key) == {} and key not in definition_parsed:
                        del current_template[key]
                diff = __utils__["dictdiffer.deep_diff"](
                    current_template, definition_parsed
                )
                if len(diff) != 0:
                    if __opts__["test"]:
                        ret[
                            "comment"
                        ] = "Index template {} exist but need to be updated".format(name)
                        ret["changes"] = diff
                        ret["result"] = None
                    else:
                        # index_template_create overwrites an existing
                        # template, so it serves as the update call too.
                        output = __salt__["elasticsearch.index_template_create"](
                            name=name, body=definition
                        )
                        if output:
                            ret[
                                "comment"
                            ] = "Successfully updated index template {}".format(name)
                            ret["changes"] = diff
                        else:
                            ret["result"] = False
                            ret["comment"] = "Cannot update index template {}, {}".format(
                                name, output
                            )
                else:
                    ret[
                        "comment"
                    ] = "Index template {} is already present and up to date".format(
                        name
                    )
            else:
                ret["comment"] = "Index template {} is already present".format(name)
    except Exception as err:  # pylint: disable=broad-except
        # Surface any Elasticsearch/transport error as a failed state.
        ret["result"] = False
        ret["comment"] = str(err)
    return ret
def pipeline_absent(name):
    """
    Ensure that the named pipeline is absent

    name
        Name of the pipeline to remove
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    try:
        pipeline = __salt__["elasticsearch.pipeline_get"](id=name)
        # Guard on the key as well: pipeline_get may return a mapping that
        # does not contain the requested pipeline id.
        if pipeline and name in pipeline:
            if __opts__["test"]:
                # Dry run: report the pending removal only.
                ret["comment"] = "Pipeline {} will be removed".format(name)
                ret["changes"]["old"] = pipeline[name]
                ret["result"] = None
            else:
                ret["result"] = __salt__["elasticsearch.pipeline_delete"](id=name)
                if ret["result"]:
                    ret["comment"] = "Successfully removed pipeline {}".format(name)
                    ret["changes"]["old"] = pipeline[name]
                else:
                    ret[
                        "comment"
                    ] = "Failed to remove pipeline {} for unknown reasons".format(name)
        else:
            ret["comment"] = "Pipeline {} is already absent".format(name)
    except Exception as err:  # pylint: disable=broad-except
        # Surface any Elasticsearch/transport error as a failed state.
        ret["result"] = False
        ret["comment"] = str(err)
    return ret
def pipeline_present(name, definition):
    """
    Ensure that the named ingest pipeline is present.

    name
        Name (id) of the pipeline to add

    definition
        Required dict for creation parameters as per
        https://www.elastic.co/guide/en/elasticsearch/reference/master/pipeline.html

    **Example:**

    .. code-block:: yaml

        test_pipeline:
          elasticsearch.pipeline_present:
            - definition:
                description: example pipeline
                processors:
                  - set:
                      field: collector_timestamp_millis
                      value: '{{ '{{' }}_ingest.timestamp{{ '}}' }}'
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    try:
        pipeline = __salt__["elasticsearch.pipeline_get"](id=name)
        old = {}
        if pipeline and name in pipeline:
            old = pipeline[name]
        # Diff of stored vs. desired definition drives the decision below;
        # "not definition" keeps the create path reachable for an empty body.
        ret["changes"] = __utils__["dictdiffer.deep_diff"](old, definition)
        if ret["changes"] or not definition:
            if __opts__["test"]:
                if not pipeline:
                    ret[
                        "comment"
                    ] = "Pipeline {} does not exist and will be created".format(name)
                else:
                    ret["comment"] = (
                        "Pipeline {} exists with wrong configuration and will be"
                        " overridden".format(name)
                    )
                ret["result"] = None
            else:
                # pipeline_create both creates and replaces a pipeline.
                output = __salt__["elasticsearch.pipeline_create"](
                    id=name, body=definition
                )
                if output:
                    if not pipeline:
                        ret["comment"] = "Successfully created pipeline {}".format(name)
                    else:
                        ret["comment"] = "Successfully replaced pipeline {}".format(
                            name
                        )
                else:
                    ret["result"] = False
                    ret["comment"] = "Cannot create pipeline {}, {}".format(
                        name, output
                    )
        else:
            ret["comment"] = "Pipeline {} is already present".format(name)
    except Exception as err:  # pylint: disable=broad-except
        # Surface any Elasticsearch/transport error as a failed state.
        ret["result"] = False
        ret["comment"] = str(err)
    return ret
def search_template_absent(name):
    """
    Ensure that the search template is absent

    name
        Name of the search template to remove
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    try:
        template = __salt__["elasticsearch.search_template_get"](id=name)
        if template:
            if __opts__["test"]:
                # Dry run: report the pending removal only. Stored search
                # templates come back JSON-encoded, hence the loads().
                ret["comment"] = "Search template {} will be removed".format(name)
                ret["changes"]["old"] = salt.utils.json.loads(template["template"])
                ret["result"] = None
            else:
                ret["result"] = __salt__["elasticsearch.search_template_delete"](
                    id=name
                )
                if ret["result"]:
                    ret["comment"] = "Successfully removed search template {}".format(
                        name
                    )
                    ret["changes"]["old"] = salt.utils.json.loads(template["template"])
                else:
                    ret[
                        "comment"
                    ] = "Failed to remove search template {} for unknown reasons".format(
                        name
                    )
        else:
            ret["comment"] = "Search template {} is already absent".format(name)
    except Exception as err:  # pylint: disable=broad-except
        # Surface any Elasticsearch/transport error as a failed state.
        ret["result"] = False
        ret["comment"] = str(err)
    return ret
def search_template_present(name, definition):
    """
    Ensure that the named search template is present.

    name
        Name of the search template to add

    definition
        Required dict for creation parameters as per
        http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html

    **Example:**

    .. code-block:: yaml

        test_pipeline:
          elasticsearch.search_template_present:
            - definition:
                inline:
                  size: 10
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    try:
        template = __salt__["elasticsearch.search_template_get"](id=name)
        old = {}
        if template:
            # Stored search templates come back JSON-encoded; decode so the
            # diff below compares dicts rather than a dict against a string.
            old = salt.utils.json.loads(template["template"])
        # Diff of stored vs. desired definition drives the decision below;
        # "not definition" keeps the create path reachable for an empty body.
        ret["changes"] = __utils__["dictdiffer.deep_diff"](old, definition)
        if ret["changes"] or not definition:
            if __opts__["test"]:
                if not template:
                    ret[
                        "comment"
                    ] = "Search template {} does not exist and will be created".format(
                        name
                    )
                else:
                    ret["comment"] = (
                        "Search template {} exists with wrong configuration and will be"
                        " overridden".format(name)
                    )
                ret["result"] = None
            else:
                # search_template_create both creates and replaces a template.
                output = __salt__["elasticsearch.search_template_create"](
                    id=name, body=definition
                )
                if output:
                    if not template:
                        ret[
                            "comment"
                        ] = "Successfully created search template {}".format(name)
                    else:
                        ret[
                            "comment"
                        ] = "Successfully replaced search template {}".format(name)
                else:
                    ret["result"] = False
                    ret["comment"] = "Cannot create search template {}, {}".format(
                        name, output
                    )
        else:
            ret["comment"] = "Search template {} is already present".format(name)
    except Exception as err:  # pylint: disable=broad-except
        # Surface any Elasticsearch/transport error as a failed state.
        ret["result"] = False
        ret["comment"] = str(err)
    return ret
def __virtual__():
    """
    Only load if a2ensite is available.
    """
    # Bail out early when the apache execution module is not loaded.
    if "apache.a2ensite" not in __salt__:
        return (False, "apache module could not be loaded")
    return "apache_site"
def enabled(name):
    """
    Ensure an Apache site is enabled.

    name
        Name of the Apache site
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # Nothing to do when the site is already active.
    if __salt__["apache.check_site_enabled"](name):
        ret["comment"] = "{} already enabled.".format(name)
        return ret
    if __opts__["test"]:
        # Dry run: report the pending change without calling a2ensite.
        ret["comment"] = "Apache site {} is set to be enabled.".format(name)
        ret["changes"] = {"old": None, "new": name}
        ret["result"] = None
        return ret
    status = __salt__["apache.a2ensite"](name)["Status"]
    if isinstance(status, str) and "enabled" in status:
        ret["result"] = True
        ret["changes"] = {"old": None, "new": name}
    else:
        ret["result"] = False
        comment = "Failed to enable {} Apache site".format(name)
        if isinstance(status, str):
            comment = comment + " ({})".format(status)
        ret["comment"] = comment
    return ret
def disabled(name):
    """
    Ensure an Apache site is disabled.

    name
        Name of the Apache site
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    is_enabled = __salt__["apache.check_site_enabled"](name)
    if is_enabled:
        if __opts__["test"]:
            # Dry run: report the pending change without calling a2dissite.
            msg = "Apache site {} is set to be disabled.".format(name)
            ret["comment"] = msg
            ret["changes"]["old"] = name
            ret["changes"]["new"] = None
            ret["result"] = None
            return ret
        status = __salt__["apache.a2dissite"](name)["Status"]
        if isinstance(status, str) and "disabled" in status:
            ret["result"] = True
            ret["changes"]["old"] = name
            ret["changes"]["new"] = None
        else:
            ret["result"] = False
            ret["comment"] = "Failed to disable {} Apache site".format(name)
            # Append a2dissite's own status text when it is available.
            if isinstance(status, str):
                ret["comment"] = ret["comment"] + " ({})".format(status)
            return ret
    else:
        ret["comment"] = "{} already disabled.".format(name)
    return ret
def __virtual__():
    """
    Only load if the splunk_search module is available in __salt__
    """
    # Bail out early when the splunk_search execution module is not loaded.
    if "splunk_search.get" not in __salt__:
        return (False, "splunk module could not be loaded")
    return "splunk_search"
def present(name, profile="splunk", **kwargs):
    """
    Ensure a search is present

    .. code-block:: yaml

        API Error Search:
          splunk_search.present:
            search: index=main sourcetype=blah
            template: alert_5min

    The following parameters are required:

    name
        This is the name of the search in splunk
    """
    # result starts as None so a test-mode early return reports "unchanged".
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}
    target = __salt__["splunk_search.get"](name, profile=profile)
    if target:
        if __opts__["test"]:
            ret["comment"] = "Would update {}".format(name)
            return ret
        # found a search... updating
        result = __salt__["splunk_search.update"](name, profile=profile, **kwargs)
        if not result:
            # no update
            ret["result"] = True
            ret["comment"] = "No changes"
        else:
            # update() returns a (new values, diff) pair on change — TODO
            # confirm against the splunk_search execution module.
            (newvalues, diffs) = result
            old_content = dict(target.content)
            # Capture the previous value of every key that was changed.
            old_changes = {}
            for x in newvalues:
                old_changes[x] = old_content.get(x, None)
            ret["result"] = True
            ret["changes"]["diff"] = diffs
            ret["changes"]["old"] = old_changes
            ret["changes"]["new"] = newvalues
    else:
        if __opts__["test"]:
            ret["comment"] = "Would create {}".format(name)
            return ret
        # creating a new search
        result = __salt__["splunk_search.create"](name, profile=profile, **kwargs)
        if result:
            ret["result"] = True
            ret["changes"]["old"] = False
            ret["changes"]["new"] = kwargs
        else:
            ret["result"] = False
            ret["comment"] = "Failed to create {}".format(name)
    return ret
def absent(name, profile="splunk"):
    """
    Ensure a search is absent

    .. code-block:: yaml

        API Error Search:
          splunk_search.absent

    The following parameters are required:

    name
        This is the name of the search in splunk
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "{} is absent.".format(name),
    }
    target = __salt__["splunk_search.get"](name, profile=profile)
    if target:
        if __opts__["test"]:
            # Update the existing ret instead of rebuilding it from scratch;
            # the old code dropped the mandatory "changes" key from the
            # state return here.
            ret["comment"] = "Would delete {}".format(name)
            ret["result"] = None
            return ret
        result = __salt__["splunk_search.delete"](name, profile=profile)
        if result:
            ret["comment"] = "{} was deleted".format(name)
        else:
            ret["comment"] = "Failed to delete {}".format(name)
            ret["result"] = False
    return ret
from salt.exceptions import SaltInvocationError
def __virtual__():
    """
    Only load if the slack module is available in __salt__
    """
    # Bail out early when the slack execution module is not loaded.
    if "slack.post_message" not in __salt__:
        return (False, "slack module could not be loaded")
    return "slack"
def post_message(name, **kwargs):
    """
    Send a message to a Slack channel.

    .. code-block:: yaml

        slack-message:
          slack.post_message:
            - channel: '#general'
            - from_name: SuperAdmin
            - message: 'This state was executed successfully.'
            - api_key: peWcBiMOS9HrZG15peWcBiMOS9HrZG15

    The following parameters are required:

    api_key parameters:
        name
            The unique name for this event.

        channel
            The channel to send the message to. Can either be the ID or the name.

        from_name
            The name of that is to be shown in the "from" field.

        message
            The message that is to be sent to the Slack channel.

        The following parameters are optional:

        api_key
            The api key for Slack to use for authentication,
            if not specified in the configuration options of master or minion.

        icon
            URL to an image to use as the icon for this message

    webhook parameters:
        name
            The unique name for this event.

        message
            The message that is to be sent to the Slack channel.

        color
            The color of border of left side

        short
            An optional flag indicating whether the value is short
            enough to be displayed side-by-side with other values.

        webhook
            The identifier of WebHook (URL or token).

        channel
            The channel to use instead of the WebHook default.

        username
            Username to use instead of WebHook default.

        icon_emoji
            Icon to use instead of WebHook default.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    api_key = kwargs.get("api_key")
    webhook = kwargs.get("webhook")

    # If neither api_key nor webhook are provided at the CLI, check the config.
    # api_key is looked up first; webhook only when no api_key was found.
    if not api_key and not webhook:
        api_key = __salt__["config.get"]("slack.api_key") or __salt__["config.get"](
            "slack:api_key"
        )

        if not api_key:
            webhook = __salt__["config.get"]("slack.hook") or __salt__["config.get"](
                "slack:hook"
            )

            if not webhook:
                ret["comment"] = "Please specify api_key or webhook."
                return ret

    if api_key and webhook:
        ret["comment"] = "Please specify only either api_key or webhook."
        return ret

    # channel and from_name are only required on the api_key path; the
    # webhook carries its own defaults for both.
    if api_key and not kwargs.get("channel"):
        ret["comment"] = "Slack channel is missing."
        return ret

    if api_key and not kwargs.get("from_name"):
        ret["comment"] = "Slack from name is missing."
        return ret

    if not kwargs.get("message"):
        ret["comment"] = "Slack message is missing."
        return ret

    if __opts__["test"]:
        ret["comment"] = "The following message is to be sent to Slack: {}".format(
            kwargs.get("message")
        )
        ret["result"] = None
        return ret

    try:
        if api_key:
            # NOTE(review): api_key is forwarded via kwargs.get("api_key"),
            # which is None when the key came from config — presumably the
            # execution module does its own config fallback; confirm.
            result = __salt__["slack.post_message"](
                channel=kwargs.get("channel"),
                message=kwargs.get("message"),
                from_name=kwargs.get("from_name"),
                api_key=kwargs.get("api_key"),
                icon=kwargs.get("icon"),
            )
        elif webhook:
            result = __salt__["slack.call_hook"](
                message=kwargs.get("message"),
                attachment=kwargs.get("attachment"),
                color=kwargs.get("color", "good"),
                short=kwargs.get("short"),
                identifier=kwargs.get("webhook"),
                channel=kwargs.get("channel"),
                username=kwargs.get("username"),
                icon_emoji=kwargs.get("icon_emoji"),
            )
    except SaltInvocationError as sie:
        ret["comment"] = "Failed to send message ({}): {}".format(sie, name)
    else:
        if isinstance(result, bool) and result:
            ret["result"] = True
            ret["comment"] = "Sent message: {}".format(name)
        else:
            # The execution module may return False (or a non-dict) on
            # failure; indexing result["message"] unconditionally raised
            # TypeError in that case.
            if isinstance(result, dict):
                error = result.get("message", result)
            else:
                error = result
            ret["comment"] = "Failed to send message ({}): {}".format(error, name)
    return ret
def __virtual__():
    """
    Only load if a2enmod is available.
    """
    # Bail out early when the apache execution module is not loaded.
    if "apache.a2enmod" not in __salt__:
        return (False, "apache module could not be loaded")
    return "apache_module"
def enabled(name):
    """
    Ensure an Apache module is enabled.

    .. versionadded:: 2016.3.0

    name
        Name of the Apache module
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # Nothing to do when the module is already active.
    if __salt__["apache.check_mod_enabled"](name):
        ret["comment"] = "{} already enabled.".format(name)
        return ret
    if __opts__["test"]:
        # Dry run: report the pending change without calling a2enmod.
        ret["comment"] = "Apache module {} is set to be enabled.".format(name)
        ret["changes"] = {"old": None, "new": name}
        ret["result"] = None
        return ret
    status = __salt__["apache.a2enmod"](name)["Status"]
    if isinstance(status, str) and "enabled" in status:
        ret["result"] = True
        ret["changes"] = {"old": None, "new": name}
    else:
        ret["result"] = False
        comment = "Failed to enable {} Apache module".format(name)
        if isinstance(status, str):
            comment = comment + " ({})".format(status)
        ret["comment"] = comment
    return ret
def disabled(name):
    """
    Ensure an Apache module is disabled.

    .. versionadded:: 2016.3.0

    name
        Name of the Apache module
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    is_enabled = __salt__["apache.check_mod_enabled"](name)
    if is_enabled:
        if __opts__["test"]:
            # Dry run: report the pending change without calling a2dismod.
            msg = "Apache module {} is set to be disabled.".format(name)
            ret["comment"] = msg
            ret["changes"]["old"] = name
            ret["changes"]["new"] = None
            ret["result"] = None
            return ret
        status = __salt__["apache.a2dismod"](name)["Status"]
        if isinstance(status, str) and "disabled" in status:
            ret["result"] = True
            ret["changes"]["old"] = name
            ret["changes"]["new"] = None
        else:
            ret["result"] = False
            ret["comment"] = "Failed to disable {} Apache module".format(name)
            # Append a2dismod's own status text when it is available.
            if isinstance(status, str):
                ret["comment"] = ret["comment"] + " ({})".format(status)
            return ret
    else:
        ret["comment"] = "{} already disabled.".format(name)
    return ret
from salt.exceptions import SaltInvocationError
def __virtual__():
    """
    Only load if boto is available.
    """
    # Bail out early when the boto_asg execution module is not loaded.
    if "boto_asg.exists" not in __salt__:
        return (False, "boto_asg module could not be loaded")
    return "boto_lc"
def present(
    name,
    image_id,
    key_name=None,
    vpc_id=None,
    vpc_name=None,
    security_groups=None,
    user_data=None,
    cloud_init=None,
    instance_type="m1.small",
    kernel_id=None,
    ramdisk_id=None,
    block_device_mappings=None,
    delete_on_termination=None,
    instance_monitoring=False,
    spot_price=None,
    instance_profile_name=None,
    ebs_optimized=False,
    associate_public_ip_address=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Ensure the launch configuration exists.

    name
        Name of the launch configuration.

    image_id
        AMI to use for instances. AMI must exist or creation of the launch
        configuration will fail.

    key_name
        Name of the EC2 key pair to use for instances. Key must exist or
        creation of the launch configuration will fail.

    vpc_id
        The VPC id where the security groups are defined. Only necessary when
        using named security groups that exist outside of the default VPC.
        Mutually exclusive with vpc_name.

    vpc_name
        Name of the VPC where the security groups are defined. Only Necessary
        when using named security groups that exist outside of the default VPC.
        Mutually exclusive with vpc_id.

    security_groups
        List of Names or security group id's of the security groups with which
        to associate the EC2 instances or VPC instances, respectively. Security
        groups must exist, or creation of the launch configuration will fail.

    user_data
        The user data available to launched EC2 instances.

    cloud_init
        A dict of cloud_init configuration. Currently supported keys:
        boothooks, scripts and cloud-config.
        Mutually exclusive with user_data.

    instance_type
        The instance type. ex: m1.small.

    kernel_id
        The kernel id for the instance.

    ramdisk_id
        The RAM disk ID for the instance.

    block_device_mappings
        A dict of block device mappings that contains a dict
        with volume_type, delete_on_termination, iops, size, encrypted,
        snapshot_id.

        volume_type
            Indicates what volume type to use. Valid values are standard, io1, gp2.
            Default is standard.

        delete_on_termination
            Whether the volume should be explicitly marked for deletion when its instance is
            terminated (True), or left around (False). If not provided, or None is explicitly passed,
            the default AWS behaviour is used, which is True for ROOT volumes of instances, and
            False for all others.

        iops
            For Provisioned IOPS (SSD) volumes only. The number of I/O operations per
            second (IOPS) to provision for the volume.

        size
            Desired volume size (in GiB).

        encrypted
            Indicates whether the volume should be encrypted. Encrypted EBS volumes must
            be attached to instances that support Amazon EBS encryption. Volumes that are
            created from encrypted snapshots are automatically encrypted. There is no way
            to create an encrypted volume from an unencrypted snapshot or an unencrypted
            volume from an encrypted snapshot.

    instance_monitoring
        Whether instances in group are launched with detailed monitoring.

    spot_price
        The spot price you are bidding. Only applies if you are building an
        autoscaling group with spot instances.

    instance_profile_name
        The name or the Amazon Resource Name (ARN) of the instance profile
        associated with the IAM role for the instance. Instance profile must
        exist or the creation of the launch configuration will fail.

    ebs_optimized
        Specifies whether the instance is optimized for EBS I/O (true) or not
        (false).

    associate_public_ip_address
        Used for Auto Scaling groups that launch instances into an Amazon
        Virtual Private Cloud. Specifies whether to assign a public IP address
        to each instance launched in a Amazon VPC.

    region
        The region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    # user_data and cloud_init are alternative ways to supply the same thing.
    if user_data and cloud_init:
        raise SaltInvocationError(
            "user_data and cloud_init are mutually exclusive options."
        )
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    exists = __salt__["boto_asg.launch_configuration_exists"](
        name, region=region, key=key, keyid=keyid, profile=profile
    )
    if not exists:
        if __opts__["test"]:
            # Dry run: report the pending creation only.
            msg = "Launch configuration set to be created."
            ret["comment"] = msg
            ret["result"] = None
            return ret
        if cloud_init:
            # Render the cloud_init dict into a MIME user_data payload.
            user_data = __salt__["boto_asg.get_cloud_init_mime"](cloud_init)
        # TODO: Ensure image_id, key_name, security_groups and instance_profile
        # exist, or throw an invocation error.
        created = __salt__["boto_asg.create_launch_configuration"](
            name,
            image_id,
            key_name=key_name,
            vpc_id=vpc_id,
            vpc_name=vpc_name,
            security_groups=security_groups,
            user_data=user_data,
            instance_type=instance_type,
            kernel_id=kernel_id,
            ramdisk_id=ramdisk_id,
            block_device_mappings=block_device_mappings,
            delete_on_termination=delete_on_termination,
            instance_monitoring=instance_monitoring,
            spot_price=spot_price,
            instance_profile_name=instance_profile_name,
            ebs_optimized=ebs_optimized,
            associate_public_ip_address=associate_public_ip_address,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        if created:
            ret["changes"]["old"] = None
            ret["changes"]["new"] = name
        else:
            ret["result"] = False
            ret["comment"] = "Failed to create launch configuration."
    else:
        # Launch configurations are immutable in AWS, so an existing one is
        # simply reported as present; no option reconciliation is attempted.
        ret["comment"] = "Launch configuration present."
    return ret
def absent(name, region=None, key=None, keyid=None, profile=None):
    """
    Ensure the named launch configuration is deleted.

    name
        Name of the launch configuration.

    region
        The region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    exists = __salt__["boto_asg.launch_configuration_exists"](
        name, region=region, key=key, keyid=keyid, profile=profile
    )
    if exists:
        if __opts__["test"]:
            # Dry run: report the pending deletion only.
            ret["comment"] = "Launch configuration set to be deleted."
            ret["result"] = None
            return ret
        deleted = __salt__["boto_asg.delete_launch_configuration"](
            name, region=region, key=key, keyid=keyid, profile=profile
        )
        if deleted:
            ret["changes"]["old"] = name
            ret["changes"]["new"] = None
            ret["comment"] = "Deleted launch configuration."
        else:
            ret["result"] = False
            ret["comment"] = "Failed to delete launch configuration."
    else:
        ret["comment"] = "Launch configuration does not exist."
    return ret
def __virtual__():
    """
    Only make these states available if Zabbix module is available.
    """
    # Bail out early when the zabbix execution module is not loaded.
    if "zabbix.mediatype_create" not in __salt__:
        return (False, "zabbix module could not be loaded")
    return True
def present(name, mediatype, **kwargs):
    """
    Creates new mediatype.

    NOTE: This function accepts all standard mediatype properties: keyword argument names differ depending on your
    zabbix version, see:
    https://www.zabbix.com/documentation/3.0/manual/api/reference/mediatype/object

    :param name: name of the mediatype
    :param mediatype: type of the mediatype (0 - email, 1 - script, 2 - SMS, 3 - Jabber, 100 - Ez Texting)
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    .. code-block:: yaml

        make_new_mediatype:
            zabbix_mediatype.present:
                - name: 'Email'
                - mediatype: 0
                - smtp_server: smtp.example.com
                - smtp_helo: zabbix.example.com
                - smtp_email: zabbix@example.com
    """
    connection_args = {}
    if "_connection_user" in kwargs:
        connection_args["_connection_user"] = kwargs["_connection_user"]
    if "_connection_password" in kwargs:
        connection_args["_connection_password"] = kwargs["_connection_password"]
    if "_connection_url" in kwargs:
        connection_args["_connection_url"] = kwargs["_connection_url"]

    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # Comment and change messages
    comment_mediatype_created = "Mediatype {} created.".format(name)
    comment_mediatype_updated = "Mediatype {} updated.".format(name)
    comment_mediatype_notcreated = "Unable to create mediatype: {}. ".format(name)
    comment_mediatype_exists = "Mediatype {} already exists.".format(name)
    changes_mediatype_created = {
        name: {
            "old": "Mediatype {} does not exist.".format(name),
            "new": "Mediatype {} created.".format(name),
        }
    }

    # Zabbix API expects script parameters as a string of arguments separated by newline characters
    if "exec_params" in kwargs:
        if isinstance(kwargs["exec_params"], list):
            kwargs["exec_params"] = "\n".join(kwargs["exec_params"]) + "\n"
        else:
            kwargs["exec_params"] = str(kwargs["exec_params"]) + "\n"

    mediatype_exists = __salt__["zabbix.mediatype_get"](name, **connection_args)

    if mediatype_exists:
        mediatypeobj = mediatype_exists[0]
        mediatypeid = int(mediatypeobj["mediatypeid"])

        # Each flag below tracks one independent group of properties that may
        # need an update; the comparisons only run when the relevant kwargs
        # were actually supplied.
        update_email = False
        update_email_port = False
        update_email_security = False
        update_email_verify_peer = False
        update_email_verify_host = False
        update_email_auth = False
        update_script = False
        update_script_params = False
        update_sms = False
        update_jabber = False
        update_eztext = False
        update_status = False

        if (
            int(mediatype) == 0
            and "smtp_server" in kwargs
            and "smtp_helo" in kwargs
            and "smtp_email" in kwargs
        ):
            if (
                int(mediatype) != int(mediatypeobj["type"])
                or kwargs["smtp_server"] != mediatypeobj["smtp_server"]
                or kwargs["smtp_email"] != mediatypeobj["smtp_email"]
                or kwargs["smtp_helo"] != mediatypeobj["smtp_helo"]
            ):
                update_email = True

        if int(mediatype) == 0 and "smtp_port" in kwargs:
            if int(kwargs["smtp_port"]) != int(mediatypeobj["smtp_port"]):
                update_email_port = True

        if int(mediatype) == 0 and "smtp_security" in kwargs:
            if int(kwargs["smtp_security"]) != int(mediatypeobj["smtp_security"]):
                update_email_security = True

        if int(mediatype) == 0 and "smtp_verify_peer" in kwargs:
            if int(kwargs["smtp_verify_peer"]) != int(mediatypeobj["smtp_verify_peer"]):
                update_email_verify_peer = True

        if int(mediatype) == 0 and "smtp_verify_host" in kwargs:
            if int(kwargs["smtp_verify_host"]) != int(mediatypeobj["smtp_verify_host"]):
                update_email_verify_host = True

        if (
            int(mediatype) == 0
            and "smtp_authentication" in kwargs
            and "username" in kwargs
            and "passwd" in kwargs
        ):
            if (
                int(kwargs["smtp_authentication"])
                != int(mediatypeobj["smtp_authentication"])
                or kwargs["username"] != mediatypeobj["username"]
                or kwargs["passwd"] != mediatypeobj["passwd"]
            ):
                update_email_auth = True

        if int(mediatype) == 1 and "exec_path" in kwargs:
            if (
                int(mediatype) != int(mediatypeobj["type"])
                or kwargs["exec_path"] != mediatypeobj["exec_path"]
            ):
                update_script = True

        if int(mediatype) == 1 and "exec_params" in kwargs:
            if kwargs["exec_params"] != mediatypeobj["exec_params"]:
                update_script_params = True

        if int(mediatype) == 2 and "gsm_modem" in kwargs:
            if (
                int(mediatype) != int(mediatypeobj["type"])
                or kwargs["gsm_modem"] != mediatypeobj["gsm_modem"]
            ):
                update_sms = True

        if int(mediatype) == 3 and "username" in kwargs and "passwd" in kwargs:
            if (
                int(mediatype) != int(mediatypeobj["type"])
                or kwargs["username"] != mediatypeobj["username"]
                or kwargs["passwd"] != mediatypeobj["passwd"]
            ):
                update_jabber = True

        if (
            int(mediatype) == 100
            and "username" in kwargs
            and "passwd" in kwargs
            and "exec_path" in kwargs
        ):
            if (
                int(mediatype) != int(mediatypeobj["type"])
                or kwargs["username"] != mediatypeobj["username"]
                or kwargs["passwd"] != mediatypeobj["passwd"]
                or kwargs["exec_path"] != mediatypeobj["exec_path"]
            ):
                update_eztext = True

        if "status" in kwargs:
            if int(kwargs["status"]) != int(mediatypeobj["status"]):
                update_status = True

    # Dry run, test=true mode
    if __opts__["test"]:
        if mediatype_exists:
            if update_status:
                ret["result"] = None
                ret["comment"] = comment_mediatype_updated
            else:
                ret["result"] = True
                ret["comment"] = comment_mediatype_exists
        else:
            ret["result"] = None
            ret["comment"] = comment_mediatype_created
        return ret

    error = []

    if mediatype_exists:
        if (
            update_email
            or update_email_port
            or update_email_security
            or update_email_verify_peer
            or update_email_verify_host
            or update_email_auth
            or update_script
            or update_script_params
            or update_sms
            or update_jabber
            or update_eztext
            or update_status
        ):
            ret["result"] = True
            ret["comment"] = comment_mediatype_updated

            if update_email:
                updated_email = __salt__["zabbix.mediatype_update"](
                    mediatypeid,
                    type=mediatype,
                    smtp_server=kwargs["smtp_server"],
                    smtp_helo=kwargs["smtp_helo"],
                    smtp_email=kwargs["smtp_email"],
                    **connection_args
                )
                if "error" in updated_email:
                    error.append(updated_email["error"])
                else:
                    ret["changes"]["smtp_server"] = kwargs["smtp_server"]
                    ret["changes"]["smtp_helo"] = kwargs["smtp_helo"]
                    ret["changes"]["smtp_email"] = kwargs["smtp_email"]

            if update_email_port:
                updated_email_port = __salt__["zabbix.mediatype_update"](
                    mediatypeid, smtp_port=kwargs["smtp_port"], **connection_args
                )
                if "error" in updated_email_port:
                    error.append(updated_email_port["error"])
                else:
                    ret["changes"]["smtp_port"] = kwargs["smtp_port"]

            if update_email_security:
                updated_email_security = __salt__["zabbix.mediatype_update"](
                    mediatypeid,
                    smtp_security=kwargs["smtp_security"],
                    **connection_args
                )
                if "error" in updated_email_security:
                    error.append(updated_email_security["error"])
                else:
                    ret["changes"]["smtp_security"] = kwargs["smtp_security"]

            if update_email_verify_peer:
                updated_email_verify_peer = __salt__["zabbix.mediatype_update"](
                    mediatypeid,
                    smtp_verify_peer=kwargs["smtp_verify_peer"],
                    **connection_args
                )
                if "error" in updated_email_verify_peer:
                    error.append(updated_email_verify_peer["error"])
                else:
                    ret["changes"]["smtp_verify_peer"] = kwargs["smtp_verify_peer"]

            if update_email_verify_host:
                updated_email_verify_host = __salt__["zabbix.mediatype_update"](
                    mediatypeid,
                    smtp_verify_host=kwargs["smtp_verify_host"],
                    **connection_args
                )
                if "error" in updated_email_verify_host:
                    error.append(updated_email_verify_host["error"])
                else:
                    ret["changes"]["smtp_verify_host"] = kwargs["smtp_verify_host"]

            if update_email_auth:
                updated_email_auth = __salt__["zabbix.mediatype_update"](
                    mediatypeid,
                    username=kwargs["username"],
                    passwd=kwargs["passwd"],
                    smtp_authentication=kwargs["smtp_authentication"],
                    **connection_args
                )
                if "error" in updated_email_auth:
                    error.append(updated_email_auth["error"])
                else:
                    ret["changes"]["smtp_authentication"] = kwargs[
                        "smtp_authentication"
                    ]
                    ret["changes"]["username"] = kwargs["username"]

            if update_script:
                updated_script = __salt__["zabbix.mediatype_update"](
                    mediatypeid,
                    type=mediatype,
                    exec_path=kwargs["exec_path"],
                    **connection_args
                )
                if "error" in updated_script:
                    error.append(updated_script["error"])
                else:
                    ret["changes"]["exec_path"] = kwargs["exec_path"]

            if update_script_params:
                updated_script_params = __salt__["zabbix.mediatype_update"](
                    mediatypeid, exec_params=kwargs["exec_params"], **connection_args
                )
                if "error" in updated_script_params:
                    # Bug fix: previously this appended updated_script["error"],
                    # which belongs to the exec_path update and may not even be
                    # defined when only exec_params changed.
                    error.append(updated_script_params["error"])
                else:
                    ret["changes"]["exec_params"] = kwargs["exec_params"]

            if update_sms:
                updated_sms = __salt__["zabbix.mediatype_update"](
                    mediatypeid,
                    type=mediatype,
                    gsm_modem=kwargs["gsm_modem"],
                    **connection_args
                )
                if "error" in updated_sms:
                    error.append(updated_sms["error"])
                else:
                    ret["changes"]["gsm_modem"] = kwargs["gsm_modem"]

            if update_jabber:
                updated_jabber = __salt__["zabbix.mediatype_update"](
                    mediatypeid,
                    type=mediatype,
                    username=kwargs["username"],
                    passwd=kwargs["passwd"],
                    **connection_args
                )
                if "error" in updated_jabber:
                    error.append(updated_jabber["error"])
                else:
                    ret["changes"]["username"] = kwargs["username"]

            if update_eztext:
                updated_eztext = __salt__["zabbix.mediatype_update"](
                    mediatypeid,
                    type=mediatype,
                    username=kwargs["username"],
                    passwd=kwargs["passwd"],
                    exec_path=kwargs["exec_path"],
                    **connection_args
                )
                if "error" in updated_eztext:
                    error.append(updated_eztext["error"])
                else:
                    ret["changes"]["username"] = kwargs["username"]
                    ret["changes"]["exec_path"] = kwargs["exec_path"]

            if update_status:
                updated_status = __salt__["zabbix.mediatype_update"](
                    mediatypeid, status=kwargs["status"], **connection_args
                )
                if "error" in updated_status:
                    error.append(updated_status["error"])
                else:
                    ret["changes"]["status"] = kwargs["status"]

        else:
            ret["result"] = True
            ret["comment"] = comment_mediatype_exists

    else:
        mediatype_create = __salt__["zabbix.mediatype_create"](
            name, mediatype, **kwargs
        )

        if "error" not in mediatype_create:
            ret["result"] = True
            ret["comment"] = comment_mediatype_created
            ret["changes"] = changes_mediatype_created
        else:
            ret["result"] = False
            ret["comment"] = comment_mediatype_notcreated + str(
                mediatype_create["error"]
            )

    # error detected
    if error:
        ret["changes"] = {}
        ret["result"] = False
        ret["comment"] = str(error)

    return ret
def absent(name, **kwargs):
    """
    Ensures that the mediatype does not exist, eventually deletes the mediatype.

    :param name: name of the mediatype
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    .. code-block:: yaml

        delete_mediatype:
            zabbix_mediatype.absent:
                - name: 'Email'
    """
    # Only forward the optional connection overrides that were supplied.
    connection_args = {
        key: kwargs[key]
        for key in ("_connection_user", "_connection_password", "_connection_url")
        if key in kwargs
    }

    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # Comment and change messages
    comment_mediatype_deleted = "Mediatype {} deleted.".format(name)
    comment_mediatype_notdeleted = "Unable to delete mediatype: {}. ".format(name)
    comment_mediatype_notexists = "Mediatype {} does not exist.".format(name)
    changes_mediatype_deleted = {
        name: {
            "old": "Mediatype {} exists.".format(name),
            "new": "Mediatype {} deleted.".format(name),
        }
    }

    existing = __salt__["zabbix.mediatype_get"](name, **connection_args)

    # Dry run, test=true mode
    if __opts__["test"]:
        if existing:
            ret["result"] = None
            ret["comment"] = comment_mediatype_deleted
        else:
            ret["result"] = True
            ret["comment"] = comment_mediatype_notexists
        return ret

    if not existing:
        ret["result"] = True
        ret["comment"] = comment_mediatype_notexists
        return ret

    try:
        deletion = __salt__["zabbix.mediatype_delete"](
            existing[0]["mediatypeid"], **connection_args
        )
    except KeyError:
        deletion = False

    if deletion and "error" not in deletion:
        ret["result"] = True
        ret["comment"] = comment_mediatype_deleted
        ret["changes"] = changes_mediatype_deleted
    else:
        ret["result"] = False
        ret["comment"] = comment_mediatype_notdeleted + str(deletion["error"])
    return ret
# Virtual module name these states are loaded under (e.g. neutron_network.present).
__virtualname__ = "neutron_network"
def __virtual__():
    """
    Load these states only when the neutronng execution module is available.
    """
    if "neutronng.list_networks" not in __salt__:
        return (
            False,
            "The neutronng execution module failed to load: shade python module is not available",
        )
    return __virtualname__
def present(name, auth=None, **kwargs):
    """
    Ensure a network exists and is up-to-date

    name
        Name of the network

    provider
        A dict of network provider options.

    shared
        Set the network as shared.

    external
        Whether this network is externally accessible.

    admin_state_up
        Set the network administrative state to up.

    vlan
        Vlan ID. Alias for provider

        - physical_network: provider
        - network_type: vlan
        - segmentation_id: (vlan id)
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    kwargs = __utils__["args.clean_kwargs"](**kwargs)

    __salt__["neutronng.setup_clouds"](auth)

    kwargs["name"] = name
    network = __salt__["neutronng.network_get"](name=name)

    if network is None:
        if __opts__["test"] is True:
            ret["result"] = None
            ret["changes"] = kwargs
            ret["comment"] = "Network will be created."
            return ret

        # "vlan" is shorthand for a full provider dict.
        if "vlan" in kwargs:
            kwargs["provider"] = {
                "physical_network": "provider",
                "network_type": "vlan",
                "segmentation_id": kwargs["vlan"],
            }
            del kwargs["vlan"]

        # Resolve a project name to its id for the create call.
        if "project" in kwargs:
            projectname = kwargs["project"]
            project = __salt__["keystoneng.project_get"](name=projectname)
            if project:
                kwargs["project_id"] = project.id
                del kwargs["project"]
            else:
                ret["result"] = False
                ret["comment"] = "Project:{} not found.".format(projectname)
                return ret

        network = __salt__["neutronng.network_create"](**kwargs)
        ret["changes"] = network
        ret["comment"] = "Created network"
        return ret

    changes = __salt__["neutronng.compare_changes"](network, **kwargs)

    # there's no method for network update in shade right now;
    # can only delete and recreate
    if changes:
        if __opts__["test"] is True:
            ret["result"] = None
            ret["changes"] = changes
            # Bug fix: the message previously said "Project will be updated.",
            # copied from the keystone project state.
            ret["comment"] = "Network will be updated."
            return ret

        __salt__["neutronng.network_delete"](name=network)
        __salt__["neutronng.network_create"](**kwargs)
        ret["changes"].update(changes)
        ret["comment"] = "Updated network"

    return ret
def absent(name, auth=None, **kwargs):
    """
    Ensure a network does not exists

    name
        Name of the network
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    kwargs = __utils__["args.clean_kwargs"](**kwargs)

    __salt__["neutronng.setup_clouds"](auth)

    kwargs["name"] = name
    network = __salt__["neutronng.network_get"](name=name)

    # Already absent: nothing to report.
    if not network:
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["changes"] = {"id": network.id}
        ret["comment"] = "Network will be deleted."
        return ret

    __salt__["neutronng.network_delete"](name=network)
    ret["changes"]["id"] = network.id
    ret["comment"] = "Deleted network"
    return ret
from salt.exceptions import CommandExecutionError
def __virtual__():
    """
    Only load if the openstack_config module is in __salt__
    """
    # All three execution-module functions are required by the states below.
    # Previously only the first check returned a reason; the other two
    # returned a bare False, hiding why the module failed to load.
    for func in (
        "openstack_config.get",
        "openstack_config.set",
        "openstack_config.delete",
    ):
        if func not in __salt__:
            return (False, "openstack_config module could not be loaded")
    return True
def present(name, filename, section, value, parameter=None):
    """
    Ensure a value is set in an OpenStack configuration file.

    filename
        The full path to the configuration file

    section
        The section in which the parameter will be set

    parameter (optional)
        The parameter to change. If the parameter is not supplied, the name will be used as the parameter.

    value
        The value to set
    """
    if parameter is None:
        parameter = name

    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    try:
        old_value = __salt__["openstack_config.get"](
            filename=filename, section=section, parameter=parameter
        )

        if old_value == value:
            ret["result"] = True
            ret["comment"] = "The value is already set to the correct value"
            return ret

        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Value '{}' is set to be changed to '{}'.".format(
                old_value, value
            )
            return ret

    except CommandExecutionError as err:
        # "parameter not found" simply means we need to create it below;
        # anything else is a real failure.
        if not str(err).lower().startswith("parameter not found:"):
            raise
        # Bug fix: honor test=True for a missing parameter as well.
        # Previously this path fell through and performed a real write
        # even in dry-run mode.
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Value '{}' is set to be added.".format(value)
            return ret

    __salt__["openstack_config.set"](
        filename=filename, section=section, parameter=parameter, value=value
    )

    ret["changes"] = {"Value": "Updated"}
    ret["result"] = True
    ret["comment"] = "The value has been updated"

    return ret
def absent(name, filename, section, parameter=None):
    """
    Ensure a value is not set in an OpenStack configuration file.

    filename
        The full path to the configuration file

    section
        The section in which the parameter will be set

    parameter (optional)
        The parameter to change. If the parameter is not supplied, the name will be used as the parameter.
    """
    parameter = name if parameter is None else parameter

    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    try:
        old_value = __salt__["openstack_config.get"](
            filename=filename, section=section, parameter=parameter
        )
    except CommandExecutionError as err:
        # A missing parameter is exactly the desired end state.
        if str(err).lower().startswith("parameter not found:"):
            ret["result"] = True
            ret["comment"] = "The value is already absent"
            return ret
        raise

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Value '{}' is set to be deleted.".format(old_value)
        return ret

    __salt__["openstack_config.delete"](
        filename=filename, section=section, parameter=parameter
    )

    ret["changes"] = {"Value": "Deleted"}
    ret["result"] = True
    ret["comment"] = "The value has been deleted"

    return ret
import copy
import salt.utils.json
from salt.exceptions import SaltInvocationError
from salt.utils.dictdiffer import DictDiffer
def __virtual__():
    """
    Load only when the elasticsearch execution module (the grafana
    dashboard store used by these states) is available.
    """
    if "elasticsearch.exists" not in __salt__:
        return (False, "elasticsearch module could not be loaded")
    return "grafana"
def _parse_profile(profile):
"""
From a pillar key, or a dictionary, return index and host keys.
"""
if isinstance(profile, str):
_profile = __salt__["config.option"](profile)
if not _profile:
msg = "Pillar key for profile {} not found.".format(profile)
raise SaltInvocationError(msg)
else:
_profile = profile
hosts = _profile.get("hosts")
index = _profile.get("index")
return (hosts, index)
def _rows_differ(row, _row):
    """
    Check if grafana dashboard row and _row differ
    """

    def _without_panel_ids(source_row):
        # Panel ids are always (re)generated, so they must not count as a
        # difference; work on a deep copy to leave the caller's data intact.
        stripped = copy.deepcopy(source_row)
        for panel in stripped["panels"]:
            panel.pop("id", None)
        return stripped

    diff = DictDiffer(_without_panel_ids(row), _without_panel_ids(_row))
    return diff.changed() or diff.added() or diff.removed()
def dashboard_present(
    name,
    dashboard=None,
    dashboard_from_pillar=None,
    rows=None,
    rows_from_pillar=None,
    profile="grafana",
):
    """
    Ensure the grafana dashboard exists and is managed.

    name
        Name of the grafana dashboard.

    dashboard
        A dict that defines a dashboard that should be managed.

    dashboard_from_pillar
        A pillar key that contains a grafana dashboard dict. Mutually exclusive
        with dashboard.

    rows
        A list of grafana rows.

    rows_from_pillar
        A list of pillar keys that contain lists of grafana dashboard rows.
        Rows defined in the pillars will be appended to the rows defined in the
        state.

    profile
        A pillar key or dict that contains a list of hosts and an
        elasticsearch index to use.
    """
    ret = {"name": name, "result": None, "comment": "", "changes": {}}

    if not profile:
        raise SaltInvocationError("profile is a required argument.")
    if dashboard and dashboard_from_pillar:
        raise SaltInvocationError(
            "dashboard and dashboard_from_pillar are mutually exclusive arguments."
        )
    hosts, index = _parse_profile(profile)
    if not index:
        raise SaltInvocationError("index is a required key in the profile.")

    if not dashboard:
        dashboard = __salt__["pillar.get"](dashboard_from_pillar)
    if not rows:
        rows = []
    # Rows from pillar are appended after the rows given directly to the state.
    if rows_from_pillar:
        for key in rows_from_pillar:
            pillar_rows = __salt__["pillar.get"](key)
            # Pillar contains a list of rows
            if isinstance(pillar_rows, list):
                for row in pillar_rows:
                    rows.append(row)
            # Pillar contains a single row
            else:
                rows.append(pillar_rows)
    # Dashboards are stored as elasticsearch documents, keyed by dashboard name.
    exists = __salt__["elasticsearch.exists"](
        index=index, id=name, doc_type="dashboard", hosts=hosts
    )

    if exists:
        # The ES document wraps the dashboard definition as a JSON string under
        # _source.dashboard; decode it so rows can be merged below.
        _dashboard = __salt__["elasticsearch.get"](
            index=index, id=name, doc_type="dashboard", hosts=hosts
        )
        _dashboard = _dashboard.get("_source", {}).get("dashboard")
        _dashboard = salt.utils.json.loads(_dashboard)
    else:
        if not dashboard:
            raise SaltInvocationError(
                "Grafana dashboard does not exist and no"
                " dashboard template was provided."
            )
        if __opts__["test"]:
            ret["comment"] = "Dashboard {} is set to be created.".format(name)
            ret["result"] = None
            return ret
        _dashboard = dashboard

    # update_rows collects the titles of rows that were added or replaced,
    # _ids the panel ids already in use, and _data maps row title -> position.
    update_rows = []
    _ids = []
    _data = {}
    for _n, _row in enumerate(_dashboard["rows"]):
        # Collect the unique ids
        for _panel in _row["panels"]:
            if "id" in _panel:
                _ids.append(_panel["id"])
        # Collect all of the titles in the existing dashboard
        if "title" in _row:
            _data[_row["title"]] = _n
    _ids.sort()
    if not _ids:
        _ids = [1]

    for row in rows:
        if "title" not in row:
            raise SaltInvocationError("title is a required key for rows.")
        # Each panel needs to have a unique ID
        for panel in row["panels"]:
            _ids.append(_ids[-1] + 1)
            panel["id"] = _ids[-1]
        title = row["title"]
        # If the title doesn't exist, we need to add this row
        if title not in _data:
            update_rows.append(title)
            _dashboard["rows"].append(row)
            continue
        # For existing titles, replace the row if it differs
        _n = _data[title]
        if _rows_differ(row, _dashboard["rows"][_n]):
            _dashboard["rows"][_n] = row
            update_rows.append(title)
    if not update_rows:
        ret["result"] = True
        ret["comment"] = "Dashboard {} is up to date".format(name)
        return ret
    if __opts__["test"]:
        msg = "Dashboard {} is set to be updated.".format(name)
        if update_rows:
            msg = "{} The following rows set to be updated: {}".format(msg, update_rows)
        ret["comment"] = msg
        return ret
    # Re-encode the merged dashboard and write it back to elasticsearch.
    body = {
        "user": "guest",
        "group": "guest",
        "title": name,
        "dashboard": salt.utils.json.dumps(_dashboard),
    }
    updated = __salt__["elasticsearch.index"](
        index=index, doc_type="dashboard", body=body, id=name, hosts=hosts
    )
    if updated:
        ret["result"] = True
        ret["changes"]["changed"] = name
        msg = "Updated dashboard {}.".format(name)
        if update_rows:
            msg = "{} The following rows were updated: {}".format(msg, update_rows)
        ret["comment"] = msg
    else:
        ret["result"] = False
        msg = "Failed to update dashboard {}.".format(name)
        ret["comment"] = msg
    return ret
def dashboard_absent(name, hosts=None, profile="grafana"):
    """
    Ensure the named grafana dashboard is deleted.

    name
        Name of the grafana dashboard.

    profile
        A pillar key or dict that contains a list of hosts and an
        elasticsearch index to use.
    """
    ret = {"name": name, "result": None, "comment": "", "changes": {}}
    hosts, index = _parse_profile(profile)
    if not index:
        raise SaltInvocationError("index is a required key in the profile.")

    # Dashboards are stored as elasticsearch documents keyed by name.
    if not __salt__["elasticsearch.exists"](
        index=index, id=name, doc_type="dashboard", hosts=hosts
    ):
        ret["result"] = True
        ret["comment"] = "Dashboard {} does not exist.".format(name)
        return ret

    if __opts__["test"]:
        ret["comment"] = "Dashboard {} is set to be removed.".format(name)
        return ret

    if __salt__["elasticsearch.delete"](
        index=index, doc_type="dashboard", id=name, hosts=hosts
    ):
        ret["result"] = True
        ret["changes"] = {"old": name, "new": None}
    else:
        ret["result"] = False
        ret["comment"] = "Failed to delete {} dashboard.".format(name)
    return ret
def __virtual__():
    """
    Only load if the influxdb module is available
    """
    if "influxdb.db_exists" not in __salt__:
        return (False, "influxdb module could not be loaded")
    return "influxdb_retention_policy"
def convert_duration(duration):
    """
    Convert the a duration string into XXhYYmZZs format

    duration
        Duration to convert, a number followed by a unit suffix:
        ``h`` (hours), ``d`` (days) or ``w`` (weeks), e.g. ``7d``.

    Returns: duration_string
        String representation of duration in XXhYYmZZs format

    Raises ValueError for an unrecognized unit suffix.
    """
    # durations must be specified in days, weeks or hours
    if duration.endswith("h"):
        # Bug fix: the previous code passed the whole result of str.split()
        # (a list) to int(), raising TypeError for any hour value.
        hours = int(duration[:-1])
    elif duration.endswith("d"):
        hours = int(duration[:-1]) * 24
    elif duration.endswith("w"):
        hours = int(duration[:-1]) * 24 * 7
    else:
        # Previously an unknown suffix fell through to an obscure NameError.
        raise ValueError(
            "Invalid duration '{}': must be a number ending in 'h', 'd' or 'w'".format(
                duration
            )
        )
    return str(hours) + "h0m0s"
def present(name, database, duration="7d", replication=1, default=False, **client_args):
    """
    Ensure that given retention policy is present.

    name
        Name of the retention policy to create.

    database
        Database to create retention policy on.

    duration
        Retention duration, e.g. ``48h``, ``7d`` or ``2w`` (see
        ``convert_duration`` for the accepted suffixes). Defaults to ``7d``.

    replication
        Replication factor of the policy. Defaults to ``1``.

    default
        Whether this policy is the database's default policy.
        Defaults to ``False``.
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "retention policy {} is already present".format(name),
    }

    if not __salt__["influxdb.retention_policy_exists"](
        name=name, database=database, **client_args
    ):
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = " {} is absent and will be created".format(name)
            return ret
        if __salt__["influxdb.create_retention_policy"](
            database, name, duration, replication, default, **client_args
        ):
            ret["comment"] = "retention policy {} has been created".format(name)
            ret["changes"][name] = "Present"
            return ret
        else:
            ret["comment"] = "Failed to create retention policy {}".format(name)
            ret["result"] = False
            return ret
    else:
        # Policy already exists: compare each tracked attribute and record
        # every pending change before deciding whether to alter the policy.
        current_policy = __salt__["influxdb.get_retention_policy"](
            database=database, name=name, **client_args
        )
        update_policy = False
        # InfluxDB reports durations in XXhYYmZZs form, so normalize the
        # requested duration before comparing.
        if current_policy["duration"] != convert_duration(duration):
            update_policy = True
            ret["changes"]["duration"] = "Retention changed from {} to {}.".format(
                current_policy["duration"], duration
            )

        if current_policy["replicaN"] != replication:
            update_policy = True
            ret["changes"]["replication"] = "Replication changed from {} to {}.".format(
                current_policy["replicaN"], replication
            )

        if current_policy["default"] != default:
            update_policy = True
            ret["changes"]["default"] = "Default changed from {} to {}.".format(
                current_policy["default"], default
            )

        if update_policy:
            if __opts__["test"]:
                ret["result"] = None
                ret["comment"] = " {} is present and set to be changed".format(name)
                return ret
            else:
                if __salt__["influxdb.alter_retention_policy"](
                    database, name, duration, replication, default, **client_args
                ):
                    ret["comment"] = "retention policy {} has been changed".format(name)
                    return ret
                else:
                    ret["comment"] = "Failed to update retention policy {}".format(name)
                    ret["result"] = False
                    return ret

    return ret
def absent(name, database, **client_args):
    """
    Ensure that given retention policy is absent.

    name
        Name of the retention policy to remove.

    database
        Name of the database that the retention policy was defined on.
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "retention policy {} is not present".format(name),
    }

    # Already absent: keep the default success result and comment.
    if not __salt__["influxdb.retention_policy_exists"](database, name, **client_args):
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret[
            "comment"
        ] = "retention policy {} is present and needs to be removed".format(name)
        return ret

    if __salt__["influxdb.drop_retention_policy"](database, name, **client_args):
        ret["comment"] = "retention policy {} has been removed".format(name)
        ret["changes"][name] = "Absent"
    else:
        ret["comment"] = "Failed to remove retention policy {}".format(name)
        ret["result"] = False
    return ret
import logging
import os
import salt.utils.platform
# Module-level logger for this state module.
log = logging.getLogger(__name__)

# Virtual module name these states are loaded under (e.g. keychain.installed).
__virtualname__ = "keychain"
def __virtual__():
    """
    Only work on Mac OS
    """
    if not salt.utils.platform.is_darwin():
        return (False, "Only supported on Mac OS")
    return __virtualname__
def installed(name, password, keychain="/Library/Keychains/System.keychain", **kwargs):
    """
    Install a p12 certificate file into the macOS keychain

    name
        The certificate to install

    password
        The password for the certificate being installed formatted in the way
        described for openssl command in the PASS PHRASE ARGUMENTS section

    keychain
        The keychain to install the certificate to, this defaults to
        /Library/Keychains/System.keychain

    allow_any
        Allow any application to access the imported certificate without warning

    keychain_password
        If your keychain is likely to be locked pass the password and it will be unlocked
        before running the import
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    # Remote sources (http(s) URLs or salt:// fileserver paths) are cached
    # locally first so the keychain module can read them.
    if "http" in name or "salt" in name:
        name = __salt__["cp.cache_file"](name)

    certs = __salt__["keychain.list_certs"](keychain)
    friendly_name = __salt__["keychain.get_friendly_name"](name, password)
    if friendly_name in certs:
        # Same friendly name already installed: compare the hash of the p12
        # file against the installed cert and replace it on mismatch.
        file_hash = __salt__["keychain.get_hash"](name, password)
        keychain_hash = __salt__["keychain.get_hash"](friendly_name)

        if file_hash != keychain_hash:
            out = __salt__["keychain.uninstall"](
                friendly_name,
                keychain,
                keychain_password=kwargs.get("keychain_password"),
            )
            # The keychain module reports failure by including "unable"
            # in its output.
            if "unable" not in out:
                ret["comment"] += (
                    "Found a certificate with the same name but different hash,"
                    " removing it.\n"
                )
                ret["changes"]["uninstalled"] = friendly_name

                # Reset the certs found
                certs = __salt__["keychain.list_certs"](keychain)
            else:
                ret["result"] = False
                ret[
                    "comment"
                ] += "Found an incorrect cert but was unable to uninstall it: {}".format(
                    friendly_name
                )
                return ret

    if friendly_name not in certs:
        out = __salt__["keychain.install"](name, password, keychain, **kwargs)
        # "security import" reports success with an "... imported" message.
        if "imported" in out:
            ret["changes"]["installed"] = friendly_name
        else:
            ret["result"] = False
            ret["comment"] += "Failed to install {}".format(friendly_name)
    else:
        ret["comment"] += "{} already installed.".format(friendly_name)

    return ret
def uninstalled(
    name,
    password,
    keychain="/Library/Keychains/System.keychain",
    keychain_password=None,
):
    """
    Uninstall a p12 certificate file from the macOS keychain

    name
        The certificate to uninstall, this can be a path for a .p12 or the friendly
        name

    password
        The password for the certificate being installed formatted in the way
        described for openssl command in the PASS PHRASE ARGUMENTS section

    cert_name
        The friendly name of the certificate, this can be used instead of giving a
        certificate

    keychain
        The keychain to remove the certificate from, this defaults to
        /Library/Keychains/System.keychain

    keychain_password
        If your keychain is likely to be locked pass the password and it will be unlocked
        before running the import
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    installed_certs = __salt__["keychain.list_certs"](keychain)

    # A .p12 path (possibly remote) must be resolved to its friendly name;
    # anything else is treated as a friendly name already.
    if ".p12" in name:
        if "http" in name or "salt" in name:
            name = __salt__["cp.cache_file"](name)
        friendly_name = __salt__["keychain.get_friendly_name"](name, password)
    else:
        friendly_name = name

    if friendly_name not in installed_certs:
        ret["comment"] += "{} already uninstalled.".format(friendly_name)
        return ret

    out = __salt__["keychain.uninstall"](friendly_name, keychain, keychain_password)
    if "unable" in out:
        ret["result"] = False
        ret["comment"] += "Failed to uninstall {}".format(friendly_name)
    else:
        ret["changes"]["uninstalled"] = friendly_name
    return ret
def default_keychain(name, domain="user", user=None):
    """
    Set the default keychain to use

    name
        The chain in which to use as the default

    domain
        The domain to use valid values are user|system|common|dynamic, the default is user

    user
        The user to run as
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    if not os.path.exists(name):
        ret["result"] = False
        ret["comment"] += "Keychain not found at {}".format(name)
        return ret

    if name in __salt__["keychain.get_default_keychain"](user, domain):
        ret["comment"] += "{} was already the default keychain.".format(name)
        return ret

    out = __salt__["keychain.set_default_keychain"](name, domain, user)
    # An empty output means the security command succeeded silently;
    # (idiom fix: was ``len(out) == 0``).
    if not out:
        ret["changes"]["default"] = name
    else:
        ret["result"] = False
        ret["comment"] = "Failed to install keychain. {}".format(out)
    return ret
__virtualname__ = "win_iis"
def __virtual__():
    """
    Load only on minions that have the win_iis module.
    """
    # The execution module registers win_iis.create_site only when IIS
    # management is actually available on the minion.
    if "win_iis.create_site" not in __salt__:
        return (False, "win_iis module could not be loaded")
    return __virtualname__
def _get_binding_info(hostheader="", ipaddress="*", port=80):
"""
Combine the host header, IP address, and TCP port into bindingInformation format.
"""
ret = r"{}:{}:{}".format(ipaddress, port, hostheader.replace(" ", ""))
return ret
def deployed(
    name, sourcepath, apppool="", hostheader="", ipaddress="*", port=80, protocol="http"
):
    """
    Ensure the website has been deployed.

    .. note:
        Only the site name is validated: an existing site with the same name
        is accepted as-is, even if its configuration differs. The
        configuration of an existing site is never modified. If an
        application pool is specified and does not exist, it will be created.

    :param str name: The IIS site name.
    :param str sourcepath: The physical path of the IIS site.
    :param str apppool: The name of the IIS application pool.
    :param str hostheader: The host header of the binding.
    :param str ipaddress: The IP address of the binding.
    :param str port: The TCP port of the binding.
    :param str protocol: The application protocol of the binding.

    Example of usage:

    .. code-block:: yaml

        site0-deployed:
          win_iis.deployed:
            - name: site0
            - sourcepath: C:\\inetpub\\site0
            - apppool: site0
            - hostheader: site0.local
            - ipaddress: '*'
            - port: 443
            - protocol: https
    """
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}

    # Existence check only -- configuration drift is deliberately ignored.
    if name in __salt__["win_iis.list_sites"]():
        ret["comment"] = "Site already present: {}".format(name)
        ret["result"] = True
        return ret

    ret["changes"] = {"old": None, "new": name}
    if __opts__["test"]:
        ret["comment"] = "Site will be created: {}".format(name)
        return ret

    ret["comment"] = "Created site: {}".format(name)
    ret["result"] = __salt__["win_iis.create_site"](
        name, sourcepath, apppool, hostheader, ipaddress, port, protocol
    )
    return ret
def remove_site(name):
    """
    Delete a website from IIS.

    :param str name: The IIS site name.

    Usage:

    .. code-block:: yaml

        defaultwebsite-remove:
          win_iis.remove_site:
            - name: Default Web Site
    """
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}

    if name not in __salt__["win_iis.list_sites"]():
        ret["comment"] = "Site has already been removed: {}".format(name)
        ret["result"] = True
        return ret

    ret["changes"] = {"old": name, "new": None}
    if __opts__["test"]:
        ret["comment"] = "Site will be removed: {}".format(name)
        return ret

    ret["comment"] = "Removed site: {}".format(name)
    ret["result"] = __salt__["win_iis.remove_site"](name)
    return ret
def create_binding(
    name, site, hostheader="", ipaddress="*", port=80, protocol="http", sslflags=0
):
    """
    Create an IIS binding.

    .. note:
        Only the ipaddress:port:hostheader combination is validated; an
        existing binding with the same combination is accepted even if its
        other settings differ, and is never modified.

    :param str site: The IIS site name.
    :param str hostheader: The host header of the binding.
    :param str ipaddress: The IP address of the binding.
    :param str port: The TCP port of the binding.
    :param str protocol: The application protocol of the binding.
    :param str sslflags: The flags representing certificate type and storage of the binding.

    Example of usage:

    .. code-block:: yaml

        site0-https-binding:
          win_iis.create_binding:
            - site: site0
            - hostheader: site0.local
            - ipaddress: '*'
            - port: 443
            - protocol: https
            - sslflags: 0
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    binding = _get_binding_info(hostheader, ipaddress, port)

    if binding in __salt__["win_iis.list_bindings"](site):
        ret["comment"] = "Binding already present: {}".format(binding)
        ret["result"] = True
        return ret

    ret["changes"] = {"old": None, "new": binding}
    if __opts__["test"]:
        ret["comment"] = "Binding will be created: {}".format(binding)
        return ret

    ret["comment"] = "Created binding: {}".format(binding)
    ret["result"] = __salt__["win_iis.create_binding"](
        site, hostheader, ipaddress, port, protocol, sslflags
    )
    return ret
def remove_binding(name, site, hostheader="", ipaddress="*", port=80):
    """
    Remove an IIS binding.

    :param str site: The IIS site name.
    :param str hostheader: The host header of the binding.
    :param str ipaddress: The IP address of the binding.
    :param str port: The TCP port of the binding.

    Example of usage:

    .. code-block:: yaml

        site0-https-binding-remove:
          win_iis.remove_binding:
            - site: site0
            - hostheader: site0.local
            - ipaddress: '*'
            - port: 443
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    binding = _get_binding_info(hostheader, ipaddress, port)

    if binding not in __salt__["win_iis.list_bindings"](site):
        ret["comment"] = "Binding has already been removed: {}".format(binding)
        ret["result"] = True
        return ret

    ret["changes"] = {"old": binding, "new": None}
    if __opts__["test"]:
        ret["comment"] = "Binding will be removed: {}".format(binding)
        return ret

    ret["comment"] = "Removed binding: {}".format(binding)
    ret["result"] = __salt__["win_iis.remove_binding"](
        site, hostheader, ipaddress, port
    )
    return ret
def create_cert_binding(name, site, hostheader="", ipaddress="*", port=443, sslflags=0):
    """
    Assign a certificate to an IIS binding.

    .. note:
        The web binding that the certificate is being assigned to must already exist.

    :param str name: The thumbprint of the certificate.
    :param str site: The IIS site name.
    :param str hostheader: The host header of the binding.
    :param str ipaddress: The IP address of the binding.
    :param str port: The TCP port of the binding.
    :param str sslflags: Flags representing certificate type and certificate storage of the binding.

    Example of usage:

    .. code-block:: yaml

        site0-cert-binding:
          win_iis.create_cert_binding:
            - name: 9988776655443322111000AAABBBCCCDDDEEEFFF
            - site: site0
            - hostheader: site0.local
            - ipaddress: 192.168.1.199
            - port: 443
            - sslflags: 1

    .. versionadded:: 2016.11.0
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    binding = _get_binding_info(hostheader, ipaddress, port)
    cert_bindings = __salt__["win_iis.list_cert_bindings"](site)

    if binding in cert_bindings:
        bound_thumbprint = cert_bindings[binding]["certificatehash"]
        if name == bound_thumbprint:
            ret["comment"] = "Certificate binding already present: {}".format(name)
            ret["result"] = True
        else:
            # A different certificate already occupies this binding; refuse to
            # silently replace it.
            ret["comment"] = (
                "Certificate binding already present with a different"
                " thumbprint: {}".format(bound_thumbprint)
            )
            ret["result"] = False
        return ret

    ret["changes"] = {"old": None, "new": name}
    if __opts__["test"]:
        ret["comment"] = "Certificate binding will be created: {}".format(name)
        return ret

    ret["comment"] = "Created certificate binding: {}".format(name)
    ret["result"] = __salt__["win_iis.create_cert_binding"](
        name, site, hostheader, ipaddress, port, sslflags
    )
    return ret
def remove_cert_binding(name, site, hostheader="", ipaddress="*", port=443):
    """
    Remove a certificate from an IIS binding.

    .. note:
        This function only removes the certificate from the web binding. It does
        not remove the web binding itself.

    :param str name: The thumbprint of the certificate.
    :param str site: The IIS site name.
    :param str hostheader: The host header of the binding.
    :param str ipaddress: The IP address of the binding.
    :param str port: The TCP port of the binding.

    Example of usage:

    .. code-block:: yaml

        site0-cert-binding-remove:
          win_iis.remove_cert_binding:
            - name: 9988776655443322111000AAABBBCCCDDDEEEFFF
            - site: site0
            - hostheader: site0.local
            - ipaddress: 192.168.1.199
            - port: 443

    .. versionadded:: 2016.11.0
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    binding_info = _get_binding_info(hostheader, ipaddress, port)
    current_cert_bindings = __salt__["win_iis.list_cert_bindings"](site)

    if binding_info not in current_cert_bindings:
        ret["comment"] = "Certificate binding has already been removed: {}".format(name)
        ret["result"] = True
    elif __opts__["test"]:
        ret["comment"] = "Certificate binding will be removed: {}".format(name)
        ret["changes"] = {"old": name, "new": None}
    else:
        current_name = current_cert_bindings[binding_info]["certificatehash"]
        if name == current_name:
            ret["comment"] = "Removed certificate binding: {}".format(name)
            ret["changes"] = {"old": name, "new": None}
            ret["result"] = __salt__["win_iis.remove_cert_binding"](
                name, site, hostheader, ipaddress, port
            )
        else:
            # Bug fix: this case previously fell through silently, returning
            # result=None (invalid outside test mode) and an empty comment.
            # Report the thumbprint mismatch and fail explicitly.
            ret["comment"] = (
                "Certificate binding could not be removed: the binding holds a"
                " different thumbprint: {}".format(current_name)
            )
            ret["result"] = False
    return ret
def create_apppool(name):
    """
    Create an IIS application pool.

    .. note:
        Only the application pool name is validated; an existing pool with
        the same name is accepted as-is and its configuration is never
        modified.

    :param str name: The name of the IIS application pool.

    Usage:

    .. code-block:: yaml

        site0-apppool:
          win_iis.create_apppool:
            - name: site0
    """
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}

    if name in __salt__["win_iis.list_apppools"]():
        ret["comment"] = "Application pool already present: {}".format(name)
        ret["result"] = True
        return ret

    ret["changes"] = {"old": None, "new": name}
    if __opts__["test"]:
        ret["comment"] = "Application pool will be created: {}".format(name)
        return ret

    ret["comment"] = "Created application pool: {}".format(name)
    ret["result"] = __salt__["win_iis.create_apppool"](name)
    return ret
def remove_apppool(name):
    """
    Remove an IIS application pool.

    :param str name: The name of the IIS application pool.

    Usage:

    .. code-block:: yaml

        defaultapppool-remove:
          win_iis.remove_apppool:
            - name: DefaultAppPool
    """
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}

    if name not in __salt__["win_iis.list_apppools"]():
        ret["comment"] = "Application pool has already been removed: {}".format(name)
        ret["result"] = True
        return ret

    ret["changes"] = {"old": name, "new": None}
    if __opts__["test"]:
        ret["comment"] = "Application pool will be removed: {}".format(name)
        return ret

    ret["comment"] = "Removed application pool: {}".format(name)
    ret["result"] = __salt__["win_iis.remove_apppool"](name)
    return ret
def container_setting(name, container, settings=None):
    """
    Set the value of the setting for an IIS container.

    :param str name: The name of the IIS container.
    :param str container: The type of IIS container. The container types are:
        AppPools, Sites, SslBindings
    :param str settings: A dictionary of the setting names and their values.

    Example of usage for the ``AppPools`` container:

    .. code-block:: yaml

        site0-apppool-setting:
          win_iis.container_setting:
            - name: site0
            - container: AppPools
            - settings:
                managedPipelineMode: Integrated
                processModel.maxProcesses: 1
                processModel.userName: TestUser
                processModel.password: TestPassword
                processModel.identityType: SpecificUser

    Example of usage for the ``Sites`` container:

    .. code-block:: yaml

        site0-site-setting:
          win_iis.container_setting:
            - name: site0
            - container: Sites
            - settings:
                logFile.logFormat: W3C
                logFile.period: Daily
                limits.maxUrlSegments: 32
    """
    # Mapping from the numeric identityType values users may supply to the
    # string form used for comparison below.
    identityType_map2string = {
        0: "LocalSystem",
        1: "LocalService",
        2: "NetworkService",
        3: "SpecificUser",
        4: "ApplicationPoolIdentity",
    }
    ret = {"name": name, "changes": {}, "comment": "", "result": None}

    if not settings:
        ret["comment"] = "No settings to change provided."
        ret["result"] = True
        return ret

    # Proposed changes and post-apply verification failures, tracked separately.
    ret_settings = {
        "changes": {},
        "failures": {},
    }

    current_settings = __salt__["win_iis.get_container_setting"](
        name=name, container=container, settings=settings.keys()
    )
    for setting in settings:
        # map identity type from numeric to string for comparing
        if (
            setting == "processModel.identityType"
            and settings[setting] in identityType_map2string.keys()
        ):
            # NOTE: mutates the caller-supplied settings dict in place; the
            # translated value is also what gets sent to set_container_setting.
            settings[setting] = identityType_map2string[settings[setting]]

        # Compare as strings so numeric/boolean values round-trip cleanly.
        if str(settings[setting]) != str(current_settings[setting]):
            if setting == "processModel.password":
                # Never expose passwords in state output.
                ret_settings["changes"][setting] = {
                    "old": "XXX-REDACTED-XXX",
                    "new": "XXX-REDACTED-XXX",
                }
            else:
                ret_settings["changes"][setting] = {
                    "old": current_settings[setting],
                    "new": settings[setting],
                }

    if not ret_settings["changes"]:
        ret["comment"] = "Settings already contain the provided values."
        ret["result"] = True
        return ret
    elif __opts__["test"]:
        ret["comment"] = "Settings will be changed."
        ret["changes"] = ret_settings["changes"]
        return ret

    __salt__["win_iis.set_container_setting"](
        name=name, container=container, settings=settings
    )

    # Re-read the settings to verify every requested value actually took effect.
    new_settings = __salt__["win_iis.get_container_setting"](
        name=name, container=container, settings=settings.keys()
    )
    for setting in settings:
        if str(settings[setting]) != str(new_settings[setting]):
            if setting == "processModel.password":
                ret_settings["failures"][setting] = {
                    "old": "XXX-REDACTED-XXX",
                    "new": "XXX-REDACTED-XXX",
                }
            else:
                ret_settings["failures"][setting] = {
                    "old": current_settings[setting],
                    "new": new_settings[setting],
                }
            # A failed setting is no longer reported as a successful change.
            ret_settings["changes"].pop(setting, None)
        else:
            if setting == "processModel.password":
                # Password applied; record a redacted change entry for it.
                ret_settings["changes"][setting] = {
                    "old": "XXX-REDACTED-XXX",
                    "new": "XXX-REDACTED-XXX",
                }

    if ret_settings["failures"]:
        ret["comment"] = "Some settings failed to change."
        ret["changes"] = ret_settings
        ret["result"] = False
    else:
        ret["comment"] = "Set settings to contain the provided values."
        ret["changes"] = ret_settings["changes"]
        ret["result"] = True

    return ret
def create_app(name, site, sourcepath, apppool=None):
    """
    Create an IIS application.

    .. note:
        Only the application name is validated; an existing application with
        the same name is accepted as-is and its configuration is never
        modified.

    :param str name: The IIS application.
    :param str site: The IIS site name.
    :param str sourcepath: The physical path.
    :param str apppool: The name of the IIS application pool.

    Example of usage:

    .. code-block:: yaml

        site0-v1-app:
          win_iis.create_app:
            - name: v1
            - site: site0
            - sourcepath: C:\\inetpub\\site0\\v1
            - apppool: site0
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}

    if name in __salt__["win_iis.list_apps"](site):
        ret["comment"] = "Application already present: {}".format(name)
        ret["result"] = True
        return ret

    ret["changes"] = {"old": None, "new": name}
    if __opts__["test"]:
        ret["comment"] = "Application will be created: {}".format(name)
        return ret

    ret["comment"] = "Created application: {}".format(name)
    ret["result"] = __salt__["win_iis.create_app"](name, site, sourcepath, apppool)
    return ret
def remove_app(name, site):
    """
    Remove an IIS application.

    :param str name: The application name.
    :param str site: The IIS site name.

    Usage:

    .. code-block:: yaml

        site0-v1-app-remove:
          win_iis.remove_app:
            - name: v1
            - site: site0
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}

    if name not in __salt__["win_iis.list_apps"](site):
        ret["comment"] = "Application has already been removed: {}".format(name)
        ret["result"] = True
        return ret

    ret["changes"] = {"old": name, "new": None}
    if __opts__["test"]:
        ret["comment"] = "Application will be removed: {}".format(name)
        return ret

    ret["comment"] = "Removed application: {}".format(name)
    ret["result"] = __salt__["win_iis.remove_app"](name, site)
    return ret
def create_vdir(name, site, sourcepath, app="/"):
    """
    Create an IIS virtual directory.

    .. note:
        Only the virtual directory name is validated; an existing virtual
        directory with the same name is accepted as-is and its configuration
        is never modified.

    :param str name: The virtual directory name.
    :param str site: The IIS site name.
    :param str sourcepath: The physical path.
    :param str app: The IIS application.

    Example of usage:

    .. code-block:: yaml

        site0-foo-vdir:
          win_iis.create_vdir:
            - name: foo
            - site: site0
            - sourcepath: C:\\inetpub\\vdirs\\foo
            - app: v1
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}

    if name in __salt__["win_iis.list_vdirs"](site, app):
        ret["comment"] = "Virtual directory already present: {}".format(name)
        ret["result"] = True
        return ret

    ret["changes"] = {"old": None, "new": name}
    if __opts__["test"]:
        ret["comment"] = "Virtual directory will be created: {}".format(name)
        return ret

    ret["comment"] = "Created virtual directory: {}".format(name)
    ret["result"] = __salt__["win_iis.create_vdir"](name, site, sourcepath, app)
    return ret
def remove_vdir(name, site, app="/"):
    """
    Remove an IIS virtual directory.

    :param str name: The virtual directory name.
    :param str site: The IIS site name.
    :param str app: The IIS application.

    Example of usage:

    .. code-block:: yaml

        site0-foo-vdir-remove:
          win_iis.remove_vdir:
            - name: foo
            - site: site0
            - app: v1
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}

    if name not in __salt__["win_iis.list_vdirs"](site, app):
        ret["comment"] = "Virtual directory has already been removed: {}".format(name)
        ret["result"] = True
        return ret

    ret["changes"] = {"old": name, "new": None}
    if __opts__["test"]:
        ret["comment"] = "Virtual directory will be removed: {}".format(name)
        return ret

    ret["comment"] = "Removed virtual directory: {}".format(name)
    ret["result"] = __salt__["win_iis.remove_vdir"](name, site, app)
    return ret
def set_app(name, site, settings=None):
    # pylint: disable=anomalous-backslash-in-string
    r"""
    .. versionadded:: 2017.7.0

    Set the value of the setting for an IIS web application.

    .. note::
        This function only configures existing app. Params are case sensitive.

    :param str name: The IIS application.
    :param str site: The IIS site name.
    :param str settings: A dictionary of the setting names and their values.

    Available settings:

    - ``physicalPath`` - The physical path of the webapp
    - ``applicationPool`` - The application pool for the webapp
    - ``userName`` "connectAs" user
    - ``password`` "connectAs" password for user

    :rtype: bool

    Example of usage:

    .. code-block:: yaml

        site0-webapp-setting:
          win_iis.set_app:
            - name: app0
            - site: Default Web Site
            - settings:
                userName: domain\\user
                password: pass
                physicalPath: c:\inetpub\wwwroot
                applicationPool: appPool0
    """
    # pylint: enable=anomalous-backslash-in-string
    ret = {"name": name, "changes": {}, "comment": "", "result": None}

    if not settings:
        ret["comment"] = "No settings to change provided."
        ret["result"] = True
        return ret

    # Proposed changes and post-apply verification failures, tracked separately.
    ret_settings = {
        "changes": {},
        "failures": {},
    }

    current_settings = __salt__["win_iis.get_webapp_settings"](
        name=name, site=site, settings=settings.keys()
    )
    for setting in settings:
        # Compare as strings so numeric/boolean values round-trip cleanly.
        if str(settings[setting]) != str(current_settings[setting]):
            ret_settings["changes"][setting] = {
                "old": current_settings[setting],
                "new": settings[setting],
            }
    if not ret_settings["changes"]:
        ret["comment"] = "Settings already contain the provided values."
        ret["result"] = True
        return ret
    elif __opts__["test"]:
        ret["comment"] = "Settings will be changed."
        ret["changes"] = ret_settings
        return ret

    __salt__["win_iis.set_webapp_settings"](name=name, site=site, settings=settings)
    # Re-read the settings to verify every requested value actually took effect.
    new_settings = __salt__["win_iis.get_webapp_settings"](
        name=name, site=site, settings=settings.keys()
    )

    for setting in settings:
        if str(settings[setting]) != str(new_settings[setting]):
            ret_settings["failures"][setting] = {
                "old": current_settings[setting],
                "new": new_settings[setting],
            }
            # A failed setting is no longer reported as a successful change.
            ret_settings["changes"].pop(setting, None)

    if ret_settings["failures"]:
        ret["comment"] = "Some settings failed to change."
        ret["changes"] = ret_settings
        ret["result"] = False
    else:
        ret["comment"] = "Set settings to contain the provided values."
        ret["changes"] = ret_settings["changes"]
        ret["result"] = True

    return ret
def webconfiguration_settings(name, settings=None):
    r"""
    Set the value of webconfiguration settings.

    :param str name: The name of the IIS PSPath containing the settings.
        Possible PSPaths are :
        MACHINE, MACHINE/WEBROOT, IIS:\, IIS:\Sites\sitename, ...
    :param dict settings: Dictionaries of dictionaries.
        You can match a specific item in a collection with this syntax inside a key:
        'Collection[{name: site0}].logFile.directory'

    Example of usage for the ``MACHINE/WEBROOT`` PSPath:

    .. code-block:: yaml

        MACHINE-WEBROOT-level-security:
          win_iis.webconfiguration_settings:
            - name: 'MACHINE/WEBROOT'
            - settings:
                system.web/authentication/forms:
                  requireSSL: True
                  protection: "All"
                  credentials.passwordFormat: "SHA1"
                system.web/httpCookies:
                  httpOnlyCookies: True

    Example of usage for the ``IIS:\Sites\site0`` PSPath:

    .. code-block:: yaml

        site0-IIS-Sites-level-security:
          win_iis.webconfiguration_settings:
            - name: 'IIS:\Sites\site0'
            - settings:
                system.webServer/httpErrors:
                  errorMode: "DetailedLocalOnly"
                system.webServer/security/requestFiltering:
                  allowDoubleEscaping: False
                  verbs.Collection:
                    - verb: TRACE
                      allowed: False
                  fileExtensions.allowUnlisted: False

    Example of usage for the ``IIS:\`` PSPath with a collection matching:

    .. code-block:: yaml

        site0-IIS-level-security:
          win_iis.webconfiguration_settings:
            - name: 'IIS:\'
            - settings:
                system.applicationHost/sites:
                  'Collection[{name: site0}].logFile.directory': 'C:\logs\iis\site0'
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}

    if not settings:
        ret["comment"] = "No settings to change provided."
        ret["result"] = True
        return ret

    # Proposed changes and post-apply verification failures, tracked separately.
    ret_settings = {
        "changes": {},
        "failures": {},
    }

    # Flatten the nested {filter: {setting: value}} mapping into the list
    # form expected by the win_iis execution module.
    settings_list = list()
    for setting_filter, filter_settings in settings.items():
        for setting_name, value in filter_settings.items():
            settings_list.append(
                {"filter": setting_filter, "name": setting_name, "value": value}
            )

    current_settings_list = __salt__["win_iis.get_webconfiguration_settings"](
        name=name, settings=settings_list
    )
    for idx, setting in enumerate(settings_list):
        # Collections are compared element-wise as dicts; scalars as strings.
        is_collection = setting["name"].split(".")[-1] == "Collection"
        # If this is a new setting and not an update to an existing setting
        if len(current_settings_list) <= idx:
            ret_settings["changes"][setting["filter"] + "." + setting["name"]] = {
                "old": {},
                "new": settings_list[idx]["value"],
            }
        elif (
            is_collection
            and list(map(dict, setting["value"]))
            != list(map(dict, current_settings_list[idx]["value"]))
        ) or (
            not is_collection
            and str(setting["value"]) != str(current_settings_list[idx]["value"])
        ):
            ret_settings["changes"][setting["filter"] + "." + setting["name"]] = {
                "old": current_settings_list[idx]["value"],
                "new": settings_list[idx]["value"],
            }
    if not ret_settings["changes"]:
        ret["comment"] = "Settings already contain the provided values."
        ret["result"] = True
        return ret
    elif __opts__["test"]:
        ret["comment"] = "Settings will be changed."
        ret["changes"] = ret_settings
        return ret

    success = __salt__["win_iis.set_webconfiguration_settings"](
        name=name, settings=settings_list
    )

    # Re-read the settings to verify every requested value actually took effect.
    new_settings_list = __salt__["win_iis.get_webconfiguration_settings"](
        name=name, settings=settings_list
    )
    for idx, setting in enumerate(settings_list):
        is_collection = setting["name"].split(".")[-1] == "Collection"

        if (is_collection and setting["value"] != new_settings_list[idx]["value"]) or (
            not is_collection
            and str(setting["value"]) != str(new_settings_list[idx]["value"])
        ):
            ret_settings["failures"][setting["filter"] + "." + setting["name"]] = {
                "old": current_settings_list[idx]["value"],
                "new": new_settings_list[idx]["value"],
            }
            # Bug fix: this previously called dict.get(), which is a no-op
            # whose return value was discarded, so failed settings were still
            # reported as successful changes. Use pop() to drop them, matching
            # container_setting and set_app.
            ret_settings["changes"].pop(
                setting["filter"] + "." + setting["name"], None
            )

    if ret_settings["failures"]:
        ret["comment"] = "Some settings failed to change."
        ret["changes"] = ret_settings
        ret["result"] = False
    else:
        ret["comment"] = "Set settings to contain the provided values."
        ret["changes"] = ret_settings["changes"]
        ret["result"] = success

    return ret
import logging

# Module-level logger for this state module.
log = logging.getLogger(__name__)
def absent(name):
    """
    Ensure that the named index is absent.

    name
        Name of the index to remove
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    try:
        index = __salt__["elasticsearch.index_get"](index=name)
        if not index or name not in index:
            ret["comment"] = "Index {} is already absent".format(name)
            return ret

        if __opts__["test"]:
            ret["comment"] = "Index {} will be removed".format(name)
            ret["changes"]["old"] = index[name]
            ret["result"] = None
        else:
            ret["result"] = __salt__["elasticsearch.index_delete"](index=name)
            if ret["result"]:
                ret["comment"] = "Successfully removed index {}".format(name)
                ret["changes"]["old"] = index[name]
            else:
                ret["comment"] = "Failed to remove index {} for unknown reasons".format(
                    name
                )
    except Exception as err:  # pylint: disable=broad-except
        # Surface any client/transport error as a state failure.
        ret["result"] = False
        ret["comment"] = str(err)

    return ret
def present(name, definition=None):
    """
    .. versionadded:: 2015.8.0
    .. versionchanged:: 2017.3.0
        Marked ``definition`` as optional.

    Ensure that the named index is present.

    name
        Name of the index to add

    definition
        Optional dict for creation parameters as per https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html

    **Example:**

    .. code-block:: yaml

        # Default settings
        mytestindex:
          elasticsearch_index.present

        # Extra settings
        mytestindex2:
          elasticsearch_index.present:
            - definition:
                settings:
                  index:
                    number_of_shards: 10
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    try:
        if __salt__["elasticsearch.index_exists"](index=name):
            ret["comment"] = "Index {} is already present".format(name)
            return ret

        if __opts__["test"]:
            ret["comment"] = "Index {} does not exist and will be created".format(name)
            ret["changes"] = {"new": definition}
            ret["result"] = None
            return ret

        output = __salt__["elasticsearch.index_create"](index=name, body=definition)
        if output:
            ret["comment"] = "Successfully created index {}".format(name)
            # Report the index as actually created, not just as requested.
            ret["changes"] = {
                "new": __salt__["elasticsearch.index_get"](index=name)[name]
            }
        else:
            ret["result"] = False
            ret["comment"] = "Cannot create index {}, {}".format(name, output)
    except Exception as err:  # pylint: disable=broad-except
        # Surface any client/transport error as a state failure.
        ret["result"] = False
        ret["comment"] = str(err)

    return ret
def __virtual__():
    """
    Only load if the postgres module is present
    """
    # datadir_init is only registered when the postgres binaries were found.
    if "postgres.datadir_init" in __salt__:
        return True
    return (
        False,
        "Unable to load postgres module. Make sure `postgres.bins_dir` is set.",
    )
def present(
    name,
    user=None,
    password=None,
    auth="password",
    encoding="UTF8",
    locale=None,
    runas=None,
    waldir=None,
    checksums=False,
):
    """
    Initialize the PostgreSQL data directory

    name
        The name of the directory to initialize

    user
        The database superuser name

    password
        The password to set for the postgres user

    auth
        The default authentication method for local connections

    encoding
        The default encoding for new databases

    locale
        The default locale for new databases

    waldir
        The transaction log (WAL) directory (default is to keep WAL
        inside the data directory)

        .. versionadded:: 2019.2.0

    checksums
        If True, the cluster will be created with data page checksums.

        .. note:: Data page checksums are supported since PostgreSQL 9.3.

        .. versionadded:: 2019.2.0

    runas
        The system user the operation should be performed on behalf of
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Postgres data directory {} is already present".format(name),
    }

    if __salt__["postgres.datadir_exists"](name=name):
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Postgres data directory {} is set to be initialized".format(
            name
        )
        return ret

    initialized = __salt__["postgres.datadir_init"](
        name,
        user=user,
        password=password,
        auth=auth,
        encoding=encoding,
        locale=locale,
        waldir=waldir,
        checksums=checksums,
        runas=runas,
    )
    if initialized:
        ret["comment"] = "Postgres data directory {} has been initialized".format(name)
        ret["changes"][name] = "Present"
    else:
        ret["result"] = False
        ret["comment"] = "Postgres data directory {} initialization failed".format(name)
    return ret
import copy
import logging

# Module-level logger; used to report forced resource recreation.
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only load if the kubernetes module is available in __salt__
    """
    # kubernetes.ping is only registered when the client library is usable.
    if "kubernetes.ping" not in __salt__:
        return (False, "kubernetes module could not be loaded")
    return True
def _error(ret, err_msg):
"""
Helper function to propagate errors to
the end user.
"""
ret["result"] = False
ret["comment"] = err_msg
return ret
def deployment_absent(name, namespace="default", **kwargs):
    """
    Ensures that the named deployment is absent from the given namespace.

    name
        The name of the deployment

    namespace
        The name of the namespace
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    deployment = __salt__["kubernetes.show_deployment"](name, namespace, **kwargs)
    if deployment is None:
        # In test mode, "nothing to do" is reported as None rather than True.
        ret["result"] = None if __opts__["test"] else True
        ret["comment"] = "The deployment does not exist"
        return ret

    if __opts__["test"]:
        ret["comment"] = "The deployment is going to be deleted"
        ret["result"] = None
        return ret

    res = __salt__["kubernetes.delete_deployment"](name, namespace, **kwargs)
    if res["code"] == 200:
        ret["result"] = True
        ret["changes"] = {"kubernetes.deployment": {"new": "absent", "old": "present"}}
        ret["comment"] = res["message"]
    else:
        ret["comment"] = "Something went wrong, response: {}".format(res)

    return ret
def deployment_present(
    name,
    namespace="default",
    metadata=None,
    spec=None,
    source="",
    template="",
    **kwargs
):
    """
    Ensures that the named deployment is present inside of the specified
    namespace with the given metadata and spec.
    If the deployment exists it will be replaced.

    name
        The name of the deployment.

    namespace
        The namespace holding the deployment. The 'default' one is going to be
        used unless a different one is specified.

    metadata
        The metadata of the deployment object.

    spec
        The spec of the deployment object.

    source
        A file containing the definition of the deployment (metadata and
        spec) in the official kubernetes format.

    template
        Template engine to be used to render the source file.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # 'source' is a complete definition, so it is mutually exclusive with the
    # piecewise metadata/spec arguments.
    if (metadata or spec) and source:
        return _error(
            ret, "'source' cannot be used in combination with 'metadata' or 'spec'"
        )

    if metadata is None:
        metadata = {}

    if spec is None:
        spec = {}

    deployment = __salt__["kubernetes.show_deployment"](name, namespace, **kwargs)

    if deployment is None:
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "The deployment is going to be created"
            return ret
        res = __salt__["kubernetes.create_deployment"](
            name=name,
            namespace=namespace,
            metadata=metadata,
            spec=spec,
            source=source,
            template=template,
            saltenv=__env__,
            **kwargs
        )
        ret["changes"]["{}.{}".format(namespace, name)] = {"old": {}, "new": res}
    else:
        if __opts__["test"]:
            ret["result"] = None
            # Bug fix: this branch previously returned with an empty comment,
            # giving the user no indication of what would happen.
            ret["comment"] = "The deployment is going to be replaced"
            return ret

        # TODO: improve checks  # pylint: disable=fixme
        log.info("Forcing the recreation of the deployment")
        ret["comment"] = "The deployment is already present. Forcing recreation"
        res = __salt__["kubernetes.replace_deployment"](
            name=name,
            namespace=namespace,
            metadata=metadata,
            spec=spec,
            source=source,
            template=template,
            saltenv=__env__,
            **kwargs
        )
        ret["changes"] = {"metadata": metadata, "spec": spec}

    ret["result"] = True
    return ret
def service_present(
    name,
    namespace="default",
    metadata=None,
    spec=None,
    source="",
    template="",
    **kwargs
):
    """
    Ensures that the named service is present inside of the specified namespace
    with the given metadata and spec.
    If the service exists it will be replaced.

    name
        The name of the service.

    namespace
        The namespace holding the service. The 'default' one is going to be
        used unless a different one is specified.

    metadata
        The metadata of the service object.

    spec
        The spec of the service object.

    source
        A file containing the definition of the service (metadata and
        spec) in the official kubernetes format.

    template
        Template engine to be used to render the source file.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # 'source' is a complete definition, so it is mutually exclusive with
    # inline 'metadata'/'spec'.
    if (metadata or spec) and source:
        return _error(
            ret, "'source' cannot be used in combination with 'metadata' or 'spec'"
        )

    if metadata is None:
        metadata = {}

    if spec is None:
        spec = {}

    service = __salt__["kubernetes.show_service"](name, namespace, **kwargs)

    if service is None:
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "The service is going to be created"
            return ret
        res = __salt__["kubernetes.create_service"](
            name=name,
            namespace=namespace,
            metadata=metadata,
            spec=spec,
            source=source,
            template=template,
            saltenv=__env__,
            **kwargs
        )
        ret["changes"]["{}.{}".format(namespace, name)] = {"old": {}, "new": res}
    else:
        if __opts__["test"]:
            # Fix: previously this branch returned without any comment,
            # leaving test-mode output unexplained.
            ret["result"] = None
            ret["comment"] = "The service is going to be replaced"
            return ret

        # TODO: improve checks  # pylint: disable=fixme
        log.info("Forcing the recreation of the service")
        ret["comment"] = "The service is already present. Forcing recreation"
        res = __salt__["kubernetes.replace_service"](
            name=name,
            namespace=namespace,
            metadata=metadata,
            spec=spec,
            source=source,
            template=template,
            old_service=service,
            saltenv=__env__,
            **kwargs
        )

    ret["changes"] = {"metadata": metadata, "spec": spec}
    ret["result"] = True
    return ret
def service_absent(name, namespace="default", **kwargs):
    """
    Ensures that the named service is absent from the given namespace.

    name
        The name of the service

    namespace
        The name of the namespace
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    existing = __salt__["kubernetes.show_service"](name, namespace, **kwargs)
    if existing is None:
        ret["result"] = None if __opts__["test"] else True
        ret["comment"] = "The service does not exist"
        return ret

    if __opts__["test"]:
        ret["comment"] = "The service is going to be deleted"
        ret["result"] = None
        return ret

    res = __salt__["kubernetes.delete_service"](name, namespace, **kwargs)
    if res["code"] != 200:
        ret["comment"] = "Something went wrong, response: {}".format(res)
        return ret

    ret["result"] = True
    ret["changes"] = {"kubernetes.service": {"new": "absent", "old": "present"}}
    ret["comment"] = res["message"]
    return ret
def namespace_absent(name, **kwargs):
    """
    Ensures that the named namespace is absent.

    name
        The name of the namespace
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}
    namespace = __salt__["kubernetes.show_namespace"](name, **kwargs)
    if namespace is None:
        # Already gone; in test mode report None (no pending change known).
        ret["result"] = True if not __opts__["test"] else None
        ret["comment"] = "The namespace does not exist"
        return ret
    if __opts__["test"]:
        ret["comment"] = "The namespace is going to be deleted"
        ret["result"] = None
        return ret
    res = __salt__["kubernetes.delete_namespace"](name, **kwargs)
    # Namespace deletion is asynchronous: treat an HTTP 200 as success, and
    # also accept a "Terminating" status, which may arrive either as a plain
    # string or as a structured dict with a "phase" key.
    if (
        res["code"] == 200
        or (isinstance(res["status"], str) and "Terminating" in res["status"])
        or (isinstance(res["status"], dict) and res["status"]["phase"] == "Terminating")
    ):
        ret["result"] = True
        ret["changes"] = {"kubernetes.namespace": {"new": "absent", "old": "present"}}
        # Prefer the server-provided message; fall back to a generic marker.
        if res["message"]:
            ret["comment"] = res["message"]
        else:
            ret["comment"] = "Terminating"
    else:
        ret["comment"] = "Something went wrong, response: {}".format(res)
    return ret
def namespace_present(name, **kwargs):
    """
    Ensures that the named namespace is present.

    name
        The name of the namespace.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    existing = __salt__["kubernetes.show_namespace"](name, **kwargs)
    if existing is not None:
        # Nothing to create.
        ret["result"] = None if __opts__["test"] else True
        ret["comment"] = "The namespace already exists"
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "The namespace is going to be created"
        return ret

    res = __salt__["kubernetes.create_namespace"](name, **kwargs)
    ret["result"] = True
    ret["changes"]["namespace"] = {"old": {}, "new": res}
    return ret
def secret_absent(name, namespace="default", **kwargs):
    """
    Ensures that the named secret is absent from the given namespace.

    name
        The name of the secret

    namespace
        The name of the namespace
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    if __salt__["kubernetes.show_secret"](name, namespace, **kwargs) is None:
        ret["result"] = None if __opts__["test"] else True
        ret["comment"] = "The secret does not exist"
        return ret

    if __opts__["test"]:
        ret["comment"] = "The secret is going to be deleted"
        ret["result"] = None
        return ret

    __salt__["kubernetes.delete_secret"](name, namespace, **kwargs)

    # As of kubernetes 1.6.4 no status code is returned when deleting a
    # secret; the execution module raises if the server reports an error,
    # so reaching this point means the deletion was accepted.
    ret["result"] = True
    ret["changes"] = {"kubernetes.secret": {"new": "absent", "old": "present"}}
    ret["comment"] = "Secret deleted"
    return ret
def secret_present(
    name, namespace="default", data=None, source=None, template=None, **kwargs
):
    """
    Ensures that the named secret is present inside of the specified namespace
    with the given data.
    If the secret exists it will be replaced.

    name
        The name of the secret.

    namespace
        The namespace holding the secret. The 'default' one is going to be
        used unless a different one is specified.

    data
        The dictionary holding the secrets.

    source
        A file containing the data of the secret in plain format.

    template
        Template engine to be used to render the source file.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # 'source' carries the full secret contents, so it is mutually exclusive
    # with inline 'data'.
    if data and source:
        return _error(ret, "'source' cannot be used in combination with 'data'")

    secret = __salt__["kubernetes.show_secret"](name, namespace, **kwargs)

    if secret is None:
        if data is None:
            data = {}

        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "The secret is going to be created"
            return ret
        res = __salt__["kubernetes.create_secret"](
            name=name,
            namespace=namespace,
            data=data,
            source=source,
            template=template,
            saltenv=__env__,
            **kwargs
        )
        ret["changes"]["{}.{}".format(namespace, name)] = {"old": {}, "new": res}
    else:
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "The secret is going to be replaced"
            return ret

        # TODO: improve checks  # pylint: disable=fixme
        # Fix: the log message previously said "service" instead of "secret".
        log.info("Forcing the recreation of the secret")
        ret["comment"] = "The secret is already present. Forcing recreation"
        res = __salt__["kubernetes.replace_secret"](
            name=name,
            namespace=namespace,
            data=data,
            source=source,
            template=template,
            saltenv=__env__,
            **kwargs
        )

    ret["changes"] = {
        # Omit values from the return. They are unencrypted
        # and can contain sensitive data.
        "data": list(res["data"])
    }
    ret["result"] = True
    return ret
def configmap_absent(name, namespace="default", **kwargs):
    """
    Ensures that the named configmap is absent from the given namespace.

    name
        The name of the configmap

    namespace
        The namespace holding the configmap. The 'default' one is going to be
        used unless a different one is specified.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    if __salt__["kubernetes.show_configmap"](name, namespace, **kwargs) is None:
        ret["result"] = None if __opts__["test"] else True
        ret["comment"] = "The configmap does not exist"
        return ret

    if __opts__["test"]:
        ret["comment"] = "The configmap is going to be deleted"
        ret["result"] = None
        return ret

    __salt__["kubernetes.delete_configmap"](name, namespace, **kwargs)

    # As of kubernetes 1.6.4 no status code is returned when deleting a
    # configmap; the execution module raises if the server reports an error,
    # so reaching this point means the deletion was accepted.
    ret["result"] = True
    ret["changes"] = {"kubernetes.configmap": {"new": "absent", "old": "present"}}
    ret["comment"] = "ConfigMap deleted"
    return ret
def configmap_present(
    name, namespace="default", data=None, source=None, template=None, **kwargs
):
    """
    Ensures that the named configmap is present inside of the specified namespace
    with the given data.
    If the configmap exists it will be replaced.

    name
        The name of the configmap.

    namespace
        The namespace holding the configmap. The 'default' one is going to be
        used unless a different one is specified.

    data
        The dictionary holding the configmaps.

    source
        A file containing the data of the configmap in plain format.

    template
        Template engine to be used to render the source file.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # 'source' carries the full configmap contents, so it is mutually
    # exclusive with inline 'data'.
    if data and source:
        return _error(ret, "'source' cannot be used in combination with 'data'")
    elif data is None:
        data = {}

    configmap = __salt__["kubernetes.show_configmap"](name, namespace, **kwargs)

    if configmap is None:
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "The configmap is going to be created"
            return ret
        res = __salt__["kubernetes.create_configmap"](
            name=name,
            namespace=namespace,
            data=data,
            source=source,
            template=template,
            saltenv=__env__,
            **kwargs
        )
        ret["changes"]["{}.{}".format(namespace, name)] = {"old": {}, "new": res}
    else:
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "The configmap is going to be replaced"
            return ret

        # TODO: improve checks  # pylint: disable=fixme
        # Fix: the log message previously said "service" instead of
        # "configmap".
        log.info("Forcing the recreation of the configmap")
        ret["comment"] = "The configmap is already present. Forcing recreation"
        res = __salt__["kubernetes.replace_configmap"](
            name=name,
            namespace=namespace,
            data=data,
            source=source,
            template=template,
            saltenv=__env__,
            **kwargs
        )

    ret["changes"] = {"data": res["data"]}
    ret["result"] = True
    return ret
def pod_absent(name, namespace="default", **kwargs):
    """
    Ensures that the named pod is absent from the given namespace.

    name
        The name of the pod

    namespace
        The name of the namespace
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    if __salt__["kubernetes.show_pod"](name, namespace, **kwargs) is None:
        ret["result"] = None if __opts__["test"] else True
        ret["comment"] = "The pod does not exist"
        return ret

    if __opts__["test"]:
        ret["comment"] = "The pod is going to be deleted"
        ret["result"] = None
        return ret

    res = __salt__["kubernetes.delete_pod"](name, namespace, **kwargs)
    in_progress = res["code"] is None

    # A missing status code means the deletion was accepted but is still
    # running; an HTTP 200 means it completed.
    if res["code"] == 200 or in_progress:
        ret["result"] = True
        ret["changes"] = {"kubernetes.pod": {"new": "absent", "old": "present"}}
        ret["comment"] = "In progress" if in_progress else res["message"]
    else:
        ret["comment"] = "Something went wrong, response: {}".format(res)
    return ret
def pod_present(
    name,
    namespace="default",
    metadata=None,
    spec=None,
    source="",
    template="",
    **kwargs
):
    """
    Ensures that the named pod is present inside of the specified
    namespace with the given metadata and spec.
    If the pod exists it will be replaced.

    name
        The name of the pod.

    namespace
        The namespace holding the pod. The 'default' one is going to be
        used unless a different one is specified.

    metadata
        The metadata of the pod object.

    spec
        The spec of the pod object.

    source
        A file containing the definition of the pod (metadata and
        spec) in the official kubernetes format.

    template
        Template engine to be used to render the source file.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # 'source' is a complete definition, so it is mutually exclusive with
    # inline 'metadata'/'spec'.
    if (metadata or spec) and source:
        return _error(
            ret, "'source' cannot be used in combination with 'metadata' or 'spec'"
        )

    if metadata is None:
        metadata = {}

    if spec is None:
        spec = {}

    pod = __salt__["kubernetes.show_pod"](name, namespace, **kwargs)

    if pod is None:
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "The pod is going to be created"
            return ret
        res = __salt__["kubernetes.create_pod"](
            name=name,
            namespace=namespace,
            metadata=metadata,
            spec=spec,
            source=source,
            template=template,
            saltenv=__env__,
            **kwargs
        )
        ret["changes"]["{}.{}".format(namespace, name)] = {"old": {}, "new": res}
    else:
        # TODO: fix replace_namespaced_pod validation issues
        # Replacement is not supported, so both test and real runs fail here.
        # Fix: test mode previously returned result None with no comment,
        # wrongly suggesting a pending change that would actually fail.
        ret["comment"] = (
            "salt is currently unable to replace a pod without "
            "deleting it. Please perform the removal of the pod requiring "
            "the 'pod_absent' state if this is the desired behaviour."
        )
        ret["result"] = False
        return ret

    ret["changes"] = {"metadata": metadata, "spec": spec}
    ret["result"] = True
    return ret
def node_label_absent(name, node, **kwargs):
    """
    Ensures that the named label is absent from the node.

    name
        The name of the label

    node
        The name of the node
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    current = __salt__["kubernetes.node_labels"](node, **kwargs)
    if name not in current:
        ret["result"] = None if __opts__["test"] else True
        ret["comment"] = "The label does not exist"
        return ret

    if __opts__["test"]:
        ret["comment"] = "The label is going to be deleted"
        ret["result"] = None
        return ret

    __salt__["kubernetes.node_remove_label"](node_name=node, label_name=name, **kwargs)

    ret["result"] = True
    ret["changes"] = {"kubernetes.node_label": {"new": "absent", "old": "present"}}
    ret["comment"] = "Label removed from node"
    return ret
def node_label_folder_absent(name, node, **kwargs):
    """
    Ensures the label folder doesn't exist on the specified node.

    name
        The name of label folder

    node
        The name of the node
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    labels = __salt__["kubernetes.node_labels"](node, **kwargs)

    # Normalize the folder name to a single trailing slash prefix.
    prefix = name.strip("/") + "/"
    doomed = [lbl for lbl in labels if lbl.startswith(prefix)]
    survivors = [lbl for lbl in labels if not lbl.startswith(prefix)]

    if not doomed:
        ret["result"] = None if __opts__["test"] else True
        ret["comment"] = "The label folder does not exist"
        return ret

    if __opts__["test"]:
        ret["comment"] = "The label folder is going to be deleted"
        ret["result"] = None
        return ret

    for lbl in doomed:
        __salt__["kubernetes.node_remove_label"](
            node_name=node, label_name=lbl, **kwargs
        )

    ret["result"] = True
    ret["changes"] = {
        "kubernetes.node_label_folder_absent": {"old": list(labels), "new": survivors}
    }
    ret["comment"] = "Label folder removed from node"
    return ret
def node_label_present(name, node, value, **kwargs):
    """
    Ensures that the named label is set on the named node
    with the given value.
    If the label exists it will be replaced.

    name
        The name of the label.

    value
        Value of the label.

    node
        Node to change.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    labels = __salt__["kubernetes.node_labels"](node, **kwargs)

    if name not in labels:
        # Label missing entirely: create it.
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "The label is going to be set"
            return ret
        __salt__["kubernetes.node_add_label"](
            label_name=name, label_value=value, node_name=node, **kwargs
        )
    elif labels[name] == value:
        # Already correct: nothing to do.
        ret["result"] = True
        ret["comment"] = "The label is already set and has the specified value"
        return ret
    else:
        # Label exists with a different value: overwrite it.
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "The label is going to be updated"
            return ret
        ret["comment"] = "The label is already set, changing the value"
        __salt__["kubernetes.node_add_label"](
            node_name=node, label_name=name, label_value=value, **kwargs
        )

    # Snapshot the pre-change mapping before recording the new value.
    previous = dict(labels)
    labels[name] = value
    ret["changes"]["{}.{}".format(node, name)] = {"old": previous, "new": labels}
    ret["result"] = True
    return ret
# Default certificate store location context (the machine-wide store).
_DEFAULT_CONTEXT = "LocalMachine"
# Default certificate file format: 'cer' (X.509); 'pfx' (PKCS #12) is also
# accepted by the functions below.
_DEFAULT_FORMAT = "cer"
# Default certificate store name within the context.
_DEFAULT_STORE = "My"
def __virtual__():
    """
    Load only on minions that have the win_pki module.
    """
    if "win_pki.get_stores" not in __salt__:
        return (False, "win_pki module could not be loaded")
    return True
def import_cert(
    name,
    cert_format=_DEFAULT_FORMAT,
    context=_DEFAULT_CONTEXT,
    store=_DEFAULT_STORE,
    exportable=True,
    password="",
    saltenv="base",
):
    """
    Import the certificate file into the given certificate store.

    :param str name: The path of the certificate file to import.
    :param str cert_format: The certificate format. Specify 'cer' for X.509,
        or 'pfx' for PKCS #12.
    :param str context: The name of the certificate store location context.
    :param str store: The name of the certificate store.
    :param bool exportable: Mark the certificate as exportable. Only
        applicable to pfx format.
    :param str password: The password of the certificate. Only applicable to
        pfx format.
    :param str saltenv: The environment the file resides in.

    Example of usage with only the required arguments:

    .. code-block:: yaml

        site0-cert-imported:
          win_pki.import_cert:
            - name: salt://win/webserver/certs/site0.cer

    Example of usage specifying all available arguments:

    .. code-block:: yaml

        site0-cert-imported:
          win_pki.import_cert:
            - name: salt://win/webserver/certs/site0.pfx
            - cert_format: pfx
            - context: LocalMachine
            - store: My
            - exportable: True
            - password: TestPassword
            - saltenv: base
    """
    ret = {"name": name, "changes": dict(), "comment": "", "result": None}
    store_path = r"Cert:\{}\{}".format(context, store)

    # Cache the source file locally so its properties can be inspected.
    cached_source_path = __salt__["cp.cache_file"](name, saltenv)
    current_certs = __salt__["win_pki.get_certs"](context=context, store=store)

    if password:
        cert_props = __salt__["win_pki.get_cert_file"](
            name=cached_source_path, cert_format=cert_format, password=password
        )
    else:
        cert_props = __salt__["win_pki.get_cert_file"](
            name=cached_source_path, cert_format=cert_format
        )

    if cert_props["thumbprint"] in current_certs:
        ret["comment"] = "Certificate '{}' already contained in store: {}".format(
            cert_props["thumbprint"], store_path
        )
        ret["result"] = True
    elif __opts__["test"]:
        ret["comment"] = "Certificate '{}' will be imported into store: {}".format(
            cert_props["thumbprint"], store_path
        )
        ret["changes"] = {"old": None, "new": cert_props["thumbprint"]}
    else:
        ret["result"] = __salt__["win_pki.import_cert"](
            name=name,
            cert_format=cert_format,
            context=context,
            store=store,
            exportable=exportable,
            password=password,
            saltenv=saltenv,
        )
        if ret["result"]:
            # Fix: only record a change once the import actually succeeded.
            # Previously changes reported the new thumbprint even on failure.
            ret["changes"] = {"old": None, "new": cert_props["thumbprint"]}
            ret["comment"] = "Certificate '{}' imported into store: {}".format(
                cert_props["thumbprint"], store_path
            )
        else:
            ret[
                "comment"
            ] = "Certificate '{}' unable to be imported into store: {}".format(
                cert_props["thumbprint"], store_path
            )
    return ret
def remove_cert(name, thumbprint, context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE):
    """
    Remove the certificate from the given certificate store.

    :param str thumbprint: The thumbprint value of the target certificate.
    :param str context: The name of the certificate store location context.
    :param str store: The name of the certificate store.

    Example of usage with only the required arguments:

    .. code-block:: yaml

        site0-cert-removed:
          win_pki.remove_cert:
            - thumbprint: 9988776655443322111000AAABBBCCCDDDEEEFFF

    Example of usage specifying all available arguments:

    .. code-block:: yaml

        site0-cert-removed:
          win_pki.remove_cert:
            - thumbprint: 9988776655443322111000AAABBBCCCDDDEEEFFF
            - context: LocalMachine
            - store: My
    """
    ret = {"name": name, "changes": dict(), "comment": "", "result": None}
    store_path = r"Cert:\{}\{}".format(context, store)
    current_certs = __salt__["win_pki.get_certs"](context=context, store=store)

    if thumbprint not in current_certs:
        ret["comment"] = "Certificate '{}' already removed from store: {}".format(
            thumbprint, store_path
        )
        ret["result"] = True
    elif __opts__["test"]:
        ret["comment"] = "Certificate '{}' will be removed from store: {}".format(
            thumbprint, store_path
        )
        ret["changes"] = {"old": thumbprint, "new": None}
    else:
        ret["result"] = __salt__["win_pki.remove_cert"](
            thumbprint=thumbprint, context=context, store=store
        )
        if ret["result"]:
            # Fix: only record a change once the removal actually succeeded.
            # Previously changes reported the removal even on failure.
            ret["changes"] = {"old": thumbprint, "new": None}
            ret["comment"] = "Certificate '{}' removed from store: {}".format(
                thumbprint, store_path
            )
        else:
            ret[
                "comment"
            ] = "Certificate '{}' unable to be removed from store: {}".format(
                thumbprint, store_path
            )
    return ret
import logging
import os
import salt.utils.data
import salt.utils.platform
# Module-level logger for this state module.
log = logging.getLogger(__name__)
# Virtual module name under which these states are exposed (dism.*).
__virtualname__ = "dism"
def __virtual__():
    """
    Only work on Windows where the DISM module is available
    """
    if salt.utils.platform.is_windows():
        return __virtualname__
    return False, "Module only available on Windows"
def capability_installed(
    name, source=None, limit_access=False, image=None, restart=False
):
    """
    Install a DISM capability

    Args:
        name (str): The capability to install
        source (str): The optional source of the capability
        limit_access (bool): Prevent DISM from contacting Windows Update for
            online images
        image (Optional[str]): The path to the root directory of an offline
            Windows image. If `None` is passed, the running operating system is
            targeted. Default is None.
        restart (Optional[bool]): Reboot the machine if required by the install

    Example:
        Run ``dism.available_capabilities`` to get a list of available
        capabilities. This will help you get the proper name to use.

        .. code-block:: yaml

            install_dotnet35:
              dism.capability_installed:
                - name: NetFX3~~~~
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    old = __salt__["dism.installed_capabilities"]()

    if name in old:
        ret["comment"] = "The capability {} is already installed".format(name)
        return ret

    if __opts__["test"]:
        ret["changes"]["capability"] = "{} will be installed".format(name)
        ret["result"] = None
        return ret

    # Install the capability
    status = __salt__["dism.add_capability"](name, source, limit_access, image, restart)

    # 1641 and 3010 are success codes that additionally signal a reboot
    if status["retcode"] not in [0, 1641, 3010]:
        ret["comment"] = "Failed to install {}: {}".format(name, status["stdout"])
        ret["result"] = False
        # Fix: return immediately so the post-install comparison below cannot
        # overwrite the failure comment with "Installed ..." while the result
        # is still False.
        return ret

    new = __salt__["dism.installed_capabilities"]()
    changes = salt.utils.data.compare_lists(old, new)

    if changes:
        ret["comment"] = "Installed {}".format(name)
        ret["changes"] = status
        ret["changes"]["capability"] = changes

    return ret
def capability_removed(name, image=None, restart=False):
    """
    Uninstall a DISM capability

    Args:
        name (str): The capability to uninstall
        image (Optional[str]): The path to the root directory of an offline
            Windows image. If `None` is passed, the running operating system is
            targeted. Default is None.
        restart (Optional[bool]): Reboot the machine if required by the install

    Example:
        Run ``dism.installed_capabilities`` to get a list of installed
        capabilities. This will help you get the proper name to use.

        .. code-block:: yaml

            remove_dotnet35:
              dism.capability_removed:
                - name: NetFX3~~~~
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    old = __salt__["dism.installed_capabilities"]()

    if name not in old:
        ret["comment"] = "The capability {} is already removed".format(name)
        return ret

    if __opts__["test"]:
        ret["changes"]["capability"] = "{} will be removed".format(name)
        ret["result"] = None
        return ret

    # Remove the capability
    status = __salt__["dism.remove_capability"](name, image, restart)

    # 1641 and 3010 are success codes that additionally signal a reboot
    if status["retcode"] not in [0, 1641, 3010]:
        ret["comment"] = "Failed to remove {}: {}".format(name, status["stdout"])
        ret["result"] = False
        # Fix: return immediately so the post-removal comparison below cannot
        # overwrite the failure comment while the result is still False.
        return ret

    new = __salt__["dism.installed_capabilities"]()
    changes = salt.utils.data.compare_lists(old, new)

    if changes:
        ret["comment"] = "Removed {}".format(name)
        ret["changes"] = status
        ret["changes"]["capability"] = changes

    return ret
def feature_installed(
    name,
    package=None,
    source=None,
    limit_access=False,
    enable_parent=False,
    image=None,
    restart=False,
):
    """
    Install a DISM feature

    Args:
        name (str): The feature in which to install
        package (Optional[str]): The parent package for the feature. You do not
            have to specify the package if it is the Windows Foundation Package.
            Otherwise, use package to specify the parent package of the feature
        source (str): The optional source of the feature
        limit_access (bool): Prevent DISM from contacting Windows Update for
            online images
        enable_parent (Optional[bool]): True will enable all parent features of
            the specified feature
        image (Optional[str]): The path to the root directory of an offline
            Windows image. If `None` is passed, the running operating system is
            targeted. Default is None.
        restart (Optional[bool]): Reboot the machine if required by the install

    Example:
        Run ``dism.available_features`` to get a list of available features.
        This will help you get the proper name to use.

        .. code-block:: yaml

            install_telnet_client:
              dism.feature_installed:
                - name: TelnetClient
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    old = __salt__["dism.installed_features"]()

    if name in old:
        ret["comment"] = "The feature {} is already installed".format(name)
        return ret

    if __opts__["test"]:
        ret["changes"]["feature"] = "{} will be installed".format(name)
        ret["result"] = None
        return ret

    # Install the feature
    status = __salt__["dism.add_feature"](
        name, package, source, limit_access, enable_parent, image, restart
    )

    # 1641 and 3010 are success codes that additionally signal a reboot
    if status["retcode"] not in [0, 1641, 3010]:
        ret["comment"] = "Failed to install {}: {}".format(name, status["stdout"])
        ret["result"] = False
        # Fix: return immediately so the post-install comparison below cannot
        # overwrite the failure comment while the result is still False.
        return ret

    new = __salt__["dism.installed_features"]()
    changes = salt.utils.data.compare_lists(old, new)

    if changes:
        ret["comment"] = "Installed {}".format(name)
        ret["changes"] = status
        ret["changes"]["feature"] = changes

    return ret
def feature_removed(name, remove_payload=False, image=None, restart=False):
    """
    Disables a feature.

    Args:
        name (str): The feature to disable
        remove_payload (Optional[bool]): Remove the feature's payload. Must
            supply source when enabling in the future.
        image (Optional[str]): The path to the root directory of an offline
            Windows image. If `None` is passed, the running operating system is
            targeted. Default is None.
        restart (Optional[bool]): Reboot the machine if required by the install

    Example:
        Run ``dism.installed_features`` to get a list of installed features.
        This will help you get the proper name to use.

        .. code-block:: yaml

            remove_telnet_client:
              dism.feature_removed:
                - name: TelnetClient
                - remove_payload: True
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    old = __salt__["dism.installed_features"]()

    if name not in old:
        ret["comment"] = "The feature {} is already removed".format(name)
        return ret

    if __opts__["test"]:
        ret["changes"]["feature"] = "{} will be removed".format(name)
        ret["result"] = None
        return ret

    # Remove the feature
    status = __salt__["dism.remove_feature"](name, remove_payload, image, restart)

    # 1641 and 3010 are success codes that additionally signal a reboot
    if status["retcode"] not in [0, 1641, 3010]:
        ret["comment"] = "Failed to remove {}: {}".format(name, status["stdout"])
        ret["result"] = False
        # Fix: return immediately so the post-removal comparison below cannot
        # overwrite the failure comment while the result is still False.
        return ret

    new = __salt__["dism.installed_features"]()
    changes = salt.utils.data.compare_lists(old, new)

    if changes:
        ret["comment"] = "Removed {}".format(name)
        ret["changes"] = status
        ret["changes"]["feature"] = changes

    return ret
def package_installed(
    name, ignore_check=False, prevent_pending=False, image=None, restart=False
):
    """
    Install a package.

    Args:
        name (str): The package to install. Can be a .cab file, a .msu file,
            or a folder
        ignore_check (Optional[bool]): Skip installation of the package if the
            applicability checks fail
        prevent_pending (Optional[bool]): Skip the installation of the package
            if there are pending online actions
        image (Optional[str]): The path to the root directory of an offline
            Windows image. If `None` is passed, the running operating system is
            targeted. Default is None.
        restart (Optional[bool]): Reboot the machine if required by the install

    Example:

        .. code-block:: yaml

            install_KB123123123:
              dism.package_installed:
                - name: C:\\Packages\\KB123123123.cab
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    # Fail if using a non-existent package path. Names containing '~' are
    # treated as package identities rather than file paths.
    if "~" not in name and not os.path.exists(name):
        if __opts__["test"]:
            ret["result"] = None
        else:
            ret["result"] = False
        ret["comment"] = "Package path {} does not exist".format(name)
        return ret

    old = __salt__["dism.installed_packages"]()

    # Get package info so we can see if it's already installed
    package_info = __salt__["dism.package_info"](name)

    if package_info["Package Identity"] in old:
        ret["comment"] = "The package {} is already installed: {}".format(
            name, package_info["Package Identity"]
        )
        return ret

    if __opts__["test"]:
        ret["changes"]["package"] = "{} will be installed".format(name)
        ret["result"] = None
        return ret

    # Install the package
    status = __salt__["dism.add_package"](
        name, ignore_check, prevent_pending, image, restart
    )

    # 1641 and 3010 are success codes that additionally signal a reboot
    if status["retcode"] not in [0, 1641, 3010]:
        ret["comment"] = "Failed to install {}: {}".format(name, status["stdout"])
        ret["result"] = False
        # Fix: return immediately so the post-install comparison below cannot
        # overwrite the failure comment while the result is still False.
        return ret

    new = __salt__["dism.installed_packages"]()
    changes = salt.utils.data.compare_lists(old, new)

    if changes:
        ret["comment"] = "Installed {}".format(name)
        ret["changes"] = status
        ret["changes"]["package"] = changes

    return ret
def package_removed(name, image=None, restart=False):
    """
    Uninstall a package

    Args:
        name (str): The full path to the package. Can be either a .cab file or a
            folder. Should point to the original source of the package, not to
            where the file is installed. This can also be the name of a package
            as listed in ``dism.installed_packages``
        image (Optional[str]): The path to the root directory of an offline
            Windows image. If `None` is passed, the running operating system is
            targeted. Default is None.
        restart (Optional[bool]): Reboot the machine if required by the install

    Example:

        .. code-block:: yaml

            # Example using source
            remove_KB1231231:
              dism.package_installed:
                - name: C:\\Packages\\KB1231231.cab

            # Example using name from ``dism.installed_packages``
            remove_KB1231231:
              dism.package_installed:
                - name: Package_for_KB1231231~31bf3856ad364e35~amd64~~10.0.1.3
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    # Fail if using a non-existent package path. Names containing '~' are
    # treated as package identities rather than file paths.
    if "~" not in name and not os.path.exists(name):
        if __opts__["test"]:
            ret["result"] = None
        else:
            ret["result"] = False
        ret["comment"] = "Package path {} does not exist".format(name)
        return ret

    old = __salt__["dism.installed_packages"]()

    # Get package info so we can see if it's already removed
    package_info = __salt__["dism.package_info"](name)

    # If `Package Identity` isn't returned or if they passed a cab file, if
    # `Package Identity` isn't in the list of installed packages
    if (
        "Package Identity" not in package_info
        or package_info["Package Identity"] not in old
    ):
        ret["comment"] = "The package {} is already removed".format(name)
        return ret

    if __opts__["test"]:
        ret["changes"]["package"] = "{} will be removed".format(name)
        ret["result"] = None
        return ret

    # Remove the package
    status = __salt__["dism.remove_package"](name, image, restart)

    # 1641 and 3010 are success codes that additionally signal a reboot
    if status["retcode"] not in [0, 1641, 3010]:
        ret["comment"] = "Failed to remove {}: {}".format(name, status["stdout"])
        ret["result"] = False
        # Fix: return immediately so the post-removal comparison below cannot
        # overwrite the failure comment while the result is still False.
        return ret

    new = __salt__["dism.installed_packages"]()
    changes = salt.utils.data.compare_lists(old, new)

    if changes:
        ret["comment"] = "Removed {}".format(name)
        ret["changes"] = status
        ret["changes"]["package"] = changes

    return ret
def __virtual__():
    """
    Only load if the deb_postgres module is present
    """
    # The presence of postgres.cluster_exists tells us the execution module
    # loaded successfully on this minion.
    if "postgres.cluster_exists" in __salt__:
        return True
    return (
        False,
        "Unable to load postgres module. Make sure `postgres.bins_dir` is set.",
    )
def present(
    version,
    name,
    port=None,
    encoding=None,
    locale=None,
    datadir=None,
    allow_group_access=None,
    data_checksums=None,
    wal_segsize=None,
):
    """
    Ensure that the named cluster is present with the specified properties.
    For more information about all of these options see man pg_createcluster(1)

    version
        Version of the postgresql cluster

    name
        The name of the cluster

    port
        Cluster port

    encoding
        The character encoding scheme to be used in this database

    locale
        Locale with which to create cluster

    datadir
        Where the cluster is stored

    allow_group_access
        Allows users in the same group as the cluster owner to read all cluster files created by initdb

    data_checksums
        Use checksums on data pages

    wal_segsize
        Set the WAL segment size, in megabytes

    Returns the standard Salt state dict (name/changes/result/comment).

    .. versionadded:: 2016.3.0
    """
    msg = "Cluster {}/{} is already present".format(version, name)
    ret = {"name": name, "changes": {}, "result": True, "comment": msg}
    if __salt__["postgres.cluster_exists"](version, name):
        # check cluster config is correct
        infos = __salt__["postgres.cluster_list"](verbose=True)
        info = infos["{}/{}".format(version, name)]
        # TODO: check locale and encoding configs also
        # Only port and datadir are validated here; neither can be changed on
        # a running cluster, so any mismatch is reported as a hard failure.
        if any(
            (
                port != info["port"] if port else False,
                datadir != info["datadir"] if datadir else False,
            )
        ):
            ret["comment"] = (
                "Cluster {}/{} has wrong parameters "
                "which couldn't be changed on fly.".format(version, name)
            )
            ret["result"] = False
        return ret
    # The cluster is not present, add it!
    if __opts__.get("test"):
        ret["result"] = None
        msg = "Cluster {0}/{1} is set to be created"
        ret["comment"] = msg.format(version, name)
        return ret
    cluster = __salt__["postgres.cluster_create"](
        version=version,
        name=name,
        port=port,
        locale=locale,
        encoding=encoding,
        datadir=datadir,
        allow_group_access=allow_group_access,
        data_checksums=data_checksums,
        wal_segsize=wal_segsize,
    )
    if cluster:
        msg = "The cluster {0}/{1} has been created"
        ret["comment"] = msg.format(version, name)
        ret["changes"]["{}/{}".format(version, name)] = "Present"
    else:
        msg = "Failed to create cluster {0}/{1}"
        ret["comment"] = msg.format(version, name)
        ret["result"] = False
    return ret
def absent(version, name):
    """
    Ensure that the named cluster is absent

    version
        Version of the postgresql server of the cluster to remove

    name
        The name of the cluster to remove

    Returns the standard Salt state dict (name/changes/result/comment).

    .. versionadded:: 2016.3.0
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    # check if cluster exists and remove it
    if __salt__["postgres.cluster_exists"](version, name):
        if __opts__.get("test"):
            ret["result"] = None
            msg = "Cluster {0}/{1} is set to be removed"
            ret["comment"] = msg.format(version, name)
            return ret
        # Third argument (stop=True) stops the cluster before removal.
        if __salt__["postgres.cluster_remove"](version, name, True):
            msg = "Cluster {0}/{1} has been removed"
            ret["comment"] = msg.format(version, name)
            ret["changes"][name] = "Absent"
            return ret
    # fallback
    ret["comment"] = "Cluster {}/{} is not present, so it cannot be removed".format(
        version, name
    )
    return ret
import copy
import datetime
import difflib
import salt.utils.data
import salt.utils.json
def __virtual__():
    """
    Only load if boto is available.
    """
    # The execution module only registers this function when boto loaded.
    if "boto_datapipeline.create_pipeline" not in __salt__:
        return (False, "boto_datapipeline module could not be loaded")
    return "boto_datapipeline"
def present(
    name,
    pipeline_objects=None,
    pipeline_objects_from_pillars="boto_datapipeline_pipeline_objects",
    parameter_objects=None,
    parameter_objects_from_pillars="boto_datapipeline_parameter_objects",
    parameter_values=None,
    parameter_values_from_pillars="boto_datapipeline_parameter_values",
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Ensure the data pipeline exists with matching definition.

    name
        Name of the service to ensure a data pipeline exists for.

    pipeline_objects
        Pipeline objects to use. Will override objects read from pillars.

    pipeline_objects_from_pillars
        The pillar key to use for lookup.

    parameter_objects
        Parameter objects to use. Will override objects read from pillars.

    parameter_objects_from_pillars
        The pillar key to use for lookup.

    parameter_values
        Parameter values to use. Will override values read from pillars.

    parameter_values_from_pillars
        The pillar key to use for lookup.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    pipeline_objects = pipeline_objects or {}
    parameter_objects = parameter_objects or {}
    parameter_values = parameter_values or {}
    # Short-circuit when a pipeline with the desired definition already exists.
    present, old_pipeline_definition = _pipeline_present_with_definition(
        name,
        _pipeline_objects(pipeline_objects_from_pillars, pipeline_objects),
        _parameter_objects(parameter_objects_from_pillars, parameter_objects),
        _parameter_values(parameter_values_from_pillars, parameter_values),
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if present:
        ret["comment"] = "AWS data pipeline {} present".format(name)
        return ret
    if __opts__["test"]:
        ret["comment"] = "Data pipeline {} is set to be created or updated".format(name)
        ret["result"] = None
        return ret
    # create_pipeline is safe to call for an existing (name, unique id) pair;
    # it returns the existing pipeline's id in that case.
    result_create_pipeline = __salt__["boto_datapipeline.create_pipeline"](
        name,
        name,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if "error" in result_create_pipeline:
        ret["result"] = False
        ret["comment"] = "Failed to create data pipeline {}: {}".format(
            name, result_create_pipeline["error"]
        )
        return ret
    pipeline_id = result_create_pipeline["result"]
    result_pipeline_definition = __salt__["boto_datapipeline.put_pipeline_definition"](
        pipeline_id,
        _pipeline_objects(pipeline_objects_from_pillars, pipeline_objects),
        parameter_objects=_parameter_objects(
            parameter_objects_from_pillars, parameter_objects
        ),
        parameter_values=_parameter_values(
            parameter_values_from_pillars, parameter_values
        ),
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if "error" in result_pipeline_definition:
        if _immutable_fields_error(result_pipeline_definition):
            # If update not possible, delete and retry
            result_delete_pipeline = __salt__["boto_datapipeline.delete_pipeline"](
                pipeline_id,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
            if "error" in result_delete_pipeline:
                ret["result"] = False
                ret["comment"] = "Failed to delete data pipeline {}: {}".format(
                    pipeline_id, result_delete_pipeline["error"]
                )
                return ret
            result_create_pipeline = __salt__["boto_datapipeline.create_pipeline"](
                name,
                name,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
            if "error" in result_create_pipeline:
                ret["result"] = False
                ret["comment"] = "Failed to create data pipeline {}: {}".format(
                    name, result_create_pipeline["error"]
                )
                return ret
            pipeline_id = result_create_pipeline["result"]
            result_pipeline_definition = __salt__[
                "boto_datapipeline.put_pipeline_definition"
            ](
                pipeline_id,
                _pipeline_objects(pipeline_objects_from_pillars, pipeline_objects),
                parameter_objects=_parameter_objects(
                    parameter_objects_from_pillars, parameter_objects
                ),
                parameter_values=_parameter_values(
                    parameter_values_from_pillars, parameter_values
                ),
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
        if "error" in result_pipeline_definition:
            # Still erroring after possible retry
            ret["result"] = False
            ret["comment"] = "Failed to create data pipeline {}: {}".format(
                name, result_pipeline_definition["error"]
            )
            return ret
    result_activate_pipeline = __salt__["boto_datapipeline.activate_pipeline"](
        pipeline_id,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if "error" in result_activate_pipeline:
        ret["result"] = False
        # Bug fix: report the activation error; previously this referenced
        # result_pipeline_definition["error"], which may not even exist here.
        ret["comment"] = "Failed to activate data pipeline {}: {}".format(
            name, result_activate_pipeline["error"]
        )
        return ret
    pipeline_definition_result = __salt__["boto_datapipeline.get_pipeline_definition"](
        pipeline_id,
        version="active",
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if "error" in pipeline_definition_result:
        new_pipeline_definition = {}
    else:
        new_pipeline_definition = _standardize(pipeline_definition_result["result"])
    if not old_pipeline_definition:
        ret["changes"]["new"] = "Pipeline created."
        ret["comment"] = "Data pipeline {} created".format(name)
    else:
        ret["changes"]["diff"] = _diff(old_pipeline_definition, new_pipeline_definition)
        ret["comment"] = "Data pipeline {} updated".format(name)
    return ret
def _immutable_fields_error(result_pipeline_definition):
"""Return true if update pipeline failed due to immutable fields
Some fields cannot be changed after a pipeline has been activated.
http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-manage-pipeline-modify-console.html#dp-edit-pipeline-limits
"""
for e in result_pipeline_definition["error"]:
for e2 in e["errors"]:
if "can not be changed" in e2:
return True
return False
def _pipeline_present_with_definition(
    name,
    expected_pipeline_objects,
    expected_parameter_objects,
    expected_parameter_values,
    region,
    key,
    keyid,
    profile,
):
    """
    Return true if the pipeline exists and the definition matches.

    Returns a 2-tuple ``(present, pipeline_definition)`` where
    ``pipeline_definition`` is the standardized active definition, or an
    empty dict when the pipeline (or its active definition) is missing.

    name
        The name of the pipeline.

    expected_pipeline_objects
        Pipeline objects that must match the definition.

    expected_parameter_objects
        Parameter objects that must match the definition.

    expected_parameter_values
        Parameter values that must match the definition.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    result_pipeline_id = __salt__["boto_datapipeline.pipeline_id_from_name"](
        name,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if "error" in result_pipeline_id:
        # No pipeline with this name (or the lookup failed): not present.
        return False, {}
    pipeline_id = result_pipeline_id["result"]
    pipeline_definition_result = __salt__["boto_datapipeline.get_pipeline_definition"](
        pipeline_id,
        version="active",
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if "error" in pipeline_definition_result:
        # The pipeline exists but has no retrievable active definition.
        return False, {}
    pipeline_definition = _standardize(pipeline_definition_result["result"])
    pipeline_objects = pipeline_definition.get("pipelineObjects")
    parameter_objects = pipeline_definition.get("parameterObjects")
    parameter_values = pipeline_definition.get("parameterValues")
    present = (
        # _cleaned() drops the date portion of startDateTime so schedules
        # differing only by day still compare equal.
        _recursive_compare(
            _cleaned(pipeline_objects), _cleaned(expected_pipeline_objects)
        )
        and _recursive_compare(parameter_objects, expected_parameter_objects)
        and _recursive_compare(parameter_values, expected_parameter_values)
    )
    return present, pipeline_definition
def _cleaned(_pipeline_objects):
"""Return standardized pipeline objects to be used for comparing
Remove year, month, and day components of the startDateTime so that data
pipelines with the same time of day but different days are considered
equal.
"""
pipeline_objects = copy.deepcopy(_pipeline_objects)
for pipeline_object in pipeline_objects:
if pipeline_object["id"] == "DefaultSchedule":
for field_object in pipeline_object["fields"]:
if field_object["key"] == "startDateTime":
start_date_time_string = field_object["stringValue"]
start_date_time = datetime.datetime.strptime(
start_date_time_string, "%Y-%m-%dT%H:%M:%S"
)
field_object["stringValue"] = start_date_time.strftime("%H:%M:%S")
return pipeline_objects
def _recursive_compare(v1, v2):
    """
    Return v1 == v2. Compares list, dict, recursively.

    ``None`` on the right-hand side is treated as an empty list/dict so a
    missing section compares equal to an empty one. Lists are compared
    order-insensitively, sorted by each element's 'id'/'key' (_id_or_key).
    """
    if isinstance(v1, list):
        if v2 is None:
            v2 = []
        if len(v1) != len(v2):
            return False
        # Sort copies so the caller's lists are not mutated as a side effect
        # (the previous in-place .sort() reordered the inputs).
        v1 = sorted(v1, key=_id_or_key)
        v2 = sorted(v2, key=_id_or_key)
        for x, y in zip(v1, v2):
            if not _recursive_compare(x, y):
                return False
        return True
    elif isinstance(v1, dict):
        if v2 is None:
            v2 = {}
        v1 = dict(v1)
        v2 = dict(v2)
        if sorted(v1) != sorted(v2):
            return False
        for k in v1:
            if not _recursive_compare(v1[k], v2[k]):
                return False
        return True
    else:
        return v1 == v2
def _id_or_key(list_item):
"""
Return the value at key 'id' or 'key'.
"""
if isinstance(list_item, dict):
if "id" in list_item:
return list_item["id"]
if "key" in list_item:
return list_item["key"]
return list_item
def _diff(old_pipeline_definition, new_pipeline_definition):
    """
    Return string diff of pipeline definitions.
    """
    # Drop boto response metadata so only the definitions themselves diff.
    for definition in (old_pipeline_definition, new_pipeline_definition):
        definition.pop("ResponseMetadata", None)
    old_lines = salt.utils.json.dumps(old_pipeline_definition, indent=4).splitlines(True)
    new_lines = salt.utils.json.dumps(new_pipeline_definition, indent=4).splitlines(True)
    diff = salt.utils.data.decode(difflib.unified_diff(old_lines, new_lines))
    return "".join(diff)
def _standardize(structure):
    """
    Return standardized format for lists/dictionaries.

    Lists of dictionaries are sorted by the value of the dictionary at
    its primary key ('id' or 'key'). OrderedDict's are converted to
    basic dictionaries.
    """

    def mutating_helper(structure):
        # Recursively sorts lists in place; operates on a deep copy so the
        # caller's input is never mutated.
        if isinstance(structure, list):
            structure.sort(key=_id_or_key)
            for each in structure:
                mutating_helper(each)
        elif isinstance(structure, dict):
            # NOTE(review): this rebinds the local name to a new plain dict,
            # so the OrderedDict-to-dict conversion is not written back into
            # the copied structure; nested values are still recursed via the
            # shared references -- confirm this is the intended behavior.
            structure = dict(structure)
            for k, v in structure.items():
                mutating_helper(k)
                mutating_helper(v)

    new_structure = copy.deepcopy(structure)
    mutating_helper(new_structure)
    return new_structure
def _pipeline_objects(pipeline_objects_from_pillars, pipeline_object_overrides):
    """
    Return a list of pipeline objects that compose the pipeline

    pipeline_objects_from_pillars
        The pillar key to use for lookup

    pipeline_object_overrides
        Pipeline objects to use. Will override objects read from pillars.
    """
    # Deep-copy the pillar data before overlaying so pillar caches stay clean.
    merged = copy.deepcopy(__salt__["pillar.get"](pipeline_objects_from_pillars))
    merged.update(pipeline_object_overrides)
    pipeline_objects = _standardize(_dict_to_list_ids(merged))
    for pipeline_object in pipeline_objects:
        pipeline_object["fields"] = _properties_from_dict(pipeline_object["fields"])
    return pipeline_objects
def _parameter_objects(parameter_objects_from_pillars, parameter_object_overrides):
    """
    Return a list of parameter objects that configure the pipeline

    parameter_objects_from_pillars
        The pillar key to use for lookup

    parameter_object_overrides
        Parameter objects to use. Will override objects read from pillars.
    """
    # Deep-copy the pillar data before overlaying so pillar caches stay clean.
    merged = copy.deepcopy(__salt__["pillar.get"](parameter_objects_from_pillars))
    merged.update(parameter_object_overrides)
    parameter_objects = _standardize(_dict_to_list_ids(merged))
    for parameter_object in parameter_objects:
        parameter_object["attributes"] = _properties_from_dict(
            parameter_object["attributes"]
        )
    return parameter_objects
def _parameter_values(parameter_values_from_pillars, parameter_value_overrides):
    """
    Return the parameter values that configure the pipeline, in boto's
    list-of-properties format (keyed by 'id').

    parameter_values_from_pillars
        The pillar key to use for lookup

    parameter_value_overrides
        Parameter values to use. Will override values read from pillars.
    """
    # Deep-copy the pillar data before overlaying so pillar caches stay clean.
    merged = copy.deepcopy(__salt__["pillar.get"](parameter_values_from_pillars))
    merged.update(parameter_value_overrides)
    return _properties_from_dict(_standardize(merged), key_name="id")
def _dict_to_list_ids(objects):
"""
Convert a dictionary to a list of dictionaries, where each element has
a key value pair {'id': key}. This makes it easy to override pillar values
while still satisfying the boto api.
"""
list_with_ids = []
for key, value in objects.items():
element = {"id": key}
element.update(value)
list_with_ids.append(element)
return list_with_ids
def _properties_from_dict(d, key_name="key"):
"""
Transforms dictionary into pipeline object properties.
The output format conforms to boto's specification.
Example input:
{
'a': '1',
'b': {
'ref': '2'
},
}
Example output:
[
{
'key': 'a',
'stringValue': '1',
},
{
'key': 'b',
'refValue': '2',
},
]
"""
fields = []
for key, value in d.items():
if isinstance(value, dict):
fields.append({key_name: key, "refValue": value["ref"]})
else:
fields.append({key_name: key, "stringValue": value})
return fields
def absent(name, region=None, key=None, keyid=None, profile=None):
    """
    Ensure a pipeline with the service_name does not exist

    name
        Name of the service to ensure a data pipeline does not exist for.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    result_pipeline_id = __salt__["boto_datapipeline.pipeline_id_from_name"](
        name,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if "error" not in result_pipeline_id:
        pipeline_id = result_pipeline_id["result"]
        if __opts__["test"]:
            ret["comment"] = "Data pipeline {} set to be deleted.".format(name)
            ret["result"] = None
            return ret
        else:
            # NOTE(review): the delete result is not checked here, so a failed
            # deletion is still reported as a change -- confirm intent.
            __salt__["boto_datapipeline.delete_pipeline"](
                pipeline_id,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
            ret["changes"]["old"] = {"pipeline_id": pipeline_id}
            ret["changes"]["new"] = None
    else:
        ret["comment"] = "AWS data pipeline {} absent.".format(name)
    return ret
try:
import sqlite3
HAS_SQLITE3 = True
except ImportError:
HAS_SQLITE3 = False
def __virtual__():
    """
    Only load if the sqlite3 module is available
    """
    # HAS_SQLITE3 is set by the guarded import at module load time.
    if not HAS_SQLITE3:
        return (False, "Unable to import sqlite3")
    return True
def row_absent(name, db, table, where_sql, where_args=None):
    """
    Makes sure the specified row is absent in db. If multiple rows
    match where_sql, then the state will fail.

    name
        Only used as the unique ID

    db
        The database file name

    table
        The table name to check

    where_sql
        The sql to select the row to check

    where_args
        The list parameters to substitute in where_sql

    Returns the standard Salt state dict (name/changes/result/comment).
    """
    changes = {"name": name, "changes": {}, "result": None, "comment": ""}
    conn = None
    try:
        conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
        # Return rows as dicts so the old row can go into ``changes``.
        conn.row_factory = _dict_factory
        rows = None
        # NOTE: ``table`` and ``where_sql`` are interpolated directly into the
        # statement; they must come from trusted state data, not user input.
        if where_args is None:
            rows = _query(conn, "SELECT * FROM `" + table + "` WHERE " + where_sql)
        else:
            rows = _query(
                conn, "SELECT * FROM `" + table + "` WHERE " + where_sql, where_args
            )
        if len(rows) > 1:
            # Refuse to guess which row was meant.
            changes["result"] = False
            changes["comment"] = "More than one row matched the specified query"
        elif len(rows) == 1:
            if __opts__["test"]:
                changes["result"] = True
                changes["comment"] = "Row will be removed in " + table
                changes["changes"]["old"] = rows[0]
            else:
                if where_args is None:
                    cursor = conn.execute(
                        "DELETE FROM `" + table + "` WHERE " + where_sql
                    )
                else:
                    cursor = conn.execute(
                        "DELETE FROM `" + table + "` WHERE " + where_sql, where_args
                    )
                conn.commit()
                if cursor.rowcount == 1:
                    changes["result"] = True
                    changes["comment"] = "Row removed"
                    changes["changes"]["old"] = rows[0]
                else:
                    changes["result"] = False
                    changes["comment"] = "Unable to remove row"
        else:
            # No matching row: already in the desired state.
            changes["result"] = True
            changes["comment"] = "Row is absent"
    except Exception as e:  # pylint: disable=broad-except
        # Any sqlite/OS error is surfaced through the state comment.
        changes["result"] = False
        changes["comment"] = str(e)
    finally:
        if conn:
            conn.close()
    return changes
def row_present(name, db, table, data, where_sql, where_args=None, update=False):
    """
    Checks to make sure the given row exists. If row exists and update is True
    then row will be updated with data. Otherwise it will leave existing
    row unmodified and check it against data. If the existing data
    doesn't match data_check the state will fail. If the row doesn't
    exist then it will insert data into the table. If more than one
    row matches, then the state will fail.

    name
        Only used as the unique ID

    db
        The database file name

    table
        The table name to check the data

    data
        The dictionary of key/value pairs to check against if
        row exists, insert into the table if it doesn't

    where_sql
        The sql to select the row to check

    where_args
        The list parameters to substitute in where_sql

    update
        True will replace the existing row with data
        When False and the row exists and data does not equal
        the row data then the state will fail

    Returns the standard Salt state dict (name/changes/result/comment).
    """
    changes = {"name": name, "changes": {}, "result": None, "comment": ""}
    conn = None
    try:
        conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
        # Return rows as dicts so columns can be compared against ``data``.
        conn.row_factory = _dict_factory
        rows = None
        # NOTE: ``table`` and ``where_sql`` are interpolated directly into the
        # statement; they must come from trusted state data, not user input.
        if where_args is None:
            rows = _query(conn, "SELECT * FROM `" + table + "` WHERE " + where_sql)
        else:
            rows = _query(
                conn, "SELECT * FROM `" + table + "` WHERE " + where_sql, where_args
            )
        if len(rows) > 1:
            changes["result"] = False
            changes["comment"] = "More than one row matched the specified query"
        elif len(rows) == 1:
            # Compare the desired data against the existing row; the first
            # mismatching column decides the outcome.
            for key, value in data.items():
                if key in rows[0] and rows[0][key] != value:
                    if update:
                        if __opts__["test"]:
                            changes["result"] = True
                            # Fixed message grammar ("will be update" -> "updated").
                            changes["comment"] = "Row will be updated in " + table
                        else:
                            columns = []
                            params = []
                            for key, value in data.items():
                                columns.append("`" + key + "`=?")
                                params.append(value)
                            if where_args is not None:
                                params += where_args
                            sql = "UPDATE `" + table + "` SET "
                            sql += ",".join(columns)
                            sql += " WHERE "
                            sql += where_sql
                            cursor = conn.execute(sql, params)
                            conn.commit()
                            if cursor.rowcount == 1:
                                changes["result"] = True
                                changes["comment"] = "Row updated"
                                changes["changes"]["old"] = rows[0]
                                changes["changes"]["new"] = data
                            else:
                                changes["result"] = False
                                changes["comment"] = "Row update failed"
                    else:
                        changes["result"] = False
                        # Fixed missing space ("doesnot" -> "does not").
                        changes["comment"] = "Existing data does not match desired state"
                    break
            if changes["result"] is None:
                # No mismatching column found: the row already matches.
                changes["result"] = True
                changes["comment"] = "Row exists"
        else:
            # No matching row: insert it.
            if __opts__["test"]:
                changes["result"] = True
                changes["changes"]["new"] = data
                changes["comment"] = "Row will be inserted into " + table
            else:
                columns = []
                value_stmt = []
                values = []
                for key, value in data.items():
                    value_stmt.append("?")
                    values.append(value)
                    columns.append("`" + key + "`")
                sql = "INSERT INTO `" + table + "` ("
                sql += ",".join(columns)
                sql += ") VALUES ("
                sql += ",".join(value_stmt)
                sql += ")"
                cursor = conn.execute(sql, values)
                conn.commit()
                if cursor.rowcount == 1:
                    changes["result"] = True
                    changes["changes"]["new"] = data
                    changes["comment"] = "Inserted row"
                else:
                    changes["result"] = False
                    changes["comment"] = "Unable to insert data"
    except Exception as e:  # pylint: disable=broad-except
        # Any sqlite/OS error is surfaced through the state comment.
        changes["result"] = False
        changes["comment"] = str(e)
    finally:
        if conn:
            conn.close()
    return changes
def table_absent(name, db):
    """
    Make sure the specified table does not exist

    name
        The name of the table

    db
        The name of the database file
    """
    changes = {"name": name, "changes": {}, "result": None, "comment": ""}
    conn = None
    try:
        conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
        # Look the table up in sqlite_master; its stored CREATE statement is
        # recorded in changes when the table is dropped.
        tables = _query(
            conn,
            "SELECT sql FROM sqlite_master " + " WHERE type='table' AND name=?",
            [name],
        )
        if len(tables) > 1:
            changes["result"] = False
            changes["comment"] = "Multiple tables with the same name='" + name + "'"
        elif not tables:
            changes["result"] = True
            changes["comment"] = "'" + name + "' is already absent"
        elif __opts__["test"]:
            changes["result"] = True
            changes["comment"] = "'" + name + "' will be dropped"
        else:
            conn.execute("DROP TABLE " + name)
            conn.commit()
            changes["changes"]["old"] = tables[0][0]
            changes["result"] = True
            changes["comment"] = "'" + name + "' was dropped"
    except Exception as e:  # pylint: disable=broad-except
        changes["result"] = False
        changes["comment"] = str(e)
    finally:
        if conn:
            conn.close()
    return changes
def table_present(name, db, schema, force=False):
    """
    Make sure the specified table exists with the specified schema

    name
        The name of the table

    db
        The name of the database file

    schema
        The dictionary containing the schema information

    force
        If the name of the table exists and force is set to False,
        the state will fail. If force is set to True, the existing
        table will be replaced with the new table

    Returns the standard Salt state dict (name/changes/result/comment).
    """
    changes = {"name": name, "changes": {}, "result": None, "comment": ""}
    conn = None
    try:
        conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
        # sqlite_master stores the literal CREATE TABLE text for comparison.
        tables = _query(
            conn,
            "SELECT sql FROM sqlite_master " + "WHERE type='table' AND name=?",
            [name],
        )
        if len(tables) == 1:
            sql = None
            if isinstance(schema, str):
                sql = schema.strip()
            else:
                sql = _get_sql_from_schema(name, schema)
            # Schema comparison is purely textual against the stored SQL.
            if sql != tables[0][0]:
                if force:
                    if __opts__["test"]:
                        changes["result"] = True
                        changes["changes"]["old"] = tables[0][0]
                        changes["changes"]["new"] = sql
                        changes["comment"] = "'" + name + "' will be replaced"
                    else:
                        # Replace: drop and recreate (existing data is lost).
                        conn.execute("DROP TABLE `" + name + "`")
                        conn.execute(sql)
                        conn.commit()
                        changes["result"] = True
                        changes["changes"]["old"] = tables[0][0]
                        changes["changes"]["new"] = sql
                        changes["comment"] = "Replaced '" + name + "'"
                else:
                    changes["result"] = False
                    changes["comment"] = (
                        "Expected schema=" + sql + "\nactual schema=" + tables[0][0]
                    )
            else:
                changes["result"] = True
                changes["comment"] = "'" + name + "' exists with matching schema"
        elif len(tables) == 0:
            # Create the table
            sql = None
            if isinstance(schema, str):
                sql = schema
            else:
                sql = _get_sql_from_schema(name, schema)
            if __opts__["test"]:
                changes["result"] = True
                changes["changes"]["new"] = sql
                changes["comment"] = "'" + name + "' will be created"
            else:
                conn.execute(sql)
                conn.commit()
                changes["result"] = True
                changes["changes"]["new"] = sql
                changes["comment"] = "Created table '" + name + "'"
        else:
            changes["result"] = False
            changes["comment"] = "Multiple tables with the same name=" + name
    except Exception as e:  # pylint: disable=broad-except
        changes["result"] = False
        changes["comment"] = str(e)
    finally:
        if conn:
            conn.close()
    return changes
def _query(conn, sql, parameters=None):
cursor = None
if parameters is None:
cursor = conn.execute(sql)
else:
cursor = conn.execute(sql, parameters)
return cursor.fetchall()
def _get_sql_from_schema(name, schema):
return "CREATE TABLE `" + name + "` (" + ",".join(schema) + ")"
def _dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/sqlite3.py | 0.436862 | 0.261048 | sqlite3.py | pypi |
import os
import salt.utils.platform
def __virtual__():
    """
    No dependency checks, and not renaming, just return True
    """
    # This state manipulates os.environ directly (and the Windows registry
    # via __utils__ in setenv), so it is always loadable.
    return True
def _norm_key(key):
    """
    Normalize windows environment keys
    """
    # Environment variable names are folded to upper case on Windows only.
    return key.upper() if salt.utils.platform.is_windows() else key
def setenv(
    name,
    value,
    false_unsets=False,
    clear_all=False,
    update_minion=False,
    permanent=False,
):
    """
    Set the salt process environment variables.

    name
        The environment key to set. Must be a string.

    value
        Either a string or dict. When string, it will be the value
        set for the environment key of 'name' above.
        When a dict, each key/value pair represents an environment
        variable to set.

    false_unsets
        If a key's value is False and false_unsets is True, then the
        key will be removed from the salt processes environment dict
        entirely. If a key's value is False and false_unsets is not
        True, then the key's value will be set to an empty string.
        Default: False

    clear_all
        USE WITH CAUTION! This option can unset environment variables
        needed for salt to function properly.
        If clear_all is True, then any environment variables not
        defined in the environ dict will be deleted.
        Default: False

    update_minion
        If True, apply these environ changes to the main salt-minion
        process. If False, the environ changes will only affect the
        current salt subprocess.
        Default: False

    permanent
        On Windows minions this will set the environment variable in the
        registry so that it is always added as a environment variable when
        applications open. If you want to set the variable to HKLM instead of
        HKCU just pass in "HKLM" for this parameter. On all other minion types
        this will be ignored. Note: This will only take affect on applications
        opened after this has been set.

    Example:

    .. code-block:: yaml

        a_string_env:
           environ.setenv:
             - name: foo
             - value: bar
             - update_minion: True

        a_dict_env:
           environ.setenv:
             - name: does_not_matter
             - value:
                 foo: bar
                 baz: quux
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    environ = {}
    # Normalize ``value`` into a dict of {key: value} pairs to apply.
    if isinstance(value, str) or value is False:
        environ[name] = value
    elif isinstance(value, dict):
        environ = value
    else:
        ret["result"] = False
        ret["comment"] = "Environ value must be string, dict or False"
        return ret
    if clear_all is True:
        # Any keys not in 'environ' dict supplied by user will be unset
        to_unset = [key for key in os.environ if key not in environ]
        for key in to_unset:
            if false_unsets is not True:
                # This key value will change to ''
                ret["changes"].update({key: ""})
            else:
                # We're going to delete the key
                ret["changes"].update({key: None})
    current_environ = dict(os.environ)
    already_set = []
    # First pass: compute the change set without touching the environment.
    for key, val in environ.items():
        if val is False:
            # We unset this key from the environment if
            # false_unsets is True. Otherwise we want to set
            # the value to ''
            def key_exists():
                # On Windows a key also "exists" when it is present in the
                # persistent registry environment, not just in os.environ of
                # this process.
                if salt.utils.platform.is_windows():
                    permanent_hive = "HKCU"
                    permanent_key = "Environment"
                    if permanent == "HKLM":
                        permanent_hive = "HKLM"
                        permanent_key = (
                            r"SYSTEM\CurrentControlSet\Control\Session"
                            r" Manager\Environment"
                        )
                    out = __utils__["reg.read_value"](
                        permanent_hive, permanent_key, _norm_key(key)
                    )
                    return out["success"] is True
                else:
                    return False

            if current_environ.get(_norm_key(key), None) is None and not key_exists():
                # The key does not exist in environment
                if false_unsets is not True:
                    # This key will be added with value ''
                    ret["changes"].update({key: ""})
            else:
                # The key exists.
                if false_unsets is not True:
                    # Check to see if the value will change
                    if current_environ.get(_norm_key(key), None) != "":
                        # This key value will change to ''
                        ret["changes"].update({key: ""})
                else:
                    # We're going to delete the key
                    ret["changes"].update({key: None})
        elif current_environ.get(_norm_key(key), "") == val:
            already_set.append(key)
        else:
            ret["changes"].update({key: val})
    if __opts__["test"]:
        if ret["changes"]:
            ret["comment"] = "Environ values will be changed"
        else:
            ret["comment"] = "Environ values are already set with the correct values"
        return ret
    if ret["changes"]:
        # Second pass: apply the changes via the execution module, which
        # returns the dict of changes it actually made.
        environ_ret = __salt__["environ.setenv"](
            environ, false_unsets, clear_all, update_minion, permanent
        )
        if not environ_ret:
            ret["result"] = False
            ret["comment"] = "Failed to set environ variables"
            return ret
        ret["result"] = True
        ret["changes"] = environ_ret
        ret["comment"] = "Environ values were set"
    else:
        ret["comment"] = "Environ values were already set with the correct values"
    return ret
def __virtual__():
    """
    Only load if the kmod module is available in __salt__
    """
    # kmod.available is registered only when the execution module loaded.
    if "kmod.available" not in __salt__:
        return (False, "kmod module could not be loaded")
    return True
def _append_comment(ret, comment):
"""
append ``comment`` to ``ret['comment']``
"""
if ret["comment"]:
ret["comment"] = ret["comment"].rstrip() + "\n" + comment
else:
ret["comment"] = comment
return ret
def present(name, persist=False, mods=None):
    """
    Ensure that the specified kernel module is loaded
    name
        The name of the kernel module to verify is loaded
    persist
        Also add module to ``/etc/modules``
    mods
        A list of modules to verify are loaded. If this argument is used, the
        ``name`` argument, although still required, is not used, and becomes a
        placeholder
        .. versionadded:: 2016.3.0
    """
    # Normalize to a list: with no explicit ``mods``, manage only ``name``.
    if not isinstance(mods, (list, tuple)):
        mods = [name]
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}
    loaded_mods = __salt__["kmod.mod_list"]()
    if persist:
        persist_mods = __salt__["kmod.mod_list"](True)
        # Intersection of loaded modules and persistent modules
        loaded_mods = list(set(loaded_mods) & set(persist_mods))
    # Intersection of loaded and proposed modules
    already_loaded = list(set(loaded_mods) & set(mods))
    if len(already_loaded) == 1:
        comment = "Kernel module {} is already present".format(already_loaded[0])
        _append_comment(ret, comment)
    elif len(already_loaded) > 1:
        comment = "Kernel modules {} are already present".format(
            ", ".join(already_loaded)
        )
        _append_comment(ret, comment)
    if len(already_loaded) == len(mods):
        return ret  # all modules already loaded
    # Complement of proposed modules and already loaded modules
    not_loaded = list(set(mods) - set(already_loaded))
    if __opts__["test"]:
        # Test mode: report what would be loaded without touching the system.
        ret["result"] = None
        if ret["comment"]:
            ret["comment"] += "\n"
        if len(not_loaded) == 1:
            comment = "Kernel module {} is set to be loaded".format(not_loaded[0])
        else:
            comment = "Kernel modules {} are set to be loaded".format(
                ", ".join(not_loaded)
            )
        _append_comment(ret, comment)
        return ret
    # Complement of proposed, unloaded modules and available modules
    unavailable = list(set(not_loaded) - set(__salt__["kmod.available"]()))
    if unavailable:
        if len(unavailable) == 1:
            comment = "Kernel module {} is unavailable".format(unavailable[0])
        else:
            comment = "Kernel modules {} are unavailable".format(", ".join(unavailable))
        _append_comment(ret, comment)
        ret["result"] = False
    # The remaining modules are not loaded and are available for loading
    available = list(set(not_loaded) - set(unavailable))
    loaded = {"yes": [], "no": [], "failed": []}
    # Modules pulled in as dependencies of an earlier kmod.load call; they
    # are counted as successes without issuing a redundant load.
    loaded_by_dependency = []
    for mod in available:
        if mod in loaded_by_dependency:
            loaded["yes"].append(mod)
            continue
        load_result = __salt__["kmod.load"](mod, persist)
        if isinstance(load_result, (list, tuple)):
            # kmod.load returns the list of modules it loaded; an empty
            # list means the load silently failed.
            if len(load_result) > 0:
                for module in load_result:
                    ret["changes"][module] = "loaded"
                    if module != mod:
                        loaded_by_dependency.append(module)
                loaded["yes"].append(mod)
            else:
                ret["result"] = False
                loaded["no"].append(mod)
        else:
            # Non-sequence return is an error message from kmod.load.
            ret["result"] = False
            loaded["failed"].append([mod, load_result])
    # Update comment with results
    if len(loaded["yes"]) == 1:
        _append_comment(ret, "Loaded kernel module {}".format(loaded["yes"][0]))
    elif len(loaded["yes"]) > 1:
        _append_comment(
            ret, "Loaded kernel modules {}".format(", ".join(loaded["yes"]))
        )
    if len(loaded["no"]) == 1:
        _append_comment(ret, "Failed to load kernel module {}".format(loaded["no"][0]))
    if len(loaded["no"]) > 1:
        _append_comment(
            ret, "Failed to load kernel modules {}".format(", ".join(loaded["no"]))
        )
    if loaded["failed"]:
        for mod, msg in loaded["failed"]:
            _append_comment(ret, "Failed to load kernel module {}: {}".format(mod, msg))
    return ret
def absent(name, persist=False, comment=True, mods=None):
    """
    Verify that the named kernel module is not loaded
    name
        The name of the kernel module to verify is not loaded
    persist
        Remove module from ``/etc/modules``
    comment
        Comment out module in ``/etc/modules`` rather than remove it
    mods
        A list of modules to verify are unloaded. If this argument is used,
        the ``name`` argument, although still required, is not used, and
        becomes a placeholder
        .. versionadded:: 2016.3.0
    """
    # Normalize to a list: with no explicit ``mods``, manage only ``name``.
    if not isinstance(mods, (list, tuple)):
        mods = [name]
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}
    loaded_mods = __salt__["kmod.mod_list"]()
    if persist:
        persist_mods = __salt__["kmod.mod_list"](True)
        # Union of loaded modules and persistent modules
        loaded_mods = list(set(loaded_mods) | set(persist_mods))
    # Intersection of proposed modules and loaded modules
    to_unload = list(set(mods) & set(loaded_mods))
    if to_unload:
        if __opts__["test"]:
            # Test mode: only report what would be removed.
            ret["result"] = None
            if len(to_unload) == 1:
                _append_comment(
                    ret, "Kernel module {} is set to be removed".format(to_unload[0])
                )
            elif len(to_unload) > 1:
                _append_comment(
                    ret,
                    "Kernel modules {} are set to be removed".format(
                        ", ".join(to_unload)
                    ),
                )
            return ret
        # Unload modules and collect results
        unloaded = {"yes": [], "no": [], "failed": []}
        for mod in to_unload:
            unload_result = __salt__["kmod.remove"](mod, persist, comment)
            if isinstance(unload_result, (list, tuple)):
                # kmod.remove returns the list of removed modules; an
                # empty list means the removal silently failed.
                if len(unload_result) > 0:
                    for module in unload_result:
                        ret["changes"][module] = "removed"
                    unloaded["yes"].append(mod)
                else:
                    ret["result"] = False
                    unloaded["no"].append(mod)
            else:
                # Non-sequence return is an error message from kmod.remove.
                ret["result"] = False
                unloaded["failed"].append([mod, unload_result])
        # Update comment with results
        if len(unloaded["yes"]) == 1:
            _append_comment(ret, "Removed kernel module {}".format(unloaded["yes"][0]))
        elif len(unloaded["yes"]) > 1:
            _append_comment(
                ret, "Removed kernel modules {}".format(", ".join(unloaded["yes"]))
            )
        if len(unloaded["no"]) == 1:
            _append_comment(
                ret, "Failed to remove kernel module {}".format(unloaded["no"][0])
            )
        if len(unloaded["no"]) > 1:
            _append_comment(
                ret,
                "Failed to remove kernel modules {}".format(", ".join(unloaded["no"])),
            )
        if unloaded["failed"]:
            for mod, msg in unloaded["failed"]:
                _append_comment(
                    ret, "Failed to remove kernel module {}: {}".format(mod, msg)
                )
        return ret
    else:
        # Nothing in ``mods`` is currently loaded (or persisted).
        if len(mods) == 1:
            ret["comment"] = "Kernel module {} is already removed".format(mods[0])
        else:
            ret["comment"] = "Kernel modules {} are already removed".format(
                ", ".join(mods)
            )
        return ret
import logging
import os
import salt.utils.data
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
def __virtual__():
    """
    Load these states only when the ``boto_rds`` execution module is available.
    """
    if "boto_rds.exists" not in __salt__:
        return (False, __salt__.missing_fun_string("boto_rds.exists"))
    return "boto_rds"
def present(
    name,
    allocated_storage,
    db_instance_class,
    engine,
    master_username,
    master_user_password,
    db_name=None,
    storage_type=None,
    db_security_groups=None,
    vpc_security_group_ids=None,
    vpc_security_groups=None,
    availability_zone=None,
    db_subnet_group_name=None,
    preferred_maintenance_window=None,
    db_parameter_group_name=None,
    db_cluster_identifier=None,
    tde_credential_arn=None,
    tde_credential_password=None,
    storage_encrypted=None,
    kms_keyid=None,
    backup_retention_period=None,
    preferred_backup_window=None,
    port=None,
    multi_az=None,
    engine_version=None,
    auto_minor_version_upgrade=None,
    license_model=None,
    iops=None,
    option_group_name=None,
    character_set_name=None,
    publicly_accessible=None,
    wait_status=None,
    tags=None,
    copy_tags_to_snapshot=None,
    region=None,
    domain=None,
    key=None,
    keyid=None,
    monitoring_interval=None,
    monitoring_role_arn=None,
    domain_iam_role_name=None,
    promotion_tier=None,
    profile=None,
):
    """
    Ensure RDS instance exists.
    name
        Name of the RDS state definition.
    allocated_storage
        The amount of storage (in gigabytes) to be initially allocated for the
        database instance.
    db_instance_class
        The compute and memory capacity of the Amazon RDS DB instance.
    engine
        The name of the database engine to be used for this instance. Supported
        engine types are: MySQL, mariadb, oracle-se1, oracle-se, oracle-ee, sqlserver-ee,
        sqlserver-se, sqlserver-ex, sqlserver-web, postgres and aurora. For more
        information, please see the ``engine`` argument in the Boto3 RDS
        `create_db_instance`_ documentation.
    master_username
        The name of master user for the client DB instance.
    master_user_password
        The password for the master database user. Can be any printable ASCII
        character except "/", '"', or "@".
    db_name
        The meaning of this parameter differs according to the database engine you use.
        See the Boto3 RDS documentation to determine the appropriate value for your configuration.
        https://boto3.readthedocs.io/en/latest/reference/services/rds.html#RDS.Client.create_db_instance
    storage_type
        Specifies the storage type to be associated with the DB instance.
        Options are standard, gp2 and io1. If you specify io1, you must also include
        a value for the Iops parameter.
    db_security_groups
        A list of DB security groups to associate with this DB instance.
    vpc_security_group_ids
        A list of EC2 VPC security group IDs to associate with this DB instance.
    vpc_security_groups
        A list of EC2 VPC security groups (IDs or Name tags) to associate with this DB instance.
    availability_zone
        The EC2 Availability Zone that the database instance will be created
        in.
    db_subnet_group_name
        A DB subnet group to associate with this DB instance.
    preferred_maintenance_window
        The weekly time range (in UTC) during which system maintenance can
        occur.
    db_parameter_group_name
        A DB parameter group to associate with this DB instance.
    db_cluster_identifier
        If the DB instance is a member of a DB cluster, contains the name of
        the DB cluster that the DB instance is a member of.
    tde_credential_arn
        The ARN from the Key Store with which the instance is associated for
        TDE encryption.
    tde_credential_password
        The password to use for TDE encryption if an encryption key is not used.
    storage_encrypted
        Specifies whether the DB instance is encrypted.
    kms_keyid
        If storage_encrypted is true, the KMS key identifier for the encrypted
        DB instance.
    backup_retention_period
        The number of days for which automated backups are retained.
    preferred_backup_window
        The daily time range during which automated backups are created if
        automated backups are enabled.
    port
        The port number on which the database accepts connections.
    multi_az
        Specifies if the DB instance is a Multi-AZ deployment. You cannot set
        the AvailabilityZone parameter if the MultiAZ parameter is set to true.
    engine_version
        The version number of the database engine to use.
    auto_minor_version_upgrade
        Indicates that minor engine upgrades will be applied automatically to
        the DB instance during the maintenance window.
    license_model
        License model information for this DB instance.
    iops
        The amount of Provisioned IOPS (input/output operations per second) to
        be initially allocated for the DB instance.
    option_group_name
        Indicates that the DB instance should be associated with the specified
        option group.
    character_set_name
        For supported engines, indicates that the DB instance should be
        associated with the specified CharacterSet.
    publicly_accessible
        Specifies the accessibility options for the DB instance. A value of
        true specifies an Internet-facing instance with a publicly resolvable
        DNS name, which resolves to a public IP address. A value of false
        specifies an internal instance with a DNS name that resolves to a
        private IP address.
    wait_status
        Wait for the RDS instance to reach a desired status before finishing
        the state. Available states: available, modifying, backing-up
    tags
        A dict of tags.
    copy_tags_to_snapshot
        Specifies whether tags are copied from the DB instance to snapshots of
        the DB instance.
    region
        Region to connect to.
    domain
        The identifier of the Active Directory Domain.
    key
        AWS secret key to be used.
    keyid
        AWS access key to be used.
    monitoring_interval
        The interval, in seconds, between points when Enhanced Monitoring
        metrics are collected for the DB instance.
    monitoring_role_arn
        The ARN for the IAM role that permits RDS to send Enhanced Monitoring
        metrics to CloudWatch Logs.
    domain_iam_role_name
        Specify the name of the IAM role to be used when making API calls to
        the Directory Service.
    promotion_tier
        A value that specifies the order in which an Aurora Replica is
        promoted to the primary instance after a failure of the existing
        primary instance. For more information, see Fault Tolerance for an
        Aurora DB Cluster .
    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    .. _create_db_instance: https://boto3.readthedocs.io/en/latest/reference/services/rds.html#RDS.Client.create_db_instance
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # Idempotency check: do nothing if an instance with this name exists.
    r = __salt__["boto_rds.exists"](name, tags, region, key, keyid, profile)
    if not r.get("exists"):
        if __opts__["test"]:
            ret["comment"] = "RDS instance {} would be created.".format(name)
            ret["result"] = None
            return ret
        # NOTE(review): these arguments are positional and must match the
        # parameter order of boto_rds.create exactly — do not reorder.
        r = __salt__["boto_rds.create"](
            name,
            allocated_storage,
            db_instance_class,
            engine,
            master_username,
            master_user_password,
            db_name,
            db_security_groups,
            vpc_security_group_ids,
            vpc_security_groups,
            availability_zone,
            db_subnet_group_name,
            preferred_maintenance_window,
            db_parameter_group_name,
            backup_retention_period,
            preferred_backup_window,
            port,
            multi_az,
            engine_version,
            auto_minor_version_upgrade,
            license_model,
            iops,
            option_group_name,
            character_set_name,
            publicly_accessible,
            wait_status,
            tags,
            db_cluster_identifier,
            storage_type,
            tde_credential_arn,
            tde_credential_password,
            storage_encrypted,
            kms_keyid,
            domain,
            copy_tags_to_snapshot,
            monitoring_interval,
            monitoring_role_arn,
            domain_iam_role_name,
            region,
            promotion_tier,
            key,
            keyid,
            profile,
        )
        if not r.get("created"):
            ret["result"] = False
            ret["comment"] = "Failed to create RDS instance {}.".format(
                r["error"]["message"]
            )
            return ret
        ret["changes"]["old"] = {"instance": None}
        # Record the freshly-created instance description as the new state.
        ret["changes"]["new"] = {
            "instance": __salt__["boto_rds.describe_db_instances"](
                name=name,
                jmespath="DBInstances[0]",
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
        }
        ret["comment"] = "RDS instance {} created.".format(name)
    else:
        ret["comment"] = "RDS instance {} exists.".format(name)
    return ret
def replica_present(
    name,
    source,
    db_instance_class=None,
    availability_zone=None,
    port=None,
    auto_minor_version_upgrade=None,
    iops=None,
    option_group_name=None,
    publicly_accessible=None,
    tags=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
    db_parameter_group_name=None,
):
    """
    Ensure an RDS read replica of ``source`` exists, and that its DB
    parameter group matches ``db_parameter_group_name``.
    .. code-block:: yaml
        Ensure myrds replica RDS exists:
          boto_rds.create_replica:
            - name: myreplica
            - source: mydb
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    replica_exists = __salt__["boto_rds.exists"](
        name, tags, region, key, keyid, profile
    )
    if not replica_exists.get("exists"):
        if __opts__["test"]:
            ret["comment"] = "RDS read replica {} is set to be created ".format(name)
            ret["result"] = None
            return ret
        created = __salt__["boto_rds.create_read_replica"](
            name,
            source,
            db_instance_class,
            availability_zone,
            port,
            auto_minor_version_upgrade,
            iops,
            option_group_name,
            publicly_accessible,
            tags,
            region,
            key,
            keyid,
            profile,
        )
        if created:
            ret["comment"] = "RDS replica {} created.".format(name)
            ret["changes"]["old"] = {"instance": None}
            ret["changes"]["new"] = {
                "instance": __salt__["boto_rds.describe_db_instances"](
                    name=name,
                    jmespath="DBInstances[0]",
                    region=region,
                    key=key,
                    keyid=keyid,
                    profile=profile,
                )
            }
        else:
            ret["result"] = False
            ret["comment"] = "Failed to create RDS replica {}.".format(name)
    else:
        # Replica already exists: reconcile its DB parameter group.
        jmespath = "DBInstances[0].DBParameterGroups[0].DBParameterGroupName"
        pmg_name = __salt__["boto_rds.describe_db_instances"](
            name=name,
            jmespath=jmespath,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        pmg_name = pmg_name[0] if pmg_name else None
        if pmg_name != db_parameter_group_name:
            modified = __salt__["boto_rds.modify_db_instance"](
                name=name,
                db_parameter_group_name=db_parameter_group_name,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
            if not modified:
                ret["result"] = False
                ret[
                    "comment"
                ] = "Failed to update parameter group of {} RDS instance.".format(name)
                # BUG FIX: previously execution fell through here, recorded
                # bogus changes, and overwrote result with True, masking the
                # failure. Return immediately instead.
                return ret
            ret["changes"]["old"] = pmg_name
            ret["changes"]["new"] = db_parameter_group_name
        ret["result"] = True
        ret["comment"] = "RDS replica {} exists.".format(name)
    return ret
def subnet_group_present(
    name,
    description,
    subnet_ids=None,
    subnet_names=None,
    tags=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Ensure DB subnet group exists.
    name
        The name for the DB subnet group. This value is stored as a lowercase string.
    subnet_ids
        A list of the EC2 Subnet IDs for the DB subnet group.
        Either subnet_ids or subnet_names must be provided.
    subnet_names
        A list of The EC2 Subnet names for the DB subnet group.
        Either subnet_ids or subnet_names must be provided.
    description
        Subnet group description.
    tags
        A dict of tags.
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    """
    # Exactly one of subnet_ids / subnet_names must be supplied.
    if not salt.utils.data.exactly_one((subnet_ids, subnet_names)):
        raise SaltInvocationError(
            "One (but not both) of subnet_ids or subnet_names must be provided."
        )
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    if not subnet_ids:
        subnet_ids = []
    # Resolve subnet Name tags to subnet IDs; fail fast on any lookup error.
    if subnet_names:
        for i in subnet_names:
            r = __salt__["boto_vpc.get_resource_id"](
                "subnet", name=i, region=region, key=key, keyid=keyid, profile=profile
            )
            if "error" in r:
                ret["comment"] = "Error looking up subnet ids: {}".format(
                    r["error"]["message"]
                )
                ret["result"] = False
                return ret
            if r["id"] is None:
                ret["comment"] = "Subnet {} does not exist.".format(i)
                ret["result"] = False
                return ret
            subnet_ids.append(r["id"])
    exists = __salt__["boto_rds.subnet_group_exists"](
        name=name, tags=tags, region=region, key=key, keyid=keyid, profile=profile
    )
    if not exists.get("exists"):
        if __opts__["test"]:
            ret["comment"] = "Subnet group {} is set to be created.".format(name)
            ret["result"] = None
            return ret
        created = __salt__["boto_rds.create_subnet_group"](
            name=name,
            description=description,
            subnet_ids=subnet_ids,
            tags=tags,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        if not created:
            ret["result"] = False
            ret["comment"] = "Failed to create {} subnet group.".format(name)
            return ret
        ret["changes"]["old"] = None
        ret["changes"]["new"] = name
        ret["comment"] = "Subnet {} created.".format(name)
        return ret
    else:
        # NOTE(review): an existing group's subnets/description are not
        # reconciled here — only existence is checked.
        ret["comment"] = "Subnet {} present.".format(name)
        return ret
def absent(
    name,
    skip_final_snapshot=None,
    final_db_snapshot_identifier=None,
    tags=None,
    wait_for_deletion=True,
    timeout=180,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Ensure RDS instance is absent.
    name
        Name of the RDS instance.
    skip_final_snapshot
        Whether a final db snapshot is created before the instance is deleted.
        If True, no snapshot is created.
        If False, a snapshot is created before deleting the instance.
    final_db_snapshot_identifier
        If a final snapshot is requested, this is the identifier used for that
        snapshot.
    tags
        A dict of tags.
    wait_for_deletion (bool)
        Wait for the RDS instance to be deleted completely before finishing
        the state.
    timeout (in seconds)
        The amount of time that can pass before raising an Exception.
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    current = __salt__["boto_rds.describe_db_instances"](
        name=name, region=region, key=key, keyid=keyid, profile=profile
    )
    # An empty description means the instance is already gone.
    if not current:
        ret["result"] = True
        ret["comment"] = "{} RDS already absent.".format(name)
        return ret
    if __opts__["test"]:
        ret["comment"] = "RDS {} would be removed.".format(name)
        ret["result"] = None
        return ret
    # NOTE(review): positional arguments — must match boto_rds.delete's
    # parameter order.
    deleted = __salt__["boto_rds.delete"](
        name,
        skip_final_snapshot,
        final_db_snapshot_identifier,
        region,
        key,
        keyid,
        profile,
        tags,
        wait_for_deletion,
        timeout,
    )
    if not deleted:
        ret["result"] = False
        ret["comment"] = "Failed to delete {} RDS.".format(name)
        return ret
    ret["changes"]["old"] = {"instance": current[0]}
    ret["changes"]["new"] = {"instance": None}
    ret["comment"] = "RDS {} deleted.".format(name)
    return ret
def subnet_group_absent(
    name, tags=None, region=None, key=None, keyid=None, profile=None
):
    """
    Ensure an RDS DB subnet group is absent.
    name
        The name of the DB subnet group to remove.
    tags
        A dict of tags.
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    exists = __salt__["boto_rds.subnet_group_exists"](
        name=name, tags=tags, region=region, key=key, keyid=keyid, profile=profile
    )
    # BUG FIX: subnet_group_exists returns a dict (see subnet_group_present,
    # which checks exists.get("exists")). The old ``if not exists`` test was
    # always False for a non-empty dict, so the early exit never fired.
    if not exists.get("exists"):
        ret["result"] = True
        ret["comment"] = "{} RDS subnet group does not exist.".format(name)
        return ret
    if __opts__["test"]:
        ret["comment"] = "RDS subnet group {} is set to be removed.".format(name)
        ret["result"] = None
        return ret
    deleted = __salt__["boto_rds.delete_subnet_group"](
        name, region, key, keyid, profile
    )
    if not deleted:
        ret["result"] = False
        ret["comment"] = "Failed to delete {} RDS subnet group.".format(name)
        return ret
    ret["changes"]["old"] = name
    ret["changes"]["new"] = None
    ret["comment"] = "RDS subnet group {} deleted.".format(name)
    return ret
def parameter_present(
    name,
    db_parameter_group_family,
    description,
    parameters=None,
    apply_method="pending-reboot",
    tags=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Ensure DB parameter group exists and update parameters.
    name
        The name for the parameter group.
    db_parameter_group_family
        The DB parameter group family name. A
        DB parameter group can be associated with one and only one DB
        parameter group family, and can be applied only to a DB instance
        running a database engine and engine version compatible with that
        DB parameter group family.
    description
        Parameter group description.
    parameters
        The DB parameters that need to be changed, as a list of
        single-entry dicts (``- param_name: value``).
    apply_method
        The `apply-immediate` method can be used only for dynamic
        parameters; the `pending-reboot` method can be used with MySQL
        and Oracle DB instances for either dynamic or static
        parameters. For Microsoft SQL Server DB instances, the
        `pending-reboot` method can be used only for static
        parameters.
    tags
        A dict of tags.
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    res = __salt__["boto_rds.parameter_group_exists"](
        name=name, tags=tags, region=region, key=key, keyid=keyid, profile=profile
    )
    if not res.get("exists"):
        if __opts__["test"]:
            ret["comment"] = "Parameter group {} is set to be created.".format(name)
            ret["result"] = None
            return ret
        created = __salt__["boto_rds.create_parameter_group"](
            name=name,
            db_parameter_group_family=db_parameter_group_family,
            description=description,
            tags=tags,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        if not created:
            ret["result"] = False
            ret["comment"] = "Failed to create {} parameter group.".format(name)
            return ret
        ret["changes"]["New Parameter Group"] = name
        ret["comment"] = "Parameter group {} created.".format(name)
    else:
        ret["comment"] = "Parameter group {} present.".format(name)
    if parameters is not None:
        params = {}
        changed = {}
        # Normalize user-supplied values to the strings RDS reports:
        # booleans become "on"/"off", everything else is stringified.
        for items in parameters:
            for k, value in items.items():
                if isinstance(value, bool):  # was ``type(value) is bool``
                    params[k] = "on" if value else "off"
                else:
                    params[k] = str(value)
        log.debug("Parameters from user are : %s.", params)
        options = __salt__["boto_rds.describe_parameters"](
            name=name, region=region, key=key, keyid=keyid, profile=profile
        )
        if not options.get("result"):
            ret["result"] = False
            ret["comment"] = os.linesep.join(
                # BUG FIX: error message previously read "Faled".
                [ret["comment"], "Failed to get parameters for group {}.".format(name)]
            )
            return ret
        # Diff desired values against the group's current values.
        for parameter in options["parameters"].values():
            if parameter["ParameterName"] in params and params.get(
                parameter["ParameterName"]
            ) != str(parameter["ParameterValue"]):
                log.debug(
                    "Values that are being compared for %s are %s:%s.",
                    parameter["ParameterName"],
                    params.get(parameter["ParameterName"]),
                    parameter["ParameterValue"],
                )
                changed[parameter["ParameterName"]] = params.get(
                    parameter["ParameterName"]
                )
        if len(changed) > 0:
            if __opts__["test"]:
                ret["comment"] = os.linesep.join(
                    [
                        ret["comment"],
                        "Parameters {} for group {} are set to be changed.".format(
                            changed, name
                        ),
                    ]
                )
                ret["result"] = None
                return ret
            update = __salt__["boto_rds.update_parameter_group"](
                name,
                parameters=changed,
                apply_method=apply_method,
                tags=tags,
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
            if "error" in update:
                ret["result"] = False
                ret["comment"] = os.linesep.join(
                    [
                        ret["comment"],
                        "Failed to change parameters {} for group {}:".format(
                            changed, name
                        ),
                        update["error"]["message"],
                    ]
                )
                return ret
            ret["changes"]["Parameters"] = changed
            ret["comment"] = os.linesep.join(
                [
                    ret["comment"],
                    "Parameters {} for group {} are changed.".format(changed, name),
                ]
            )
        else:
            ret["comment"] = os.linesep.join(
                [
                    ret["comment"],
                    "Parameters {} for group {} are present.".format(params, name),
                ]
            )
    return ret
import logging
import salt.utils.platform
log = logging.getLogger(__name__)
__virtualname__ = "proxy"
def __virtual__():
    """
    Expose this state module only on the platforms it supports.
    """
    supported = salt.utils.platform.is_darwin() or salt.utils.platform.is_windows()
    if not supported:
        return (False, "Only Mac OS and Windows supported")
    return True
def managed(
    name,
    port,
    services=None,
    user=None,
    password=None,
    bypass_domains=None,
    network_service="Ethernet",
):
    """
    Manages proxy settings for this minion
    name
        The proxy server to use
    port
        The port used by the proxy server
    services
        A list of the services that should use the given proxy settings, valid services include http, https and ftp.
        If no service is given all of the valid services will be used.
    user
        The username to use for the proxy server if required
    password
        The password to use for the proxy server if required
    bypass_domains
        An array of the domains that should bypass the proxy
    network_service
        The network service to apply the changes to, this only necessary on
        macOS
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    valid_services = ["http", "https", "ftp"]
    if services is None:
        services = valid_services
    # Darwin
    if __grains__["os"] in ["MacOS", "Darwin"]:
        ret["changes"] = {"new": []}
        for service in services:
            current_settings = __salt__["proxy.get_{}_proxy".format(service)]()
            if current_settings.get("server") == name and current_settings.get(
                "port"
            ) == str(port):
                ret["comment"] += "{} proxy settings already set.\n".format(service)
            elif __salt__["proxy.set_{}_proxy".format(service)](
                name, port, user, password, network_service
            ):
                ret["comment"] += "{} proxy settings updated correctly\n".format(
                    service
                )
                ret["changes"]["new"].append(
                    {"service": service, "server": name, "port": port, "user": user}
                )
            else:
                ret["result"] = False
                # BUG FIX: the "{0}" placeholder was previously appended
                # verbatim because .format() was never called.
                ret["comment"] += "Failed to set {} proxy settings.\n".format(service)
        if bypass_domains is not None:
            current_domains = __salt__["proxy.get_proxy_bypass"]()
            if len(set(current_domains).intersection(bypass_domains)) == len(
                bypass_domains
            ):
                ret["comment"] += "Proxy bypass domains are already set correctly.\n"
            elif __salt__["proxy.set_proxy_bypass"](bypass_domains, network_service):
                ret["comment"] += "Proxy bypass domains updated correctly\n"
                ret["changes"]["new"].append(
                    {
                        "bypass_domains": list(
                            set(bypass_domains).difference(current_domains)
                        )
                    }
                )
            else:
                ret["result"] = False
                ret["comment"] += "Failed to set bypass proxy domains.\n"
        if len(ret["changes"]["new"]) == 0:
            del ret["changes"]["new"]
        return ret
    # Windows - Needs its own branch as all settings need to be set at the same time
    if __grains__["os"] in ["Windows"]:
        changes_needed = False
        current_settings = __salt__["proxy.get_proxy_win"]()
        current_domains = __salt__["proxy.get_proxy_bypass"]()
        if current_settings.get("enabled", False) is True:
            for service in services:
                # We need to update one of our proxy servers
                if service not in current_settings:
                    changes_needed = True
                    break
                if current_settings[service]["server"] != name or current_settings[
                    service
                ]["port"] != str(port):
                    changes_needed = True
                    break
        else:
            # Proxy settings aren't enabled
            changes_needed = True
        # We need to update our bypass domains.
        # BUG FIX: guard against bypass_domains being None (its default) —
        # intersection(None)/len(None) raised TypeError before.
        if bypass_domains is not None and len(
            set(current_domains).intersection(bypass_domains)
        ) != len(bypass_domains):
            changes_needed = True
        if changes_needed:
            if __salt__["proxy.set_proxy_win"](name, port, services, bypass_domains):
                ret["comment"] = "Proxy settings updated correctly"
            else:
                ret["result"] = False
                # BUG FIX: previously emitted a raw, unformatted "{0}".
                ret["comment"] = "Failed to set proxy settings."
        else:
            ret["comment"] = "Proxy settings already correct."
    return ret
import os
import salt.utils.path
def __virtual__():
    """
    Load this state module only when the ``lvm`` binary is on the PATH.
    """
    if not salt.utils.path.which("lvm"):
        return (False, "lvm command not found")
    return "lvm"
def _convert_to_mb(size):
str_size = str(size).lower()
unit = str_size[-1:]
if unit.isdigit():
unit = "m"
elif unit == "b":
unit = str_size[-2:-1]
str_size = str_size[:-2]
else:
str_size = str_size[:-1]
if str_size[-1:].isdigit():
size = int(str_size)
else:
raise salt.exceptions.ArgumentValueError("Size {} is invalid.".format(size))
if unit == "s":
target_size = size / 2048
elif unit == "m":
target_size = size
elif unit == "g":
target_size = size * 1024
elif unit == "t":
target_size = size * 1024 * 1024
elif unit == "p":
target_size = size * 1024 * 1024 * 1024
else:
raise salt.exceptions.ArgumentValueError("Unit {} is invalid.".format(unit))
return target_size
def pv_present(name, **kwargs):
    """
    Set a Physical Device to be used as an LVM Physical Volume
    name
        The device name to initialize.
    kwargs
        Any supported options to pvcreate. See
        :mod:`linux_lvm <salt.modules.linux_lvm>` for more details.
    """
    ret = {"changes": {}, "comment": "", "name": name, "result": True}
    # Already initialized as a PV: nothing to do.
    if __salt__["lvm.pvdisplay"](name, quiet=True):
        ret["comment"] = "Physical Volume {} already present".format(name)
        return ret
    # Test mode: report only.
    if __opts__["test"]:
        ret["comment"] = "Physical Volume {} is set to be created".format(name)
        ret["result"] = None
        return ret
    changes = __salt__["lvm.pvcreate"](name, **kwargs)
    # Re-query to confirm the PV actually exists now.
    if __salt__["lvm.pvdisplay"](name):
        ret["comment"] = "Created Physical Volume {}".format(name)
        ret["changes"]["created"] = changes
    else:
        ret["comment"] = "Failed to create Physical Volume {}".format(name)
        ret["result"] = False
    return ret
def pv_absent(name):
    """
    Ensure that a Physical Device is not being used by lvm
    name
        The device name to check.
    """
    ret = {"changes": {}, "comment": "", "name": name, "result": True}
    # Not a PV: nothing to do.
    if not __salt__["lvm.pvdisplay"](name, quiet=True):
        ret["comment"] = "Physical Volume {} does not exist".format(name)
        return ret
    # Test mode: report only.
    if __opts__["test"]:
        ret["comment"] = "Physical Volume {} is set to be removed".format(name)
        ret["result"] = None
        return ret
    changes = __salt__["lvm.pvremove"](name)
    # Re-query: success means the PV is gone.
    if __salt__["lvm.pvdisplay"](name, quiet=True):
        ret["comment"] = "Failed to remove Physical Volume {}".format(name)
        ret["result"] = False
    else:
        ret["comment"] = "Removed Physical Volume {}".format(name)
        ret["changes"]["removed"] = changes
    return ret
def vg_present(name, devices=None, **kwargs):
    """
    Create an LVM Volume Group
    name
        The Volume Group name to create
    devices
        A list of devices (or a comma-separated string of devices) that will
        be added to the Volume Group
    kwargs
        Any supported options to vgcreate. See
        :mod:`linux_lvm <salt.modules.linux_lvm>` for more details.
    """
    ret = {"changes": {}, "comment": "", "name": name, "result": True}
    if isinstance(devices, str):
        devices = devices.split(",")
    if __salt__["lvm.vgdisplay"](name, quiet=True):
        ret["comment"] = "Volume Group {} already present".format(name)
        # BUG FIX: previously iterated ``devices`` unconditionally, raising
        # TypeError when the VG exists and no devices were supplied.
        for device in devices or []:
            realdev = os.path.realpath(device)
            pvs = __salt__["lvm.pvdisplay"](realdev, real=True)
            if pvs and pvs.get(realdev, None):
                if pvs[realdev]["Volume Group Name"] == name:
                    # Device already belongs to this VG.
                    ret["comment"] = "{}\n{}".format(
                        ret["comment"], "{} is part of Volume Group".format(device)
                    )
                elif pvs[realdev]["Volume Group Name"] in ["", "#orphans_lvm2"]:
                    # Unassigned PV: extend this VG with it, then verify.
                    __salt__["lvm.vgextend"](name, device)
                    pvs = __salt__["lvm.pvdisplay"](realdev, real=True)
                    if pvs[realdev]["Volume Group Name"] == name:
                        ret["changes"].update({device: "added to {}".format(name)})
                    else:
                        ret["comment"] = "{}\n{}".format(
                            ret["comment"], "{} could not be added".format(device)
                        )
                        ret["result"] = False
                else:
                    # PV is owned by a different VG: refuse to steal it.
                    ret["comment"] = "{}\n{}".format(
                        ret["comment"],
                        "{} is part of {}".format(
                            device, pvs[realdev]["Volume Group Name"]
                        ),
                    )
                    ret["result"] = False
            else:
                ret["comment"] = "{}\n{}".format(
                    ret["comment"], "pv {} is not present".format(device)
                )
                ret["result"] = False
    elif __opts__["test"]:
        ret["comment"] = "Volume Group {} is set to be created".format(name)
        ret["result"] = None
        return ret
    else:
        changes = __salt__["lvm.vgcreate"](name, devices, **kwargs)
        if __salt__["lvm.vgdisplay"](name):
            ret["comment"] = "Created Volume Group {}".format(name)
            ret["changes"]["created"] = changes
        else:
            ret["comment"] = "Failed to create Volume Group {}".format(name)
            ret["result"] = False
    return ret
def vg_absent(name):
    """
    Remove an LVM volume group

    name
        The volume group to remove
    """
    ret = {"changes": {}, "comment": "", "name": name, "result": True}

    # Nothing to do when the volume group is already gone.
    if not __salt__["lvm.vgdisplay"](name, quiet=True):
        ret["comment"] = "Volume Group {} already absent".format(name)
        return ret

    if __opts__["test"]:
        ret["comment"] = "Volume Group {} is set to be removed".format(name)
        ret["result"] = None
        return ret

    removal_output = __salt__["lvm.vgremove"](name)
    # Verify removal by querying the VG again rather than trusting vgremove.
    if __salt__["lvm.vgdisplay"](name, quiet=True):
        ret["comment"] = "Failed to remove Volume Group {}".format(name)
        ret["result"] = False
    else:
        ret["comment"] = "Removed Volume Group {}".format(name)
        ret["changes"]["removed"] = removal_output
    return ret
def lv_present(
    name,
    vgname=None,
    size=None,
    extents=None,
    snapshot=None,
    pv="",
    thinvolume=False,
    thinpool=False,
    force=False,
    resizefs=False,
    **kwargs
):
    """
    Ensure that a Logical Volume is present, creating it if absent.

    name
        The name of the Logical Volume

    vgname
        The name of the Volume Group on which the Logical Volume resides

    size
        The size of the Logical Volume in megabytes, or use a suffix
        such as S, M, G, T, P for 512 byte sectors, megabytes, gigabytes
        or terabytes respectively. The suffix is case insensitive.

    extents
        The number of logical extents allocated to the Logical Volume
        It can be a percentage allowed by lvcreate's syntax, in this case
        it will set the Logical Volume initial size and won't be resized.

    snapshot
        The name of the snapshot

    pv
        The Physical Volume to use

    kwargs
        Any supported options to lvcreate. See
        :mod:`linux_lvm <salt.modules.linux_lvm>` for more details.

    .. versionadded:: 2016.11.0

    thinvolume
        Logical Volume is thinly provisioned

    thinpool
        Logical Volume is a thin pool

    .. versionadded:: 2018.3.0

    force
        Assume yes to all prompts

    .. versionadded:: 3002.0

    resizefs
        Use fsadm to resize the logical volume filesystem if needed
    """
    ret = {"changes": {}, "comment": "", "name": name, "result": True}
    # size and extents are mutually exclusive ways of specifying capacity.
    if extents and size:
        ret["comment"] = "Only one of extents or size can be specified."
        ret["result"] = False
        return ret
    if size:
        # Normalize the requested size to MB for comparison with the
        # existing LV size further below.
        size_mb = _convert_to_mb(size)
    _snapshot = None
    if snapshot:
        # For snapshots the state's ``name`` becomes the snapshot LV name
        # and the original name is passed to lvcreate as the origin.
        _snapshot = name
        name = snapshot
    if thinvolume:
        # For thin volumes ``vgname`` may be given as "vg/thinpool"; only
        # the VG part appears in the device path.
        lvpath = "/dev/{}/{}".format(vgname.split("/")[0], name)
    else:
        lvpath = "/dev/{}/{}".format(vgname, name)
    lv_info = __salt__["lvm.lvdisplay"](lvpath, quiet=True)
    lv_info = lv_info.get(lvpath)
    if not lv_info:
        # LV does not exist yet: create it (or report what would happen).
        if __opts__["test"]:
            ret["comment"] = "Logical Volume {} is set to be created".format(name)
            ret["result"] = None
            return ret
        else:
            changes = __salt__["lvm.lvcreate"](
                name,
                vgname,
                size=size,
                extents=extents,
                snapshot=_snapshot,
                pv=pv,
                thinvolume=thinvolume,
                thinpool=thinpool,
                force=force,
                **kwargs
            )
            # Verify creation by querying the device path again.
            if __salt__["lvm.lvdisplay"](lvpath):
                ret["comment"] = "Created Logical Volume {}".format(name)
                ret["changes"]["created"] = changes
            else:
                ret["comment"] = "Failed to create Logical Volume {}. Error: {}".format(
                    name, changes["Output from lvcreate"]
                )
                ret["result"] = False
    else:
        ret["comment"] = "Logical Volume {} already present".format(name)
        if size or extents:
            old_extents = int(lv_info["Current Logical Extents Associated"])
            # The trailing "s" marks the reported size as 512-byte sectors
            # for _convert_to_mb (see the ``size`` docs above).
            old_size_mb = _convert_to_mb(lv_info["Logical Volume Size"] + "s")
            if size:
                # Comparing by size: keep extents pinned to current value.
                extents = old_extents
            else:
                # ignore percentage "extents" if the logical volume already exists
                if "%" in str(extents):
                    ret[
                        "comment"
                    ] = "Logical Volume {} already present, {} won't be resized.".format(
                        name, extents
                    )
                    extents = old_extents
                # Comparing by extents: keep size pinned to current value.
                size_mb = old_size_mb
            # Shrinking is destructive, so it requires an explicit force=True.
            if force is False and (size_mb < old_size_mb or extents < old_extents):
                ret[
                    "comment"
                ] = "To reduce a Logical Volume option 'force' must be True."
                ret["result"] = False
                return ret
            if size_mb != old_size_mb or extents != old_extents:
                if __opts__["test"]:
                    ret["comment"] = "Logical Volume {} is set to be resized".format(
                        name
                    )
                    ret["result"] = None
                    return ret
                else:
                    if size:
                        changes = __salt__["lvm.lvresize"](
                            lvpath=lvpath, size=size, resizefs=resizefs, force=force
                        )
                    else:
                        changes = __salt__["lvm.lvresize"](
                            lvpath=lvpath,
                            extents=extents,
                            resizefs=resizefs,
                            force=force,
                        )
                    if not changes:
                        ret[
                            "comment"
                        ] = "Failed to resize Logical Volume. Unknown Error."
                        ret["result"] = False
                    # Re-read the LV to determine whether the resize took
                    # effect; success is judged by the size changing.
                    lv_info = __salt__["lvm.lvdisplay"](lvpath, quiet=True)[lvpath]
                    new_size_mb = _convert_to_mb(lv_info["Logical Volume Size"] + "s")
                    if new_size_mb != old_size_mb:
                        ret["comment"] = "Resized Logical Volume {}".format(name)
                        ret["changes"]["resized"] = changes
                    else:
                        # NOTE(review): if ``changes`` is falsy (handled just
                        # above) this subscript would raise - TODO confirm
                        # lvresize always returns a dict on failure.
                        ret[
                            "comment"
                        ] = "Failed to resize Logical Volume {}.\nError: {}".format(
                            name, changes["Output from lvresize"]
                        )
                        ret["result"] = False
    return ret
def lv_absent(name, vgname=None):
    """
    Remove a given existing Logical Volume from a named existing volume group

    name
        The Logical Volume to remove

    vgname
        The name of the Volume Group on which the Logical Volume resides
    """
    ret = {"changes": {}, "comment": "", "name": name, "result": True}

    device_path = "/dev/{}/{}".format(vgname, name)
    # Already gone? Then the state is satisfied as-is.
    if not __salt__["lvm.lvdisplay"](device_path, quiet=True):
        ret["comment"] = "Logical Volume {} already absent".format(name)
        return ret

    if __opts__["test"]:
        ret["comment"] = "Logical Volume {} is set to be removed".format(name)
        ret["result"] = None
        return ret

    removal_output = __salt__["lvm.lvremove"](name, vgname)
    # Confirm removal by querying the device path again.
    if __salt__["lvm.lvdisplay"](device_path, quiet=True):
        ret["comment"] = "Failed to remove Logical Volume {}".format(name)
        ret["result"] = False
    else:
        ret["comment"] = "Removed Logical Volume {}".format(name)
        ret["changes"]["removed"] = removal_output
    return ret
import logging
log = logging.getLogger(__name__)
def absent(name):
    """
    Ensure that the named index template is absent.

    name
        Name of the index to remove
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    try:
        template = __salt__["elasticsearch.index_template_get"](name=name)

        # Missing entirely, or the response does not contain our template.
        if not template or name not in template:
            ret["comment"] = "Index template {} is already absent".format(name)
            return ret

        if __opts__["test"]:
            ret["comment"] = "Index template {} will be removed".format(name)
            ret["changes"]["old"] = template[name]
            ret["result"] = None
            return ret

        deleted = __salt__["elasticsearch.index_template_delete"](name=name)
        ret["result"] = deleted
        if deleted:
            ret["comment"] = "Successfully removed index template {}".format(name)
            ret["changes"]["old"] = template[name]
        else:
            ret[
                "comment"
            ] = "Failed to remove index template {} for unknown reasons".format(name)
    except Exception as err:  # pylint: disable=broad-except
        ret["result"] = False
        ret["comment"] = str(err)
    return ret
def present(name, definition):
    """
    .. versionadded:: 2015.8.0
    .. versionchanged:: 2017.3.0
        Marked ``definition`` as required.

    Ensure that the named index template is present.

    name
        Name of the index to add

    definition
        Required dict for creation parameters as per https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html

    **Example:**

    .. code-block:: yaml

        mytestindex2_template:
          elasticsearch_index_template.present:
            - definition:
                template: logstash-*
                order: 1
                settings:
                  number_of_shards: 1
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    try:
        index_template_exists = __salt__["elasticsearch.index_template_exists"](
            name=name
        )
        if not index_template_exists:
            if __opts__["test"]:
                ret[
                    "comment"
                ] = "Index template {} does not exist and will be created".format(name)
                ret["changes"] = {"new": definition}
                ret["result"] = None
            else:
                output = __salt__["elasticsearch.index_template_create"](
                    name=name, body=definition
                )
                if output:
                    ret["comment"] = "Successfully created index template {}".format(
                        name
                    )
                    # Report the template as Elasticsearch actually stored it,
                    # not the raw definition that was submitted.
                    ret["changes"] = {
                        "new": __salt__["elasticsearch.index_template_get"](name=name)[
                            name
                        ]
                    }
                else:
                    ret["result"] = False
                    ret["comment"] = "Cannot create index template {}, {}".format(
                        name, output
                    )
        else:
            # An already-existing template is left untouched; the given
            # ``definition`` is not diffed against it, so drift is not
            # detected or corrected by this state.
            ret["comment"] = "Index template {} is already present".format(name)
    except Exception as err:  # pylint: disable=broad-except
        # Any client/transport error is reported as a state failure.
        ret["result"] = False
        ret["comment"] = str(err)
    return ret
import errno
import logging
import os
log = logging.getLogger(__name__)
def _get_missing_results(results, dest_dir):
    """
    Return a sorted list of the filenames specified in the ``results``
    argument which are not present in ``dest_dir``.

    results
        Iterable of expected package filenames.

    dest_dir
        Directory to check for built packages. If it does not exist or
        cannot be read, every expected filename is reported as missing.
    """
    try:
        present = set(os.listdir(dest_dir))
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            log.debug("pkgbuild.built: dest_dir '%s' does not exist", dest_dir)
        elif exc.errno == errno.EACCES:
            # Bug fix: log prefix typo corrected ("pkgbuilt" -> "pkgbuild")
            # so messages grep consistently with the rest of this module.
            log.error("pkgbuild.built: cannot access dest_dir '%s'", dest_dir)
        # Any listing failure means we cannot confirm anything is present.
        present = set()
    return sorted(set(results).difference(present))
def built(
    name,
    runas,
    dest_dir,
    spec,
    sources,
    tgt,
    template=None,
    deps=None,
    env=None,
    results=None,
    force=False,
    saltenv="base",
    log_dir="/var/log/salt/pkgbuild",
):
    """
    Ensure that the named package is built and exists in the named directory

    name
        The name to track the build, the name value is otherwise unused

    runas
        The user to run the build process as

    dest_dir
        The directory on the minion to place the built package(s)

    spec
        The location of the spec file (used for rpms)

    sources
        The list of package sources

    tgt
        The target platform to run the build on

    template
        Run the spec file through a templating engine

        .. versionchanged:: 2015.8.2
            This argument is now optional, allowing for no templating engine to
            be used if none is desired.

    deps
        Packages required to ensure that the named package is built
        can be hosted on either the salt master server or on an HTTP
        or FTP server. Both HTTPS and HTTP are supported as well as
        downloading directly from Amazon S3 compatible URLs with both
        pre-configured and automatic IAM credentials

    env
        A dictionary of environment variables to be set prior to execution.
        Example:

        .. code-block:: yaml

            - env:
                DEB_BUILD_OPTIONS: 'nocheck'

        .. warning::

            The above illustrates a common PyYAML pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other) PyYAML
            idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.

    results
        The names of the expected rpms that will be built

    force : False
        If ``True``, packages will be built even if they already exist in the
        ``dest_dir``. This is useful when building a package for continuous or
        nightly package builds.

        .. versionadded:: 2015.8.2

    saltenv
        The saltenv to use for files downloaded from the salt filesever

    log_dir : /var/log/salt/rpmbuild
        Root directory for log files created from the build. Logs will be
        organized by package name, version, OS release, and CPU architecture
        under this directory.

        .. versionadded:: 2015.8.2
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": True}
    if not results:
        ret["comment"] = "'results' argument is required"
        ret["result"] = False
        return ret
    # Allow results to be given as a comma-separated string.
    if isinstance(results, str):
        results = results.split(",")
    needed = _get_missing_results(results, dest_dir)
    # Everything already built and no force: nothing to do.
    if not force and not needed:
        ret["comment"] = "All needed packages exist"
        return ret
    if __opts__["test"]:
        ret["result"] = None
        if force:
            ret["comment"] = "Packages will be force-built"
        else:
            ret["comment"] = "The following packages need to be built: "
            ret["comment"] += ", ".join(needed)
        return ret
    # Need the check for None here, if env is not provided then it falls back
    # to None and it is assumed that the environment is not being overridden.
    if env is not None and not isinstance(env, dict):
        ret["comment"] = "Invalidly-formatted 'env' parameter. See documentation."
        ret["result"] = False
        return ret
    func = "pkgbuild.build"
    # On non-RedHat/Suse platforms, route RPM results through the rpmbuild
    # virtual module instead of the distro-native pkgbuild implementation.
    if __grains__.get("os_family", False) not in ("RedHat", "Suse"):
        for res in results:
            if res.endswith(".rpm"):
                func = "rpmbuild.build"
                break
    ret["changes"] = __salt__[func](
        runas, tgt, dest_dir, spec, sources, deps, env, template, saltenv, log_dir
    )
    # Re-scan dest_dir: success is judged by every expected result file
    # now being present, not by the build function's return value.
    needed = _get_missing_results(results, dest_dir)
    if needed:
        ret["comment"] = "The following packages were not built: "
        ret["comment"] += ", ".join(needed)
        ret["result"] = False
    else:
        ret["comment"] = "All needed packages were built"
    return ret
def repo(
    name,
    keyid=None,
    env=None,
    use_passphrase=False,
    gnupghome="/etc/salt/gpgkeys",
    runas="builder",
    timeout=15.0,
):
    """
    Make a package repository and optionally sign it and packages present

    The name is directory to turn into a repo. This state is best used
    with onchanges linked to your package building states.

    name
        The directory to find packages that will be in the repository

    keyid
        .. versionchanged:: 2016.3.0

        Optional Key ID to use in signing packages and repository.
        Utilizes Public and Private keys associated with keyid which have
        been loaded into the minion's Pillar data.

        For example, contents from a Pillar data file with named Public
        and Private keys as follows:

        .. code-block:: yaml

            gpg_pkg_priv_key: |
              -----BEGIN PGP PRIVATE KEY BLOCK-----
              Version: GnuPG v1

              lQO+BFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
              w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
              .
              .
              Ebe+8JCQTwqSXPRTzXmy/b5WXDeM79CkLWvuGpXFor76D+ECMRPv/rawukEcNptn
              R5OmgHqvydEnO4pWbn8JzQO9YX/Us0SMHBVzLC8eIi5ZIopzalvX
              =JvW8
              -----END PGP PRIVATE KEY BLOCK-----

            gpg_pkg_priv_keyname: gpg_pkg_key.pem

            gpg_pkg_pub_key: |
              -----BEGIN PGP PUBLIC KEY BLOCK-----
              Version: GnuPG v1

              mQENBFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
              w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
              .
              .
              bYP7t5iwJmQzRMyFInYRt77wkJBPCpJc9FPNebL9vlZcN4zv0KQta+4alcWivvoP
              4QIxE+/+trC6QRw2m2dHk6aAeq/J0Sc7ilZufwnNA71hf9SzRIwcFXMsLx4iLlki
              inNqW9c=
              =s1CX
              -----END PGP PUBLIC KEY BLOCK-----

            gpg_pkg_pub_keyname: gpg_pkg_key.pub

    env
        .. versionchanged:: 2016.3.0

        A dictionary of environment variables to be utilized in creating the
        repository. Example:

        .. code-block:: yaml

            - env:
                OPTIONS: 'ask-passphrase'

        .. warning::

            The above illustrates a common ``PyYAML`` pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other)
            ``PyYAML`` idiosyncrasies can be found :ref:`here
            <yaml-idiosyncrasies>`.

            Use of ``OPTIONS`` on some platforms, for example:
            ``ask-passphrase``, will require ``gpg-agent`` or similar to cache
            passphrases.

        .. note::

            This parameter is not used for making ``yum`` repositories.

    use_passphrase : False
        .. versionadded:: 2016.3.0

        Use a passphrase with the signing key presented in ``keyid``.
        Passphrase is received from Pillar data which could be passed on the
        command line with ``pillar`` parameter. For example:

        .. code-block:: bash

            pillar='{ "gpg_passphrase" : "my_passphrase" }'

    gnupghome : /etc/salt/gpgkeys
        .. versionadded:: 2016.3.0

        Location where GPG related files are stored, used with 'keyid'

    runas : builder
        .. versionadded:: 2016.3.0

        User to create the repository as, and optionally sign packages.

        .. note::

            Ensure the user has correct permissions to any files and
            directories which are to be utilized.

    timeout : 15.0
        .. versionadded:: 2016.3.4

        Timeout in seconds to wait for the prompt for inputting the passphrase.
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": True}
    if __opts__["test"] is True:
        ret["result"] = None
        ret["comment"] = "Package repo metadata at {} will be refreshed".format(name)
        return ret
    # Need the check for None here, if env is not provided then it falls back
    # to None and it is assumed that the environment is not being overridden.
    if env is not None and not isinstance(env, dict):
        ret["comment"] = "Invalidly-formatted 'env' parameter. See documentation."
        # Bug fix: previously this returned with result=True, reporting
        # success while doing nothing. Fail explicitly, consistent with the
        # identical validation in built() above.
        ret["result"] = False
        return ret
    func = "pkgbuild.make_repo"
    # On non-RedHat/Suse platforms, route to the rpmbuild virtual module
    # when the target directory contains RPMs.
    if __grains__.get("os_family", False) not in ("RedHat", "Suse"):
        for file in os.listdir(name):
            if file.endswith(".rpm"):
                func = "rpmbuild.make_repo"
                break
    res = __salt__[func](name, keyid, env, use_passphrase, gnupghome, runas, timeout)
    if res["retcode"] > 0:
        ret["result"] = False
    else:
        ret["changes"] = {"refresh": True}
    # Surface whatever output the repo tool produced as the state comment.
    if res["stdout"] and res["stderr"]:
        ret["comment"] = "{}\n{}".format(res["stdout"], res["stderr"])
    elif res["stdout"]:
        ret["comment"] = res["stdout"]
    elif res["stderr"]:
        ret["comment"] = res["stderr"]
    return ret
import logging
log = logging.getLogger(__name__)
def __virtual__():
    """
    Load this state only when the libcloud_storage execution module is
    available.
    """
    if "libcloud_storage.list_containers" not in __salt__:
        return (False, "libcloud_storage module could not be loaded")
    return True
def state_result(result, message, name, changes):
    """
    Build the standard state return dictionary.

    :param result: Outcome of the state run
    :param message: Human-readable comment
    :param name: State/resource name
    :param changes: Dict describing what changed
    """
    ret = {
        "name": name,
        "result": result,
        "comment": message,
        "changes": changes,
    }
    return ret
def container_present(name, profile):
    """
    Ensures a container is present.

    :param name: Container name
    :type name: ``str``

    :param profile: The profile key
    :type profile: ``str``
    """
    all_containers = __salt__["libcloud_storage.list_containers"](profile)
    # Look for an existing container with the requested name.
    matching = [c for c in all_containers if c["name"] == name]
    if matching:
        return state_result(True, "Container already exists", name, {})
    created = __salt__["libcloud_storage.create_container"](name, profile)
    return state_result(True, "Created new container", name, created)
def container_absent(name, profile):
    """
    Ensures a container is absent.

    :param name: Container name
    :type name: ``str``

    :param profile: The profile key
    :type profile: ``str``
    """
    all_containers = __salt__["libcloud_storage.list_containers"](profile)
    matching = [c for c in all_containers if c["name"] == name]
    # Nothing matching means the desired state already holds.
    if not matching:
        return state_result(True, "Container already absent", name, {})
    deleted = __salt__["libcloud_storage.delete_container"](name, profile)
    return state_result(deleted, "Deleted container", name, {})
def object_present(container, name, path, profile):
    """
    Ensures an object is present in a container, uploading the local file
    at ``path`` when it is missing.

    :param container: Container name
    :type container: ``str``

    :param name: Object name in cloud
    :type name: ``str``

    :param path: Local path to file
    :type path: ``str``

    :param profile: The profile key
    :type profile: ``str``
    """
    current = __salt__["libcloud_storage.get_container_object"](
        container, name, profile
    )
    if current is not None:
        return state_result(True, "Object already present", name, {})
    uploaded = __salt__["libcloud_storage.upload_object"](
        path, container, name, profile
    )
    return state_result(uploaded, "Uploaded object", name, {})
def object_absent(container, name, profile):
    """
    Ensures an object is absent from a container, deleting it when found.

    :param container: Container name
    :type container: ``str``

    :param name: Object name in cloud
    :type name: ``str``

    :param profile: The profile key
    :type profile: ``str``
    """
    current = __salt__["libcloud_storage.get_container_object"](
        container, name, profile
    )
    if current is None:
        return state_result(True, "Object already absent", name, {})
    deleted = __salt__["libcloud_storage.delete_object"](container, name, profile)
    return state_result(deleted, "Deleted object", name, {})
def file_present(container, name, path, profile, overwrite_existing=False):
    """
    Ensures an object is downloaded to the local filesystem.

    :param container: Container name
    :type container: ``str``

    :param name: Object name in cloud
    :type name: ``str``

    :param path: Local path to file
    :type path: ``str``

    :param profile: The profile key
    :type profile: ``str``

    :param overwrite_existing: Replace if already exists
    :type overwrite_existing: ``bool``
    """
    downloaded = __salt__["libcloud_storage.download_object"](
        path, container, name, profile, overwrite_existing
    )
    return state_result(downloaded, "Downloaded object", name, {})
import salt.utils.data
import salt.utils.path
import salt.utils.platform
from salt.exceptions import CommandExecutionError
# Define the module's virtual name
__virtualname__ = "shortcut"
def __virtual__():
    """
    Load this state only on Windows minions that have the shortcut
    execution module available.
    """
    if not salt.utils.platform.is_windows():
        return False, "Shortcut state only available on Windows systems."
    if __salt__.get("shortcut.create", None):
        return __virtualname__
    return False, "Shortcut state requires the shortcut module."
def present(
    name,
    arguments="",
    description="",
    hot_key="",
    icon_location="",
    icon_index=0,
    target="",
    window_style="Normal",
    working_dir="",
    backup=False,
    force=False,
    make_dirs=False,
    user=None,
):
    r"""
    Create a new shortcut. This can be a file shortcut (``.lnk``) or a url
    shortcut (``.url``).

    Args:

        name (str): The full path to the shortcut

        target (str): The full path to the target

        arguments (str, optional): Any arguments to be passed to the target

        description (str, optional): The description for the shortcut. This is
            shown in the ``Comment`` field of the dialog box. Default is an
            empty string

        hot_key (str, optional): A combination of hot Keys to trigger this
            shortcut. This is something like ``Ctrl+Alt+D``. This is shown in
            the ``Shortcut key`` field in the dialog box. Default is an empty
            string. Available options are:

            - Ctrl
            - Alt
            - Shift
            - Ext

        icon_index (int, optional): The index for the icon to use in files that
            contain multiple icons. Default is 0

        icon_location (str, optional): The full path to a file containing icons.
            This is shown in the ``Change Icon`` dialog box by clicking the
            ``Change Icon`` button. If no file is specified and a binary is
            passed as the target, Windows will attempt to get the icon from the
            binary file. Default is an empty string

        window_style (str, optional): The window style the program should start
            in. This is shown in the ``Run`` field of the dialog box. Default is
            ``Normal``. Valid options are:

            - Normal
            - Minimized
            - Maximized

        working_dir (str, optional): The full path to the working directory for
            the program to run in. This is shown in the ``Start in`` field of
            the dialog box.

        backup (bool, optional): If there is already a shortcut with the same
            name, set this value to ``True`` to backup the existing shortcut and
            continue creating the new shortcut. Default is ``False``

        force (bool, optional): If there is already a shortcut with the same
            name and you aren't backing up the shortcut, set this value to
            ``True`` to remove the existing shortcut and create a new with these
            settings. Default is ``False``

        make_dirs (bool, optional): If the parent directory structure does not
            exist for the new shortcut, create it. Default is ``False``

        user (str, optional): The user to be the owner of any directories
            created by setting ``make_dirs`` to ``True``. If no value is passed
            Salt will use the user account that it is running under. Default is
            an empty string.

    Returns:
        dict: A dictionary containing the changes, comments, and result of the
            state

    Example:

    .. code-block:: yaml

        KB123456:
          wusa.installed:
            - source: salt://kb123456.msu

        # Create a shortcut and set the ``Shortcut key`` (``hot_key``)
        new_shortcut:
          shortcut.present:
            - name: C:\path\to\shortcut.lnk
            - target: C:\Windows\notepad.exe
            - hot_key: Ctrl+Alt+N

        # Create a shortcut and change the icon to the 3rd one in the icon file
        new_shortcut:
          shortcut.present:
            - name: C:\path\to\shortcut.lnk
            - target: C:\Windows\notepad.exe
            - icon_location: C:\path\to\icon.ico
            - icon_index: 2

        # Create a shortcut and change the startup mode to full screen
        new_shortcut:
          shortcut.present:
            - name: C:\path\to\shortcut.lnk
            - target: C:\Windows\notepad.exe
            - window_style: Maximized

        # Create a shortcut and change the icon
        new_shortcut:
          shortcut.present:
            - name: C:\path\to\shortcut.lnk
            - target: C:\Windows\notepad.exe
            - icon_location: C:\path\to\icon.ico

        # Create a shortcut and force it to overwrite an existing shortcut
        new_shortcut:
          shortcut.present:
            - name: C:\path\to\shortcut.lnk
            - target: C:\Windows\notepad.exe
            - force: True

        # Create a shortcut and create any parent directories if they are missing
        new_shortcut:
          shortcut.present:
            - name: C:\path\to\shortcut.lnk
            - target: C:\Windows\notepad.exe
            - make_dirs: True
    """
    # NOTE(review): "comment" is initialized as a list but assigned plain
    # strings on most paths (list only in the error paths below) - Salt
    # tolerates both; confirm.
    ret = {"name": name, "changes": {}, "result": True, "comment": []}
    # Desired state of the shortcut. Paths are normalized with expand() so
    # they compare equal to the normalized values shortcut.get returns.
    proposed = {
        "arguments": arguments,
        "description": description,
        "hot_key": hot_key,
        "icon_location": salt.utils.path.expand(icon_location),
        "icon_index": icon_index,
        "path": salt.utils.path.expand(name),
        "target": salt.utils.path.expand(target),
        "window_style": window_style,
        "working_dir": salt.utils.path.expand(working_dir),
    }
    try:
        old = __salt__["shortcut.get"](name)
        changes = salt.utils.data.compare_dicts(old, proposed)
        if not changes:
            ret["comment"] = "Shortcut already present and configured"
            return ret
    except CommandExecutionError:
        # shortcut.get failed: treat the shortcut as missing. Empty
        # ``changes`` distinguishes "create" from "modify" below.
        changes = {}
    if __opts__["test"]:
        if changes:
            ret["comment"] = "Shortcut will be modified: {}".format(name)
            ret["changes"] = changes
        else:
            ret["comment"] = "Shortcut will be created: {}".format(name)
        ret["result"] = None
        return ret
    # shortcut.create handles both creation and modification (backup/force
    # govern what happens when the shortcut already exists).
    try:
        __salt__["shortcut.create"](
            arguments=arguments,
            description=description,
            hot_key=hot_key,
            icon_location=icon_location,
            icon_index=icon_index,
            path=name,
            target=target,
            window_style=window_style,
            working_dir=working_dir,
            backup=backup,
            force=force,
            make_dirs=make_dirs,
            user=user,
        )
    except CommandExecutionError as exc:
        ret["comment"] = ["Failed to create the shortcut: {}".format(name)]
        ret["comment"].append(exc.message)
        ret["result"] = False
        return ret
    # Read the shortcut back to verify the create/modify took effect.
    try:
        new = __salt__["shortcut.get"](name)
    except CommandExecutionError as exc:
        ret["comment"] = ["Failed to create the shortcut: {}".format(name)]
        ret["comment"].append(exc.message)
        ret["result"] = False
        return ret
    # Any remaining difference means the shortcut did not end up as proposed.
    verify_changes = salt.utils.data.compare_dicts(new, proposed)
    if verify_changes:
        ret["comment"] = "Failed to make the following changes:"
        ret["changes"]["failed"] = verify_changes
        ret["result"] = False
        return ret
    if changes:
        ret["comment"] = "Shortcut modified: {}".format(name)
        ret["changes"] = changes
    else:
        ret["comment"] = "Shortcut created: {}".format(name)
    return ret
import json
import logging
from salt.exceptions import SaltException
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only make these states available if Zabbix module and run_query function
    is available and all 3rd party modules imported.
    """
    if "zabbix.run_query" not in __salt__:
        return False, "Import zabbix or other needed modules failed."
    return True
def present(name, params, **kwargs):
    """
    Creates Zabbix Action object or if differs update it according defined parameters

    :param name: Zabbix Action name
    :param params: Definition of the Zabbix Action
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    If there is a need to get a value from current zabbix online (e.g. id of a hostgroup you want to put a discovered
    system into), put a dictionary with two keys "query_object" and "query_name" instead of the value.
    In this example we want to get object id of hostgroup named "Virtual machines" and "Databases".

    .. code-block:: yaml

        zabbix-action-present:
            zabbix_action.present:
                - name: VMs
                - params:
                    eventsource: 2
                    status: 0
                    filter:
                        evaltype: 2
                        conditions:
                            - conditiontype: 24
                              operator: 2
                              value: 'virtual'
                            - conditiontype: 24
                              operator: 2
                              value: 'kvm'
                    operations:
                        - operationtype: 2
                        - operationtype: 4
                          opgroup:
                              - groupid:
                                    query_object: hostgroup
                                    query_name: Virtual machines
                              - groupid:
                                    query_object: hostgroup
                                    query_name: Databases
    """
    # Maps object kinds to their Zabbix ID field names (e.g. action -> actionid).
    zabbix_id_mapper = __salt__["zabbix.get_zabbix_id_mapper"]()
    dry_run = __opts__["test"]
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    # Create input params substituting functions with their results
    # NOTE(review): this mutates the caller-supplied ``params`` dict in
    # place (name/operations/filter.conditions defaults) - confirm callers
    # do not reuse it.
    params["name"] = name
    params["operations"] = params["operations"] if "operations" in params else []
    if "filter" in params:
        params["filter"]["conditions"] = (
            params["filter"]["conditions"] if "conditions" in params["filter"] else []
        )
    # substitute_params resolves {"query_object": ..., "query_name": ...}
    # placeholders against the live Zabbix server (see docstring above).
    input_params = __salt__["zabbix.substitute_params"](params, **kwargs)
    log.info(
        "Zabbix Action: input params: %s",
        str(json.dumps(input_params, indent=4)),
    )
    # Fetch the existing action (with operations and filter) by exact name.
    search = {
        "output": "extend",
        "selectOperations": "extend",
        "selectFilter": "extend",
        "filter": {"name": name},
    }
    # GET Action object if exists
    action_get = __salt__["zabbix.run_query"]("action.get", search, **kwargs)
    log.info(
        "Zabbix Action: action.get result: %s",
        str(json.dumps(action_get, indent=4)),
    )
    # Only a single unambiguous match counts as "existing".
    existing_obj = (
        __salt__["zabbix.substitute_params"](action_get[0], **kwargs)
        if action_get and len(action_get) == 1
        else False
    )
    if existing_obj:
        diff_params = __salt__["zabbix.compare_params"](input_params, existing_obj)
        # NOTE(review): stray "{" in this log format string - cosmetic only.
        log.info(
            "Zabbix Action: input params: {%s",
            str(json.dumps(input_params, indent=4)),
        )
        log.info(
            "Zabbix Action: Object comparison result. Differences: %s",
            str(diff_params),
        )
        if diff_params:
            # The update call needs the actionid of the existing object.
            diff_params[zabbix_id_mapper["action"]] = existing_obj[
                zabbix_id_mapper["action"]
            ]
            # diff_params['name'] = 'VMs' - BUG - https://support.zabbix.com/browse/ZBX-12078
            log.info(
                "Zabbix Action: update params: %s",
                str(json.dumps(diff_params, indent=4)),
            )
            if dry_run:
                ret["result"] = True
                ret["comment"] = 'Zabbix Action "{}" would be fixed.'.format(name)
                ret["changes"] = {
                    name: {
                        "old": (
                            'Zabbix Action "{}" differs '
                            "in following parameters: {}".format(name, diff_params)
                        ),
                        "new": (
                            'Zabbix Action "{}" would correspond to definition.'.format(
                                name
                            )
                        ),
                    }
                }
            else:
                action_update = __salt__["zabbix.run_query"](
                    "action.update", diff_params, **kwargs
                )
                log.info(
                    "Zabbix Action: action.update result: %s",
                    str(action_update),
                )
                if action_update:
                    ret["result"] = True
                    ret["comment"] = 'Zabbix Action "{}" updated.'.format(name)
                    ret["changes"] = {
                        name: {
                            "old": (
                                'Zabbix Action "{}" differed '
                                "in following parameters: {}".format(name, diff_params)
                            ),
                            "new": 'Zabbix Action "{}" fixed.'.format(name),
                        }
                    }
        else:
            # No differences: the action already matches the definition.
            ret["result"] = True
            ret[
                "comment"
            ] = 'Zabbix Action "{}" already exists and corresponds to a definition.'.format(
                name
            )
    else:
        if dry_run:
            ret["result"] = True
            ret["comment"] = 'Zabbix Action "{}" would be created.'.format(name)
            ret["changes"] = {
                name: {
                    "old": 'Zabbix Action "{}" does not exist.'.format(name),
                    "new": (
                        'Zabbix Action "{}" would be created according definition.'.format(
                            name
                        )
                    ),
                }
            }
        else:
            # ACTION.CREATE
            action_create = __salt__["zabbix.run_query"](
                "action.create", input_params, **kwargs
            )
            log.info("Zabbix Action: action.create result: %s", str(action_create))
            if action_create:
                ret["result"] = True
                ret["comment"] = 'Zabbix Action "{}" created.'.format(name)
                ret["changes"] = {
                    name: {
                        "old": 'Zabbix Action "{}" did not exist.'.format(name),
                        "new": (
                            'Zabbix Action "{}" created according definition.'.format(
                                name
                            )
                        ),
                    }
                }
    return ret
def absent(name, **kwargs):
    """
    Makes the Zabbix Action to be absent (either does not exist or delete it).

    :param name: Zabbix Action name
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    .. code-block:: yaml

        zabbix-action-absent:
            zabbix_action.absent:
                - name: Action name
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    dry_run = __opts__["test"]

    # Resolve the action name to its object id; a lookup failure means the
    # action does not exist.
    try:
        object_id = __salt__["zabbix.get_object_id_by_params"](
            "action", {"filter": {"name": name}}, **kwargs
        )
    except SaltException:
        object_id = False

    if not object_id:
        ret["result"] = True
        ret["comment"] = 'Zabbix Action "{}" does not exist.'.format(name)
        return ret

    if dry_run:
        ret["result"] = True
        ret["comment"] = 'Zabbix Action "{}" would be deleted.'.format(name)
        ret["changes"] = {
            name: {
                "old": 'Zabbix Action "{}" exists.'.format(name),
                "new": 'Zabbix Action "{}" would be deleted.'.format(name),
            }
        }
        return ret

    deletion = __salt__["zabbix.run_query"]("action.delete", [object_id], **kwargs)
    if deletion:
        ret["result"] = True
        ret["comment"] = 'Zabbix Action "{}" deleted.'.format(name)
        ret["changes"] = {
            name: {
                "old": 'Zabbix Action "{}" existed.'.format(name),
                "new": 'Zabbix Action "{}" deleted.'.format(name),
            }
        }
    return ret
import logging
log = logging.getLogger(__name__)
def __virtual__():
    """
    Load this state module only when the csf execution module is usable.
    """
    if "csf.exists" not in __salt__:
        return (False, "csf module could not be loaded")
    return "csf"
def rule_present(
    name,
    method,
    port=None,
    proto="tcp",
    direction="in",
    port_origin="d",
    ip_origin="s",
    ttl=None,
    comment="",
    reload=False,
):
    """
    Ensure a csf-managed iptables rule exists.

    name
        The ip address or CIDR for the rule.

    method
        The type of rule. Either 'allow' or 'deny'.

    port
        Optional port to be open or closed for the iptables rule.

    proto
        The protocol. Either 'tcp', or 'udp'. Only applicable if port is
        specified.

    direction
        The direction of traffic to apply the rule to. Either 'in', or
        'out'. Only applicable if port is specified.

    port_origin
        Specifies either the source or destination port is relevant for
        this rule. Only applicable if port is specified. Either 's', or 'd'.

    ip_origin
        Specifies whether the ip in this rule refers to the source or
        destination ip. Either 's', or 'd'. Only applicable if port is
        specified.

    ttl
        How long the rule should exist. If supplied, `csf.tempallow()` or
        `csf.tempdeny()` are used.

    comment
        An optional comment to appear after the rule as a #comment.

    reload
        Reload the csf service after applying this rule. Default false.
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Rule already exists.",
    }
    ip = name
    # Guard clause: nothing to do when an equivalent rule is configured.
    if __salt__["csf.exists"](
        method=method,
        ip=ip,
        port=port,
        proto=proto,
        direction=direction,
        port_origin=port_origin,
        ip_origin=ip_origin,
        ttl=ttl,
        comment=comment,
    ):
        return ret
    # Temporary rules are applied via csf.tempallow / csf.tempdeny.
    if ttl:
        method = "temp{}".format(method)
    created = __salt__["csf.{}".format(method)](
        ip,
        port=port,
        proto=proto,
        direction=direction,
        port_origin=port_origin,
        ip_origin=ip_origin,
        ttl=ttl,
        comment=comment,
    )
    if created:
        new_comment = "Rule has been added."
        if reload:
            if __salt__["csf.reload"]():
                new_comment += " Csf reloaded."
            else:
                new_comment += " Unable to reload csf."
                ret["result"] = False
        ret["comment"] = new_comment
        ret["changes"]["Rule"] = "Created"
    return ret
def rule_absent(
    name,
    method,
    port=None,
    proto="tcp",
    direction="in",
    port_origin="d",
    ip_origin="s",
    ttl=None,
    reload=False,
):
    """
    Ensure an iptables rule is not present.

    name
        The ip address or CIDR for the rule.

    method
        The type of rule. Either 'allow' or 'deny'.

    port
        Optional port to be open or closed for the iptables rule.

    proto
        The protocol. Either 'tcp', 'udp'. Only applicable if port is
        specified.

    direction
        The direction of traffic to apply the rule to. Either 'in', or
        'out'. Only applicable if port is specified.

    port_origin
        Specifies either the source or destination port is relevant for
        this rule. Only applicable if port is specified. Either 's', or 'd'.

    ip_origin
        Specifies whether the ip in this rule refers to the source or
        destination ip. Either 's', or 'd'. Only applicable if port is
        specified.

    ttl
        How long the rule should exist. If supplied, `csf.tempallow()` or
        `csf.tempdeny()` are used.

    reload
        Reload the csf service after applying this rule. Default false.
    """
    ip = name
    ret = {"name": name, "changes": {}, "result": True, "comment": "Rule not present."}
    exists = __salt__["csf.exists"](
        method,
        ip,
        port=port,
        proto=proto,
        direction=direction,
        port_origin=port_origin,
        ip_origin=ip_origin,
        ttl=ttl,
    )
    if not exists:
        return ret
    rule = __salt__["csf.remove_rule"](
        method=method,
        ip=ip,
        port=port,
        proto=proto,
        direction=direction,
        port_origin=port_origin,
        ip_origin=ip_origin,
        comment="",
        ttl=ttl,
    )
    if rule:
        comment = "Rule has been removed."
        if reload:
            if __salt__["csf.reload"]():
                comment += " Csf reloaded."
            else:
                # Bug fix: the failure message used to be glued onto the
                # previous sentence without a space; also mark the state as
                # failed, matching rule_present's behavior on reload failure.
                comment += " Csf unable to be reloaded."
                ret["result"] = False
        ret["comment"] = comment
        ret["changes"]["Rule"] = "Removed"
    return ret
def ports_open(name, ports, proto="tcp", direction="in"):
    """
    Ensure ports are open for a protocol, in a direction.
    e.g. - proto='tcp', direction='in' would set the values
    for TCP_IN in the csf.conf file.

    name
        State id; informational only, not used in any csf lookup.

    ports
        A list of ports that should be open.

    proto
        The protocol. May be one of 'tcp', 'udp',
        'tcp6', or 'udp6'.

    direction
        Choose 'in', 'out', or both to indicate the port
        should be opened for inbound traffic, outbound
        traffic, or both.
    """
    # Ports may arrive as ints from SLS data; csf stores them as strings.
    ports = list(map(str, ports))
    diff = False
    ret = {
        "name": ",".join(ports),
        "changes": {},
        "result": True,
        "comment": "Ports open.",
    }
    current_ports = __salt__["csf.get_ports"](proto=proto, direction=direction)
    direction = direction.upper()
    # build_directions expands the direction (e.g. 'both') into a list.
    directions = __salt__["csf.build_directions"](direction)
    # NOTE(review): the loop variable shadows ``direction``; after the loop,
    # csf.allow_ports below receives the *last* entry of ``directions``
    # (e.g. 'OUT' when direction='both'). Confirm csf.allow_ports handles
    # that as intended before changing this.
    for direction in directions:
        log.trace("current_ports[direction]: %s", current_ports[direction])
        log.trace("ports: %s", ports)
        if current_ports[direction] != ports:
            diff = True
    if diff:
        result = __salt__["csf.allow_ports"](ports, proto=proto, direction=direction)
        ret["changes"]["Ports"] = "Changed"
        ret["comment"] = result
    return ret
def nics_skip(name, nics, ipv6):
    """
    Alias for :mod:`csf.nics_skipped <salt.states.csf.nics_skipped>`
    """
    # Delegate directly; this name is kept for backwards compatibility.
    return nics_skipped(name, nics=nics, ipv6=ipv6)
def nics_skipped(name, nics, ipv6=False):
    """
    Ensure the given NICs are listed in csf's skipped-interfaces setting.

    name
        Meaningless arg, but required for state.

    nics
        A list of nics to skip.

    ipv6
        Boolean. Set to true if you want to skip
        the ipv6 interface. Default false (ipv4).
    """
    ret = {
        "name": ",".join(nics),
        "changes": {},
        "result": True,
        "comment": "NICs skipped.",
    }
    skipped_now = __salt__["csf.get_skipped_nics"](ipv6=ipv6)
    # Only push a change when the desired list differs from the current one.
    if nics != skipped_now:
        __salt__["csf.skip_nics"](nics, ipv6=ipv6)
        ret["changes"]["Skipped NICs"] = "Changed"
    return ret
def testing_on(name, reload=False):
    """
    Ensure testing mode is enabled in csf.

    name
        State id; informational only.

    reload
        Reload CSF after changing the testing status.
        Default false.
    """
    ret = {
        "name": "testing mode",
        "changes": {},
        "result": True,
        "comment": "Testing mode already ON.",
    }
    testing = __salt__["csf.get_testing_status"]()
    if int(testing) == 1:
        # Already enabled; nothing to change.
        return ret
    enable = __salt__["csf.enable_testing_mode"]()
    if enable:
        comment = "Csf testing mode enabled"
        if reload:
            if __salt__["csf.reload"]():
                comment += " and csf reloaded."
        ret["changes"]["Testing Mode"] = "on"
        # Bug fix: this used to assign the unused placeholder ``result``
        # (an empty dict) instead of the human-readable comment.
        ret["comment"] = comment
    return ret
def testing_off(name, reload=False):
    """
    Ensure testing mode is disabled in csf.

    name
        State id; informational only.

    reload
        Reload CSF after changing the testing status.
        Default false.
    """
    # Fixes: docstring previously said "enabled"; unused local ``result = {}``
    # removed.
    ret = {
        "name": "testing mode",
        "changes": {},
        "result": True,
        "comment": "Testing mode already OFF.",
    }
    testing = __salt__["csf.get_testing_status"]()
    if int(testing) == 0:
        # Already disabled; nothing to change.
        return ret
    disable = __salt__["csf.disable_testing_mode"]()
    if disable:
        comment = "Csf testing mode disabled"
        if reload:
            if __salt__["csf.reload"]():
                comment += " and csf reloaded."
        ret["changes"]["Testing Mode"] = "off"
        ret["comment"] = comment
    return ret
def option_present(name, value, reload=False):
    """
    Ensure the state of a particular option/setting in csf.

    name
        The option name in csf.conf

    value
        The value it should be set to.

    reload
        Boolean. If set to true, csf will be reloaded after.
    """
    ret = {
        # Bug fix: "name" used to be hard-coded to "testing mode"
        # (copy-paste from testing_on/testing_off).
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Option already present.",
    }
    option = name
    current_option = __salt__["csf.get_option"](option)
    if current_option:
        # split_option yields [option, value]; values are quoted in csf.conf.
        parts = __salt__["csf.split_option"](current_option)
        option_value = parts[1]
        if '"{}"'.format(value) == option_value:
            return ret
        __salt__["csf.set_option"](option, value)
        ret["comment"] = "Option modified."
        ret["changes"]["Option"] = "Changed"
    else:
        # Option not present at all; append it to the config file.
        __salt__["file.append"](
            "/etc/csf/csf.conf", args='{} = "{}"'.format(option, value)
        )
        ret["comment"] = "Option not present. Appended to csf.conf"
        ret["changes"]["Option"] = "Changed."
    if reload:
        if __salt__["csf.reload"]():
            ret["comment"] += ". Csf reloaded."
        else:
            ret["comment"] += ". Csf failed to reload."
            ret["result"] = False
    return ret
import logging
import operator
import sys
import time
# Initialize logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "loop"
def __virtual__():
return True
def until(name, m_args=None, m_kwargs=None, condition=None, period=1, timeout=60):
    """
    Loop over an execution module until a condition is met.

    :param str name: The name of the execution module
    :param list m_args: The execution module's positional arguments
    :param dict m_kwargs: The execution module's keyword arguments
    :param str condition: The condition which must be met for the loop to break.
        This should contain ``m_ret`` which is the return from the execution module.
    :param period: The number of seconds to wait between executions
    :type period: int or float
    :param timeout: The timeout in seconds
    :type timeout: int or float
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}
    # Normalize mutable defaults once up front. (The duplicate re-checks that
    # used to live inside the else branch were dead code and were removed.)
    if m_args is None:
        m_args = ()
    if m_kwargs is None:
        m_kwargs = {}
    if name not in __salt__:
        ret["comment"] = "Cannot find module {}".format(name)
    elif condition is None:
        ret["comment"] = "An exit condition must be specified"
    elif not isinstance(period, (int, float)):
        ret["comment"] = "Period must be specified as a float in seconds"
    elif not isinstance(timeout, (int, float)):
        ret["comment"] = "Timeout must be specified as a float in seconds"
    elif __opts__["test"]:
        ret["comment"] = "The execution module {} will be run".format(name)
        ret["result"] = None
    else:
        timeout = time.time() + timeout
        while time.time() < timeout:
            m_ret = __salt__[name](*m_args, **m_kwargs)
            # ``condition`` is evaluated with ``m_ret`` in scope; this is
            # deliberate code execution, so only trusted SLS input must
            # reach it.
            if eval(condition):  # pylint: disable=W0123
                ret["result"] = True
                ret["comment"] = "Condition {} was met".format(condition)
                break
            time.sleep(period)
        else:
            # while-else: the loop ran out of time without breaking.
            ret["comment"] = "Timed out while waiting for condition {}".format(
                condition
            )
    return ret
def until_no_eval(
    name,
    expected,
    compare_operator="eq",
    timeout=60,
    period=1,
    init_wait=0,
    args=None,
    kwargs=None,
):
    """
    Generic waiter state that waits for a specific salt function to produce an
    expected result.
    The state fails if the function does not exist or raises an exception,
    or does not produce the expected result within the allotted retries.

    :param str name: Name of the module.function to call
    :param expected: Expected return value. This can be almost anything.
    :param str compare_operator: Operator to use to compare the result of the
        module.function call with the expected value. This can be anything present
        in __salt__ or __utils__. Will be called with 2 args: result, expected.
    :param timeout: Abort after this amount of seconds (excluding init_wait).
    :type timeout: int or float
    :param period: Time (in seconds) to wait between attempts.
    :type period: int or float
    :param init_wait: Time (in seconds) to wait before trying anything.
    :type init_wait: int or float
    :param list args: args to pass to the salt module.function.
    :param dict kwargs: kwargs to pass to the salt module.function.

    .. versionadded:: 3000
    """
    ret = {"name": name, "comment": "", "changes": {}, "result": False}
    # Validate inputs and resolve the comparison callable. Comparators from
    # __salt__/__utils__ take precedence over the stdlib operator module.
    if name not in __salt__:
        ret["comment"] = 'Module.function "{}" is unavailable.'.format(name)
    elif not isinstance(period, (int, float)):
        ret["comment"] = "Period must be specified as a float in seconds"
    elif not isinstance(timeout, (int, float)):
        ret["comment"] = "Timeout must be specified as a float in seconds"
    elif compare_operator in __salt__:
        comparator = __salt__[compare_operator]
    elif compare_operator in __utils__:
        comparator = __utils__[compare_operator]
    elif not hasattr(operator, compare_operator):
        ret["comment"] = 'Invalid operator "{}" supplied.'.format(compare_operator)
    else:
        comparator = getattr(operator, compare_operator)
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = 'Would have waited for "{}" to produce "{}".'.format(
            name, expected
        )
    # A non-empty comment at this point means validation failed or we are in
    # test mode; either way there is nothing more to do.
    if ret["comment"]:
        return ret
    if init_wait:
        time.sleep(init_wait)
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    res_archive = []
    current_attempt = 0
    timeout = time.time() + timeout
    while time.time() < timeout:
        current_attempt += 1
        try:
            res = __salt__[name](*args, **kwargs)
        except Exception:  # pylint: disable=broad-except
            (exc_type, exc_value, _) = sys.exc_info()
            ret["comment"] = "Exception occurred while executing {}: {}:{}".format(
                name, exc_type, exc_value
            )
            break
        res_archive.append(res)
        cmp_res = comparator(res, expected)
        log.debug(
            "%s:until_no_eval:\n"
            "\t\tAttempt %s, result: %s, expected: %s, compare result: %s",
            __name__,
            current_attempt,
            res,
            expected,
            cmp_res,
        )
        if cmp_res:
            ret["result"] = True
            ret["comment"] = "Call provided the expected results in {} attempts".format(
                current_attempt
            )
            break
        time.sleep(period)
    else:
        # while-else: the loop exhausted the timeout without break-ing.
        ret[
            "comment"
        ] = "Call did not produce the expected result after {} attempts".format(
            current_attempt
        )
        log.debug(
            "%s:until_no_eval:\n\t\tResults of all attempts: %s",
            __name__,
            res_archive,
        )
    return ret
import os
import salt.utils.platform
from salt.exceptions import CommandNotFoundError
# Define the state's virtual name
__virtualname__ = "ssh_known_hosts"
def __virtual__():
    """
    Load everywhere except Windows; relies on the ssh execution module.
    """
    if not salt.utils.platform.is_windows():
        return __virtualname__
    return False, "ssh_known_hosts: Does not support Windows"
def present(
    name,
    user=None,
    fingerprint=None,
    key=None,
    port=None,
    enc=None,
    config=None,
    hash_known_hosts=True,
    timeout=5,
    fingerprint_hash_type=None,
):
    """
    Verifies that the specified host is known by the specified user

    On many systems, specifically those running with openssh 4 or older, the
    ``enc`` option must be set, only openssh 5 and above can detect the key
    type.

    name
        The name of the remote host (e.g. "github.com")
        Note that only a single hostname is supported, if foo.example.com and
        bar.example.com have the same host you will need two separate Salt
        States to represent them.

    user
        The user who owns the ssh authorized keys file to modify

    fingerprint
        The fingerprint of the key which must be present in the known_hosts
        file (optional if key specified)

    key
        The public key which must be present in the known_hosts file
        (optional if fingerprint specified)

    port
        optional parameter, port which will be used to when requesting the
        public key from the remote host, defaults to port 22.

    enc
        Defines what type of key is being used, can be ed25519, ecdsa,
        ssh-rsa, ssh-dss or any other type as of openssh server version 8.7.

    config
        The location of the authorized keys file relative to the user's home
        directory, defaults to ".ssh/known_hosts". If no user is specified,
        defaults to "/etc/ssh/ssh_known_hosts". If present, must be an
        absolute path when a user is not specified.

    hash_known_hosts : True
        Hash all hostnames and addresses in the known hosts file.

    timeout : int
        Set the timeout for connection attempts. If ``timeout`` seconds have
        elapsed since a connection was initiated to a host or since the last
        time anything was read from that host, then the connection is closed
        and the host in question considered unavailable. Default is 5 seconds.

        .. versionadded:: 2016.3.0

    fingerprint_hash_type
        The public key fingerprint hash type that the public key fingerprint
        was originally hashed with. This defaults to ``sha256`` if not specified.

        .. versionadded:: 2016.11.4
        .. versionchanged:: 2017.7.0

            default changed from ``md5`` to ``sha256``
    """
    ret = {
        "name": name,
        "changes": {},
        "result": None if __opts__["test"] else True,
        "comment": "",
    }
    # Pick the per-user file (relative to $HOME) or the system-wide file.
    if not user:
        config = config or "/etc/ssh/ssh_known_hosts"
    else:
        config = config or ".ssh/known_hosts"
    # Without a user there is no home dir to resolve against, so the path
    # must be absolute.
    if not user and not os.path.isabs(config):
        comment = 'If not specifying a "user", specify an absolute "config".'
        ret["result"] = False
        return dict(ret, comment=comment)
    # Dry-run: validate arguments and report what would happen, without
    # touching the known_hosts file.
    if __opts__["test"]:
        if key and fingerprint:
            comment = 'Specify either "key" or "fingerprint", not both.'
            ret["result"] = False
            return dict(ret, comment=comment)
        elif key and not enc:
            comment = 'Required argument "enc" if using "key" argument.'
            ret["result"] = False
            return dict(ret, comment=comment)
        try:
            # Returns 'exists', 'add', or 'update'.
            result = __salt__["ssh.check_known_host"](
                user,
                name,
                key=key,
                fingerprint=fingerprint,
                config=config,
                port=port,
                fingerprint_hash_type=fingerprint_hash_type,
            )
        except CommandNotFoundError as err:
            ret["result"] = False
            ret["comment"] = "ssh.check_known_host error: {}".format(err)
            return ret
        if result == "exists":
            comment = "Host {} is already in {}".format(name, config)
            ret["result"] = True
            return dict(ret, comment=comment)
        elif result == "add":
            comment = "Key for {} is set to be added to {}".format(name, config)
            return dict(ret, comment=comment)
        else:  # 'update'
            comment = "Key for {} is set to be updated in {}".format(name, config)
            return dict(ret, comment=comment)
    # Real run: set_known_host fetches/validates the key and writes the file;
    # its return carries a 'status' of 'exists', 'error', or 'updated'.
    result = __salt__["ssh.set_known_host"](
        user=user,
        hostname=name,
        fingerprint=fingerprint,
        key=key,
        port=port,
        enc=enc,
        config=config,
        hash_known_hosts=hash_known_hosts,
        timeout=timeout,
        fingerprint_hash_type=fingerprint_hash_type,
    )
    if result["status"] == "exists":
        return dict(ret, comment="{} already exists in {}".format(name, config))
    elif result["status"] == "error":
        return dict(ret, result=False, comment=result["error"])
    else:  # 'updated'
        if key:
            new_key = result["new"][0]["key"]
            return dict(
                ret,
                changes={"old": result["old"], "new": result["new"]},
                comment="{}'s key saved to {} (key: {})".format(name, config, new_key),
            )
        else:
            fingerprint = result["new"][0]["fingerprint"]
            return dict(
                ret,
                changes={"old": result["old"], "new": result["new"]},
                comment="{}'s key saved to {} (fingerprint: {})".format(
                    name, config, fingerprint
                ),
            )
def absent(name, user=None, config=None):
    """
    Verifies that the specified host is not known by the given user

    name
        The host name
        Note that only single host names are supported. If foo.example.com
        and bar.example.com are the same machine and you need to exclude both,
        you will need one Salt state for each.

    user
        The user who owns the ssh authorized keys file to modify

    config
        The location of the authorized keys file relative to the user's home
        directory, defaults to ".ssh/known_hosts". If no user is specified,
        defaults to "/etc/ssh/ssh_known_hosts". If present, must be an
        absolute path when a user is not specified.
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    # Pick the per-user file (relative to $HOME) or the system-wide file.
    if not user:
        config = config or "/etc/ssh/ssh_known_hosts"
    else:
        config = config or ".ssh/known_hosts"
    # Without a user there is no home dir to resolve against, so the path
    # must be absolute.
    if not user and not os.path.isabs(config):
        comment = 'If not specifying a "user", specify an absolute "config".'
        ret["result"] = False
        return dict(ret, comment=comment)
    known_host = __salt__["ssh.get_known_host_entries"](
        user=user, hostname=name, config=config
    )
    if not known_host:
        return dict(ret, comment="Host is already absent")
    if __opts__["test"]:
        comment = "Key for {} is set to be removed from {}".format(name, config)
        ret["result"] = None
        return dict(ret, comment=comment)
    rm_result = __salt__["ssh.rm_known_host"](user=user, hostname=name, config=config)
    if rm_result["status"] == "error":
        return dict(ret, result=False, comment=rm_result["error"])
    return dict(
        ret,
        changes={"old": known_host, "new": None},
        result=True,
        comment=rm_result["comment"],
    )
import logging
import salt.utils.data
# Enable proper logging
log = logging.getLogger(__name__) # pylint: disable=invalid-name
# Define the module's virtual name
__virtualname__ = "docker_volume"
__virtual_aliases__ = ("moby_volume",)
def __virtual__():
    """
    Only load if the docker execution module is available
    """
    if "docker.version" not in __salt__:
        return (False, __salt__.missing_fun_string("docker.version"))
    return __virtualname__
def _find_volume(name):
    """
    Return the Docker volume dict named ``name``, or None if it is missing.
    """
    existing = __salt__["docker.volumes"]()["Volumes"]
    if not existing:
        return None
    # First (and only) entry whose Name matches, else None.
    return next((vol for vol in existing if vol["Name"] == name), None)
def present(name, driver=None, driver_opts=None, force=False):
    """
    Ensure that a volume is present.

    .. versionadded:: 2015.8.4
    .. versionchanged:: 2015.8.6
        This state no longer deletes and re-creates a volume if the existing
        volume's driver does not match the ``driver`` parameter (unless the
        ``force`` parameter is set to ``True``).
    .. versionchanged:: 2017.7.0
        This state was renamed from **docker.volume_present** to **docker_volume.present**

    name
        Name of the volume

    driver
        Type of driver for that volume. If ``None`` and the volume
        does not yet exist, the volume will be created using Docker's
        default driver. If ``None`` and the volume does exist, this
        function does nothing, even if the existing volume's driver is
        not the Docker default driver. (To ensure that an existing
        volume's driver matches the Docker default, you must
        explicitly name Docker's default driver here.)

    driver_opts
        Options for the volume driver

    force : False
        If the volume already exists but the existing volume's driver
        does not match the driver specified by the ``driver``
        parameter, this parameter controls whether the function errors
        out (if ``False``) or deletes and re-creates the volume (if
        ``True``).

        .. versionadded:: 2015.8.6

    Usage Examples:

    .. code-block:: yaml

        volume_foo:
          docker_volume.present

    .. code-block:: yaml

        volume_bar:
          docker_volume.present
            - name: bar
            - driver: local
            - driver_opts:
                foo: bar

    .. code-block:: yaml

        volume_bar:
          docker_volume.present
            - name: bar
            - driver: local
            - driver_opts:
                - foo: bar
                - option: value
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}
    # driver_opts may come in as a list of single-key dicts from SLS data.
    if salt.utils.data.is_dictlist(driver_opts):
        driver_opts = salt.utils.data.repack_dictlist(driver_opts)
    volume = _find_volume(name)
    if not volume:
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "The volume '{}' will be created".format(name)
            return ret
        try:
            ret["changes"]["created"] = __salt__["docker.create_volume"](
                name, driver=driver, driver_opts=driver_opts
            )
        except Exception as exc:  # pylint: disable=broad-except
            ret["comment"] = "Failed to create volume '{}': {}".format(name, exc)
            return ret
        else:
            ret["result"] = True
            return ret
    # volume exists, check if driver is the same.
    if driver is not None and volume["Driver"] != driver:
        if not force:
            ret["comment"] = (
                "Driver for existing volume '{}' ('{}')"
                " does not match specified driver ('{}')"
                " and force is False".format(name, volume["Driver"], driver)
            )
            ret["result"] = None if __opts__["test"] else False
            return ret
        if __opts__["test"]:
            ret["result"] = None
            # Bug fix: this message used to interpolate the whole volume dict
            # where the driver name belongs (.format(name, volume)).
            ret["comment"] = (
                "The volume '{}' will be replaced with a"
                " new one using the driver '{}'".format(name, driver)
            )
            return ret
        try:
            ret["changes"]["removed"] = __salt__["docker.remove_volume"](name)
        except Exception as exc:  # pylint: disable=broad-except
            ret["comment"] = "Failed to remove volume '{}': {}".format(name, exc)
            return ret
        else:
            try:
                ret["changes"]["created"] = __salt__["docker.create_volume"](
                    name, driver=driver, driver_opts=driver_opts
                )
            except Exception as exc:  # pylint: disable=broad-except
                ret["comment"] = "Failed to create volume '{}': {}".format(name, exc)
                return ret
            else:
                ret["result"] = True
                return ret
    ret["result"] = True
    ret["comment"] = "Volume '{}' already exists.".format(name)
    return ret
def absent(name, driver=None):
    """
    Ensure that a volume is absent.

    .. versionadded:: 2015.8.4
    .. versionchanged:: 2017.7.0
        This state was renamed from **docker.volume_absent** to **docker_volume.absent**

    name
        Name of the volume

    driver
        Unused; accepted for call compatibility.

    Usage Examples:

    .. code-block:: yaml

        volume_foo:
          docker_volume.absent
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}
    volume = _find_volume(name)
    if not volume:
        ret["result"] = True
        ret["comment"] = "Volume '{}' already absent".format(name)
        return ret
    try:
        ret["changes"]["removed"] = __salt__["docker.remove_volume"](name)
        ret["result"] = True
    except Exception as exc:  # pylint: disable=broad-except
        ret["comment"] = "Failed to remove volume '{}': {}".format(name, exc)
    return ret
def __virtual__():
    """
    Only make these states available if Open vSwitch module is available.
    """
    if "openvswitch.port_add" not in __salt__:
        return (False, "openvswitch module could not be loaded")
    return True
def present(
name, bridge, tunnel_type=None, id=None, remote=None, dst_port=None, internal=False
):
"""
Ensures that the named port exists on bridge, eventually creates it.
Args:
name: The name of the port.
bridge: The name of the bridge.
tunnel_type: Optional type of interface to create, currently supports: vlan, vxlan and gre.
id: Optional tunnel's key.
remote: Remote endpoint's IP address.
dst_port: Port to use when creating tunnelport in the switch.
internal: Create an internal port if one does not exist
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
tunnel_types = ("vlan", "vxlan", "gre")
if tunnel_type and tunnel_type not in tunnel_types:
raise TypeError(
"The optional type argument must be one of these values: {}.".format(
str(tunnel_types)
)
)
bridge_exists = __salt__["openvswitch.bridge_exists"](bridge)
port_list = []
if bridge_exists:
port_list = __salt__["openvswitch.port_list"](bridge)
# Comment and change messages
comments = {}
comments["comment_bridge_notexists"] = "Bridge {} does not exist.".format(bridge)
comments["comment_port_exists"] = "Port {} already exists.".format(name)
comments["comment_port_created"] = "Port {} created on bridge {}.".format(
name, bridge
)
comments[
"comment_port_notcreated"
] = "Unable to create port {} on bridge {}.".format(name, bridge)
comments["changes_port_created"] = {
name: {
"old": "No port named {} present.".format(name),
"new": "Created port {1} on bridge {0}.".format(bridge, name),
}
}
comments[
"comment_port_internal"
] = "Port {} already exists, but interface type has been changed to internal.".format(
name
)
comments["changes_port_internal"] = {"internal": {"old": False, "new": True}}
comments["comment_port_internal_not_changed"] = (
"Port {} already exists, but the interface type could not be changed to"
" internal.".format(name)
)
if tunnel_type:
comments["comment_invalid_ip"] = "Remote is not valid ip address."
if tunnel_type == "vlan":
comments["comment_vlan_invalid_id"] = "VLANs id must be between 0 and 4095."
comments[
"comment_vlan_invalid_name"
] = "Could not find network interface {}.".format(name)
comments[
"comment_vlan_port_exists"
] = "Port {} with access to VLAN {} already exists on bridge {}.".format(
name, id, bridge
)
comments[
"comment_vlan_created"
] = "Created port {} with access to VLAN {} on bridge {}.".format(
name, id, bridge
)
comments[
"comment_vlan_notcreated"
] = "Unable to create port {} with access to VLAN {} on bridge {}.".format(
name, id, bridge
)
comments["changes_vlan_created"] = {
name: {
"old": (
"No port named {} with access to VLAN {} present on "
"bridge {} present.".format(name, id, bridge)
),
"new": (
"Created port {1} with access to VLAN {2} on "
"bridge {0}.".format(bridge, name, id)
),
}
}
elif tunnel_type == "gre":
comments[
"comment_gre_invalid_id"
] = "Id of GRE tunnel must be an unsigned 32-bit integer."
comments["comment_gre_interface_exists"] = (
"GRE tunnel interface {} with rempte ip {} and key {} "
"already exists on bridge {}.".format(name, remote, id, bridge)
)
comments["comment_gre_created"] = (
"Created GRE tunnel interface {} with remote ip {} and key {} "
"on bridge {}.".format(name, remote, id, bridge)
)
comments["comment_gre_notcreated"] = (
"Unable to create GRE tunnel interface {} with remote ip {} and key {} "
"on bridge {}.".format(name, remote, id, bridge)
)
comments["changes_gre_created"] = {
name: {
"old": (
"No GRE tunnel interface {} with remote ip {} and key {} "
"on bridge {} present.".format(name, remote, id, bridge)
),
"new": (
"Created GRE tunnel interface {} with remote ip {} and key {} "
"on bridge {}.".format(name, remote, id, bridge)
),
}
}
elif tunnel_type == "vxlan":
comments["comment_dstport"] = (
" (dst_port" + str(dst_port) + ")" if 0 < dst_port <= 65535 else ""
)
comments[
"comment_vxlan_invalid_id"
] = "Id of VXLAN tunnel must be an unsigned 64-bit integer."
comments["comment_vxlan_interface_exists"] = (
"VXLAN tunnel interface {} with rempte ip {} and key {} "
"already exists on bridge {}{}.".format(
name, remote, id, bridge, comments["comment_dstport"]
)
)
comments["comment_vxlan_created"] = (
"Created VXLAN tunnel interface {} with remote ip {} and key {} "
"on bridge {}{}.".format(
name, remote, id, bridge, comments["comment_dstport"]
)
)
comments["comment_vxlan_notcreated"] = (
"Unable to create VXLAN tunnel interface {} with remote ip {} and key"
" {} on bridge {}{}.".format(
name, remote, id, bridge, comments["comment_dstport"]
)
)
comments["changes_vxlan_created"] = {
name: {
"old": (
"No VXLAN tunnel interface {} with remote ip {} and key {} "
"on bridge {}{} present.".format(
name, remote, id, bridge, comments["comment_dstport"]
)
),
"new": (
"Created VXLAN tunnel interface {} with remote ip {} and key {}"
" on bridge {}{}.".format(
name, remote, id, bridge, comments["comment_dstport"]
)
),
}
}
# Check VLANs attributes
def _check_vlan():
tag = __salt__["openvswitch.port_get_tag"](name)
interfaces = __salt__["network.interfaces"]()
if not 0 <= id <= 4095:
ret["result"] = False
ret["comment"] = comments["comment_vlan_invalid_id"]
elif not internal and name not in interfaces:
ret["result"] = False
ret["comment"] = comments["comment_vlan_invalid_name"]
elif tag and name in port_list:
try:
if int(tag[0]) == id:
ret["result"] = True
ret["comment"] = comments["comment_vlan_port_exists"]
except (ValueError, KeyError):
pass
# Check GRE tunnels attributes
def _check_gre():
interface_options = __salt__["openvswitch.interface_get_options"](name)
interface_type = __salt__["openvswitch.interface_get_type"](name)
if not 0 <= id <= 2 ** 32:
ret["result"] = False
ret["comment"] = comments["comment_gre_invalid_id"]
elif not __salt__["dig.check_ip"](remote):
ret["result"] = False
ret["comment"] = comments["comment_invalid_ip"]
elif interface_options and interface_type and name in port_list:
interface_attroptions = (
'{key="' + str(id) + '", remote_ip="' + str(remote) + '"}'
)
try:
if (
interface_type[0] == "gre"
and interface_options[0] == interface_attroptions
):
ret["result"] = True
ret["comment"] = comments["comment_gre_interface_exists"]
except KeyError:
pass
# Check VXLAN tunnels attributes
    def _check_vxlan():
        """Validate VXLAN parameters and flag an already-configured interface.

        Mutates the enclosing ``ret``: marks failure for an out-of-range
        key or an invalid remote IP, and marks success when the existing
        interface already matches the requested options (including the
        optional destination port).
        """
        interface_options = __salt__["openvswitch.interface_get_options"](name)
        interface_type = __salt__["openvswitch.interface_get_type"](name)
        # NOTE(review): the inclusive 2**64 upper bound looks generous for a
        # VXLAN key -- confirm against the openvswitch execution module.
        if not 0 <= id <= 2 ** 64:
            ret["result"] = False
            ret["comment"] = comments["comment_vxlan_invalid_id"]
        elif not __salt__["dig.check_ip"](remote):
            ret["result"] = False
            ret["comment"] = comments["comment_invalid_ip"]
        elif interface_options and interface_type and name in port_list:
            # dst_port only appears in the option string when a valid custom
            # port was requested.
            opt_port = (
                'dst_port="' + str(dst_port) + '", ' if 0 < dst_port <= 65535 else ""
            )
            # Reconstruct the exact option string reported by OVS so the
            # comparison below is a simple equality test.
            interface_attroptions = (
                '{{{0}key="'.format(opt_port)
                + str(id)
                + '", remote_ip="'
                + str(remote)
                + '"}'
            )
            try:
                if (
                    interface_type[0] == "vxlan"
                    and interface_options[0] == interface_attroptions
                ):
                    ret["result"] = True
                    ret["comment"] = comments["comment_vxlan_interface_exists"]
            except KeyError:
                pass
    # Dry run, test=true mode
    if __opts__["test"]:
        if bridge_exists:
            # The _check_* helpers may already have set a definitive
            # comment/result; only predict creation when they did not.
            if tunnel_type == "vlan":
                _check_vlan()
                if not ret["comment"]:
                    ret["result"] = None
                    ret["comment"] = comments["comment_vlan_created"]
            elif tunnel_type == "vxlan":
                _check_vxlan()
                if not ret["comment"]:
                    ret["result"] = None
                    ret["comment"] = comments["comment_vxlan_created"]
            elif tunnel_type == "gre":
                _check_gre()
                if not ret["comment"]:
                    ret["result"] = None
                    ret["comment"] = comments["comment_gre_created"]
            else:
                # Plain (non-tunnel) port.
                if name in port_list:
                    ret["result"] = True
                    current_type = __salt__["openvswitch.interface_get_type"](name)
                    # The interface type is returned as a single-element list.
                    if internal and (current_type != ["internal"]):
                        ret["comment"] = comments["comment_port_internal"]
                    else:
                        ret["comment"] = comments["comment_port_exists"]
                else:
                    ret["result"] = None
                    ret["comment"] = comments["comment_port_created"]
        else:
            ret["result"] = None
            ret["comment"] = comments["comment_bridge_notexists"]
        return ret
    # Apply mode: actually create the port/tunnel when needed.
    if bridge_exists:
        if tunnel_type == "vlan":
            _check_vlan()
            if not ret["comment"]:
                port_create_vlan = __salt__["openvswitch.port_create_vlan"](
                    bridge, name, id, internal
                )
                if port_create_vlan:
                    ret["result"] = True
                    ret["comment"] = comments["comment_vlan_created"]
                    ret["changes"] = comments["changes_vlan_created"]
                else:
                    ret["result"] = False
                    ret["comment"] = comments["comment_vlan_notcreated"]
        elif tunnel_type == "vxlan":
            _check_vxlan()
            if not ret["comment"]:
                port_create_vxlan = __salt__["openvswitch.port_create_vxlan"](
                    bridge, name, id, remote, dst_port
                )
                if port_create_vxlan:
                    ret["result"] = True
                    ret["comment"] = comments["comment_vxlan_created"]
                    ret["changes"] = comments["changes_vxlan_created"]
                else:
                    ret["result"] = False
                    ret["comment"] = comments["comment_vxlan_notcreated"]
        elif tunnel_type == "gre":
            _check_gre()
            if not ret["comment"]:
                port_create_gre = __salt__["openvswitch.port_create_gre"](
                    bridge, name, id, remote
                )
                if port_create_gre:
                    ret["result"] = True
                    ret["comment"] = comments["comment_gre_created"]
                    ret["changes"] = comments["changes_gre_created"]
                else:
                    ret["result"] = False
                    ret["comment"] = comments["comment_gre_notcreated"]
        else:
            # Plain (non-tunnel) port.
            if name in port_list:
                current_type = __salt__["openvswitch.interface_get_type"](name)
                # The interface type is returned as a single-element list.
                if internal and (current_type != ["internal"]):
                    # We do not have a direct way of only setting the interface
                    # type to internal, so we add the port with the --may-exist
                    # option.
                    port_add = __salt__["openvswitch.port_add"](
                        bridge, name, may_exist=True, internal=internal
                    )
                    if port_add:
                        ret["result"] = True
                        ret["comment"] = comments["comment_port_internal"]
                        ret["changes"] = comments["changes_port_internal"]
                    else:
                        ret["result"] = False
                        ret["comment"] = comments["comment_port_internal_not_changed"]
                else:
                    ret["result"] = True
                    ret["comment"] = comments["comment_port_exists"]
            else:
                port_add = __salt__["openvswitch.port_add"](
                    bridge, name, internal=internal
                )
                if port_add:
                    ret["result"] = True
                    ret["comment"] = comments["comment_port_created"]
                    ret["changes"] = comments["changes_port_created"]
                else:
                    ret["result"] = False
                    ret["comment"] = comments["comment_port_notcreated"]
    else:
        ret["result"] = False
        ret["comment"] = comments["comment_bridge_notexists"]
    return ret
def absent(name, bridge=None):
    """
    Ensure that the named port is not present.

    If a bridge is given the port is only removed from that bridge;
    otherwise it is removed from whichever bridge currently holds it.

    Args:
        name: The name of the port.
        bridge: The name of the bridge.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    bridge_exists = bool(bridge) and __salt__["openvswitch.bridge_exists"](bridge)
    if not bridge:
        # Without a bridge we cannot enumerate ports; assume it may exist.
        port_list = [name]
    elif bridge_exists:
        port_list = __salt__["openvswitch.port_list"](bridge)
    else:
        port_list = ()

    # Comment and change messages
    comments = {
        "comment_bridge_notexists": "Bridge {} does not exist.".format(bridge),
        "comment_port_notexists": "Port {} does not exist on bridge {}.".format(
            name, bridge
        ),
        "comment_port_deleted": "Port {} deleted.".format(name),
        "comment_port_notdeleted": "Unable to delete port {}.".format(name),
        "changes_port_deleted": {
            name: {
                "old": "Port named {} may exist.".format(name),
                "new": "Deleted port {}.".format(name),
            }
        },
    }

    dry_run = __opts__["test"]
    if bridge and not bridge_exists:
        ret["result"] = None if dry_run else False
        ret["comment"] = comments["comment_bridge_notexists"]
    elif name not in port_list:
        ret["result"] = True
        ret["comment"] = comments["comment_port_notexists"]
    elif dry_run:
        ret["result"] = None
        ret["comment"] = comments["comment_port_deleted"]
    else:
        port_remove = __salt__["openvswitch.port_remove"](
            br=bridge if bridge else None, port=name
        )
        if port_remove:
            ret["result"] = True
            ret["comment"] = comments["comment_port_deleted"]
            ret["changes"] = comments["changes_port_deleted"]
        else:
            ret["result"] = False
            ret["comment"] = comments["comment_port_notdeleted"]
    return ret
import salt.utils.openstack.pyrax as suop
def __virtual__():
    """
    Only load if pyrax is available.
    """
    # HAS_PYRAX is set by salt.utils.openstack.pyrax at import time.
    return suop.HAS_PYRAX
def present(name, provider):
    """
    Ensure the RackSpace queue exists.
    name
        Name of the Rackspace queue.
    provider
        Salt Cloud Provider
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    def _action(func):
        # Helper: run a salt-cloud action against this provider/queue.
        return __salt__["cloud.action"](func, provider=provider, name=name)

    # queues_exists returns {provider: {host: bool}}; unwrap the one value.
    exists = next(iter(_action("queues_exists")[provider].values()))

    if exists:
        ret["comment"] = "{} present.".format(name)
        return ret

    if __opts__["test"]:
        ret["comment"] = "Rackspace queue {} is set to be created.".format(name)
        ret["result"] = None
        return ret

    if _action("queues_create"):
        ret["changes"]["old"] = {}
        ret["changes"]["new"] = {"queue": _action("queues_show")}
    else:
        ret["result"] = False
        ret["comment"] = "Failed to create {} Rackspace queue.".format(name)
    return ret
def absent(name, provider):
    """
    Ensure the named Rackspace queue is deleted.
    name
        Name of the Rackspace queue.
    provider
        Salt Cloud provider
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    def _action(func):
        # Helper: run a salt-cloud action against this provider/queue.
        return __salt__["cloud.action"](func, provider=provider, name=name)

    # queues_exists returns {provider: {host: bool}}; unwrap the one value.
    exists = next(iter(_action("queues_exists")[provider].values()))

    if not exists:
        ret["comment"] = "{} does not exist.".format(name)
        return ret

    if __opts__["test"]:
        ret["comment"] = "Rackspace queue {} is set to be removed.".format(name)
        ret["result"] = None
        return ret

    # Capture the queue details before deleting so changes can report them.
    queue = _action("queues_show")
    if _action("queues_delete"):
        ret["changes"]["old"] = queue
        ret["changes"]["new"] = {}
    else:
        ret["result"] = False
        ret["comment"] = "Failed to delete {} Rackspace queue.".format(name)
    return ret
# Virtual module name advertised by __virtual__().
__virtualname__ = "neutron_secgroup_rule"
def __virtual__():
    """Load only when the neutronng execution module (shade) is usable."""
    if "neutronng.list_subnets" not in __salt__:
        return (
            False,
            "The neutronng execution module failed to load: shade python module is not available",
        )
    return __virtualname__
def _rule_compare(rule1, rule2):
"""
Compare the common keys between security group rules against eachother
"""
commonkeys = set(rule1.keys()).intersection(rule2.keys())
for key in commonkeys:
if rule1[key] != rule2[key]:
return False
return True
def present(name, auth=None, **kwargs):
    """
    Ensure a security group rule exists

    defaults: port_range_min=None, port_range_max=None, protocol=None,
    remote_ip_prefix=None, remote_group_id=None, direction='ingress',
    ethertype='IPv4', project_id=None

    name
        Name of the security group to associate with this rule
    project_name
        Name of the project associated with the security group
    protocol
        The protocol that is matched by the security group rule.
        Valid values are None, tcp, udp, and icmp.
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    kwargs = __utils__["args.clean_kwargs"](**kwargs)

    __salt__["neutronng.setup_clouds"](auth)

    # The SDK filters on project_id, so translate project_name if given.
    # NOTE(review): one of project_name/project_id must be supplied or the
    # lookup below raises KeyError -- matches the original behavior.
    if "project_name" in kwargs:
        kwargs["project_id"] = kwargs["project_name"]
        del kwargs["project_name"]

    project = __salt__["keystoneng.project_get"](name=kwargs["project_id"])

    if project is None:
        ret["result"] = False
        ret["comment"] = "Project does not exist"
        return ret

    secgroup = __salt__["neutronng.security_group_get"](
        name=name, filters={"tenant_id": project.id}
    )

    if secgroup is None:
        ret["result"] = False
        # BUG FIX: was ``({},)`` -- a one-element tuple left by a stray
        # trailing comma; ``changes`` must be a dict.
        ret["changes"] = {}
        ret["comment"] = "Security Group does not exist {}".format(name)
        return ret

    # we have to search through all secgroup rules for a possible match
    rule_exists = None
    for rule in secgroup["security_group_rules"]:
        if _rule_compare(rule, kwargs) is True:
            rule_exists = True
            break  # one matching rule is enough

    if rule_exists is None:
        if __opts__["test"] is True:
            ret["result"] = None
            ret["changes"] = kwargs
            ret["comment"] = "Security Group rule will be created."
            return ret

        # The variable differences are a little clumsy right now
        kwargs["secgroup_name_or_id"] = secgroup

        new_rule = __salt__["neutronng.security_group_rule_create"](**kwargs)
        ret["changes"] = new_rule
        ret["comment"] = "Created security group rule"
        return ret

    return ret
def absent(name, auth=None, **kwargs):
    """
    Ensure a security group rule does not exist
    name
        name or id of the security group rule to delete
    rule_id
        uuid of the rule to delete
    project_id
        id of project to delete rule from
    """
    rule_id = kwargs["rule_id"]
    ret = {"name": rule_id, "changes": {}, "result": True, "comment": ""}

    __salt__["neutronng.setup_clouds"](auth)

    secgroup = __salt__["neutronng.security_group_get"](
        name=name, filters={"tenant_id": kwargs["project_id"]}
    )

    # no need to delete a rule if the security group doesn't exist
    if secgroup is None:
        ret["comment"] = "security group does not exist"
        return ret

    # This should probably be done with compare on fields instead of
    # rule_id in the future
    rule_exists = any(
        _rule_compare(rule, {"id": rule_id}) is True
        for rule in secgroup["security_group_rules"]
    )
    if not rule_exists:
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["changes"] = {"id": kwargs["rule_id"]}
        ret["comment"] = "Security group rule will be deleted."
        return ret

    __salt__["neutronng.security_group_rule_delete"](rule_id=rule_id)
    ret["changes"]["id"] = rule_id
    ret["comment"] = "Deleted security group rule"
    return ret
# Default logrotate configuration file used when none is supplied.
_DEFAULT_CONF = "/etc/logrotate.conf"
# Define the module's virtual name
__virtualname__ = "logrotate"
# Define a function alias in order not to shadow built-in's
__func_alias__ = {"set_": "set"}
def __virtual__():
    """
    Load only on minions that have the logrotate module.
    """
    if "logrotate.show_conf" not in __salt__:
        return (False, "logrotate module could not be loaded")
    return __virtualname__
def _convert_if_int(value):
"""
Convert to an int if necessary.
:param str value: The value to check/convert.
:return: The converted or passed value.
:rtype: bool|int|str
"""
try:
value = int(str(value))
except ValueError:
pass
return value
def set_(name, key, value, setting=None, conf_file=_DEFAULT_CONF):
    """
    Set a new value for a specific configuration line.

    :param str name: The state ID (only echoed back in the return dict).
    :param str key: The command or block to configure.
    :param str value: The command value or command of the block specified by the key parameter.
    :param str setting: The command value for the command specified by the value parameter.
    :param str conf_file: The logrotate configuration file.

    Example of usage with only the required arguments:

    .. code-block:: yaml

        logrotate-rotate:
          logrotate.set:
            - key: rotate
            - value: 2

    Example of usage specifying all available arguments:

    .. code-block:: yaml

        logrotate-wtmp-rotate:
          logrotate.set:
            - key: /var/log/wtmp
            - value: rotate
            - setting: 2
            - conf_file: /etc/logrotate.conf
    """
    ret = {"name": name, "changes": dict(), "comment": "", "result": None}
    # Fetch the current value; lookup errors mean the key (or block/command
    # pair) is not configured yet, which we represent as False.
    try:
        if setting is None:
            current_value = __salt__["logrotate.get"](key=key, conf_file=conf_file)
        else:
            current_value = __salt__["logrotate.get"](
                key=key, value=value, conf_file=conf_file
            )
    except (AttributeError, KeyError):
        current_value = False
    # Simple "key value" command (no enclosing block).
    if setting is None:
        value = _convert_if_int(value)
        if current_value == value:
            ret["comment"] = "Command '{}' already has value: {}".format(key, value)
            ret["result"] = True
        elif __opts__["test"]:
            ret["comment"] = "Command '{}' will be set to value: {}".format(key, value)
            ret["changes"] = {"old": current_value, "new": value}
        else:
            ret["changes"] = {"old": current_value, "new": value}
            ret["result"] = __salt__["logrotate.set"](
                key=key, value=value, conf_file=conf_file
            )
            if ret["result"]:
                ret["comment"] = "Set command '{}' value: {}".format(key, value)
            else:
                ret["comment"] = "Unable to set command '{}' value: {}".format(
                    key, value
                )
        return ret
    # Block form: ``key { value setting }``.
    setting = _convert_if_int(setting)
    if current_value == setting:
        ret["comment"] = "Block '{}' command '{}' already has value: {}".format(
            key, value, setting
        )
        ret["result"] = True
    elif __opts__["test"]:
        ret["comment"] = "Block '{}' command '{}' will be set to value: {}".format(
            key, value, setting
        )
        ret["changes"] = {"old": current_value, "new": setting}
    else:
        ret["changes"] = {"old": current_value, "new": setting}
        ret["result"] = __salt__["logrotate.set"](
            key=key, value=value, setting=setting, conf_file=conf_file
        )
        if ret["result"]:
            ret["comment"] = "Set block '{}' command '{}' value: {}".format(
                key, value, setting
            )
        else:
            ret["comment"] = "Unable to set block '{}' command '{}' value: {}".format(
                key, value, setting
            )
return ret | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/logrotate.py | 0.586523 | 0.247773 | logrotate.py | pypi |
r"""
Execution of Ansible modules from within states
===============================================
With `ansible.call` these states allow individual Ansible module calls to be
made via states. To call an Ansible module function use a :mod:`module.run <salt.states.ansible.call>`
state:
.. code-block:: yaml
some_set_of_tasks:
ansible:
- system.ping
- packaging.os.zypper
- name: emacs
- state: installed
"""
import logging
import os
import sys
# Import salt modules
import salt.fileclient
import salt.utils.decorators.path
from salt.utils.decorators import depends
log = logging.getLogger(__name__)
__virtualname__ = "ansible"
@depends("ansible")
class AnsibleState:
    """
    Ansible state caller.

    Instances are callable: each keyword argument (apart from ``name``)
    names an Ansible module whose parameters are passed as a list mixing
    positional values and single-entry keyword dicts.
    """

    def get_args(self, argset):
        """
        Get args and kwargs from the argset.

        :param argset: iterable mixing positional values and dicts of
            keyword arguments (or None).
        :return: tuple of (args list, kwargs dict).
        """
        args = []
        kwargs = {}
        for element in argset or []:
            if isinstance(element, dict):
                kwargs.update(element)
            else:
                args.append(element)
        return args, kwargs

    def __call__(self, **kwargs):
        """
        Call Ansible module.

        :return: state return dict; ``result`` is False if any module
            raised, and ``changes`` maps each module name to its output.
        """
        ret = {
            "name": kwargs.pop("name"),
            "changes": {},
            "comment": "",
            "result": True,
        }
        for mod_name, mod_params in kwargs.items():
            # FIX: do not rebind ``kwargs`` (the dict being iterated) inside
            # the loop; use distinct names for the per-module arguments.
            mod_args, mod_kwargs = self.get_args(mod_params)
            try:
                ans_mod_out = __salt__["ansible.{}".format(mod_name)](
                    **{"__pub_arg": [mod_args, mod_kwargs]}
                )
            except Exception as err:  # pylint: disable=broad-except
                ans_mod_out = 'Module "{}" failed. Error message: ({}) {}'.format(
                    mod_name, err.__class__.__name__, err
                )
                ret["result"] = False
            ret["changes"][mod_name] = ans_mod_out
        return ret
def __virtual__():
    """
    Disable, if Ansible is not available around on the Minion.
    """
    # Attach the ``call`` state function dynamically so it only exists when
    # this module loads; AnsibleState.__call__ does the actual work.
    # pylint: disable=unnecessary-lambda
    setattr(sys.modules[__name__], "call", lambda **kwargs: AnsibleState()(**kwargs))
    # pylint: enable=unnecessary-lambda
    return __virtualname__
def _client():
    """
    Get a fileclient
    """
    # Built from this minion's opts; used by playbooks() to resolve the
    # cache path for cloned playbook repositories.
    return salt.fileclient.get_file_client(__opts__)
def _changes(plays):
"""
Find changes in ansible return data
"""
changes = {}
for play in plays["plays"]:
task_changes = {}
for task in play["tasks"]:
host_changes = {}
for host, data in task["hosts"].items():
if data.get("changed", False) is True:
host_changes[host] = data.get("diff", data.get("changes", {}))
elif any(x in data for x in ["failed", "skipped", "unreachable"]):
host_changes[host] = data.get("results", data.get("msg", {}))
if host_changes:
task_changes[task["task"]["name"]] = host_changes
if task_changes:
changes[play["play"]["name"]] = task_changes
return changes
@salt.utils.decorators.path.which("ansible-playbook")
def playbooks(name, rundir=None, git_repo=None, git_kwargs=None, ansible_kwargs=None):
    """
    Run Ansible Playbooks

    :param name: path to playbook. This can be relative to rundir or the git repo
    :param rundir: location to run ansible-playbook from.
    :param git_repo: git repository to clone for ansible playbooks. This is cloned
                     using the `git.latest` state, and is cloned to the `rundir`
                     if specified, otherwise it is clone to the `cache_dir`
    :param git_kwargs: extra kwargs to pass to `git.latest` state module besides
                       the `name` and `target`
    :param ansible_kwargs: extra kwargs to pass to `ansible.playbooks` execution
                           module besides the `name` and `target`
    :return: Ansible playbook output.

    .. code-block:: yaml

        run nginx install:
          ansible.playbooks:
            - name: install.yml
            - git_repo: git://github.com/gituser/playbook.git
            - git_kwargs:
                rev: master
    """
    ret = {
        "result": False,
        "changes": {},
        "comment": "Running playbook {}".format(name),
        "name": name,
    }
    if git_repo:
        # Without a usable rundir, clone into the minion's file cache path.
        if not isinstance(rundir, str) or not os.path.isdir(rundir):
            rundir = _client()._extrn_path(git_repo, "base")
            log.trace("rundir set to %s", rundir)
        if not isinstance(git_kwargs, dict):
            log.debug("Setting git_kwargs to empty dict: %s", git_kwargs)
            git_kwargs = {}
        __states__["git.latest"](name=git_repo, target=rundir, **git_kwargs)
    if not isinstance(ansible_kwargs, dict):
        log.debug("Setting ansible_kwargs to empty dict: %s", ansible_kwargs)
        ansible_kwargs = {}
    if __opts__["test"]:
        # Dry run: ansible's --check --diff mode predicts the changes.
        checks = __salt__["ansible.playbooks"](
            name, rundir=rundir, check=True, diff=True, **ansible_kwargs
        )
        # A missing "stats" key means ansible-playbook itself failed to run.
        if "stats" not in checks:
            ret["comment"] = checks.get("stderr", checks)
            ret["result"] = False
            ret["changes"] = {}
        elif all(
            not check["changed"] and not check["failures"] and not check["unreachable"]
            for check in checks["stats"].values()
        ):
            ret["comment"] = "No changes to be made from playbook {}".format(name)
            ret["result"] = True
        elif any(
            check["changed"] and not check["failures"] and not check["unreachable"]
            for check in checks["stats"].values()
        ):
            ret["comment"] = "Changes will be made from playbook {}".format(name)
            ret["result"] = None
            ret["changes"] = _changes(checks)
        else:
            ret["comment"] = "There were some issues running the playbook {}".format(
                name
            )
            ret["result"] = False
            ret["changes"] = _changes(checks)
    else:
        results = __salt__["ansible.playbooks"](
            name, rundir=rundir, diff=True, **ansible_kwargs
        )
        # A missing "stats" key means ansible-playbook itself failed to run.
        if "stats" not in results:
            ret["comment"] = results.get("stderr", results)
            ret["result"] = False
            ret["changes"] = {}
        elif all(
            not check["changed"] and not check["failures"] and not check["unreachable"]
            for check in results["stats"].values()
        ):
            ret["comment"] = "No changes to be made from playbook {}".format(name)
            ret["result"] = True
            ret["changes"] = _changes(results)
        else:
            ret["changes"] = _changes(results)
            # Success iff no host failed or was unreachable.
            ret["result"] = all(
                not check["failures"] and not check["unreachable"]
                for check in results["stats"].values()
            )
            if ret["result"]:
                ret["comment"] = "Changes were made by playbook {}".format(name)
            else:
                ret[
                    "comment"
                ] = "There were some issues running the playbook {}".format(name)
return ret | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/ansiblegate.py | 0.558568 | 0.356503 | ansiblegate.py | pypi |
import salt.utils.compat
def __virtual__():
    """Only load when the cisconso execution module is available."""
    if "cisconso.set_data_value" not in __salt__:
        return (False, "cisconso module could not be loaded")
    return True
def value_present(name, datastore, path, config):
    """
    Ensure a specific value exists at a given path

    :param name: The name for this rule
    :type name: ``str``

    :param datastore: The datastore, e.g. running, operational.
        One of the NETCONF store IETF types
    :type datastore: :class:`DatastoreType` (``str`` enum).

    :param path: The device path to set the value at,
        a list of element names in order, / separated
    :type path: ``list``, ``str`` OR ``tuple``

    :param config: The new value at the given path
    :type config: ``dict``

    Examples:

    .. code-block:: yaml

        enable pap auth:
          cisconso.config_present:
            - name: enable_pap_auth
            - datastore: running
            - path: devices/device/ex0/config/sys/interfaces/serial/ppp0/authentication
            - config:
                authentication:
                    method: pap
                    "list-name": foobar
    """
    ret = {"name": name, "result": False, "changes": {}, "comment": ""}

    existing = __salt__["cisconso.get_data"](datastore, path)

    # BUG FIX: ``salt.utils.compat.cmp`` is a Python-2-style three-way
    # comparator that returns 0 when its operands are EQUAL, so the former
    # truthiness test (``if cmp(...)``) was inverted -- it reported
    # "already set" exactly when the configs differed.
    if salt.utils.compat.cmp(existing, config) == 0:
        ret["result"] = True
        ret["comment"] = "Config is already set"
        return ret

    # Compute the delta once; both the test and apply paths report it.
    # NOTE(review): argument order matches the original call;
    # _DictDiffer treats its first argument as "current" -- confirm the
    # new/removed labels read correctly for this order.
    diff = _DictDiffer(existing, config)
    ret["changes"]["new"] = diff.added()
    ret["changes"]["removed"] = diff.removed()
    ret["changes"]["changed"] = diff.changed()

    if __opts__["test"] is True:
        ret["result"] = None
        ret["comment"] = "Config will be added"
    else:
        __salt__["cisconso.set_data_value"](datastore, path, config)
        ret["result"] = True
        ret["comment"] = "Successfully added config"
    return ret
class _DictDiffer:
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = (
set(current_dict.keys()),
set(past_dict.keys()),
)
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return {o for o in self.intersect if self.past_dict[o] != self.current_dict[o]}
def unchanged(self):
return {o for o in self.intersect if self.past_dict[o] == self.current_dict[o]} | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/cisconso.py | 0.765944 | 0.336781 | cisconso.py | pypi |
import logging
import os.path
import re
log = logging.getLogger(__name__)
def _size_convert(_re_size):
converted_size = int(_re_size.group("size_value"))
if _re_size.group("size_unit") == "m":
converted_size = int(converted_size) * 1024
if _re_size.group("size_unit") == "g":
converted_size = int(converted_size) * 1024 * 1024
return converted_size
def mounted(
name,
device,
fstype,
mkmnt=False,
opts="defaults",
dump=0,
pass_num=0,
config="/etc/fstab",
persist=True,
mount=True,
user=None,
match_on="auto",
device_name_regex=None,
extra_mount_invisible_options=None,
extra_mount_invisible_keys=None,
extra_mount_ignore_fs_keys=None,
extra_mount_translate_options=None,
hidden_opts=None,
**kwargs
):
"""
Verify that a device is mounted
name
The path to the location where the device is to be mounted
device
The device name, typically the device node, such as ``/dev/sdb1``
or ``UUID=066e0200-2867-4ebe-b9e6-f30026ca2314`` or ``LABEL=DATA``
fstype
The filesystem type, this will be ``xfs``, ``ext2/3/4`` in the case of classic
filesystems, ``fuse`` in the case of fuse mounts, and ``nfs`` in the case of nfs mounts
mkmnt
If the mount point is not present then the state will fail, set ``mkmnt: True``
to create the mount point if it is otherwise not present
opts
A list object of options or a comma delimited list
dump
The dump value to be passed into the fstab, Default is ``0``
pass_num
The pass value to be passed into the fstab, Default is ``0``
config
Set an alternative location for the fstab, Default is ``/etc/fstab``
persist
Set if the mount should be saved in the fstab, Default is ``True``
mount
Set if the mount should be mounted immediately, Default is ``True``
user
The account used to execute the mount; this defaults to the user salt is
running as on the minion
match_on
A name or list of fstab properties on which this state should be applied.
Default is ``auto``, a special value indicating to guess based on fstype.
In general, ``auto`` matches on name for recognized special devices and
device otherwise.
device_name_regex
A list of device exact names or regular expressions which should
not force a remount. For example, glusterfs may be mounted with a
comma-separated list of servers in fstab, but the /proc/self/mountinfo
will show only the first available server.
.. code-block:: jinja
{% set glusterfs_ip_list = ['10.0.0.1', '10.0.0.2', '10.0.0.3'] %}
mount glusterfs volume:
mount.mounted:
- name: /mnt/glusterfs_mount_point
- device: {{ glusterfs_ip_list|join(',') }}:/volume_name
- fstype: glusterfs
- opts: _netdev,rw,defaults,direct-io-mode=disable
- mkmnt: True
- persist: True
- dump: 0
- pass_num: 0
- device_name_regex:
- ({{ glusterfs_ip_list|join('|') }}):/volume_name
.. versionadded:: 2016.11.0
extra_mount_invisible_options
A list of extra options that are not visible through the
``/proc/self/mountinfo`` interface.
If a option is not visible through this interface it will always remount
the device. This option extends the builtin ``mount_invisible_options``
list.
extra_mount_invisible_keys
A list of extra key options that are not visible through the
``/proc/self/mountinfo`` interface.
If a key option is not visible through this interface it will always
remount the device. This option extends the builtin
``mount_invisible_keys`` list.
A good example for a key option is the password option::
password=badsecret
extra_mount_ignore_fs_keys
A dict of filesystem options which should not force a remount. This will update
the internal dictionary. The dict should look like this::
{
'ramfs': ['size']
}
extra_mount_translate_options
A dict of mount options that gets translated when mounted. To prevent a remount
add additional options to the default dictionary. This will update the internal
dictionary. The dictionary should look like this::
{
'tcp': 'proto=tcp',
'udp': 'proto=udp'
}
hidden_opts
A list of mount options that will be ignored when considering a remount
as part of the state application
.. versionadded:: 2015.8.2
"""
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
update_mount_cache = False
if not name:
ret["result"] = False
ret["comment"] = "Must provide name to mount.mounted"
return ret
if not device:
ret["result"] = False
ret["comment"] = "Must provide device to mount.mounted"
return ret
if not fstype:
ret["result"] = False
ret["comment"] = "Must provide fstype to mount.mounted"
return ret
if device_name_regex is None:
device_name_regex = []
# Defaults is not a valid option on Mac OS
if __grains__["os"] in ["MacOS", "Darwin"] and opts == "defaults":
opts = "noowners"
# Defaults is not a valid option on AIX
if __grains__["os"] in ["AIX"]:
if opts == "defaults":
opts = ""
# Make sure that opts is correct, it can be a list or a comma delimited
# string
if isinstance(opts, str):
opts = opts.split(",")
if opts:
opts.sort()
if isinstance(hidden_opts, str):
hidden_opts = hidden_opts.split(",")
# remove possible trailing slash
if not name == "/":
name = name.rstrip("/")
device_list = []
# Get the active data
active = __salt__["mount.active"](extended=True)
real_name = os.path.realpath(name)
if device.startswith("/"):
if "bind" in opts and real_name in active:
_device = device
if active[real_name]["device"].startswith("/"):
# Find the device that the bind really points at.
while True:
if _device in active:
_real_device = active[_device]["device"]
opts = list(
set(
opts
+ active[_device]["opts"]
+ active[_device]["superopts"]
)
)
active[real_name]["opts"].append("bind")
break
_device = os.path.dirname(_device)
real_device = _real_device
else:
# Remote file systems act differently.
if _device in active:
opts = list(
set(
opts
+ active[_device]["opts"]
+ active[_device]["superopts"]
)
)
active[real_name]["opts"].append("bind")
real_device = active[real_name]["device"]
else:
real_device = os.path.realpath(device)
elif device.upper().startswith("UUID="):
real_device = device.split("=")[1].strip('"').lower()
elif device.upper().startswith("LABEL="):
_label = device.split("=")[1]
cmd = "blkid -t LABEL={}".format(_label)
res = __salt__["cmd.run_all"]("{}".format(cmd))
if res["retcode"] > 0:
ret["comment"] = "Unable to find device with label {}.".format(_label)
ret["result"] = False
return ret
else:
# output is a list of entries like this:
# /dev/sda: LABEL="<label>" UUID="<uuid>" UUID_SUB="<uuid>" TYPE="btrfs"
# exact list of properties varies between filesystems, but we're
# only interested in the device in the first column
for line in res["stdout"]:
dev_with_label = line.split(":")[0]
device_list.append(dev_with_label)
real_device = device_list[0]
else:
real_device = device
# LVS devices have 2 names under /dev:
# /dev/mapper/vg--name-lv--name and /dev/vg-name/lv-name
# No matter what name is used for mounting,
# mount always displays the device as /dev/mapper/vg--name-lv--name
# Note the double-dash escaping.
# So, let's call that the canonical device name
# We should normalize names of the /dev/vg-name/lv-name type to the canonical name
lvs_match = re.match(r"^/dev/(?P<vg_name>[^/]+)/(?P<lv_name>[^/]+$)", device)
if lvs_match:
double_dash_escaped = {
k: re.sub(r"-", "--", v) for k, v in lvs_match.groupdict().items()
}
mapper_device = "/dev/mapper/{vg_name}-{lv_name}".format(**double_dash_escaped)
if os.path.exists(mapper_device):
real_device = mapper_device
# When included in a Salt state file, FUSE devices are prefaced by the
# filesystem type and a hash, e.g. sshfs. In the mount list only the
# hostname is included. So if we detect that the device is a FUSE device
# then we remove the prefaced string so that the device in state matches
# the device in the mount list.
fuse_match = re.match(r"^\w+\#(?P<device_name>.+)", device)
if fuse_match:
if "device_name" in fuse_match.groupdict():
real_device = fuse_match.group("device_name")
if real_name in active:
if "superopts" not in active[real_name]:
active[real_name]["superopts"] = []
if mount:
device_list.append(active[real_name]["device"])
device_list.append(os.path.realpath(device_list[0]))
alt_device = (
active[real_name]["alt_device"]
if "alt_device" in active[real_name]
else None
)
uuid_device = (
active[real_name]["device_uuid"]
if "device_uuid" in active[real_name]
else None
)
label_device = (
active[real_name]["device_label"]
if "device_label" in active[real_name]
else None
)
if alt_device and alt_device not in device_list:
device_list.append(alt_device)
if uuid_device and uuid_device not in device_list:
device_list.append(uuid_device)
if label_device and label_device not in device_list:
device_list.append(label_device)
if opts:
opts.sort()
mount_invisible_options = [
"_netdev",
"actimeo",
"bg",
"comment",
"defaults",
"delay_connect",
"direct-io-mode",
"intr",
"loop",
"nointr",
"nobootwait",
"nofail",
"password",
"reconnect",
"retry",
"soft",
"auto",
"users",
"bind",
"nonempty",
"transform_symlinks",
"port",
"backup-volfile-servers",
]
if extra_mount_invisible_options:
mount_invisible_options.extend(extra_mount_invisible_options)
if hidden_opts:
mount_invisible_options = list(
set(mount_invisible_options) | set(hidden_opts)
)
# options which are provided as key=value (e.g. password=Zohp5ohb)
mount_invisible_keys = [
"actimeo",
"comment",
"credentials",
"direct-io-mode",
"password",
"port",
"retry",
"secretfile",
]
if extra_mount_invisible_keys:
mount_invisible_keys.extend(extra_mount_invisible_keys)
# Some filesystems have options which should not force a remount.
mount_ignore_fs_keys = {"ramfs": ["size"]}
if extra_mount_ignore_fs_keys:
mount_ignore_fs_keys.update(extra_mount_ignore_fs_keys)
# Some options are translated once mounted
mount_translate_options = {
"tcp": "proto=tcp",
"udp": "proto=udp",
}
if extra_mount_translate_options:
mount_translate_options.update(extra_mount_translate_options)
for opt in opts:
if opt in mount_translate_options:
opt = mount_translate_options[opt]
keyval_option = opt.split("=")[0]
if keyval_option in mount_invisible_keys:
opt = keyval_option
size_match = re.match(
r"size=(?P<size_value>[0-9]+)(?P<size_unit>k|m|g)", opt
)
if size_match:
converted_size = _size_convert(size_match)
opt = "size={}k".format(converted_size)
# make cifs option user synonym for option username which is reported by /proc/mounts
if fstype in ["cifs"] and opt.split("=")[0] == "user":
opt = "username={}".format(opt.split("=")[1])
if opt.split("=")[0] in mount_ignore_fs_keys.get(fstype, []):
opt = opt.split("=")[0]
# convert uid/gid to numeric value from user/group name
name_id_opts = {"uid": "user.info", "gid": "group.info"}
if opt.split("=")[0] in name_id_opts and len(opt.split("=")) > 1:
_givenid = opt.split("=")[1]
_param = opt.split("=")[0]
_id = _givenid
if not re.match("[0-9]+$", _givenid):
_info = __salt__[name_id_opts[_param]](_givenid)
if _info and _param in _info:
_id = _info[_param]
opt = _param + "=" + str(_id)
_active_superopts = active[real_name].get("superopts", [])
for _active_opt in _active_superopts:
size_match = re.match(
r"size=(?P<size_value>[0-9]+)(?P<size_unit>k|m|g)",
_active_opt,
)
if size_match:
converted_size = _size_convert(size_match)
opt = "size={}k".format(converted_size)
_active_superopts.remove(_active_opt)
_active_opt = "size={}k".format(converted_size)
_active_superopts.append(_active_opt)
if (
opt not in active[real_name]["opts"]
and opt not in _active_superopts
and opt not in mount_invisible_options
and opt not in mount_ignore_fs_keys.get(fstype, [])
and opt not in mount_invisible_keys
):
if __opts__["test"]:
ret["result"] = None
ret[
"comment"
] = "Remount would be forced because options ({}) changed".format(
opt
)
return ret
else:
# Some file systems require umounting and mounting if options change
# add others to list that require similiar functionality
if fstype in ["nfs", "cvfs"] or fstype.startswith("fuse"):
ret["changes"]["umount"] = (
"Forced unmount and mount because "
+ "options ({}) changed".format(opt)
)
unmount_result = __salt__["mount.umount"](real_name)
if unmount_result is True:
mount_result = __salt__["mount.mount"](
real_name,
device,
mkmnt=mkmnt,
fstype=fstype,
opts=opts,
)
ret["result"] = mount_result
else:
ret["result"] = False
ret["comment"] = "Unable to unmount {}: {}.".format(
real_name, unmount_result
)
return ret
else:
ret["changes"]["umount"] = (
"Forced remount because "
+ "options ({}) changed".format(opt)
)
remount_result = __salt__["mount.remount"](
real_name,
device,
mkmnt=mkmnt,
fstype=fstype,
opts=opts,
)
ret["result"] = remount_result
# Cleanup after the remount, so we
# don't write remount into fstab
if "remount" in opts:
opts.remove("remount")
# Update the cache
update_mount_cache = True
mount_cache = __salt__["mount.read_mount_cache"](real_name)
if "opts" in mount_cache:
_missing = [opt for opt in mount_cache["opts"] if opt not in opts]
if _missing:
if __opts__["test"]:
ret["result"] = None
ret["comment"] = (
"Remount would be forced because"
" options ({})"
"changed".format(",".join(_missing))
)
return ret
else:
# Some file systems require umounting and mounting if options change
# add others to list that require similiar functionality
if fstype in ["nfs", "cvfs"] or fstype.startswith("fuse"):
ret["changes"]["umount"] = (
"Forced unmount and mount because "
+ "options ({}) changed".format(opt)
)
unmount_result = __salt__["mount.umount"](real_name)
if unmount_result is True:
mount_result = __salt__["mount.mount"](
real_name,
device,
mkmnt=mkmnt,
fstype=fstype,
opts=opts,
)
ret["result"] = mount_result
else:
ret["result"] = False
ret["comment"] = "Unable to unmount {}: {}.".format(
real_name, unmount_result
)
return ret
else:
ret["changes"]["umount"] = (
"Forced remount because "
+ "options ({}) changed".format(opt)
)
remount_result = __salt__["mount.remount"](
real_name,
device,
mkmnt=mkmnt,
fstype=fstype,
opts=opts,
)
ret["result"] = remount_result
# Cleanup after the remount, so we
# don't write remount into fstab
if "remount" in opts:
opts.remove("remount")
update_mount_cache = True
else:
update_mount_cache = True
if real_device not in device_list:
# name matches but device doesn't - need to umount
_device_mismatch_is_ignored = None
for regex in list(device_name_regex):
for _device in device_list:
if re.match(regex, _device):
_device_mismatch_is_ignored = _device
break
if _device_mismatch_is_ignored:
ret["result"] = True
ret["comment"] = (
"An umount will not be forced "
+ "because device matched device_name_regex: "
+ _device_mismatch_is_ignored
)
elif __opts__["test"]:
ret["result"] = None
ret["comment"] = (
"An umount would have been forced "
+ "because devices do not match. Watched: "
+ device
)
else:
ret["changes"]["umount"] = (
"Forced unmount because devices "
+ "don't match. Wanted: "
+ device
)
if real_device != device:
ret["changes"]["umount"] += " (" + real_device + ")"
ret["changes"]["umount"] += ", current: " + ", ".join(device_list)
out = __salt__["mount.umount"](real_name, user=user)
active = __salt__["mount.active"](extended=True)
if real_name in active:
ret["comment"] = "Unable to unmount"
ret["result"] = None
return ret
update_mount_cache = True
else:
ret["comment"] = "Target was already mounted"
# using a duplicate check so I can catch the results of a umount
if real_name not in active:
if mount:
# The mount is not present! Mount it
if __opts__["test"]:
ret["result"] = None
if os.path.exists(name):
ret["comment"] = "{} would be mounted".format(name)
elif mkmnt:
ret["comment"] = "{} would be created and mounted".format(name)
else:
ret[
"comment"
] = "{} does not exist and would not be created".format(name)
return ret
if not os.path.exists(name) and not mkmnt:
ret["result"] = False
ret["comment"] = "Mount directory is not present"
return ret
out = __salt__["mount.mount"](name, device, mkmnt, fstype, opts, user=user)
active = __salt__["mount.active"](extended=True)
update_mount_cache = True
if isinstance(out, str):
# Failed to (re)mount, the state has failed!
ret["comment"] = out
ret["result"] = False
return ret
elif real_name in active:
# (Re)mount worked!
ret["comment"] = "Target was successfully mounted"
ret["changes"]["mount"] = True
elif not os.path.exists(name):
if __opts__["test"]:
ret["result"] = None
if mkmnt:
ret["comment"] = "{} would be created, but not mounted".format(name)
else:
ret[
"comment"
] = "{} does not exist and would neither be created nor mounted".format(
name
)
elif mkmnt:
__salt__["file.mkdir"](name, user=user)
ret["comment"] = "{} was created, not mounted".format(name)
else:
ret["comment"] = "{} not present and not mounted".format(name)
else:
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "{} would not be mounted".format(name)
else:
ret["comment"] = "{} not mounted".format(name)
if persist:
if "/etc/fstab" == config:
# Override default for Mac OS
if __grains__["os"] in ["MacOS", "Darwin"]:
config = "/etc/auto_salt"
# Override default for AIX
elif "AIX" in __grains__["os"]:
config = "/etc/filesystems"
if __opts__["test"]:
if __grains__["os"] in ["MacOS", "Darwin"]:
out = __salt__["mount.set_automaster"](
name, device, fstype, opts, config, test=True
)
elif __grains__["os"] in ["AIX"]:
out = __salt__["mount.set_filesystems"](
name,
device,
fstype,
opts,
mount,
config,
test=True,
match_on=match_on,
)
else:
out = __salt__["mount.set_fstab"](
name,
device,
fstype,
opts,
dump,
pass_num,
config,
test=True,
match_on=match_on,
)
if out != "present":
ret["result"] = None
if out == "new":
if mount:
comment = (
"{} is mounted, but needs to be "
"written to the fstab in order to be "
"made persistent.".format(name)
)
else:
comment = (
"{} needs to be "
"written to the fstab in order to be "
"made persistent.".format(name)
)
elif out == "change":
if mount:
comment = "{} is mounted, but its fstab entry must be updated.".format(
name
)
else:
comment = "The {} fstab entry must be updated.".format(name)
else:
ret["result"] = False
comment = (
"Unable to detect fstab status for "
"mount point {} due to unexpected "
"output '{}' from call to "
"mount.set_fstab. This is most likely "
"a bug.".format(name, out)
)
if "comment" in ret:
ret["comment"] = "{}. {}".format(ret["comment"], comment)
else:
ret["comment"] = comment
return ret
else:
if __grains__["os"] in ["MacOS", "Darwin"]:
out = __salt__["mount.set_automaster"](
name, device, fstype, opts, config
)
elif __grains__["os"] in ["AIX"]:
out = __salt__["mount.set_filesystems"](
name, device, fstype, opts, mount, config, match_on=match_on
)
else:
out = __salt__["mount.set_fstab"](
name,
device,
fstype,
opts,
dump,
pass_num,
config,
match_on=match_on,
)
if update_mount_cache:
cache_result = __salt__["mount.write_mount_cache"](
real_name, device, mkmnt=mkmnt, fstype=fstype, mount_opts=opts
)
if out == "present":
ret["comment"] += ". Entry already exists in the fstab."
return ret
if out == "new":
ret["changes"]["persist"] = "new"
ret["comment"] += ". Added new entry to the fstab."
return ret
if out == "change":
ret["changes"]["persist"] = "update"
ret["comment"] += ". Updated the entry in the fstab."
return ret
if out == "bad config":
ret["result"] = False
ret["comment"] += ". However, the fstab was not found."
return ret
return ret
def swap(name, persist=True, config="/etc/fstab"):
    """
    Activates a swap device

    .. code-block:: yaml

        /root/swapfile:
          mount.swap

    .. note::
        ``swap`` does not currently support LABEL
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    on_ = __salt__["mount.swaps"]()

    # Resolve symlinks: mount.swaps reports real device paths, so compare
    # against the link target rather than the symlink name.
    if __salt__["file.is_link"](name):
        real_swap_device = __salt__["file.readlink"](name)
        if not real_swap_device.startswith("/"):
            # Relative link target; assume it lives under /dev.
            # NOTE(review): this assumption is not verified here.
            real_swap_device = "/dev/{}".format(os.path.basename(real_swap_device))
    else:
        real_swap_device = name

    if real_swap_device in on_:
        ret["comment"] = "Swap {} already active".format(name)
    elif __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Swap {} is set to be activated".format(name)
    else:
        __salt__["mount.swapon"](real_swap_device)

        # Re-read the active swaps to confirm activation took effect.
        on_ = __salt__["mount.swaps"]()

        if real_swap_device in on_:
            ret["comment"] = "Swap {} activated".format(name)
            ret["changes"] = on_[real_swap_device]
        else:
            ret["comment"] = "Swap {} failed to activate".format(name)
            ret["result"] = False

    if persist:
        device_key_name = "device"
        if "AIX" in __grains__["os"]:
            # AIX keeps its entries in /etc/filesystems under the "dev" key.
            device_key_name = "dev"
            if "/etc/fstab" == config:
                # Override default for AIX
                config = "/etc/filesystems"
            fstab_data = __salt__["mount.filesystems"](config)
        else:
            fstab_data = __salt__["mount.fstab"](config)
        if __opts__["test"]:
            # NOTE(review): the comprehension uses the literal "device" key
            # even on AIX where entries use "dev" — confirm intended.
            if name not in fstab_data and name not in [
                fstab_data[item]["device"] for item in fstab_data
            ]:
                ret["result"] = None
                if name in on_:
                    ret[
                        "comment"
                    ] = "Swap {} is set to be added to the fstab and to be activated".format(
                        name
                    )
            return ret

        # Swap entries are written with fs_file "none"; if an unrelated
        # non-swap entry claims "none" for this device, leave it alone.
        if "none" in fstab_data:
            if (
                fstab_data["none"][device_key_name] == name
                and fstab_data["none"]["fstype"] != "swap"
            ):
                return ret

        if "AIX" in __grains__["os"]:
            # Persisting swap entries is not implemented for AIX.
            out = None
            ret["result"] = False
            ret["comment"] += ". swap not present in /etc/filesystems on AIX."
            return ret
        else:
            # present, new, change, bad config
            # Make sure the entry is in the fstab
            out = __salt__["mount.set_fstab"](
                "none", name, "swap", ["defaults"], 0, 0, config
            )
        if out == "present":
            return ret
        if out == "new":
            ret["changes"]["persist"] = "new"
            ret["comment"] += ". Added new entry to the fstab."
            return ret
        if out == "change":
            ret["changes"]["persist"] = "update"
            ret["comment"] += ". Updated the entry in the fstab."
            return ret
        if out == "bad config":
            ret["result"] = False
            ret["comment"] += ". However, the fstab was not found."
            return ret
    return ret
def unmounted(
    name, device=None, config="/etc/fstab", persist=False, user=None, **kwargs
):
    """
    .. versionadded:: 0.17.0

    Verify that a device is not mounted

    name
        The path to the location where the device is to be unmounted from

    device
        The device to be unmounted. This is optional because the device could
        be mounted in multiple places.

        .. versionadded:: 2015.5.0

    config
        Set an alternative location for the fstab, Default is ``/etc/fstab``

    persist
        Set if the mount should be purged from the fstab, Default is ``False``

    user
        The user to own the mount; this defaults to the user salt is
        running as on the minion
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    update_mount_cache = False

    if not name:
        ret["result"] = False
        ret["comment"] = "Must provide name to mount.unmounted"
        return ret

    # Get the active data
    active = __salt__["mount.active"](extended=True)
    if name not in active:
        # Nothing to unmount; fall through so the persist handling below
        # can still purge a stale fstab entry.
        ret["comment"] = "Target was already unmounted"
    if name in active:
        # The mount is present! Unmount it
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Mount point {} is mounted but should not be".format(name)
            return ret
        if device:
            out = __salt__["mount.umount"](name, device, user=user)
            update_mount_cache = True
        else:
            out = __salt__["mount.umount"](name, user=user)
            update_mount_cache = True
        if isinstance(out, str):
            # mount.umount returns an error string on failure.
            # Failed to umount, the state has failed!
            ret["comment"] = out
            ret["result"] = False
        elif out is True:
            # umount worked!
            ret["comment"] = "Target was successfully unmounted"
            ret["changes"]["umount"] = True
        else:
            ret["comment"] = "Execute set to False, Target was not unmounted"
            ret["result"] = True

    if update_mount_cache:
        # Return value is intentionally ignored; cache cleanup is best-effort.
        cache_result = __salt__["mount.delete_mount_cache"](name)

    if persist:
        device_key_name = "device"
        # Override default for Mac OS
        if __grains__["os"] in ["MacOS", "Darwin"] and config == "/etc/fstab":
            config = "/etc/auto_salt"
            fstab_data = __salt__["mount.automaster"](config)
        elif "AIX" in __grains__["os"]:
            # AIX stores entries in /etc/filesystems under the "dev" key.
            device_key_name = "dev"
            if config == "/etc/fstab":
                config = "/etc/filesystems"
            fstab_data = __salt__["mount.filesystems"](config)
        else:
            fstab_data = __salt__["mount.fstab"](config)

        if name not in fstab_data:
            ret["comment"] += ". fstab entry not found"
        else:
            if device:
                # Only purge when the entry matches the requested device.
                if fstab_data[name][device_key_name] != device:
                    ret["comment"] += ". fstab entry for device {} not found".format(
                        device
                    )
                    return ret
            if __opts__["test"]:
                ret["result"] = None
                ret["comment"] = (
                    "Mount point {} is unmounted but needs to "
                    "be purged from {} to be made "
                    "persistent".format(name, config)
                )
                return ret
            else:
                if __grains__["os"] in ["MacOS", "Darwin"]:
                    out = __salt__["mount.rm_automaster"](name, device, config)
                elif "AIX" in __grains__["os"]:
                    out = __salt__["mount.rm_filesystems"](name, device, config)
                else:
                    out = __salt__["mount.rm_fstab"](name, device, config)
                if out is not True:
                    ret["result"] = False
                    ret["comment"] += ". Failed to persist purge"
                else:
                    ret["comment"] += ". Removed target from fstab"
                    ret["changes"]["persist"] = "purged"

    return ret
def mod_watch(name, user=None, **kwargs):
    """
    The mounted watcher, called to invoke the watch command.

    .. note::
        This state exists to support special handling of the ``watch``
        :ref:`requisite <requisites>`. It should not be called directly.

        Parameters for this function should be set by the state being triggered.

    name
        The name of the mount point
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    if kwargs["sfun"] == "mounted":
        out = __salt__["mount.remount"](
            name, kwargs["device"], False, kwargs["fstype"], kwargs["opts"], user=user
        )
        # mount.remount returns an error *string* on failure (the same
        # convention checked with isinstance() elsewhere in this file).
        # A bare ``if out:`` treated that truthy string as success, which
        # also made the "failed to remount: {}" message unreachable.
        if out and not isinstance(out, str):
            ret["comment"] = "{} remounted".format(name)
        else:
            ret["result"] = False
            ret["comment"] = "{} failed to remount: {}".format(name, out)
    else:
        ret["comment"] = "Watch not supported in {} at this time".format(kwargs["sfun"])
    return ret
def _convert_to(maybe_device, convert_to):
"""
Convert a device name, UUID or LABEL to a device name, UUID or
LABEL.
Return the fs_spec required for fstab.
"""
# Fast path. If we already have the information required, we can
# save one blkid call
if (
not convert_to
or (convert_to == "device" and maybe_device.startswith("/"))
or maybe_device.startswith("{}=".format(convert_to.upper()))
):
return maybe_device
# Get the device information
if maybe_device.startswith("/"):
blkid = __salt__["disk.blkid"](maybe_device)
else:
blkid = __salt__["disk.blkid"](token=maybe_device)
result = None
if len(blkid) == 1:
if convert_to == "device":
result = next(iter(blkid))
else:
key = convert_to.upper()
result = "{}={}".format(key, next(iter(blkid.values()))[key])
return result
def fstab_present(
    name,
    fs_file,
    fs_vfstype,
    fs_mntops="defaults",
    fs_freq=0,
    fs_passno=0,
    mount_by=None,
    config="/etc/fstab",
    mount=True,
    match_on="auto",
    not_change=False,
    fs_mount=True,
):
    """Makes sure that a fstab mount point is present.

    name
        The name of block device. Can be any valid fs_spec value.

    fs_file
        Mount point (target) for the filesystem.

    fs_vfstype
        The type of the filesystem (e.g. ext4, xfs, btrfs, ...)

    fs_mntops
        The mount options associated with the filesystem. Default is
        ``defaults``.

    fs_freq
        Field is used by dump to determine which fs need to be
        dumped. Default is ``0``

    fs_passno
        Field is used by fsck to determine the order in which
        filesystem checks are done at boot time. Default is ``0``

    fs_mount
        Field is used only in AIX systems to determine if the
        filesystem will be mounted by ``mount all``

    mount_by
        Select the final value for fs_spec. Can be [``None``,
        ``device``, ``label``, ``uuid``, ``partlabel``,
        ``partuuid``]. If ``None``, the value for fs_spect will be the
        parameter ``name``, in other case will search the correct
        value based on the device name. For example, for ``uuid``, the
        value for fs_spec will be of type 'UUID=xxx' instead of the
        device name set in ``name``.

    config
        Place where the fstab file lives. Default is ``/etc/fstab``

    mount
        Set if the mount should be mounted immediately. Default is
        ``True``

    match_on
        A name or list of fstab properties on which this state should
        be applied. Default is ``auto``, a special value indicating
        to guess based on fstype. In general, ``auto`` matches on
        name for recognized special devices and device otherwise.

    not_change
        By default, if the entry is found in the fstab file but is
        different from the expected content (like different options),
        the entry will be replaced with the correct content. If this
        parameter is set to ``True`` and the line is found, the
        original content will be preserved.
    """
    ret = {
        "name": name,
        "result": False,
        "changes": {},
        "comment": [],
    }

    # Adjust fs_mntops based on the OS
    if fs_mntops == "defaults":
        if __grains__["os"] in ["MacOS", "Darwin"]:
            fs_mntops = "noowners"
        elif __grains__["os"] == "AIX":
            fs_mntops = ""

    # Adjust the config file based on the OS
    if config == "/etc/fstab":
        if __grains__["os"] in ["MacOS", "Darwin"]:
            config = "/etc/auto_salt"
        elif __grains__["os"] == "AIX":
            config = "/etc/filesystems"

    # Normalize the mount point: strip a trailing slash except for "/"
    if not fs_file == "/":
        fs_file = fs_file.rstrip("/")

    fs_spec = _convert_to(name, mount_by)

    # Validate that the device is valid after the conversion
    if not fs_spec:
        msg = "Device {} cannot be converted to {}"
        ret["comment"].append(msg.format(name, mount_by))
        return ret

    if __opts__["test"]:
        # Dry-run: ask the platform-specific setter what it *would* do.
        if __grains__["os"] in ["MacOS", "Darwin"]:
            out = __salt__["mount.set_automaster"](
                name=fs_file,
                device=fs_spec,
                fstype=fs_vfstype,
                opts=fs_mntops,
                config=config,
                test=True,
                not_change=not_change,
            )
        elif __grains__["os"] == "AIX":
            out = __salt__["mount.set_filesystems"](
                name=fs_file,
                device=fs_spec,
                fstype=fs_vfstype,
                opts=fs_mntops,
                mount=fs_mount,
                config=config,
                test=True,
                match_on=match_on,
                not_change=not_change,
            )
        else:
            out = __salt__["mount.set_fstab"](
                name=fs_file,
                device=fs_spec,
                fstype=fs_vfstype,
                opts=fs_mntops,
                dump=fs_freq,
                pass_num=fs_passno,
                config=config,
                test=True,
                match_on=match_on,
                not_change=not_change,
            )
        ret["result"] = None
        if out == "present":
            msg = "{} entry is already in {}."
            ret["comment"].append(msg.format(fs_file, config))
        elif out == "new":
            msg = "{} entry will be written in {}."
            ret["comment"].append(msg.format(fs_file, config))
            if mount:
                msg = "Will mount {} on {}".format(name, fs_file)
                ret["comment"].append(msg)
        elif out == "change":
            msg = "{} entry will be updated in {}."
            ret["comment"].append(msg.format(fs_file, config))
        else:
            ret["result"] = False
            msg = "{} entry cannot be created in {}: {}."
            ret["comment"].append(msg.format(fs_file, config, out))
        return ret

    # Real run: write the entry with the platform-specific setter.
    if __grains__["os"] in ["MacOS", "Darwin"]:
        out = __salt__["mount.set_automaster"](
            name=fs_file,
            device=fs_spec,
            fstype=fs_vfstype,
            opts=fs_mntops,
            config=config,
            not_change=not_change,
        )
    elif __grains__["os"] == "AIX":
        out = __salt__["mount.set_filesystems"](
            name=fs_file,
            device=fs_spec,
            fstype=fs_vfstype,
            opts=fs_mntops,
            mount=fs_mount,
            config=config,
            match_on=match_on,
            not_change=not_change,
        )
    else:
        out = __salt__["mount.set_fstab"](
            name=fs_file,
            device=fs_spec,
            fstype=fs_vfstype,
            opts=fs_mntops,
            dump=fs_freq,
            pass_num=fs_passno,
            config=config,
            match_on=match_on,
            not_change=not_change,
        )

    ret["result"] = True
    if out == "present":
        msg = "{} entry was already in {}."
        ret["comment"].append(msg.format(fs_file, config))
    elif out == "new":
        ret["changes"]["persist"] = out
        msg = "{} entry added in {}."
        ret["comment"].append(msg.format(fs_file, config))
        if mount:
            out = __salt__["mount.mount"](fs_file)
            # mount.mount returns an error string on failure; use
            # isinstance (not ``type(out) == str``) per PEP 8, and
            # split with [-1] so a message without ":" cannot raise
            # IndexError (same result as [1] when a colon is present).
            if isinstance(out, str):
                ret["result"] = False
                msg = "Error while mounting {}".format(out.split(":", maxsplit=1)[-1])
            else:
                msg = "Mounted {} on {}".format(name, fs_file)
            ret["comment"].append(msg)
    elif out == "change":
        ret["changes"]["persist"] = out
        msg = "{} entry updated in {}."
        ret["comment"].append(msg.format(fs_file, config))
    else:
        ret["result"] = False
        msg = "{} entry cannot be changed in {}: {}."
        ret["comment"].append(msg.format(fs_file, config, out))
    return ret
def fstab_absent(name, fs_file, mount_by=None, config="/etc/fstab"):
    """
    Makes sure that a fstab mount point is absent.

    name
        The name of block device. Can be any valid fs_spec value.

    fs_file
        Mount point (target) for the filesystem.

    mount_by
        Select the final value for fs_spec. Can be [``None``,
        ``device``, ``label``, ``uuid``, ``partlabel``,
        ``partuuid``]. If ``None``, the value for fs_spect will be the
        parameter ``name``, in other case will search the correct
        value based on the device name. For example, for ``uuid``, the
        value for fs_spec will be of type 'UUID=xxx' instead of the
        device name set in ``name``.

    config
        Place where the fstab file lives
    """
    ret = {
        "name": name,
        "result": False,
        "changes": {},
        "comment": [],
    }

    # Pick the platform-specific config file when the caller kept the
    # Linux default.
    is_macos = __grains__["os"] in ["MacOS", "Darwin"]
    is_aix = __grains__["os"] == "AIX"
    if config == "/etc/fstab":
        if is_macos:
            config = "/etc/auto_salt"
        elif is_aix:
            config = "/etc/filesystems"

    # Normalize the mount point: strip a trailing slash except for "/"
    if fs_file != "/":
        fs_file = fs_file.rstrip("/")

    fs_spec = _convert_to(name, mount_by)

    # Read the current entries with the platform-specific reader.
    if is_macos:
        fstab_data = __salt__["mount.automaster"](config)
    elif is_aix:
        fstab_data = __salt__["mount.filesystems"](config)
    else:
        fstab_data = __salt__["mount.fstab"](config)

    if __opts__["test"]:
        ret["result"] = None
        if fs_file in fstab_data:
            ret["comment"].append(
                "{} entry will be removed from {}.".format(fs_file, config)
            )
        else:
            ret["comment"].append(
                "{} entry is already missing in {}.".format(fs_file, config)
            )
        return ret

    if fs_file not in fstab_data:
        # Nothing to do - the entry is gone already.
        ret["result"] = True
        ret["comment"].append(
            "{} entry is already missing in {}.".format(fs_file, config)
        )
        return ret

    # Remove the entry with the platform-specific writer.
    if is_macos:
        out = __salt__["mount.rm_automaster"](
            name=fs_file, device=fs_spec, config=config
        )
    elif is_aix:
        out = __salt__["mount.rm_filesystems"](
            name=fs_file, device=fs_spec, config=config
        )
    else:
        out = __salt__["mount.rm_fstab"](
            name=fs_file, device=fs_spec, config=config
        )

    if out is True:
        ret["result"] = True
        ret["changes"]["persist"] = "removed"
        ret["comment"].append("{} entry removed from {}.".format(fs_file, config))
    else:
        ret["result"] = False
        ret["comment"].append(
            "{} entry failed when removing from {}.".format(fs_file, config)
        )
    return ret
# Expose ``set_`` to state files as ``alternatives.set`` — the function is
# named ``set_`` so it does not shadow the Python builtin ``set``.
__func_alias__ = {"set_": "set"}
def __virtual__():
    """
    Only load if alternatives execution module is available.

    Returns True when the ``alternatives`` execution module is loaded,
    otherwise a (False, reason) tuple so the loader can report why.
    """
    if "alternatives.auto" in __salt__:
        return True
    # Fixed typo in the loader message: "alernatives" -> "alternatives".
    return (False, "alternatives module could not be loaded")
def install(name, link, path, priority):
    """
    Install new alternative for defined <name>

    name
        is the master name for this link group
        (e.g. pager)

    link
        is the symlink pointing to /etc/alternatives/<name>.
        (e.g. /usr/bin/pager)

    path
        is the location of the new alternative target.
        NB: This file / directory must already exist.
        (e.g. /usr/bin/less)

    priority
        is an integer; options with higher numbers have higher priority in
        automatic mode.
    """
    ret = {
        "name": name,
        "link": link,
        "path": path,
        "priority": priority,
        "result": True,
        "changes": {},
        "comment": "",
    }

    # Nothing to do when this path is already registered for the group.
    if __salt__["alternatives.check_exists"](name, path):
        ret["comment"] = "Alternative {} for {} is already registered".format(
            path, name
        )
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret[
            "comment"
        ] = "Alternative will be set for {} to {} with priority {}".format(
            name, path, priority
        )
        return ret

    out = __salt__["alternatives.install"](name, link, path, priority)
    if not __salt__["alternatives.check_exists"](name, path):
        # Registration failed; surface the command output in the comment.
        ret["result"] = False
        ret["comment"] = "Alternative for {} not installed: {}".format(name, out)
        return ret

    # Registered; report whether it also became the active default.
    if __salt__["alternatives.check_installed"](name, path):
        ret[
            "comment"
        ] = "Alternative for {} set to path {} with priority {}".format(
            name, path, priority
        )
    else:
        ret["comment"] = (
            "Alternative {} for {} registered with priority {} and "
            "not set to default".format(path, name, priority)
        )
    ret["changes"] = {
        "name": name,
        "link": link,
        "path": path,
        "priority": priority,
    }
    return ret
def remove(name, path):
    """
    Removes installed alternative for defined <name> and <path>
    or fallback to default alternative, if some defined before.

    name
        is the master name for this link group
        (e.g. pager)

    path
        is the location of one of the alternative target files.
        (e.g. /usr/bin/less)
    """
    ret = {"name": name, "path": path, "result": True, "changes": {}, "comment": ""}

    if not __salt__["alternatives.check_exists"](name, path):
        # The path is not registered for this group; just report state.
        fallback = __salt__["alternatives.show_current"](name)
        if fallback:
            ret["result"] = True
            ret["comment"] = "Alternative for {} is set to it's default path {}".format(
                name, fallback
            )
        else:
            ret["result"] = False
            ret["comment"] = "Alternative for {} doesn't exist".format(name)
        return ret

    if __opts__["test"]:
        ret["comment"] = "Alternative for {} will be removed".format(name)
        ret["result"] = None
        return ret

    __salt__["alternatives.remove"](name, path)
    fallback = __salt__["alternatives.show_current"](name)
    if fallback:
        # Another registered alternative took over as the group default.
        ret["result"] = True
        ret[
            "comment"
        ] = "Alternative for {} removed. Falling back to path {}".format(
            name, fallback
        )
        ret["changes"] = {"path": fallback}
        return ret

    ret["comment"] = "Alternative for {} removed".format(name)
    ret["changes"] = {}
    return ret
def auto(name):
    """
    .. versionadded:: 0.17.0

    Instruct alternatives to use the highest priority
    path for <name>

    name
        is the master name for this link group
        (e.g. pager)
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    # The first line of ``alternatives --display`` states the current mode.
    first_line = __salt__["alternatives.display"](name).splitlines()[0]
    if first_line.endswith(" auto mode"):
        ret["comment"] = "{} already in auto mode".format(name)
        return ret

    if __opts__["test"]:
        ret["comment"] = "{} will be put in auto mode".format(name)
        ret["result"] = None
        return ret

    ret["changes"]["result"] = __salt__["alternatives.auto"](name)
    return ret
def set_(name, path):
    """
    .. versionadded:: 0.17.0

    Sets alternative for <name> to <path>, if <path> is defined
    as an alternative for <name>.

    name
        is the master name for this link group
        (e.g. pager)

    path
        is the location of one of the alternative target files.
        (e.g. /usr/bin/less)

    .. code-block:: yaml

        foo:
          alternatives.set:
            - path: /usr/bin/foo-2.0
    """
    ret = {"name": name, "path": path, "result": True, "changes": {}, "comment": ""}

    if __salt__["alternatives.show_current"](name) == path:
        ret["comment"] = "Alternative for {} already set to {}".format(name, path)
        return ret

    # ``path`` must appear as a registered alternative in the display
    # output before it can be selected.
    # NOTE(review): startswith() can also match a longer sibling path
    # (e.g. /usr/bin/foo matching /usr/bin/foo-2.0) — confirm acceptable.
    display = __salt__["alternatives.display"](name)
    registered = any(line.startswith(path) for line in display.splitlines())
    if not registered:
        ret["result"] = False
        ret["comment"] = "Alternative {} for {} doesn't exist".format(path, name)
        return ret

    if __opts__["test"]:
        ret["comment"] = "Alternative for {} will be set to path {}".format(name, path)
        ret["result"] = None
        return ret

    __salt__["alternatives.set"](name, path)
    current = __salt__["alternatives.show_current"](name)
    if current == path:
        ret["comment"] = "Alternative for {} set to path {}".format(name, current)
        ret["changes"] = {"path": current}
    else:
        ret["comment"] = "Alternative for {} not updated".format(name)
    return ret
def __virtual__():
    """
    Only load if the splunk module is available in __salt__
    """
    if "splunk.list_users" not in __salt__:
        return (False, "splunk module could not be loaded")
    return "splunk"
def present(email, profile="splunk", **kwargs):
    """
    Ensure a user is present

    .. code-block:: yaml

        ensure example test user 1:
            splunk.user_present:
                - realname: 'Example TestUser1'
                - name: 'exampleuser'
                - email: 'example@domain.com'
                - roles: ['user']

    The following parameters are required:

    email
        This is the email of the user in splunk
    """
    name = kwargs.get("name")
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}

    target = __salt__["splunk.get_user"](email, profile=profile, user_details=True)

    if not target:
        if __opts__["test"]:
            ret["comment"] = "User {} will be created".format(name)
            return ret

        # create the user
        result = __salt__["splunk.create_user"](email, profile=profile, **kwargs)
        if result:
            ret["changes"].setdefault("old", None)
            ret["changes"].setdefault("new", "User {} exists".format(name))
            ret["result"] = True
        else:
            ret["result"] = False
            ret["comment"] = "Failed to create {}".format(name)
        return ret
    else:
        # Provisional comment; overwritten below when no update happens.
        ret["comment"] = "User {} set to be updated.".format(name)
        if __opts__["test"]:
            ret["result"] = None
            return ret

        # found a user... updating
        result = __salt__["splunk.update_user"](email, profile, **kwargs)
        if isinstance(result, bool) and result:
            # no update
            ret["result"] = None
            ret["comment"] = "No changes"
        else:
            # Diff the tracked fields between the previous and updated user.
            # "roles" is compared as an unordered set (symmetric difference).
            diff = {}
            for field in [
                "name",
                "realname",
                "roles",
                "defaultApp",
                "tz",
                "capabilities",
            ]:
                if field == "roles":
                    diff["roles"] = list(
                        set(target.get(field, [])).symmetric_difference(
                            set(result.get(field, []))
                        )
                    )
                elif target.get(field) != result.get(field):
                    diff[field] = result.get(field)
            newvalues = result
            ret["result"] = True
            ret["changes"]["diff"] = diff
            ret["changes"]["old"] = target
            ret["changes"]["new"] = newvalues
    return ret
def absent(email, profile="splunk", **kwargs):
    """
    Ensure a splunk user is absent

    .. code-block:: yaml

        ensure example test user 1:
            splunk.absent:
                - email: 'example@domain.com'
                - name: 'exampleuser'

    The following parameters are required:

    email
        This is the email of the user in splunk

    name
        This is the splunk username used to identify the user.
    """
    user_identity = kwargs.get("name")
    ret = {
        "name": user_identity,
        "changes": {},
        "result": None,
        "comment": "User {} is absent.".format(user_identity),
    }

    # Nothing to do when the user is already gone.
    if not __salt__["splunk.get_user"](email, profile=profile):
        ret["result"] = True
        ret["comment"] = "User {} does not exist".format(user_identity)
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "User {} is all set to be deleted".format(user_identity)
        return ret

    if __salt__["splunk.delete_user"](email, profile=profile):
        ret["result"] = True
        ret["comment"] = "Deleted user {}".format(user_identity)
        ret["changes"].setdefault("old", "User {} exists".format(user_identity))
        ret["changes"].setdefault("new", "User {} deleted".format(user_identity))
    else:
        ret["result"] = False
        ret["comment"] = "Failed to delete {}".format(user_identity)

    return ret
import salt.utils.dictupdate as dictupdate
from requests.exceptions import HTTPError
from salt.utils.dictdiffer import deep_diff
def __virtual__():
    """Only load if grafana4 module is available"""
    if "grafana4.get_org" not in __salt__:
        return (False, "grafana4 module could not be loaded")
    return True
def present(
    name,
    users=None,
    theme=None,
    home_dashboard_id=None,
    timezone=None,
    address1=None,
    address2=None,
    city=None,
    zip_code=None,
    address_state=None,
    country=None,
    profile="grafana",
):
    """
    Ensure that an organization is present.

    name
        Name of the org.

    users
        Optional - Dict of user/role associated with the org. Mapping a user
        to ``False`` removes them from the org. Example:

        .. code-block:: yaml

            users:
              foo: Viewer
              bar: Editor

    theme
        Optional - Selected theme for the org.

    home_dashboard_id
        Optional - Home dashboard for the org.

    timezone
        Optional - Timezone for the org (one of: "browser", "utc", or "").

    address1
        Optional - address1 of the org.

    address2
        Optional - address2 of the org.

    city
        Optional - city of the org.

    zip_code
        Optional - zip_code of the org.

    address_state
        Optional - state of the org.

    country
        Optional - country of the org.

    profile
        Configuration profile used to connect to the Grafana instance.
        Default is 'grafana'.
    """
    # A string profile names a config option holding the connection details;
    # resolve it to the actual configuration dict.
    if isinstance(profile, str):
        profile = __salt__["config.option"](profile)
    ret = {"name": name, "result": None, "comment": None, "changes": {}}
    create = False
    # grafana4.get_org raises HTTPError on failure; a 404 simply means the
    # org does not exist yet and must be created.
    try:
        org = __salt__["grafana4.get_org"](name, profile)
    except HTTPError as e:
        if e.response.status_code == 404:
            create = True
        else:
            raise
    if create:
        if __opts__["test"]:
            ret["comment"] = "Org {} will be created".format(name)
            return ret
        __salt__["grafana4.create_org"](profile=profile, name=name)
        # Re-fetch so the reported changes reflect the server-side state.
        org = __salt__["grafana4.get_org"](name, profile)
        ret["changes"] = org
        ret["comment"] = "New org {} added".format(name)
    # Build the desired address payload; any field left as None falls back
    # to the org's current address value, so an unchanged address compares
    # equal below.
    data = _get_json_data(
        address1=address1,
        address2=address2,
        city=city,
        zipCode=zip_code,
        state=address_state,
        country=country,
        defaults=org["address"],
    )
    if data != org["address"]:
        if __opts__["test"]:
            ret["comment"] = "Org {} address will be updated".format(name)
            return ret
        __salt__["grafana4.update_org_address"](name, profile=profile, **data)
        # On create, ret["changes"] is the full org dict, so merge into its
        # nested "address" entry; otherwise report only the diff.
        if create:
            dictupdate.update(ret["changes"]["address"], data)
        else:
            dictupdate.update(ret["changes"], deep_diff(org["address"], data))
    # Same pattern for org preferences: desired values with current prefs
    # as defaults, updated only when they differ.
    prefs = __salt__["grafana4.get_org_prefs"](name, profile=profile)
    data = _get_json_data(
        theme=theme,
        homeDashboardId=home_dashboard_id,
        timezone=timezone,
        defaults=prefs,
    )
    if data != prefs:
        if __opts__["test"]:
            ret["comment"] = "Org {} prefs will be updated".format(name)
            return ret
        __salt__["grafana4.update_org_prefs"](name, profile=profile, **data)
        if create:
            dictupdate.update(ret["changes"], data)
        else:
            dictupdate.update(ret["changes"], deep_diff(prefs, data))
    if users:
        # Index current org membership by login for O(1) lookups below.
        db_users = {}
        for item in __salt__["grafana4.get_org_users"](name, profile=profile):
            db_users[item["login"]] = {
                "userId": item["userId"],
                "role": item["role"],
            }
        for username, role in users.items():
            if username in db_users:
                # role=False means the user should be removed from the org.
                if role is False:
                    if __opts__["test"]:
                        ret["comment"] = "Org {} user {} will be deleted".format(
                            name, username
                        )
                        return ret
                    __salt__["grafana4.delete_org_user"](
                        db_users[username]["userId"], profile=profile
                    )
                elif role != db_users[username]["role"]:
                    if __opts__["test"]:
                        ret["comment"] = "Org {} user {} role will be updated".format(
                            name, username
                        )
                        return ret
                    __salt__["grafana4.update_org_user"](
                        db_users[username]["userId"],
                        loginOrEmail=username,
                        role=role,
                        profile=profile,
                    )
            elif role:
                if __opts__["test"]:
                    ret["comment"] = "Org {} user {} will be created".format(
                        name, username
                    )
                    return ret
                __salt__["grafana4.create_org_user"](
                    loginOrEmail=username, role=role, profile=profile
                )
        # Re-read membership after the edits to report actual changes.
        new_db_users = {}
        for item in __salt__["grafana4.get_org_users"](name, profile=profile):
            new_db_users[item["login"]] = {
                "userId": item["userId"],
                "role": item["role"],
            }
        if create:
            dictupdate.update(ret["changes"], new_db_users)
        else:
            dictupdate.update(ret["changes"], deep_diff(db_users, new_db_users))
    ret["result"] = True
    if not create:
        if ret["changes"]:
            ret["comment"] = "Org {} updated".format(name)
        else:
            ret["changes"] = {}
            ret["comment"] = "Org {} already up-to-date".format(name)
    return ret
def absent(name, profile="grafana"):
    """
    Ensure that an org is absent.

    name
        Name of the org to remove.

    profile
        Configuration profile used to connect to the Grafana instance.
        Default is 'grafana'.
    """
    # Resolve a string profile name to the configured connection details.
    if isinstance(profile, str):
        profile = __salt__["config.option"](profile)

    ret = {"name": name, "result": None, "comment": None, "changes": {}}
    org = __salt__["grafana4.get_org"](name, profile)

    if not org:
        ret["result"] = True
        ret["comment"] = "Org {} already absent".format(name)
        return ret

    if __opts__["test"]:
        ret["comment"] = "Org {} will be deleted".format(name)
        return ret

    __salt__["grafana4.delete_org"](org["id"], profile=profile)
    ret["result"] = True
    ret["changes"][name] = "Absent"
    ret["comment"] = "Org {} was deleted".format(name)
    return ret
def _get_json_data(defaults=None, **kwargs):
if defaults is None:
defaults = {}
for k, v in kwargs.items():
if v is None:
kwargs[k] = defaults.get(k)
return kwargs | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/grafana4_org.py | 0.566139 | 0.192729 | grafana4_org.py | pypi |
import salt.utils.args

# Identifier of the default SMTP virtual server instance ("SmtpSvc/1"),
# used as the fallback `server` argument throughout this state module.
_DEFAULT_SERVER = "SmtpSvc/1"
def __virtual__():
    """
    Load only on minions that have the win_smtp_server module.
    """
    if "win_smtp_server.get_server_setting" not in __salt__:
        return (False, "win_smtp_server module could not be loaded")
    return True
def _merge_dicts(*args):
"""
Shallow copy and merge dicts together, giving precedence to last in.
"""
ret = dict()
for arg in args:
ret.update(arg)
return ret
def _normalize_server_settings(**settings):
    """
    Convert setting values that were improperly parsed into a dict back to
    their original braced-string form.
    """
    settings = salt.utils.args.clean_kwargs(**settings)
    normalized = {}
    for setting_name, value in settings.items():
        if isinstance(value, dict):
            # A literal '{data}' value gets parsed into {'data': None};
            # rebuild the braced string from the dict's single key.
            inner = next(iter(value.keys()))
            normalized[setting_name] = "{{{0}}}".format(inner)
        else:
            normalized[setting_name] = value
    return normalized
def server_setting(name, settings=None, server=_DEFAULT_SERVER):
    """
    Ensure the value is set for the specified setting.

    .. note::

        The setting names are case-sensitive.

    :param str settings: A dictionary of the setting names and their values.
    :param str server: The SMTP server name.

    Example of usage:

    .. code-block:: yaml

        smtp-settings:
          win_smtp_server.server_setting:
            - settings:
                LogType: 1
                LogFilePeriod: 1
                MaxMessageSize: 16777216
                MaxRecipients: 10000
                MaxSessionSize: 16777216
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}

    if not settings:
        ret["comment"] = "No settings to change provided."
        ret["result"] = True
        return ret

    ret_settings = {"changes": {}, "failures": {}}

    # Some fields are formatted like '{data}'. Salt/Python converts these to
    # dicts automatically on input, so convert them back to the proper format.
    # Normalize once up front instead of redoing the whole conversion on
    # every loop iteration as the previous implementation did.
    settings = _normalize_server_settings(**settings)

    current_settings = __salt__["win_smtp_server.get_server_setting"](
        settings=settings.keys(), server=server
    )

    # Compare as strings so e.g. int 1 matches a stored '1'.
    for key in settings:
        if str(settings[key]) != str(current_settings[key]):
            ret_settings["changes"][key] = {
                "old": current_settings[key],
                "new": settings[key],
            }

    if not ret_settings["changes"]:
        ret["comment"] = "Settings already contain the provided values."
        ret["result"] = True
        return ret
    elif __opts__["test"]:
        ret["comment"] = "Settings will be changed."
        ret["changes"] = ret_settings
        return ret

    __salt__["win_smtp_server.set_server_setting"](settings=settings, server=server)
    new_settings = __salt__["win_smtp_server.get_server_setting"](
        settings=settings.keys(), server=server
    )

    # Verify the write: any setting that did not take effect is a failure.
    for key in settings:
        if str(new_settings[key]) != str(settings[key]):
            ret_settings["failures"][key] = {
                "old": current_settings[key],
                "new": new_settings[key],
            }
            ret_settings["changes"].pop(key, None)

    if ret_settings["failures"]:
        ret["comment"] = "Some settings failed to change."
        ret["changes"] = ret_settings
        ret["result"] = False
    else:
        ret["comment"] = "Set settings to contain the provided values."
        ret["changes"] = ret_settings["changes"]
        ret["result"] = True

    return ret
def active_log_format(name, log_format, server=_DEFAULT_SERVER):
    """
    Manage the active log format for the SMTP server.

    :param str log_format: The log format name.
    :param str server: The SMTP server name.

    Example of usage:

    .. code-block:: yaml

        smtp-log-format:
          win_smtp_server.active_log_format:
            - log_format: Microsoft IIS Log File Format
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    current_log_format = __salt__["win_smtp_server.get_log_format"](server)

    # Already in the desired state — nothing to report.
    if log_format == current_log_format:
        ret["result"] = True
        ret[
            "comment"
        ] = "LogPluginClsid already contains the id of the provided log format."
        return ret

    ret["changes"] = {"old": current_log_format, "new": log_format}
    if __opts__["test"]:
        ret["comment"] = "LogPluginClsid will be changed."
        return ret

    ret[
        "comment"
    ] = "Set LogPluginClsid to contain the id of the provided log format."
    ret["result"] = __salt__["win_smtp_server.set_log_format"](log_format, server)
    return ret
def connection_ip_list(
    name, addresses=None, grant_by_default=False, server=_DEFAULT_SERVER
):
    """
    Manage IP list for SMTP connections.

    :param str addresses: A dictionary of IP + subnet pairs.
    :param bool grant_by_default: Whether the addresses should be a blacklist or whitelist.
    :param str server: The SMTP server name.

    Example of usage for creating a whitelist:

    .. code-block:: yaml

        smtp-connection-whitelist:
          win_smtp_server.connection_ip_list:
            - addresses:
                127.0.0.1: 255.255.255.255
                172.16.1.98: 255.255.255.255
                172.16.1.99: 255.255.255.255
            - grant_by_default: False

    Example of usage for creating a blacklist:

    .. code-block:: yaml

        smtp-connection-blacklist:
          win_smtp_server.connection_ip_list:
            - addresses:
                172.16.1.100: 255.255.255.255
                172.16.1.101: 255.255.255.255
            - grant_by_default: True

    Example of usage for allowing any source to connect:

    .. code-block:: yaml

        smtp-connection-blacklist:
          win_smtp_server.connection_ip_list:
            - addresses: {}
            - grant_by_default: True
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}

    # Treat a missing address list as an empty mapping.
    if not addresses:
        addresses = {}

    current_addresses = __salt__["win_smtp_server.get_connection_ip_list"](
        server=server
    )

    if addresses == current_addresses:
        ret["comment"] = "IPGrant already contains the provided addresses."
        ret["result"] = True
        return ret

    ret["changes"] = {"old": current_addresses, "new": addresses}
    if __opts__["test"]:
        ret["comment"] = "IPGrant will be changed."
        return ret

    ret["comment"] = "Set IPGrant to contain the provided addresses."
    ret["result"] = __salt__["win_smtp_server.set_connection_ip_list"](
        addresses=addresses, grant_by_default=grant_by_default, server=server
    )
    return ret
def relay_ip_list(name, addresses=None, server=_DEFAULT_SERVER):
    """
    Manage IP list for SMTP relay connections.

    Due to the unusual way that Windows stores the relay IPs, it is advisable
    to retrieve the existing list you wish to set from a pre-configured server.

    For example, setting '127.0.0.1' as an allowed relay IP through the GUI
    would generate an actual relay IP list similar to the following:

    .. code-block:: cfg

        ['24.0.0.128', '32.0.0.128', '60.0.0.128', '68.0.0.128', '1.0.0.0', '76.0.0.0',
         '0.0.0.0', '0.0.0.0', '1.0.0.0', '1.0.0.0', '2.0.0.0', '2.0.0.0', '4.0.0.0',
         '0.0.0.0', '76.0.0.128', '0.0.0.0', '0.0.0.0', '0.0.0.0', '0.0.0.0',
         '255.255.255.255', '127.0.0.1']

    .. note::

        Setting the list to None corresponds to the restrictive 'Only the list below'
        GUI parameter with an empty access list configured, and setting an empty
        list/tuple corresponds to the more permissive 'All except the list below'
        GUI parameter.

    :param str addresses: A list of the relay IPs. The order of the list is important.
    :param str server: The SMTP server name.

    Example of usage:

    .. code-block:: yaml

        smtp-relay-list:
          win_smtp_server.relay_ip_list:
            - addresses:
                - 24.0.0.128
                - 32.0.0.128
                - 60.0.0.128
                - 1.0.0.0
                - 76.0.0.0
                - 0.0.0.0
                - 0.0.0.0
                - 1.0.0.0
                - 1.0.0.0
                - 2.0.0.0
                - 2.0.0.0
                - 4.0.0.0
                - 0.0.0.0
                - 76.0.0.128
                - 0.0.0.0
                - 0.0.0.0
                - 0.0.0.0
                - 0.0.0.0
                - 255.255.255.255
                - 127.0.0.1

    Example of usage for disabling relaying:

    .. code-block:: yaml

        smtp-relay-list:
          win_smtp_server.relay_ip_list:
            - addresses: None

    Example of usage for allowing relaying from any source:

    .. code-block:: yaml

        smtp-relay-list:
          win_smtp_server.relay_ip_list:
            - addresses: []
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    current_addresses = __salt__["win_smtp_server.get_relay_ip_list"](server=server)

    # A bare None means "deny all relaying"; YAML may also deliver it as the
    # literal string 'None' in the first slot, so fix both representations.
    if addresses is None:
        addresses = [None]
    elif addresses and addresses[0] == "None":
        addresses[0] = None

    if addresses == current_addresses:
        ret["comment"] = "RelayIpList already contains the provided addresses."
        ret["result"] = True
        return ret

    ret["changes"] = {"old": current_addresses, "new": addresses}
    if __opts__["test"]:
        ret["comment"] = "RelayIpList will be changed."
        return ret

    ret["comment"] = "Set RelayIpList to contain the provided addresses."
    ret["result"] = __salt__["win_smtp_server.set_relay_ip_list"](
        addresses=addresses, server=server
    )
    return ret
# Name under which this state module is exposed when __virtual__ succeeds.
__virtualname__ = "neutron_secgroup"
def __virtual__():
    """
    Load this state module only when the neutronng execution module loaded.
    """
    if "neutronng.list_subnets" not in __salt__:
        return (
            False,
            "The neutronng execution module failed to load: shade python module is not available",
        )
    return __virtualname__
def present(name, auth=None, **kwargs):
    """
    Ensure a security group exists.

    You can supply either project_name or project_id.

    Creating a default security group will not show up as a change;
    it gets created through the lookup process.

    name
        Name of the security group

    description
        Description of the security group

    project_name
        Name of Project

    project_id
        ID of Project
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    kwargs = __utils__["args.clean_kwargs"](**kwargs)
    __salt__["neutronng.setup_clouds"](auth)

    # Accept project_name as an alias for project_id.
    if "project_name" in kwargs:
        kwargs["project_id"] = kwargs.pop("project_name")

    project = __salt__["keystoneng.project_get"](name=kwargs["project_id"])
    if project is None:
        ret["result"] = False
        ret["comment"] = "project does not exist"
        return ret

    secgroup = __salt__["neutronng.security_group_get"](
        name=name, filters={"tenant_id": project.id}
    )

    if secgroup is None:
        if __opts__["test"] is True:
            ret["result"] = None
            ret["changes"] = kwargs
            ret["comment"] = "Security Group will be created."
            return ret

        secgroup = __salt__["neutronng.security_group_create"](**kwargs)
        ret["changes"] = secgroup
        ret["comment"] = "Created security group"
        return ret

    changes = __salt__["neutronng.compare_changes"](secgroup, **kwargs)
    if changes:
        if __opts__["test"] is True:
            ret["result"] = None
            ret["changes"] = changes
            ret["comment"] = "Security Group will be updated."
            return ret

        __salt__["neutronng.security_group_update"](secgroup=secgroup, **changes)
        ret["changes"].update(changes)
        ret["comment"] = "Updated security group"

    return ret
def absent(name, auth=None, **kwargs):
    """
    Ensure a security group does not exist

    name
        Name of the security group

    project_name
        Name of the project the security group belongs to
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    kwargs = __utils__["args.clean_kwargs"](**kwargs)
    __salt__["neutronng.setup_clouds"](auth)

    # keystoneng.project_get returns a project object (or None). The previous
    # implementation stored the whole object in kwargs["project_id"] and used
    # it as the filter value; use the project's id instead, and fail cleanly
    # when the project does not exist (matching present()).
    project = __salt__["keystoneng.project_get"](name=kwargs["project_name"])
    if project is None:
        ret["result"] = False
        ret["comment"] = "project does not exist"
        return ret

    secgroup = __salt__["neutronng.security_group_get"](
        name=name, filters={"project_id": project.id}
    )

    if secgroup:
        if __opts__["test"] is True:
            ret["result"] = None
            ret["changes"] = {"id": secgroup.id}
            ret["comment"] = "Security group will be deleted."
            return ret

        __salt__["neutronng.security_group_delete"](name=secgroup)
        ret["changes"]["id"] = name
        ret["comment"] = "Deleted security group"

    return ret
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.