code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import re
from salt.exceptions import CommandExecutionError
def __virtual__():
    """Make this state available only on minions whose sysctl module loaded."""
    if "sysctl.show" not in __salt__:
        return (False, "sysctl module could not be loaded")
    return True
def present(name, value, config=None):
    """
    Ensure that the named sysctl value is set in memory and persisted to the
    named configuration file. The default sysctl configuration file is
    /etc/sysctl.conf

    name
        The name of the sysctl value to edit

    value
        The sysctl value to apply. Make sure to set the value to the correct expected
        output for systctl or reading the respective /proc/sys file. For example, instead
        of adding the value `1,2,3` you might need to write `1-3`. If you do not set
        the correct value, Salt will continue to return with changes.

    config
        The location of the sysctl configuration file. If not specified, the
        proper location will be detected based on platform.
    """
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}

    if config is None:
        # Certain Linux systems ignore /etc/sysctl.conf; let the sysctl
        # module pick the platform's real default config file when it can.
        if "sysctl.default_config" in __salt__:
            config = __salt__["sysctl.default_config"]()
        else:
            config = "/etc/sysctl.conf"

    if __opts__["test"]:
        configured = __salt__["sysctl.show"](config_file=config)
        if configured is None:
            ret["result"] = None
            ret["comment"] = (
                f"Sysctl option {name} might be changed, we failed to check "
                f"config file at {config}. The file is either unreadable, or "
                "missing."
            )
            return ret
        current = __salt__["sysctl.get"](name)
        if current:
            if name in configured:
                if str(value).split() == current.split():
                    ret["result"] = True
                    ret["comment"] = f"Sysctl value {name} = {value} is already set"
                    return ret
                # Collapse runs of spaces/tabs before comparing so purely
                # cosmetic whitespace differences do not report a change.
                if re.sub(" +|\t+", " ", current) != re.sub(
                    " +|\t+", " ", str(value)
                ):
                    ret["result"] = None
                    ret["comment"] = (
                        f"Sysctl option {name} set to be changed to {value}"
                    )
                    return ret
            else:
                # Live value exists but is not persisted to the config file.
                ret["result"] = None
                ret["comment"] = (
                    "Sysctl value is currently set on the running system but "
                    f"not in a config file. Sysctl option {name} set to be "
                    f"changed to {value} in config file."
                )
                return ret
        elif not current and name in configured:
            # Persisted but not active in the running kernel.
            ret["result"] = None
            ret["comment"] = (
                f"Sysctl value {name} is present in configuration file but is not "
                f"present in the running config. The value {name} is set to be "
                f"changed to {value}"
            )
            return ret
        # Not set in memory nor persisted: it would be created.
        ret["result"] = None
        ret["comment"] = f"Sysctl option {name} would be changed to {value}"
        return ret

    try:
        update = __salt__["sysctl.persist"](name, value, config)
    except CommandExecutionError as exc:
        ret["result"] = False
        ret["comment"] = f"Failed to set {name} to {value}: {exc}"
        return ret

    if update == "Updated":
        ret["changes"] = {name: value}
        ret["comment"] = f"Updated sysctl value {name} = {value}"
    elif update == "Already set":
        ret["comment"] = f"Sysctl value {name} = {value} is already set"
    return ret
def __virtual__():
    """Load only when the ddns execution module is available."""
    if "ddns.update" not in __salt__:
        return (False, "ddns module could not be loaded")
    return "ddns"
def present(name, zone, ttl, data, rdtype="A", **kwargs):
    """
    Ensure that the named DNS record is present with the given ttl.

    name
        The host portion of the DNS record, e.g., 'webserver'. Name and zone
        are concatenated when the entry is created unless name includes a
        trailing dot, so make sure that information is not duplicated in these
        two arguments.

    zone
        The zone to check/update

    ttl
        TTL for the record

    data
        Data for the DNS record. E.g., the IP address for an A record.

    rdtype
        DNS resource type. Default 'A'.

    ``**kwargs``
        Additional arguments the ddns.update function may need (e.g.
        nameserver, keyfile, keyname). Note that the nsupdate key file can't
        be reused by this function, the keyfile and other arguments must
        follow the `dnspython <http://www.dnspython.org/>`_ spec.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = f'{rdtype} record "{name}" will be updated'
        return ret

    status = __salt__["ddns.update"](zone, name, ttl, rdtype, data, **kwargs)
    if status is None:
        # ddns.update signals "record already correct" with None.
        ret["result"] = True
        ret["comment"] = (
            f'{rdtype} record "{name}" already present with ttl of {ttl}'
        )
        return ret
    if status:
        ret["result"] = True
        ret["comment"] = f'Updated {rdtype} record for "{name}"'
        ret["changes"] = {
            "name": name,
            "zone": zone,
            "ttl": ttl,
            "rdtype": rdtype,
            "data": data,
        }
        return ret
    ret["result"] = False
    ret["comment"] = f'Failed to create or update {rdtype} record for "{name}"'
    return ret
def absent(name, zone, data=None, rdtype=None, **kwargs):
    """
    Ensure that the named DNS record is absent.

    name
        The host portion of the DNS record, e.g., 'webserver'. Name and zone
        are concatenated when the entry is created unless name includes a
        trailing dot, so make sure that information is not duplicated in these
        two arguments.

    zone
        The zone to check

    data
        Data for the DNS record. E.g., the IP address for an A record. If omitted,
        all records matching name (and rdtype, if provided) will be purged.

    rdtype
        DNS resource type. If omitted, all types will be purged.

    ``**kwargs``
        Additional arguments the ddns.update function may need (e.g.
        nameserver, keyfile, keyname). Note that the nsupdate key file can't
        be reused by this function, the keyfile and other arguments must
        follow the `dnspython <http://www.dnspython.org/>`_ spec.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = f'{rdtype} record "{name}" will be deleted'
        return ret

    status = __salt__["ddns.delete"](zone, name, rdtype, data, **kwargs)
    if status is None:
        # Nothing matched, so nothing to delete: success, no changes.
        ret["result"] = True
        ret["comment"] = "No matching DNS record(s) present"
        return ret
    if status:
        ret["result"] = True
        ret["comment"] = "Deleted DNS record(s)"
        ret["changes"] = {"Deleted": {"name": name, "zone": zone}}
        return ret
    ret["result"] = False
    ret["comment"] = "Failed to delete DNS record(s)"
    return ret
import re
from salt.defaults import DEFAULT_TARGET_DELIM
def exists(name, delimiter=DEFAULT_TARGET_DELIM):
    """
    Ensure that a grain is set

    name
        The grain name

    delimiter
        A delimiter different from the default can be provided.

    Check whether a grain exists. Does not attempt to check or set the value.
    """
    # Normalize any custom delimiter back to Salt's standard one.
    name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
    ret = {"name": name, "changes": {}, "result": True, "comment": "Grain exists"}
    # Sentinel distinguishes "grain missing" from a grain set to a falsy value.
    _missing = object()
    if __salt__["grains.get"](name, _missing) is _missing:
        ret["result"] = False
        ret["comment"] = "Grain does not exist"
    return ret
def make_hashable(list_grain, result=None):
    """
    Ensure that a list grain is hashable.

    list_grain
        The list grain that should be hashable

    result
        This function is recursive, so it must be possible to use a
        sublist as parameter to the function. Should not be used by a caller
        outside of the function.

    Make it possible to compare two list grains to each other if the list
    contains complex objects.
    """
    # NOTE: the previous ``result = result or set()`` discarded a recursive
    # call's accumulator whenever it was still empty, silently dropping
    # nested items; an explicit ``is None`` check keeps one shared set.
    if result is None:
        result = set()
    for element in list_grain:
        if isinstance(element, list):
            # Flatten nested lists into the same accumulator.
            make_hashable(element, result)
        else:
            # Non-list members are expected to be iterable (str, dict, ...);
            # frozenset makes them hashable so the sets can be compared.
            result.add(frozenset(element))
    return result
def present(name, value, delimiter=DEFAULT_TARGET_DELIM, force=False):
    """
    Ensure that a grain is set to the given value.

    .. versionchanged:: 2015.8.2

    name
        The grain name

    value
        The value to set on the grain

    force
        If force is True, the existing grain will be overwritten
        regardless of its existing or provided value type. Defaults to False

        .. versionadded:: 2015.8.2

    delimiter
        A delimiter different from the default can be provided.

        .. versionadded:: 2015.8.2

    Complex values (lists and dicts) and nested grains are supported. If the
    grain does not yet exist it is created; for a nested grain, intermediate
    keys are created as needed. An existing complex value is only replaced
    when ``force: True`` is given.

    .. code-block:: yaml

        cheese:
          grains.present:
            - value: edam

        nested_grain_with_complex_value:
          grains.present:
            - name: icinga:Apache SSL
            - value:
              - command: check_https
              - params: -H localhost -p 443 -S

        with,a,custom,delimiter:
          grains.present:
            - value: yay
            - delimiter: ','
    """
    name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    # Sentinel distinguishes "grain missing" from a grain holding None.
    _missing = object()
    existing = __salt__["grains.get"](name, _missing)
    if existing == value:
        ret["comment"] = "Grain is already set"
        return ret
    if __opts__["test"]:
        ret["result"] = None
        if existing is _missing:
            ret["comment"] = f"Grain {name} is set to be added"
            ret["changes"] = {"new": name}
        else:
            ret["comment"] = f"Grain {name} is set to be changed"
            ret["changes"] = {"changed": {name: value}}
        return ret
    # grains.set returns a full state-style dict; reuse it as our result.
    ret = __salt__["grains.set"](name, value, force=force)
    if ret["result"] is True and ret["changes"] != {}:
        ret["comment"] = f"Set grain {name} to {value}"
    ret["name"] = name
    return ret
def list_present(name, value, delimiter=DEFAULT_TARGET_DELIM):
    """
    .. versionadded:: 2014.1.0

    Ensure the value is present in the list-type grain. Note: If the grain that is
    provided in ``name`` is not present on the system, this new grain will be created
    with the corresponding provided value.

    name
        The grain name.

    value
        The value is present in the list type grain.

    delimiter
        A delimiter different from the default ``:`` can be provided.

        .. versionadded:: 2015.8.2

    The grain should be `list type <http://docs.python.org/2/tutorial/datastructures.html#data-structures>`_

    .. code-block:: yaml

        roles:
          grains.list_present:
            - value: web

    For multiple grains, the syntax looks like:

    .. code-block:: yaml

        roles:
          grains.list_present:
            - value:
              - web
              - dev
    """
    # Normalize any custom delimiter to Salt's standard target delimiter.
    name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    grain = __salt__["grains.get"](name)
    if grain:
        # check whether grain is a list
        if not isinstance(grain, list):
            ret["result"] = False
            ret["comment"] = "Grain {} is not a valid list".format(name)
            return ret
        if isinstance(value, list):
            # make_hashable allows subset comparison even when the list
            # contains unhashable members such as dicts.
            if make_hashable(value).issubset(
                make_hashable(__salt__["grains.get"](name))
            ):
                ret["comment"] = "Value {1} is already in grain {0}".format(name, value)
                return ret
            elif name in __context__.get("pending_grains", {}):
                # elements common to both
                # Another list_present state in this same run already queued
                # some of these values; drop the overlap so it is not
                # appended twice.
                intersection = set(value).intersection(
                    __context__.get("pending_grains", {})[name]
                )
                if intersection:
                    value = list(
                        set(value).difference(__context__["pending_grains"][name])
                    )
                    ret[
                        "comment"
                    ] = 'Removed value {} from update due to context found in "{}".\n'.format(
                        value, name
                    )
            # Record the values we are about to append so later states in
            # this run can detect them (see the intersection check above).
            if "pending_grains" not in __context__:
                __context__["pending_grains"] = {}
            if name not in __context__["pending_grains"]:
                __context__["pending_grains"][name] = set()
            __context__["pending_grains"][name].update(value)
        else:
            if value in grain:
                ret["comment"] = "Value {1} is already in grain {0}".format(name, value)
                return ret
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Value {1} is set to be appended to grain {0}".format(
                name, value
            )
            ret["changes"] = {"new": grain}
            return ret
    else:
        # Grain does not exist yet; grains.append below will create it.
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Grain {} is set to be added".format(name)
            ret["changes"] = {"new": grain}
            return ret
    new_grains = __salt__["grains.append"](name, value)
    # Verify the append actually took effect before reporting success.
    if isinstance(value, list):
        if not set(value).issubset(set(__salt__["grains.get"](name))):
            ret["result"] = False
            ret["comment"] = "Failed append value {1} to grain {0}".format(name, value)
            return ret
    else:
        if value not in __salt__["grains.get"](name, delimiter=DEFAULT_TARGET_DELIM):
            ret["result"] = False
            ret["comment"] = "Failed append value {1} to grain {0}".format(name, value)
            return ret
    ret["comment"] = "Append value {1} to grain {0}".format(name, value)
    ret["changes"] = {"new": new_grains}
    return ret
def list_absent(name, value, delimiter=DEFAULT_TARGET_DELIM):
    """
    Delete a value from a grain formed as a list.

    .. versionadded:: 2014.1.0

    name
        The grain name.

    value
        The value to delete from the grain list.

    delimiter
        A delimiter different from the default ``:`` can be provided.

        .. versionadded:: 2015.8.2

    The grain should be `list type <http://docs.python.org/2/tutorial/datastructures.html#data-structures>`_

    .. code-block:: yaml

        roles:
          grains.list_absent:
            - value: db

    For multiple grains, the syntax looks like:

    .. code-block:: yaml

        roles:
          grains.list_absent:
            - value:
              - web
              - dev
    """
    name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    grain = __salt__["grains.get"](name, None)
    if not grain:
        ret["comment"] = "Grain {} does not exist".format(name)
        return ret
    if not isinstance(grain, list):
        ret["result"] = False
        ret["comment"] = "Grain {} is not a valid list".format(name)
        return ret
    # Accept a single value or a list of values uniformly.
    values = value if isinstance(value, list) else [value]
    comments = []
    for val in values:
        if val not in grain:
            comments.append("Value {1} is absent from grain {0}".format(name, val))
            continue
        if __opts__["test"]:
            ret["result"] = None
            comments.append(
                "Value {1} in grain {0} is set to be deleted".format(name, val)
            )
        else:
            __salt__["grains.remove"](name, val)
            comments.append(
                "Value {1} was deleted from grain {0}".format(name, val)
            )
        ret["changes"].setdefault("deleted", []).append(val)
    ret["comment"] = "\n".join(comments)
    return ret
def absent(name, destructive=False, delimiter=DEFAULT_TARGET_DELIM, force=False):
    """
    .. versionadded:: 2014.7.0

    Delete a grain from the grains config file

    name
        The grain name

    destructive
        If destructive is True, delete the entire grain. If
        destructive is False, set the grain's value to None. Defaults to False.

    force
        If force is True, the existing grain will be overwritten
        regardless of its existing or provided value type. Defaults to False

        .. versionadded:: 2015.8.2

    delimiter
        A delimiter different from the default can be provided.

        .. versionadded:: 2015.8.2

    .. versionchanged:: 2015.8.2
        This state now support nested grains and complex values. It is also more
        conservative: if a grain has a value that is a list or a dict, it will
        not be removed unless the `force` parameter is True.

    .. code-block:: yaml

        grain_name:
          grains.absent
    """
    # Sentinel distinguishes "grain missing" from a grain explicitly set to None.
    _non_existent = object()

    name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    grain = __salt__["grains.get"](name, _non_existent)
    if grain is None:
        # Grain exists but already holds None: only a destructive run can
        # still change anything (removing the key itself).
        if __opts__["test"]:
            ret["result"] = None
            if destructive is True:
                ret["comment"] = "Grain {} is set to be deleted".format(name)
                ret["changes"] = {"deleted": name}
            return ret
        # grains.set returns a full state-style dict; reuse it as our result.
        ret = __salt__["grains.set"](name, None, destructive=destructive, force=force)
        if ret["result"]:
            if destructive is True:
                ret["comment"] = "Grain {} was deleted".format(name)
                ret["changes"] = {"deleted": name}
        ret["name"] = name
    elif grain is not _non_existent:
        # Grain exists with a real value: either delete it outright
        # (destructive) or blank it to None.
        if __opts__["test"]:
            ret["result"] = None
            if destructive is True:
                ret["comment"] = "Grain {} is set to be deleted".format(name)
                ret["changes"] = {"deleted": name}
            else:
                ret[
                    "comment"
                ] = "Value for grain {} is set to be deleted (None)".format(name)
                ret["changes"] = {"grain": name, "value": None}
            return ret
        ret = __salt__["grains.set"](name, None, destructive=destructive, force=force)
        if ret["result"]:
            if destructive is True:
                ret["comment"] = "Grain {} was deleted".format(name)
                ret["changes"] = {"deleted": name}
            else:
                ret["comment"] = "Value for grain {} was set to None".format(name)
                ret["changes"] = {"grain": name, "value": None}
        ret["name"] = name
    else:
        # Grain not present at all: nothing to do.
        ret["comment"] = "Grain {} does not exist".format(name)
    return ret
def append(name, value, convert=False, delimiter=DEFAULT_TARGET_DELIM):
    """
    .. versionadded:: 2014.7.0

    Append a value to a list in the grains config file. The grain that is being
    appended to (name) must exist before the new value can be added.

    name
        The grain name

    value
        The value to append

    convert
        If convert is True, convert non-list contents into a list.
        If convert is False and the grain contains non-list contents, an error
        is given. Defaults to False.

    delimiter
        A delimiter different from the default can be provided.

        .. versionadded:: 2015.8.2

    .. code-block:: yaml

        grain_name:
          grains.append:
            - value: to_be_appended
    """
    name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    grain = __salt__["grains.get"](name, None)

    # A grain may legitimately hold None (declared with no value in the
    # config), so also accept any name present in the minion's grains.
    if not grain and name not in __grains__:
        ret["result"] = False
        ret["comment"] = "Grain {} does not exist".format(name)
        return ret

    if isinstance(grain, list):
        if value in grain:
            ret[
                "comment"
            ] = "Value {1} is already in the list for grain {0}".format(name, value)
            return ret
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Value {1} in grain {0} is set to be added".format(
                name, value
            )
            ret["changes"] = {"added": value}
            return ret
        __salt__["grains.append"](name, value)
        ret["comment"] = "Value {1} was added to grain {0}".format(name, value)
        ret["changes"] = {"added": value}
        return ret

    # Existing grain is not a list: only proceed when conversion is allowed.
    if convert is not True:
        ret["result"] = False
        ret["comment"] = "Grain {} is not a valid list".format(name)
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = (
            "Grain {} is set to be converted "
            "to list and value {} will be "
            "added".format(name, value)
        )
        ret["changes"] = {"added": value}
        return ret
    # Wrap the current scalar (or start empty when the grain held None).
    converted = [] if grain is None else [grain]
    converted.append(value)
    __salt__["grains.setval"](name, converted)
    ret["comment"] = "Value {1} was added to grain {0}".format(name, value)
    ret["changes"] = {"added": value}
    return ret
def present(name=None, data=None, ensure_data=True, **api_opts):
    """
    This will ensure that a host with the provided name exists.
    This will try to ensure that the state of the host matches the given data
    If the host is not found then one will be created.

    When trying to update a hostname ensure `name` is set to the hostname
    of the current record. You can give a new name in the `data.name`.

    Avoid race conditions, use func:nextavailableip:
    - func:nextavailableip:network/ZG54dfgsrDFEFfsfsLzA:10.0.0.0/8/default
    - func:nextavailableip:10.0.0.0/8
    - func:nextavailableip:10.0.0.0/8,externalconfigure_for_dns
    - func:nextavailableip:10.0.0.3-10.0.0.10

    State Example:

    .. code-block:: yaml

        # this would update `original_hostname.example.ca` to changed `data`.
        infoblox_host_record.present:
            - name: original_hostname.example.ca
            - data: {'name': 'hostname.example.ca',
                     'aliases': ['hostname.math.example.ca'],
                     'extattrs': [{'Business Contact': {'value': 'EXAMPLE@example.ca'}}],
                     'ipv4addrs': [{'configure_for_dhcp': True,
                                    'ipv4addr': 'func:nextavailableip:129.97.139.0/24',
                                    'mac': '00:50:56:84:6e:ae'}],
                     'ipv6addrs': [], }
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    if data is None:
        data = {}
    if "name" not in data:
        # Default the payload's record name to the state name.
        data.update({"name": name})
    obj = __salt__["infoblox.get_host"](name=name, **api_opts)
    if obj is None:
        # perhaps the user updated the name
        obj = __salt__["infoblox.get_host"](name=data["name"], **api_opts)
        if obj:
            # warn user that the host name was updated and does not match
            ret["result"] = False
            ret[
                "comment"
            ] = "please update the name: {} to equal the updated data name {}".format(
                name, data["name"]
            )
            return ret
    if obj:
        if not ensure_data:
            ret["result"] = True
            ret[
                "comment"
            ] = "infoblox record already created (supplied fields not ensured to match)"
            return ret
        # Re-fetch the full record so the diff sees every field.
        obj = __salt__["infoblox.get_host_advanced"](name=name, **api_opts)
        diff = __salt__["infoblox.diff_objects"](data, obj)
        if not diff:
            ret["result"] = True
            ret["comment"] = (
                "supplied fields already updated (note: removing fields might not"
                " update)"
            )
            return ret
        if diff:
            ret["changes"] = {"diff": diff}
            if __opts__["test"]:
                ret["result"] = None
                ret["comment"] = "would attempt to update infoblox record"
                return ret
            # replace func:nextavailableip with current ip address if in range
            # get list of ipaddresses that are defined.
            obj_addrs = []
            if "ipv4addrs" in obj:
                for addr in obj["ipv4addrs"]:
                    if "ipv4addr" in addr:
                        obj_addrs.append(addr["ipv4addr"])
            if "ipv6addrs" in obj:
                for addr in obj["ipv6addrs"]:
                    if "ipv6addr" in addr:
                        obj_addrs.append(addr["ipv6addr"])
            # replace func:nextavailableip: if an ip address is already found in that range.
            if "ipv4addrs" in data:
                for addr in data["ipv4addrs"]:
                    if "ipv4addr" in addr:
                        addrobj = addr["ipv4addr"]
                        if addrobj.startswith("func:nextavailableip:"):
                            found_matches = 0
                            for ip in obj_addrs:
                                if __salt__["infoblox.is_ipaddr_in_ipfunc_range"](
                                    ip, addrobj
                                ):
                                    # Keep the already-assigned address instead
                                    # of asking Infoblox to allocate a new one.
                                    addr["ipv4addr"] = ip
                                    found_matches += 1
                            if found_matches > 1:
                                # Ambiguous: more than one existing address
                                # falls inside the requested range.
                                ret["comment"] = (
                                    "infoblox record cant updated because ipaddress {}"
                                    " matches multiple func:nextavailableip".format(ip)
                                )
                                ret["result"] = False
                                return ret
            new_obj = __salt__["infoblox.update_object"](
                obj["_ref"], data=data, **api_opts
            )
            ret["result"] = True
            ret["comment"] = (
                "infoblox record fields updated (note: removing fields might not"
                " update)"
            )
            # ret['changes'] = {'diff': diff }
            return ret
    # No existing record: create one.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "would attempt to create infoblox record {}".format(name)
        return ret
    new_obj_ref = __salt__["infoblox.create_host"](data=data, **api_opts)
    new_obj = __salt__["infoblox.get_host"](name=name, **api_opts)
    ret["result"] = True
    ret["comment"] = "infoblox record created"
    ret["changes"] = {"old": "None", "new": {"_ref": new_obj_ref, "data": new_obj}}
    return ret
def absent(name=None, ipv4addr=None, mac=None, **api_opts):
    """
    Ensure the host with the given Name ipv4addr or mac is removed.

    State example:

    .. code-block:: yaml

        infoblox_host_record.absent:
            - name: hostname.of.record.to.remove

        infoblox_host_record.absent:
            - name:
            - ipv4addr: 192.168.0.1

        infoblox_host_record.absent:
            - name:
            - mac: 12:02:12:31:23:43
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    obj = __salt__["infoblox.get_host"](
        name=name, ipv4addr=ipv4addr, mac=mac, **api_opts
    )
    if not obj:
        # Nothing matched: the record is already gone.
        ret["result"] = True
        ret["comment"] = "infoblox already removed"
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["changes"] = {"old": obj, "new": "absent"}
        return ret
    deleted = __salt__["infoblox.delete_host"](name=name, mac=mac, **api_opts)
    if deleted:
        ret["result"] = True
        ret["changes"] = {"old": obj, "new": "absent"}
    return ret
import copy
import requests
import salt.utils.json
from salt.utils.dictdiffer import DictDiffer
def __virtual__():
    """
    Only load if grafana v2.0 is configured.
    """
    if __salt__["config.get"]("grafana_version", 1) != 2:
        return (False, "Not configured for grafana_version 2")
    return True
# Pillar keys merged in as baseline configuration for every managed
# dashboard, row and panel (see the _inherited_* helpers below).
_DEFAULT_DASHBOARD_PILLAR = "grafana_dashboards:default"
_DEFAULT_PANEL_PILLAR = "grafana_panels:default"
_DEFAULT_ROW_PILLAR = "grafana_rows:default"
# Pillar key listing row titles that should be kept at the top of dashboards.
_PINNED_ROWS_PILLAR = "grafana_pinned_rows"
def present(
    name,
    base_dashboards_from_pillar=None,
    base_panels_from_pillar=None,
    base_rows_from_pillar=None,
    dashboard=None,
    profile="grafana",
):
    """
    Ensure the grafana dashboard exists and is managed.

    name
        Name of the grafana dashboard.

    base_dashboards_from_pillar
        A pillar key that contains a list of dashboards to inherit from

    base_panels_from_pillar
        A pillar key that contains a list of panels to inherit from

    base_rows_from_pillar
        A pillar key that contains a list of rows to inherit from

    dashboard
        A dict that defines a dashboard that should be managed.

    profile
        A pillar key or dict that contains grafana information
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    base_dashboards_from_pillar = base_dashboards_from_pillar or []
    base_panels_from_pillar = base_panels_from_pillar or []
    base_rows_from_pillar = base_rows_from_pillar or []
    dashboard = dashboard or {}

    if isinstance(profile, str):
        # Resolve a named profile from the minion configuration.
        profile = __salt__["config.option"](profile)

    # Add pillar keys for default configuration
    base_dashboards_from_pillar = [
        _DEFAULT_DASHBOARD_PILLAR
    ] + base_dashboards_from_pillar
    base_panels_from_pillar = [_DEFAULT_PANEL_PILLAR] + base_panels_from_pillar
    base_rows_from_pillar = [_DEFAULT_ROW_PILLAR] + base_rows_from_pillar

    # Build out all dashboard fields by layering the pillar bases under the
    # explicitly supplied dashboard/rows/panels.
    new_dashboard = _inherited_dashboard(dashboard, base_dashboards_from_pillar, ret)
    new_dashboard["title"] = name
    rows = new_dashboard.get("rows", [])
    for i, row in enumerate(rows):
        rows[i] = _inherited_row(row, base_rows_from_pillar, ret)
    for row in rows:
        panels = row.get("panels", [])
        for i, panel in enumerate(panels):
            panels[i] = _inherited_panel(panel, base_panels_from_pillar, ret)
    _auto_adjust_panel_spans(new_dashboard)
    _ensure_panel_ids(new_dashboard)
    _ensure_annotations(new_dashboard)

    # Create dashboard if it does not exist
    url = "db/{}".format(name)
    old_dashboard = _get(url, profile)
    if not old_dashboard:
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Dashboard {} is set to be created.".format(name)
            return ret

        response = _update(new_dashboard, profile)
        if response.get("status") == "success":
            ret["comment"] = "Dashboard {} created.".format(name)
            ret["changes"]["new"] = "Dashboard {} created.".format(name)
        else:
            ret["result"] = False
            ret["comment"] = "Failed to create dashboard {}, response={}".format(
                name, response
            )
        return ret

    # Add unmanaged rows to the dashboard. They appear at the top if they are
    # marked as pinned. They appear at the bottom otherwise.
    managed_row_titles = [row.get("title") for row in new_dashboard.get("rows", [])]
    new_rows = new_dashboard.get("rows", [])
    for old_row in old_dashboard.get("rows", []):
        if old_row.get("title") not in managed_row_titles:
            new_rows.append(copy.deepcopy(old_row))
    _ensure_pinned_rows(new_dashboard)
    # Re-number panel ids now that unmanaged rows were appended.
    _ensure_panel_ids(new_dashboard)

    # Update dashboard if it differs (comparison ignores volatile fields,
    # see _cleaned).
    dashboard_diff = DictDiffer(_cleaned(new_dashboard), _cleaned(old_dashboard))
    updated_needed = (
        dashboard_diff.changed() or dashboard_diff.added() or dashboard_diff.removed()
    )
    if updated_needed:
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Dashboard {} is set to be updated, changes={}".format(
                name,
                salt.utils.json.dumps(
                    _dashboard_diff(_cleaned(new_dashboard), _cleaned(old_dashboard)),
                    indent=4,
                ),
            )
            return ret

        response = _update(new_dashboard, profile)
        if response.get("status") == "success":
            updated_dashboard = _get(url, profile)
            dashboard_diff = DictDiffer(
                _cleaned(updated_dashboard), _cleaned(old_dashboard)
            )
            ret["comment"] = "Dashboard {} updated.".format(name)
            ret["changes"] = _dashboard_diff(
                _cleaned(new_dashboard), _cleaned(old_dashboard)
            )
        else:
            ret["result"] = False
            ret["comment"] = "Failed to update dashboard {}, response={}".format(
                name, response
            )
        return ret

    ret["comment"] = "Dashboard present"
    return ret
def absent(name, profile="grafana"):
    """
    Ensure the named grafana dashboard is absent.

    name
        Name of the grafana dashboard.

    profile
        A pillar key or dict that contains grafana information
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    if isinstance(profile, str):
        # Resolve a named profile from the minion configuration.
        profile = __salt__["config.option"](profile)

    url = "db/{}".format(name)
    if not _get(url, profile):
        ret["comment"] = "Dashboard absent"
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Dashboard {} is set to be deleted.".format(name)
        return ret

    _delete(url, profile)
    ret["comment"] = "Dashboard {} deleted.".format(name)
    ret["changes"]["new"] = "Dashboard {} deleted.".format(name)
    return ret
# Fields stripped before comparing dashboards (see _cleaned): Grafana
# assigns or advances these itself, so they would otherwise always show
# up as spurious diffs.
_IGNORED_DASHBOARD_FIELDS = [
    "id",
    "originalTitle",
    "version",
]
_IGNORED_ROW_FIELDS = []
_IGNORED_PANEL_FIELDS = [
    "grid",
    "mode",
    "tooltip",
]
_IGNORED_TARGET_FIELDS = [
    "textEditor",
]
def _cleaned(_dashboard):
    """Return a deep copy of *_dashboard* with volatile fields removed,
    so two dashboards can be compared for meaningful differences."""
    dashboard = copy.deepcopy(_dashboard)
    for field in _IGNORED_DASHBOARD_FIELDS:
        dashboard.pop(field, None)
    for row in dashboard.get("rows", []):
        for field in _IGNORED_ROW_FIELDS:
            row.pop(field, None)
        for idx, panel in enumerate(row.get("panels", [])):
            for field in _IGNORED_PANEL_FIELDS:
                panel.pop(field, None)
            for target in panel.get("targets", []):
                for field in _IGNORED_TARGET_FIELDS:
                    target.pop(field, None)
            row["panels"][idx] = _stripped(panel)
    return dashboard
def _inherited_dashboard(dashboard, base_dashboards_from_pillar, ret):
    """Return a dashboard with properties from parents.

    Pillar base dashboards are layered first, the explicit *dashboard* last,
    so explicit keys win; ``tags`` are unioned across all layers. Missing
    non-default pillar keys add a warning to *ret*.
    """
    layers = []
    for pillar_key in base_dashboards_from_pillar:
        layer = __salt__["pillar.get"](pillar_key)
        if layer:
            layers.append(layer)
        elif pillar_key != _DEFAULT_DASHBOARD_PILLAR:
            # Warn (once per key) about any missing, explicitly requested base.
            warning = 'Cannot find dashboard pillar "{}".'.format(pillar_key)
            ret.setdefault("warnings", [])
            if warning not in ret["warnings"]:
                ret["warnings"].append(warning)
    layers.append(dashboard)

    merged = {}
    tags = set()
    for layer in layers:
        tags.update(layer.get("tags", []))
        merged.update(layer)
    merged["tags"] = list(tags)
    return merged
def _inherited_row(row, base_rows_from_pillar, ret):
    """Return a row with properties from parents.

    Pillar base rows are layered first, the explicit *row* last, so explicit
    keys win. Missing non-default pillar keys add a warning to *ret*.
    """
    layers = []
    for pillar_key in base_rows_from_pillar:
        layer = __salt__["pillar.get"](pillar_key)
        if layer:
            layers.append(layer)
        elif pillar_key != _DEFAULT_ROW_PILLAR:
            # Warn (once per key) about any missing, explicitly requested base.
            warning = 'Cannot find row pillar "{}".'.format(pillar_key)
            ret.setdefault("warnings", [])
            if warning not in ret["warnings"]:
                ret["warnings"].append(warning)
    layers.append(row)

    merged = {}
    for layer in layers:
        merged.update(layer)
    return merged
def _inherited_panel(panel, base_panels_from_pillar, ret):
    """Return a panel with properties from parents.

    Pillar base panels are layered first, the explicit *panel* last, so
    explicit keys win. Missing non-default pillar keys add a warning to *ret*.
    """
    layers = []
    for pillar_key in base_panels_from_pillar:
        layer = __salt__["pillar.get"](pillar_key)
        if layer:
            layers.append(layer)
        elif pillar_key != _DEFAULT_PANEL_PILLAR:
            # Warn (once per key) about any missing, explicitly requested base.
            warning = 'Cannot find panel pillar "{}".'.format(pillar_key)
            ret.setdefault("warnings", [])
            if warning not in ret["warnings"]:
                ret["warnings"].append(warning)
    layers.append(panel)

    merged = {}
    for layer in layers:
        merged.update(layer)
    return merged
_FULL_LEVEL_SPAN = 12
_DEFAULT_PANEL_SPAN = 2.5
def _auto_adjust_panel_spans(dashboard):
"""Adjust panel spans to take up the available width.
For each group of panels that would be laid out on the same level, scale up
the unspecified panel spans to fill up the level.
"""
for row in dashboard.get("rows", []):
levels = []
current_level = []
levels.append(current_level)
for panel in row.get("panels", []):
current_level_span = sum(
panel.get("span", _DEFAULT_PANEL_SPAN) for panel in current_level
)
span = panel.get("span", _DEFAULT_PANEL_SPAN)
if current_level_span + span > _FULL_LEVEL_SPAN:
current_level = [panel]
levels.append(current_level)
else:
current_level.append(panel)
for level in levels:
specified_panels = [panel for panel in level if "span" in panel]
unspecified_panels = [panel for panel in level if "span" not in panel]
if not unspecified_panels:
continue
specified_span = sum(panel["span"] for panel in specified_panels)
available_span = _FULL_LEVEL_SPAN - specified_span
auto_span = float(available_span) / len(unspecified_panels)
for panel in unspecified_panels:
panel["span"] = auto_span
def _ensure_pinned_rows(dashboard):
    """Pin rows to the top of the dashboard.

    Rows whose title matches (case-insensitively) an entry in the
    _PINNED_ROWS_PILLAR pillar are moved, keeping their relative order, to the
    front of the dashboard's row list.  The list is modified in place so the
    caller's dashboard dict is actually updated.
    """
    pinned_row_titles = __salt__["pillar.get"](_PINNED_ROWS_PILLAR)
    if not pinned_row_titles:
        return
    # Compare titles case-insensitively.
    pinned_row_titles_lower = [title.lower() for title in pinned_row_titles]
    rows = dashboard.get("rows", [])
    pinned_rows = []
    other_rows = []
    # BUGFIX: the previous implementation deleted from ``rows`` while
    # iterating over it with enumerate() (which skips the element after each
    # deletion) and then rebound the *local* name ``rows``, leaving the
    # dashboard itself unchanged.  Partition into two lists and splice back
    # in place instead.
    for row in rows:
        if row.get("title", "").lower() in pinned_row_titles_lower:
            pinned_rows.append(row)
        else:
            other_rows.append(row)
    rows[:] = pinned_rows + other_rows
def _ensure_panel_ids(dashboard):
"""Assign panels auto-incrementing IDs."""
panel_id = 1
for row in dashboard.get("rows", []):
for panel in row.get("panels", []):
panel["id"] = panel_id
panel_id += 1
def _ensure_annotations(dashboard):
"""Explode annotation_tags into annotations."""
if "annotation_tags" not in dashboard:
return
tags = dashboard["annotation_tags"]
annotations = {
"enable": True,
"list": [],
}
for tag in tags:
annotations["list"].append(
{
"datasource": "graphite",
"enable": False,
"iconColor": "#C0C6BE",
"iconSize": 13,
"lineColor": "rgba(255, 96, 96, 0.592157)",
"name": tag,
"showLine": True,
"tags": tag,
}
)
del dashboard["annotation_tags"]
dashboard["annotations"] = annotations
def _get(url, profile):
    """Fetch one dashboard from the Grafana HTTP API; None when not found."""
    request_url = "{}/api/dashboards/{}".format(profile.get("grafana_url"), url)
    headers = {
        "Accept": "application/json",
        "Authorization": "Bearer {}".format(profile.get("grafana_token")),
    }
    data = requests.get(
        request_url,
        headers=headers,
        timeout=profile.get("grafana_timeout", 3),
    ).json()
    # Grafana signals a missing dashboard either via the message field or by
    # omitting the "dashboard" key entirely.
    if data.get("message") == "Not found" or "dashboard" not in data:
        return None
    return data["dashboard"]
def _delete(url, profile):
    """Delete a specific dashboard.

    Returns the decoded JSON response from the Grafana API.
    """
    request_url = "{}/api/dashboards/{}".format(profile.get("grafana_url"), url)
    response = requests.delete(
        request_url,
        headers={
            "Accept": "application/json",
            "Authorization": "Bearer {}".format(profile.get("grafana_token")),
        },
        # BUGFIX: fall back to 3 seconds like _get(); previously a missing
        # "grafana_timeout" key meant timeout=None, i.e. wait forever.
        timeout=profile.get("grafana_timeout", 3),
    )
    data = response.json()
    return data
def _update(dashboard, profile):
    """Create or overwrite a dashboard via the Grafana API.

    Returns the decoded JSON response.
    """
    payload = {"dashboard": dashboard, "overwrite": True}
    request_url = "{}/api/dashboards/db".format(profile.get("grafana_url"))
    response = requests.post(
        request_url,
        headers={"Authorization": "Bearer {}".format(profile.get("grafana_token"))},
        json=payload,
        # BUGFIX: the request previously had no timeout at all and could hang
        # the state run indefinitely; use the same default as _get().
        timeout=profile.get("grafana_timeout", 3),
    )
    return response.json()
def _dashboard_diff(_new_dashboard, _old_dashboard):
    """Return a dictionary of changes between dashboards.

    The result has three keys — "dashboard", "rows", and "panels" — each a
    dict with optional "changed"/"added"/"removed" entries (falsey entries
    are stripped).  Rows are keyed by title, panels by id.
    """
    diff = {}

    # Dashboard diff
    new_dashboard = copy.deepcopy(_new_dashboard)
    old_dashboard = copy.deepcopy(_old_dashboard)
    dashboard_diff = DictDiffer(new_dashboard, old_dashboard)
    diff["dashboard"] = _stripped(
        {
            "changed": list(dashboard_diff.changed()) or None,
            "added": list(dashboard_diff.added()) or None,
            "removed": list(dashboard_diff.removed()) or None,
        }
    )

    # Row diff
    new_rows = new_dashboard.get("rows", [])
    old_rows = old_dashboard.get("rows", [])
    new_rows_by_title = {}
    old_rows_by_title = {}
    for row in new_rows:
        if "title" in row:
            new_rows_by_title[row["title"]] = row
    for row in old_rows:
        if "title" in row:
            old_rows_by_title[row["title"]] = row
    rows_diff = DictDiffer(new_rows_by_title, old_rows_by_title)
    diff["rows"] = _stripped(
        {
            "added": list(rows_diff.added()) or None,
            "removed": list(rows_diff.removed()) or None,
        }
    )
    for changed_row_title in rows_diff.changed():
        old_row = old_rows_by_title[changed_row_title]
        new_row = new_rows_by_title[changed_row_title]
        row_diff = DictDiffer(new_row, old_row)
        diff["rows"].setdefault("changed", {})
        diff["rows"]["changed"][changed_row_title] = _stripped(
            {
                "changed": list(row_diff.changed()) or None,
                "added": list(row_diff.added()) or None,
                "removed": list(row_diff.removed()) or None,
            }
        )

    # Panel diff
    old_panels_by_id = {}
    new_panels_by_id = {}
    for row in old_dashboard.get("rows", []):
        for panel in row.get("panels", []):
            if "id" in panel:
                old_panels_by_id[panel["id"]] = panel
    for row in new_dashboard.get("rows", []):
        for panel in row.get("panels", []):
            if "id" in panel:
                new_panels_by_id[panel["id"]] = panel
    panels_diff = DictDiffer(new_panels_by_id, old_panels_by_id)
    diff["panels"] = _stripped(
        {
            "added": list(panels_diff.added()) or None,
            "removed": list(panels_diff.removed()) or None,
        }
    )
    for changed_panel_id in panels_diff.changed():
        old_panel = old_panels_by_id[changed_panel_id]
        new_panel = new_panels_by_id[changed_panel_id]
        # Use a distinct name: the previous code rebound ``panels_diff``
        # here, shadowing the aggregate differ the loop iterates over
        # (harmless only because .changed() was evaluated once, but
        # error-prone; now consistent with ``row_diff`` above).
        panel_diff = DictDiffer(new_panel, old_panel)
        diff["panels"].setdefault("changed", {})
        diff["panels"]["changed"][changed_panel_id] = _stripped(
            {
                "changed": list(panel_diff.changed()) or None,
                "added": list(panel_diff.added()) or None,
                "removed": list(panel_diff.removed()) or None,
            }
        )
    return diff
def _stripped(d):
"""Strip falsey entries."""
ret = {}
for k, v in d.items():
if v:
ret[k] = v
return ret | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/grafana_dashboard.py | 0.590189 | 0.19309 | grafana_dashboard.py | pypi |
def __virtual__():
    """
    Only load if the XMPP module is available in __salt__
    """
    if "xmpp.send_msg" not in __salt__:
        return (False, "xmpp module could not be loaded")
    return "xmpp"
def send_msg(name, recipient, profile):
    """
    Send a message to an XMPP user

    .. code-block:: yaml

        server-warning-message:
          xmpp.send_msg:
            - name: 'This is a server warning message'
            - profile: my-xmpp-account
            - recipient: admins@xmpp.example.com/salt

    name
        The message to send to the XMPP user
    """
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}
    note = "message to {}: {}".format(recipient, name)
    if __opts__["test"]:
        ret["comment"] = "Need to send " + note
        return ret
    __salt__["xmpp.send_msg_multi"](
        message=name,
        recipients=[recipient],
        profile=profile,
    )
    ret["result"] = True
    ret["comment"] = "Sent " + note
    return ret
def send_msg_multi(name, profile, recipients=None, rooms=None):
    """
    Send a message to an list of recipients or rooms

    .. code-block:: yaml

        server-warning-message:
          xmpp.send_msg:
            - name: 'This is a server warning message'
            - profile: my-xmpp-account
            - recipients:
              - admins@xmpp.example.com/salt
            - rooms:
              - qa@conference.xmpp.example.com

    name
        The message to send to the XMPP user
    """
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}
    if recipients is None and rooms is None:
        ret["comment"] = "Recipients and rooms are empty, no need to send"
        return ret
    # Describe the targets once; reused for both dry-run and success comments.
    parts = []
    if recipients:
        parts.append(" users {}".format(recipients))
    if rooms:
        parts.append(" rooms {}".format(rooms))
    parts.append(", message: {}".format(name))
    comment = "".join(parts)
    if __opts__["test"]:
        ret["comment"] = "Need to send" + comment
        return ret
    __salt__["xmpp.send_msg_multi"](
        message=name,
        recipients=recipients,
        rooms=rooms,
        profile=profile,
    )
    ret["result"] = True
    ret["comment"] = "Sent message to" + comment
    return ret
import collections
def __virtual__():
    """
    Only load if the mssql module is present
    """
    if "mssql.version" not in __salt__:
        return (False, "mssql module could not be loaded")
    return True
def _normalize_options(options):
if type(options) in [dict, collections.OrderedDict]:
return ["{}={}".format(k, v) for k, v in options.items()]
if type(options) is list and (not options or type(options[0]) is str):
return options
# Invalid options
if type(options) is not list or type(options[0]) not in [
dict,
collections.OrderedDict,
]:
return []
return [o for d in options for o in _normalize_options(d)]
def present(
    name, login=None, domain=None, database=None, roles=None, options=None, **kwargs
):
    """
    Checks existence of the named user.
    If not present, creates the user with the specified roles and options.

    name
        The name of the user to manage
    login
        If not specified, will be created WITHOUT LOGIN
    domain
        Creates a Windows authentication user.
        Needs to be NetBIOS domain or hostname
    database
        The database of the user (not the login)
    roles
        Add this user to all the roles in the list
    options
        Can be a list of strings, a dictionary, or a list of dictionaries
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    # A Windows-authentication domain only makes sense attached to a login.
    if domain and not login:
        ret["result"] = False
        ret["comment"] = "domain cannot be set without login"
        return ret
    # Existing users are left untouched: roles/options are NOT reconciled.
    if __salt__["mssql.user_exists"](name, domain=domain, database=database, **kwargs):
        ret[
            "comment"
        ] = "User {} is already present (Not going to try to set its roles or options)".format(
            name
        )
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "User {} is set to be added".format(name)
        return ret
    # mssql.user_create returns True on success or an error string on failure.
    user_created = __salt__["mssql.user_create"](
        name,
        login=login,
        domain=domain,
        database=database,
        roles=roles,
        options=_normalize_options(options),
        **kwargs
    )
    if (
        user_created is not True
    ):  # Non-empty strings are also evaluated to True, so we cannot use if not user_created:
        ret["result"] = False
        ret["comment"] += "User {} failed to be added: {}".format(name, user_created)
        return ret
    ret["comment"] += "User {} has been added".format(name)
    ret["changes"][name] = "Present"
    return ret
def absent(name, **kwargs):
    """
    Ensure that the named user is absent

    name
        The username of the user to remove
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    if not __salt__["mssql.user_exists"](name):
        ret["comment"] = "User {} is not present".format(name)
    elif __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "User {} is set to be removed".format(name)
    elif __salt__["mssql.user_remove"](name, **kwargs):
        ret["comment"] = "User {} has been removed".format(name)
        ret["changes"][name] = "Absent"
    else:
        ret["result"] = False
        ret["comment"] = "User {} failed to be removed".format(name)
    return ret
# Virtual name under which this state module is exposed to Salt.
__virtualname__ = "neutron_subnet"
def __virtual__():
    """Load only when the neutronng execution module (shade-backed) is available."""
    if "neutronng.list_subnets" in __salt__:
        return __virtualname__
    return (
        False,
        "The neutronng execution module failed to load: shade python module is not available",
    )
def present(name, auth=None, **kwargs):
    """
    Ensure a subnet exists and is up-to-date

    name
        Name of the subnet

    network_name_or_id
        The unique name or ID of the attached network.
        If a non-unique name is supplied, an exception is raised.

    allocation_pools
        A list of dictionaries of the start and end addresses
        for the allocation pools

    gateway_ip
        The gateway IP address.

    dns_nameservers
        A list of DNS name servers for the subnet.

    host_routes
        A list of host route dictionaries for the subnet.

    ipv6_ra_mode
        IPv6 Router Advertisement mode.
        Valid values are: ‘dhcpv6-stateful’, ‘dhcpv6-stateless’, or ‘slaac’.

    ipv6_address_mode
        IPv6 address mode.
        Valid values are: ‘dhcpv6-stateful’, ‘dhcpv6-stateless’, or ‘slaac’.
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    kwargs = __utils__["args.clean_kwargs"](**kwargs)
    __salt__["neutronng.setup_clouds"](auth)
    kwargs["subnet_name"] = name
    subnet = __salt__["neutronng.subnet_get"](name=name)
    if subnet is None:
        if __opts__["test"]:
            ret["result"] = None
            ret["changes"] = kwargs
            ret["comment"] = "Subnet will be created."
            return ret
        new_subnet = __salt__["neutronng.subnet_create"](**kwargs)
        ret["changes"] = new_subnet
        ret["comment"] = "Created subnet"
        return ret
    changes = __salt__["neutronng.compare_changes"](subnet, **kwargs)
    if changes:
        if __opts__["test"] is True:
            ret["result"] = None
            ret["changes"] = changes
            # BUGFIX: this message previously said "Project will be updated."
            # (copy-pasted from a project state); this state manages subnets.
            ret["comment"] = "Subnet will be updated."
            return ret
        # update_subnet does not support changing cidr,
        # so we have to delete and recreate the subnet in this case.
        if "cidr" in changes or "tenant_id" in changes:
            __salt__["neutronng.subnet_delete"](name=name)
            new_subnet = __salt__["neutronng.subnet_create"](**kwargs)
            ret["changes"] = new_subnet
            ret["comment"] = "Deleted and recreated subnet"
            return ret
        __salt__["neutronng.subnet_update"](**kwargs)
        ret["changes"].update(changes)
        ret["comment"] = "Updated subnet"
    return ret
def absent(name, auth=None):
    """
    Ensure a subnet does not exists

    name
        Name of the subnet
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    __salt__["neutronng.setup_clouds"](auth)
    subnet = __salt__["neutronng.subnet_get"](name=name)
    if subnet:
        if __opts__["test"] is True:
            ret["result"] = None
            ret["changes"] = {"id": subnet.id}
            # BUGFIX: this message previously said "Project will be deleted."
            # (copy-pasted from a project state); this state manages subnets.
            ret["comment"] = "Subnet will be deleted."
            return ret
        __salt__["neutronng.subnet_delete"](name=subnet)
        # BUGFIX: record the subnet's actual ID, matching the test-mode
        # branch above (previously the state *name* was reported as the id).
        ret["changes"]["id"] = subnet.id
        ret["comment"] = "Deleted subnet"
    return ret
import logging
log = logging.getLogger(__name__)
def _check_cron(user, path, mask, cmd):
    """Check the user's incrontab for the given entry.

    Returns "present" when an identical entry exists, "update" when an entry
    for the same path/cmd exists with an overlapping (but different) event
    mask, and "absent" otherwise.
    """
    # Compare masks order-insensitively.
    arg_mask = sorted(mask.split(","))
    lst = __salt__["incron.list_tab"](user)
    if cmd.endswith("\n"):
        cmd = cmd[:-1]
    for cron in lst["crons"]:
        if path == cron["path"] and cron["cmd"] == cmd:
            cron_mask = sorted(cron["mask"].split(","))
            if cron_mask == arg_mask:
                return "present"
            # any() takes the generator directly; the old code built a
            # throwaway list first.
            if any(x in cron_mask for x in arg_mask):
                return "update"
    return "absent"
def _get_cron_info():
    """
    Returns the proper group owner and path to the incron directory
    """
    # Only the group differs by platform; owner and spool dir are constant.
    os_name = __grains__["os"]
    if os_name == "FreeBSD":
        group = "wheel"
    elif os_name == "OpenBSD":
        group = "crontab"
    else:
        # Covers Solaris (os_family check in the old code) and everything else.
        group = "root"
    return "root", group, "/var/spool/incron"
def present(name, path, mask, cmd, user="root"):
    """
    Verifies that the specified incron job is present for the specified user.
    For more advanced information about what exactly can be set in the cron
    timing parameters, check your incron system's documentation. Most Unix-like
    systems' incron documentation can be found via the incrontab man page:
    ``man 5 incrontab``.

    name
        Unique comment describing the entry
    path
        The path that should be watched
    user
        The name of the user who's crontab needs to be modified, defaults to
        the root user
    mask
        The mask of events that should be monitored for
    cmd
        The cmd that should be executed
    """
    # ``mask`` may be given as a list of event names; normalize it to the
    # comma-separated form incrontab expects.
    mask = ",".join(mask)
    ret = {"changes": {}, "comment": "", "name": name, "result": True}
    if __opts__["test"]:
        # Dry run: report what would happen without touching the incrontab.
        status = _check_cron(user, path, mask, cmd)
        ret["result"] = None
        if status == "absent":
            ret["comment"] = "Incron {} is set to be added".format(name)
        elif status == "present":
            ret["result"] = True
            ret["comment"] = "Incron {} already present".format(name)
        elif status == "update":
            ret["comment"] = "Incron {} is set to be updated".format(name)
        return ret
    # incron.set_job returns "present", "new", "updated", or an error string.
    data = __salt__["incron.set_job"](user=user, path=path, mask=mask, cmd=cmd)
    if data == "present":
        ret["comment"] = "Incron {} already present".format(name)
        return ret
    if data == "new":
        ret["comment"] = "Incron {} added to {}'s incrontab".format(name, user)
        ret["changes"] = {user: name}
        return ret
    if data == "updated":
        ret["comment"] = "Incron {} updated".format(name)
        ret["changes"] = {user: name}
        return ret
    ret["comment"] = "Incron {} for user {} failed to commit with error \n{}".format(
        name, user, data
    )
    ret["result"] = False
    return ret
def absent(name, path, mask, cmd, user="root"):
    """
    Verifies that the specified incron job is absent for the specified user; only
    the name is matched when removing a incron job.

    name
        Unique comment describing the entry
    path
        The path that should be watched
    user
        The name of the user who's crontab needs to be modified, defaults to
        the root user
    mask
        The mask of events that should be monitored for
    cmd
        The cmd that should be executed
    """
    # ``mask`` may be given as a list of event names; normalize it to the
    # comma-separated form incrontab expects.
    mask = ",".join(mask)
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}
    if __opts__["test"]:
        # Dry run: report what would happen without touching the incrontab.
        status = _check_cron(user, path, mask, cmd)
        ret["result"] = None
        if status == "absent":
            ret["result"] = True
            ret["comment"] = "Incron {} is absent".format(name)
        elif status == "present" or status == "update":
            ret["comment"] = "Incron {} is set to be removed".format(name)
        return ret
    # incron.rm_job returns "absent", "removed", or an error string.
    data = __salt__["incron.rm_job"](user=user, path=path, mask=mask, cmd=cmd)
    if data == "absent":
        ret["comment"] = "Incron {} already absent".format(name)
        return ret
    if data == "removed":
        ret["comment"] = "Incron {} removed from {}'s crontab".format(name, user)
        ret["changes"] = {user: name}
        return ret
    ret["comment"] = "Incron {} for user {} failed to commit with error {}".format(
        name, user, data
    )
    ret["result"] = False
    return ret
import salt.utils.path
def __virtual__():
    """
    Only work with nfs tools installed
    """
    # The old code returned bool("exportfs"), which is always True; return
    # True directly when the binary is found.
    if salt.utils.path.which("exportfs"):
        return True
    return (
        False,
        "The nfs_exports state module failed to load: "
        "the exportfs binary is not in the path",
    )
def present(name, clients=None, hosts=None, options=None, exports="/etc/exports"):
    """
    Ensure that the named export is present with the given options

    name
        The export path to configure

    clients
        A list of hosts and the options applied to them.
        This option may not be used in combination with
        the 'hosts' or 'options' shortcuts.

    .. code-block:: yaml

        - clients:
          # First export
          - hosts: '10.0.2.0/24'
            options:
              - 'rw'
          # Second export
          - hosts: '*.example.com'
            options:
              - 'ro'
              - 'subtree_check'

    hosts
        A string matching a number of hosts, for example:

    .. code-block:: yaml

        hosts: '10.0.2.123'
        hosts: '10.0.2.0/24'
        hosts: 'minion1.example.com'
        hosts: '*.example.com'
        hosts: '*'

    options
        A list of NFS options, for example:

    .. code-block:: yaml

        options:
          - 'rw'
          - 'subtree_check'
    """
    path = name
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}
    if not clients:
        if not hosts:
            ret["result"] = False
            ret["comment"] = "Either 'clients' or 'hosts' must be defined"
            return ret
        # options being None is handled by add_export()
        clients = [{"hosts": hosts, "options": options}]
    old = __salt__["nfs3.list_exports"](exports)
    if path in old:
        if old[path] == clients:
            # Already configured exactly as requested; nothing to do.
            ret["result"] = True
            ret["comment"] = "Export {} already configured".format(path)
            return ret
        ret["changes"]["new"] = clients
        ret["changes"]["old"] = old[path]
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Export {} would be changed".format(path)
            return ret
        # Replace the existing export: delete it, then re-add below.
        __salt__["nfs3.del_export"](exports, path)
    else:
        ret["changes"]["old"] = None
        ret["changes"]["new"] = clients
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Export {} would be added".format(path)
            return ret
    add_export = __salt__["nfs3.add_export"]
    for exp in clients:
        add_export(exports, path, exp["hosts"], exp["options"])
    ret["changes"]["new"] = clients
    # The state result mirrors whether exportfs reloaded cleanly.
    try_reload = __salt__["nfs3.reload_exports"]()
    ret["comment"] = try_reload["stderr"]
    ret["result"] = try_reload["result"]
    return ret
def absent(name, exports="/etc/exports"):
    """
    Ensure that the named path is not exported

    name
        The export path to remove
    """
    path = name
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}
    old = __salt__["nfs3.list_exports"](exports)
    if path in old:
        if __opts__["test"]:
            ret["comment"] = "Export {} would be removed".format(path)
            ret["changes"][path] = old[path]
            ret["result"] = None
            return ret
        __salt__["nfs3.del_export"](exports, path)
        # The state result mirrors whether exportfs reloaded cleanly.
        try_reload = __salt__["nfs3.reload_exports"]()
        if not try_reload["result"]:
            ret["comment"] = try_reload["stderr"]
        else:
            ret["comment"] = "Export {} removed".format(path)
        ret["result"] = try_reload["result"]
        ret["changes"][path] = old[path]
    else:
        ret["comment"] = "Export {} already absent".format(path)
        ret["result"] = True
    return ret
import inspect
import logging
import salt.exceptions
log = logging.getLogger(__name__)
def create_alert(name=None, api_key=None, reason=None, action_type="Create"):
    """
    Create an alert in OpsGenie. Example usage with Salt's requisites and other
    global state arguments could be found above.

    Required Parameters:

    api_key
        It's the API Key you've copied while adding integration in OpsGenie.

    reason
        It will be used as alert's default message in OpsGenie.

    Optional Parameters:

    name
        It will be used as alert's alias. If you want to use the close
        functionality you must provide name field for both states like
        in above case.

    action_type
        OpsGenie supports the default values Create/Close for action_type.
        You can customize this field with OpsGenie's custom actions for
        other purposes like adding notes or acknowledging alerts.
    """
    _, _, _, values = inspect.getargvalues(inspect.currentframe())
    # NOTE(review): this logs every argument, including api_key — consider
    # masking the key before logging; confirm against the security policy.
    log.info("Arguments values: %s", values)
    ret = {"result": "", "name": "", "changes": "", "comment": ""}
    if api_key is None or reason is None:
        raise salt.exceptions.SaltInvocationError("API Key or Reason cannot be None.")
    if __opts__["test"] is True:
        ret[
            "comment"
        ] = 'Test: {} alert request will be processed using the API Key="{}".'.format(
            action_type, api_key
        )
        # Return ``None`` when running with ``test=true``.
        ret["result"] = None
        return ret
    response_status_code, response_text = __salt__["opsgenie.post_data"](
        api_key=api_key, name=name, reason=reason, action_type=action_type
    )
    if 200 <= response_status_code < 300:
        log.info(
            "POST Request has succeeded with message: %s status code: %s",
            response_text,
            response_status_code,
        )
        # BUGFIX: this branch previously reused the test-mode
        # 'Test: ... will be processed' message verbatim; report the actual
        # outcome instead.
        ret["comment"] = '{} alert request was processed using the API Key="{}".'.format(
            action_type, api_key
        )
        ret["result"] = True
    else:
        log.error(
            "POST Request has failed with error: %s status code: %s",
            response_text,
            response_status_code,
        )
        # Surface the API error to the state output instead of an empty comment.
        ret["comment"] = "{} alert request failed: {}".format(
            action_type, response_text
        )
        ret["result"] = False
    return ret
def close_alert(
    name=None, api_key=None, reason="Conditions are met.", action_type="Close"
):
    """
    Close an alert in OpsGenie. It's a wrapper function for create_alert.
    Example usage with Salt's requisites and other global state arguments
    could be found above.

    Required Parameters:

    name
        It will be used as alert's alias. If you want to use the close
        functionality you must provide name field for both states like
        in above case.

    Optional Parameters:

    api_key
        It's the API Key you've copied while adding integration in OpsGenie.

    reason
        It will be used as alert's default message in OpsGenie.

    action_type
        OpsGenie supports the default values Create/Close for action_type.
        You can customize this field with OpsGenie's custom actions for
        other purposes like adding notes or acknowledging alerts.
    """
    if name is None:
        raise salt.exceptions.SaltInvocationError("Name cannot be None.")
    # Delegate to create_alert with an explicit action type of "Close".
    return create_alert(
        name=name, api_key=api_key, reason=reason, action_type=action_type
    )
import copy
import datetime
import logging
import math
import sys
import salt.utils.dictupdate as dictupdate
# NOTE(review): calling logging.basicConfig at import time configures the
# *root* logger and can override the Salt daemon's own logging setup — unusual
# for a state module; confirm this side effect is intended.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(name)s %(levelname)s %(message)s",
    stream=sys.stdout,
)
# Root logger handle used throughout this module.
log = logging.getLogger()
# Caught in _update_global_secondary_indexes() and converted into a failed
# state result.
class GsiNotUpdatableError(Exception):
    """Raised when a global secondary index cannot be updated."""
def __virtual__():
    """
    Only load if boto_dynamodb is available.
    """
    if "boto_dynamodb.exists" not in __salt__:
        return (False, "boto_dynamodb module could not be loaded")
    return "boto_dynamodb"
def present(
    name=None,
    table_name=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
    read_capacity_units=None,
    write_capacity_units=None,
    alarms=None,
    alarms_from_pillar="boto_dynamodb_alarms",
    hash_key=None,
    hash_key_data_type=None,
    range_key=None,
    range_key_data_type=None,
    local_indexes=None,
    global_indexes=None,
    backup_configs_from_pillars="boto_dynamodb_backup_configs",
):
    """
    Ensure the DynamoDB table exists. Table throughput can be updated after
    table creation.

    Global secondary indexes (GSIs) are managed with some exceptions:

    - If a GSI deletion is detected, a failure will occur (deletes should be
      done manually in the AWS console).

    - If multiple GSIs are added in a single Salt call, a failure will occur
      (boto supports one creation at a time). Note that this only applies after
      table creation; multiple GSIs can be created during table creation.

    - Updates to existing GSIs are limited to read/write capacity only
      (DynamoDB limitation).

    name
        Name of the DynamoDB table

    table_name
        Name of the DynamoDB table (deprecated)

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.

    read_capacity_units
        The read throughput for this table

    write_capacity_units
        The write throughput for this table

    hash_key
        The name of the attribute that will be used as the hash key
        for this table

    hash_key_data_type
        The DynamoDB datatype of the hash key

    range_key
        The name of the attribute that will be used as the range key
        for this table

    range_key_data_type
        The DynamoDB datatype of the range key

    local_indexes
        The local indexes you would like to create

    global_indexes
        The global indexes you would like to create

    backup_configs_from_pillars
        Pillars to use to configure DataPipeline backups
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    if table_name:
        ret["warnings"] = [
            "boto_dynamodb.present: `table_name` is deprecated."
            " Please use `name` instead."
        ]
        ret["name"] = table_name
        name = table_name

    comments = []
    changes_old = {}
    changes_new = {}

    # Ensure DynamoDB table exists
    table_exists = __salt__["boto_dynamodb.exists"](name, region, key, keyid, profile)
    if not table_exists:
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "DynamoDB table {} would be created.".format(name)
            return ret
        is_created = __salt__["boto_dynamodb.create_table"](
            name,
            region,
            key,
            keyid,
            profile,
            read_capacity_units,
            write_capacity_units,
            hash_key,
            hash_key_data_type,
            range_key,
            range_key_data_type,
            local_indexes,
            global_indexes,
        )
        if not is_created:
            ret["result"] = False
            ret["comment"] = "Failed to create table {}".format(name)
            _add_changes(ret, changes_old, changes_new)
            return ret
        comments.append("DynamoDB table {} was successfully created".format(name))
        changes_new["table"] = name
        changes_new["read_capacity_units"] = read_capacity_units
        changes_new["write_capacity_units"] = write_capacity_units
        changes_new["hash_key"] = hash_key
        changes_new["hash_key_data_type"] = hash_key_data_type
        changes_new["range_key"] = range_key
        changes_new["range_key_data_type"] = range_key_data_type
        changes_new["local_indexes"] = local_indexes
        changes_new["global_indexes"] = global_indexes
    else:
        comments.append("DynamoDB table {} exists".format(name))

    # Ensure DynamoDB table provisioned throughput matches
    description = __salt__["boto_dynamodb.describe"](name, region, key, keyid, profile)
    provisioned_throughput = description.get("Table", {}).get(
        "ProvisionedThroughput", {}
    )
    current_write_capacity_units = provisioned_throughput.get("WriteCapacityUnits")
    current_read_capacity_units = provisioned_throughput.get("ReadCapacityUnits")
    throughput_matches = (
        current_write_capacity_units == write_capacity_units
        and current_read_capacity_units == read_capacity_units
    )
    if not throughput_matches:
        if __opts__["test"]:
            ret["result"] = None
            comments.append("DynamoDB table {} is set to be updated.".format(name))
        else:
            is_updated = __salt__["boto_dynamodb.update"](
                name,
                throughput={
                    "read": read_capacity_units,
                    "write": write_capacity_units,
                },
                region=region,
                key=key,
                keyid=keyid,
                profile=profile,
            )
            if not is_updated:
                ret["result"] = False
                ret["comment"] = "Failed to update table {}".format(name)
                _add_changes(ret, changes_old, changes_new)
                return ret
            comments.append("DynamoDB table {} was successfully updated".format(name))
            # BUGFIX: stray trailing commas previously wrapped these values in
            # 1-tuples, reporting changes like (10,) instead of 10 — the table
            # creation branch above stores bare values.
            changes_old["read_capacity_units"] = current_read_capacity_units
            changes_old["write_capacity_units"] = current_write_capacity_units
            changes_new["read_capacity_units"] = read_capacity_units
            changes_new["write_capacity_units"] = write_capacity_units
    else:
        comments.append("DynamoDB table {} throughput matches".format(name))

    provisioned_indexes = description.get("Table", {}).get("GlobalSecondaryIndexes", [])
    _ret = _global_indexes_present(
        provisioned_indexes,
        global_indexes,
        changes_old,
        changes_new,
        comments,
        name,
        region,
        key,
        keyid,
        profile,
    )
    if not _ret["result"]:
        comments.append(_ret["comment"])
        ret["result"] = _ret["result"]
    if ret["result"] is False:
        ret["comment"] = ",\n".join(comments)
        _add_changes(ret, changes_old, changes_new)
        return ret

    _ret = _alarms_present(
        name,
        alarms,
        alarms_from_pillar,
        write_capacity_units,
        read_capacity_units,
        region,
        key,
        keyid,
        profile,
    )
    ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
    comments.append(_ret["comment"])
    if not _ret["result"]:
        ret["result"] = _ret["result"]
    if ret["result"] is False:
        ret["comment"] = ",\n".join(comments)
        _add_changes(ret, changes_old, changes_new)
        return ret

    # Ensure backup datapipeline is present
    datapipeline_configs = copy.deepcopy(
        __salt__["pillar.get"](backup_configs_from_pillars, [])
    )
    for config in datapipeline_configs:
        datapipeline_ret = _ensure_backup_datapipeline_present(
            name=name,
            schedule_name=config["name"],
            period=config["period"],
            utc_hour=config["utc_hour"],
            s3_base_location=config["s3_base_location"],
        )
        # Add comments and changes if successful changes were made (True for live mode,
        # None for test mode).
        if datapipeline_ret["result"] in [True, None]:
            ret["result"] = datapipeline_ret["result"]
            comments.append(datapipeline_ret["comment"])
            if datapipeline_ret.get("changes"):
                # BUGFIX: a stray trailing comma previously wrapped this dict
                # in a 1-tuple.
                ret["changes"][
                    "backup_datapipeline_{}".format(config["name"])
                ] = datapipeline_ret.get("changes")
        else:
            ret["comment"] = ",\n".join([ret["comment"], datapipeline_ret["comment"]])
            _add_changes(ret, changes_old, changes_new)
            return ret

    ret["comment"] = ",\n".join(comments)
    _add_changes(ret, changes_old, changes_new)
    return ret
def _add_changes(ret, changes_old, changes_new):
if changes_old:
ret["changes"]["old"] = changes_old
if changes_new:
ret["changes"]["new"] = changes_new
def _global_indexes_present(
    provisioned_indexes,
    global_indexes,
    changes_old,
    changes_new,
    comments,
    name,
    region,
    key,
    keyid,
    profile,
):
    """Handles global secondary index for the table present state.

    Compares the provisioned GSIs against the Salt config, refusing deletions
    and multi-creations, creating at most one new GSI, and updating the
    throughput of existing ones.  Mutates changes_old/changes_new/comments.
    """
    ret = {"result": True}
    if provisioned_indexes:
        provisioned_gsi_config = {
            index["IndexName"]: index for index in provisioned_indexes
        }
    else:
        provisioned_gsi_config = {}
    provisioned_index_names = set(provisioned_gsi_config.keys())
    # Map of index name to given Salt config for this run. This loop is complicated
    # because global_indexes is made up of OrderedDicts and lists.
    gsi_config = {}
    if global_indexes:
        for index in global_indexes:
            # Each index config is a key that maps to a list of OrderedDicts.
            index_config = next(iter(index.values()))
            index_name = None
            for entry in index_config:
                # Key by the name field in the index config.
                # BUGFIX: dict.keys() returns a view in Python 3 and never
                # compares equal to a list, so the previous
                # ``entry.keys() == ["name"]`` check could never match.
                if list(entry.keys()) == ["name"]:
                    index_name = next(iter(entry.values()))
            if not index_name:
                ret["result"] = False
                ret["comment"] = "Index name not found for table {}".format(name)
                return ret
            gsi_config[index_name] = index
    (
        existing_index_names,
        new_index_names,
        index_names_to_be_deleted,
    ) = _partition_index_names(provisioned_index_names, set(gsi_config.keys()))
    if index_names_to_be_deleted:
        ret["result"] = False
        ret["comment"] = (
            "Deletion of GSIs ({}) is not supported! Please do this "
            "manually in the AWS console.".format(", ".join(index_names_to_be_deleted))
        )
        return ret
    elif len(new_index_names) > 1:
        ret["result"] = False
        ret["comment"] = (
            "Creation of multiple GSIs ({}) is not supported due to API "
            "limitations. Please create them one at a time.".format(new_index_names)
        )
        return ret
    if new_index_names:
        # Given the length check above, new_index_names should have a single element here.
        index_name = next(iter(new_index_names))
        _add_global_secondary_index(
            ret,
            name,
            index_name,
            changes_old,
            changes_new,
            comments,
            gsi_config,
            region,
            key,
            keyid,
            profile,
        )
        if not ret["result"]:
            return ret
    if existing_index_names:
        _update_global_secondary_indexes(
            ret,
            changes_old,
            changes_new,
            comments,
            existing_index_names,
            provisioned_gsi_config,
            gsi_config,
            name,
            region,
            key,
            keyid,
            profile,
        )
        if not ret["result"]:
            return ret
    if "global_indexes" not in changes_old and "global_indexes" not in changes_new:
        comments.append("All global secondary indexes match")
    return ret
def _partition_index_names(provisioned_index_names, index_names):
"""Returns 3 disjoint sets of indexes: existing, to be created, and to be deleted."""
existing_index_names = set()
new_index_names = set()
for name in index_names:
if name in provisioned_index_names:
existing_index_names.add(name)
else:
new_index_names.add(name)
index_names_to_be_deleted = provisioned_index_names - existing_index_names
return existing_index_names, new_index_names, index_names_to_be_deleted
def _add_global_secondary_index(
    ret,
    name,
    index_name,
    changes_old,
    changes_new,
    comments,
    gsi_config,
    region,
    key,
    keyid,
    profile,
):
    """Create the GSI ``index_name`` on table ``name``.

    Mutates ``ret``, ``changes_new`` and ``comments`` in place; only updates
    ``ret`` on failure or in test mode.

    NOTE(review): ``changes_old`` is accepted but never used here; it appears
    to be kept for signature parity with _update_global_secondary_indexes.
    """
    if __opts__["test"]:
        # Test mode: report what would happen and stop before any API call.
        ret["result"] = None
        ret["comment"] = "Dynamo table {} will have a GSI added: {}".format(
            name, index_name
        )
        return
    changes_new.setdefault("global_indexes", {})
    success = __salt__["boto_dynamodb.create_global_secondary_index"](
        name,
        # Convert the Salt-pillar index config into a boto index object.
        __salt__["boto_dynamodb.extract_index"](
            gsi_config[index_name], global_index=True
        ),
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )

    if success:
        comments.append("Created GSI {}".format(index_name))
        changes_new["global_indexes"][index_name] = gsi_config[index_name]
    else:
        ret["result"] = False
        ret["comment"] = "Failed to create GSI {}".format(index_name)
def _update_global_secondary_indexes(
    ret,
    changes_old,
    changes_new,
    comments,
    existing_index_names,
    provisioned_gsi_config,
    gsi_config,
    name,
    region,
    key,
    keyid,
    profile,
):
    """Apply throughput updates to already-provisioned GSIs on table ``name``.

    Mutates ``ret``, ``changes_old``, ``changes_new`` and ``comments`` in
    place; only updates ``ret`` on failure or in test mode.
    """
    try:
        provisioned_throughputs, index_updates = _determine_gsi_updates(
            existing_index_names, provisioned_gsi_config, gsi_config
        )
    except GsiNotUpdatableError as e:
        # A non-throughput attribute differs; existing GSIs only support
        # throughput changes, so surface this as a state failure.
        ret["result"] = False
        ret["comment"] = str(e)
        return

    if index_updates:
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Dynamo table {} will have GSIs updated: {}".format(
                name, ", ".join(index_updates.keys())
            )
            return
        changes_old.setdefault("global_indexes", {})
        changes_new.setdefault("global_indexes", {})
        success = __salt__["boto_dynamodb.update_global_secondary_index"](
            name,
            index_updates,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )

        if success:
            comments.append(
                "Updated GSIs with new throughputs {}".format(index_updates)
            )
            # Record the previously provisioned throughput as "old" and the
            # requested throughput as "new" for every updated index.
            for index_name in index_updates:
                changes_old["global_indexes"][index_name] = provisioned_throughputs[
                    index_name
                ]
                changes_new["global_indexes"][index_name] = index_updates[index_name]
        else:
            ret["result"] = False
            ret["comment"] = "Failed to update GSI throughputs {}".format(index_updates)
def _determine_gsi_updates(existing_index_names, provisioned_gsi_config, gsi_config):
    """Compute throughput deltas for already-provisioned GSIs.

    Returns a 2-tuple:
      - provisioned_throughputs: index name -> {'read': ..., 'write': ...}
        describing currently provisioned capacity, for every existing index.
      - index_updates: index name -> {'read': ..., 'write': ...} containing
        only the indexes whose requested throughput differs.

    Raises GsiNotUpdatableError when any non-throughput attribute differs,
    since throughput is the only property compared as updatable here.
    """
    # index name -> {'read': <read throughput>, 'write': <write throughput>}
    provisioned_throughputs = {}
    index_updates = {}
    for index_name in existing_index_names:
        current_config = provisioned_gsi_config[index_name]
        # Convert the Salt config into the boto schema dict for comparison.
        new_config = __salt__["boto_dynamodb.extract_index"](
            gsi_config[index_name], global_index=True
        ).schema()
        # The provisioned config will have more fields than the new config, so only consider
        # fields in new_config.
        for key in new_config:
            if key in current_config and key != "ProvisionedThroughput":
                new_value = new_config[key]
                current_value = current_config[key]
                # This is a special case since the Projection value can contain a list (not
                # correctly comparable with == as order doesn't matter).
                if key == "Projection":
                    if new_value["ProjectionType"] != current_value["ProjectionType"]:
                        raise GsiNotUpdatableError("GSI projection types do not match")
                    elif set(new_value.get("NonKeyAttributes", [])) != set(
                        current_value.get("NonKeyAttributes", [])
                    ):
                        raise GsiNotUpdatableError(
                            "NonKeyAttributes do not match for GSI projection"
                        )
                elif new_value != current_value:
                    raise GsiNotUpdatableError(
                        "GSI property {} cannot be updated for index {}".format(
                            key, index_name
                        )
                    )
        current_throughput = current_config.get("ProvisionedThroughput")
        current_read = current_throughput.get("ReadCapacityUnits")
        current_write = current_throughput.get("WriteCapacityUnits")
        provisioned_throughputs[index_name] = {
            "read": current_read,
            "write": current_write,
        }
        new_throughput = new_config.get("ProvisionedThroughput")
        new_read = new_throughput.get("ReadCapacityUnits")
        new_write = new_throughput.get("WriteCapacityUnits")
        # Only schedule an update when either capacity value actually changed.
        if current_read != new_read or current_write != new_write:
            index_updates[index_name] = {"read": new_read, "write": new_write}
    return provisioned_throughputs, index_updates
def _alarms_present(
    name,
    alarms,
    alarms_from_pillar,
    write_capacity_units,
    read_capacity_units,
    region,
    key,
    keyid,
    profile,
):
    """helper method for present. ensure that cloudwatch_alarms are set

    Merges alarm definitions from pillar (``alarms_from_pillar``) with the
    ones passed in (``alarms`` wins), then delegates each alarm to the
    boto_cloudwatch_alarm.present state, merging all results into one
    state-return dict.
    """
    # load data from alarms_from_pillar
    tmp = copy.deepcopy(__salt__["config.option"](alarms_from_pillar, {}))
    # merge with data from alarms
    if alarms:
        tmp = dictupdate.update(tmp, alarms)
    # set alarms, using boto_cloudwatch_alarm.present
    merged_return_value = {"name": name, "result": True, "comment": "", "changes": {}}
    for _, info in tmp.items():
        # add dynamodb table to name and description
        info["name"] = name + " " + info["name"]
        info["attributes"]["description"] = (
            name + " " + info["attributes"]["description"]
        )
        # add dimension attribute
        info["attributes"]["dimensions"] = {"TableName": [name]}
        # If only a percentage threshold was given, derive the absolute
        # threshold from the table's provisioned write capacity.
        if (
            info["attributes"]["metric"] == "ConsumedWriteCapacityUnits"
            and "threshold" not in info["attributes"]
        ):
            info["attributes"]["threshold"] = math.ceil(
                write_capacity_units * info["attributes"]["threshold_percent"]
            )
            del info["attributes"]["threshold_percent"]
            # the write_capacity_units is given in unit / second. So we need
            # to multiply by the period to get the proper threshold.
            # http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/MonitoringDynamoDB.html
            info["attributes"]["threshold"] *= info["attributes"]["period"]
        # Same derivation for the read-capacity metric.
        if (
            info["attributes"]["metric"] == "ConsumedReadCapacityUnits"
            and "threshold" not in info["attributes"]
        ):
            info["attributes"]["threshold"] = math.ceil(
                read_capacity_units * info["attributes"]["threshold_percent"]
            )
            del info["attributes"]["threshold_percent"]
            # the read_capacity_units is given in unit / second. So we need
            # to multiply by the period to get the proper threshold.
            # http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/MonitoringDynamoDB.html
            info["attributes"]["threshold"] *= info["attributes"]["period"]
        # set alarm
        kwargs = {
            "name": info["name"],
            "attributes": info["attributes"],
            "region": region,
            "key": key,
            "keyid": keyid,
            "profile": profile,
        }
        results = __states__["boto_cloudwatch_alarm.present"](**kwargs)
        # Any single alarm failure fails the merged result; changes and
        # comments from every alarm are accumulated.
        if not results["result"]:
            merged_return_value["result"] = results["result"]
        if results.get("changes", {}) != {}:
            merged_return_value["changes"][info["name"]] = results["changes"]
        if "comment" in results:
            merged_return_value["comment"] += results["comment"]
    return merged_return_value
def _ensure_backup_datapipeline_present(
    name, schedule_name, period, utc_hour, s3_base_location
):
    """Ensure a Data Pipeline exists that backs up table ``name`` to S3.

    Builds the pipeline definition (schedule + table/output parameters) and
    delegates to the boto_datapipeline.present state, returning its result.
    The start time is staggered per table via _next_datetime_with_utc_hour
    to avoid Data Pipeline API throttling.
    """
    kwargs = {
        "name": "{}-{}-backup".format(name, schedule_name),
        "pipeline_objects": {
            "DefaultSchedule": {
                "name": schedule_name,
                "fields": {
                    "period": period,
                    "type": "Schedule",
                    "startDateTime": _next_datetime_with_utc_hour(
                        name, utc_hour
                    ).isoformat(),
                },
            },
        },
        "parameter_values": {
            "myDDBTableName": name,
            "myOutputS3Loc": "{}/{}/".format(s3_base_location, name),
        },
    }
    return __states__["boto_datapipeline.present"](**kwargs)
def _get_deterministic_value_for_table_name(table_name, max_value):
"""
For a given table_name, returns hash of the table_name limited by max_value.
"""
return hash(table_name) % max_value
def _next_datetime_with_utc_hour(table_name, utc_hour):
    """
    Datapipeline API is throttling us, as all the pipelines are started at the same time.
    We would like to uniformly distribute the startTime over a 60 minute window.

    Return the next future utc datetime where
        hour == utc_hour
        minute = A value between 0-59 (depending on table name)
        second = A value between 0-59 (depending on table name)
    """
    now = datetime.datetime.utcnow()
    # Base the candidate on the current UTC date. The previous implementation
    # used datetime.date.today() (the *local* date) but compared against
    # utcnow(), which could be off by a day around midnight on minions whose
    # timezone differs from UTC.
    # The minute and second values generated are deterministic, as we do not want
    # pipeline definition to change for every run.
    start_date_time = datetime.datetime(
        year=now.year,
        month=now.month,
        day=now.day,
        hour=utc_hour,
        minute=_get_deterministic_value_for_table_name(table_name, 60),
        second=_get_deterministic_value_for_table_name(table_name, 60),
    )
    if start_date_time < now:
        # Today's slot has already passed; schedule the same time tomorrow.
        start_date_time += datetime.timedelta(days=1)
    return start_date_time
def absent(name, region=None, key=None, keyid=None, profile=None):
    """
    Ensure the DynamoDB table does not exist.

    name
        Name of the DynamoDB table.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    # Nothing to do when the table is already gone.
    if not __salt__["boto_dynamodb.exists"](name, region, key, keyid, profile):
        ret["comment"] = "DynamoDB table {} does not exist".format(name)
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "DynamoDB table {} is set to be deleted".format(name)
        return ret

    if not __salt__["boto_dynamodb.delete"](name, region, key, keyid, profile):
        ret["result"] = False
        ret["comment"] = "Failed to delete DynamoDB table {}".format(name)
        return ret

    ret["comment"] = "Deleted DynamoDB table {}".format(name)
    ret["changes"] = {
        "old": "Table {} exists".format(name),
        "new": "Table {} deleted".format(name),
    }
    return ret
import logging
from functools import wraps
import salt.utils.azurearm
__virtualname__ = "azurearm_dns"
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only make this state available if the azurearm_dns module is available.
    """
    if "azurearm_dns.zones_list_by_resource_group" not in __salt__:
        return (False, "azurearm_dns module could not be loaded")
    return __virtualname__
def _deprecation_message(function):
    """
    Decorator wrapper to warn about azurearm deprecation

    Emits a FutureWarning on every call of the wrapped function, then calls
    it with Salt-internal kwargs stripped.
    """

    @wraps(function)
    def wrapped(*args, **kwargs):
        # NOTE(review): only ``salt.utils.azurearm`` is imported at the top of
        # this module; ``salt.utils.versions`` and ``salt.utils.args`` are
        # presumably importable as attributes of the already-imported
        # ``salt.utils`` package by the time this runs -- confirm, otherwise
        # this would raise AttributeError.
        salt.utils.versions.warn_until(
            "Chlorine",
            "The 'azurearm' functionality in Salt has been deprecated and its "
            "functionality will be removed in version 3007 in favor of the "
            "saltext.azurerm Salt Extension. "
            "(https://github.com/salt-extensions/saltext-azurerm)",
            category=FutureWarning,
        )
        # Strip Salt loader-injected keyword arguments before calling through.
        ret = function(*args, **salt.utils.args.clean_kwargs(**kwargs))
        return ret

    return wrapped
@_deprecation_message
def zone_present(
    name,
    resource_group,
    etag=None,
    if_match=None,
    if_none_match=None,
    registration_virtual_networks=None,
    resolution_virtual_networks=None,
    tags=None,
    zone_type="Public",
    connection_auth=None,
    **kwargs
):
    """
    .. versionadded:: 3000

    Ensure a DNS zone exists.

    :param name:
        Name of the DNS zone (without a terminating dot).

    :param resource_group:
        The resource group assigned to the DNS zone.

    :param etag:
        The etag of the zone. `Etags <https://docs.microsoft.com/en-us/azure/dns/dns-zones-records#etags>`_ are used
        to handle concurrent changes to the same resource safely.

    :param if_match:
        The etag of the DNS zone. Omit this value to always overwrite the current zone. Specify the last-seen etag
        value to prevent accidentally overwritting any concurrent changes.

    :param if_none_match:
        Set to '*' to allow a new DNS zone to be created, but to prevent updating an existing zone. Other values will
        be ignored.

    :param registration_virtual_networks:
        A list of references to virtual networks that register hostnames in this DNS zone. This is only when zone_type
        is Private. (requires `azure-mgmt-dns <https://pypi.python.org/pypi/azure-mgmt-dns>`_ >= 2.0.0rc1)

    :param resolution_virtual_networks:
        A list of references to virtual networks that resolve records in this DNS zone. This is only when zone_type is
        Private. (requires `azure-mgmt-dns <https://pypi.python.org/pypi/azure-mgmt-dns>`_ >= 2.0.0rc1)

    :param tags:
        A dictionary of strings can be passed as tag metadata to the DNS zone object.

    :param zone_type:
        The type of this DNS zone (Public or Private). Possible values include: 'Public', 'Private'. Default value: 'Public'
        (requires `azure-mgmt-dns <https://pypi.python.org/pypi/azure-mgmt-dns>`_ >= 2.0.0rc1)

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure DNS zone exists:
            azurearm_dns.zone_present:
                - name: contoso.com
                - resource_group: my_rg
                - zone_type: Private
                - registration_virtual_networks:
                  - /subscriptions/{{ sub }}/resourceGroups/my_rg/providers/Microsoft.Network/virtualNetworks/test_vnet
                - tags:
                    how_awesome: very
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}

    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    if not isinstance(connection_auth, dict):
        ret[
            "comment"
        ] = "Connection information must be specified via connection_auth dictionary!"
        return ret

    zone = __salt__["azurearm_dns.zone_get"](
        name, resource_group, azurearm_log_level="info", **connection_auth
    )

    # A zone dict without an "error" key means the zone already exists:
    # diff it against the requested parameters and update if needed.
    if "error" not in zone:
        tag_changes = __utils__["dictdiffer.deep_diff"](
            zone.get("tags", {}), tags or {}
        )
        if tag_changes:
            ret["changes"]["tags"] = tag_changes

        # The zone_type parameter is only accessible in azure-mgmt-dns >=2.0.0rc1
        if zone.get("zone_type"):
            if zone.get("zone_type").lower() != zone_type.lower():
                ret["changes"]["zone_type"] = {
                    "old": zone["zone_type"],
                    "new": zone_type,
                }

            if zone_type.lower() == "private":
                # The registration_virtual_networks parameter is only accessible in azure-mgmt-dns >=2.0.0rc1
                if registration_virtual_networks and not isinstance(
                    registration_virtual_networks, list
                ):
                    ret["comment"] = (
                        "registration_virtual_networks must be supplied as a list of"
                        " VNET ID paths!"
                    )
                    return ret
                reg_vnets = zone.get("registration_virtual_networks", [])
                # Compare VNET ID lists order- and case-insensitively.
                remote_reg_vnets = sorted(
                    vnet["id"].lower() for vnet in reg_vnets if "id" in vnet
                )
                local_reg_vnets = sorted(
                    vnet.lower() for vnet in registration_virtual_networks or []
                )
                if local_reg_vnets != remote_reg_vnets:
                    ret["changes"]["registration_virtual_networks"] = {
                        "old": remote_reg_vnets,
                        "new": local_reg_vnets,
                    }

                # The resolution_virtual_networks parameter is only accessible in azure-mgmt-dns >=2.0.0rc1
                if resolution_virtual_networks and not isinstance(
                    resolution_virtual_networks, list
                ):
                    ret["comment"] = (
                        "resolution_virtual_networks must be supplied as a list of VNET"
                        " ID paths!"
                    )
                    return ret
                res_vnets = zone.get("resolution_virtual_networks", [])
                remote_res_vnets = sorted(
                    vnet["id"].lower() for vnet in res_vnets if "id" in vnet
                )
                local_res_vnets = sorted(
                    vnet.lower() for vnet in resolution_virtual_networks or []
                )
                if local_res_vnets != remote_res_vnets:
                    ret["changes"]["resolution_virtual_networks"] = {
                        "old": remote_res_vnets,
                        "new": local_res_vnets,
                    }

        if not ret["changes"]:
            ret["result"] = True
            ret["comment"] = "DNS zone {} is already present.".format(name)
            return ret

        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "DNS zone {} would be updated.".format(name)
            return ret

    else:
        # Zone lookup failed, so treat this as a create.
        ret["changes"] = {
            "old": {},
            "new": {
                "name": name,
                "resource_group": resource_group,
                "etag": etag,
                "registration_virtual_networks": registration_virtual_networks,
                "resolution_virtual_networks": resolution_virtual_networks,
                "tags": tags,
                "zone_type": zone_type,
            },
        }

        if __opts__["test"]:
            ret["comment"] = "DNS zone {} would be created.".format(name)
            ret["result"] = None
            return ret

    # Both create and update go through the same create_or_update call;
    # extra kwargs are merged with the connection auth parameters.
    zone_kwargs = kwargs.copy()
    zone_kwargs.update(connection_auth)

    zone = __salt__["azurearm_dns.zone_create_or_update"](
        name=name,
        resource_group=resource_group,
        etag=etag,
        if_match=if_match,
        if_none_match=if_none_match,
        registration_virtual_networks=registration_virtual_networks,
        resolution_virtual_networks=resolution_virtual_networks,
        tags=tags,
        zone_type=zone_type,
        **zone_kwargs
    )

    if "error" not in zone:
        ret["result"] = True
        ret["comment"] = "DNS zone {} has been created.".format(name)
        return ret

    ret["comment"] = "Failed to create DNS zone {}! ({})".format(
        name, zone.get("error")
    )
    return ret
@_deprecation_message
def zone_absent(name, resource_group, connection_auth=None):
    """
    .. versionadded:: 3000

    Ensure a DNS zone does not exist in the resource group.

    :param name:
        Name of the DNS zone.

    :param resource_group:
        The resource group assigned to the DNS zone.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    if not isinstance(connection_auth, dict):
        ret[
            "comment"
        ] = "Connection information must be specified via connection_auth dictionary!"
        return ret

    zone = __salt__["azurearm_dns.zone_get"](
        name, resource_group, azurearm_log_level="info", **connection_auth
    )

    if "error" in zone:
        # The lookup failed, so the zone is already gone -- nothing to do.
        ret["result"] = True
        ret["comment"] = "DNS zone {} was not found.".format(name)
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "DNS zone {} would be deleted.".format(name)
        ret["changes"] = {"old": zone, "new": {}}
        return ret

    deleted = __salt__["azurearm_dns.zone_delete"](
        name, resource_group, **connection_auth
    )

    if not deleted:
        ret["comment"] = "Failed to delete DNS zone {}!".format(name)
        return ret

    ret["result"] = True
    ret["comment"] = "DNS zone {} has been deleted.".format(name)
    ret["changes"] = {"old": zone, "new": {}}
    return ret
@_deprecation_message
def record_set_present(
    name,
    zone_name,
    resource_group,
    record_type,
    if_match=None,
    if_none_match=None,
    etag=None,
    metadata=None,
    ttl=None,
    arecords=None,
    aaaa_records=None,
    mx_records=None,
    ns_records=None,
    ptr_records=None,
    srv_records=None,
    txt_records=None,
    cname_record=None,
    soa_record=None,
    caa_records=None,
    connection_auth=None,
    **kwargs
):
    """
    .. versionadded:: 3000

    Ensure a record set exists in a DNS zone.

    :param name:
        The name of the record set, relative to the name of the zone.

    :param zone_name:
        Name of the DNS zone (without a terminating dot).

    :param resource_group:
        The resource group assigned to the DNS zone.

    :param record_type:
        The type of DNS record in this record set. Record sets of type SOA can be updated but not created
        (they are created when the DNS zone is created). Possible values include: 'A', 'AAAA', 'CAA', 'CNAME',
        'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'

    :param if_match:
        The etag of the record set. Omit this value to always overwrite the current record set. Specify the last-seen
        etag value to prevent accidentally overwritting any concurrent changes.

    :param if_none_match:
        Set to '*' to allow a new record set to be created, but to prevent updating an existing record set. Other values
        will be ignored.

    :param etag:
        The etag of the record set. `Etags <https://docs.microsoft.com/en-us/azure/dns/dns-zones-records#etags>`__ are
        used to handle concurrent changes to the same resource safely.

    :param metadata:
        A dictionary of strings can be passed as tag metadata to the record set object.

    :param ttl:
        The TTL (time-to-live) of the records in the record set. Required when specifying record information.

    :param arecords:
        The list of A records in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.arecord?view=azure-python>`__
        to create a list of dictionaries representing the record objects.

    :param aaaa_records:
        The list of AAAA records in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.aaaarecord?view=azure-python>`__
        to create a list of dictionaries representing the record objects.

    :param mx_records:
        The list of MX records in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.mxrecord?view=azure-python>`__
        to create a list of dictionaries representing the record objects.

    :param ns_records:
        The list of NS records in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.nsrecord?view=azure-python>`__
        to create a list of dictionaries representing the record objects.

    :param ptr_records:
        The list of PTR records in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.ptrrecord?view=azure-python>`__
        to create a list of dictionaries representing the record objects.

    :param srv_records:
        The list of SRV records in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.srvrecord?view=azure-python>`__
        to create a list of dictionaries representing the record objects.

    :param txt_records:
        The list of TXT records in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.txtrecord?view=azure-python>`__
        to create a list of dictionaries representing the record objects.

    :param cname_record:
        The CNAME record in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.cnamerecord?view=azure-python>`__
        to create a dictionary representing the record object.

    :param soa_record:
        The SOA record in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.soarecord?view=azure-python>`__
        to create a dictionary representing the record object.

    :param caa_records:
        The list of CAA records in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.caarecord?view=azure-python>`__
        to create a list of dictionaries representing the record objects.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure record set exists:
            azurearm_dns.record_set_present:
                - name: web
                - zone_name: contoso.com
                - resource_group: my_rg
                - record_type: A
                - ttl: 300
                - arecords:
                  - ipv4_address: 10.0.0.1
                - metadata:
                    how_awesome: very
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}

    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    # Explicit mapping of record parameter name -> supplied value. This
    # replaces the previous eval() on parameter names, which was both a
    # security smell and harder to audit.
    record_params = {
        "arecords": arecords,
        "aaaa_records": aaaa_records,
        "mx_records": mx_records,
        "ns_records": ns_records,
        "ptr_records": ptr_records,
        "srv_records": srv_records,
        "txt_records": txt_records,
        "cname_record": cname_record,
        "soa_record": soa_record,
        "caa_records": caa_records,
    }

    if not isinstance(connection_auth, dict):
        ret[
            "comment"
        ] = "Connection information must be specified via connection_auth dictionary!"
        return ret

    rec_set = __salt__["azurearm_dns.record_set_get"](
        name,
        zone_name,
        resource_group,
        record_type,
        azurearm_log_level="info",
        **connection_auth
    )

    if "error" not in rec_set:
        # The record set exists -- diff metadata and each record group.
        metadata_changes = __utils__["dictdiffer.deep_diff"](
            rec_set.get("metadata", {}), metadata or {}
        )
        if metadata_changes:
            ret["changes"]["metadata"] = metadata_changes

        for record_str, record in record_params.items():
            if not record:
                continue
            if not ttl:
                ret["comment"] = "TTL is required when specifying record information!"
                return ret
            if not rec_set.get(record_str):
                # Records supplied for a group the remote record set lacks.
                ret["changes"] = {"new": {record_str: record}}
                continue
            if record_str[-1] != "s":
                # Singular entries (cname_record, soa_record) are dictionaries.
                if not isinstance(record, dict):
                    ret[
                        "comment"
                    ] = "{} record information must be specified as a dictionary!".format(
                        record_str
                    )
                    return ret
                # Only keys present in the local config are compared; the
                # remote record may carry extra server-populated fields.
                for k, v in record.items():
                    if v != rec_set[record_str].get(k):
                        ret["changes"] = {"new": {record_str: record}}
            else:
                # Plural entries (arecords, mx_records, ...) are lists of dicts.
                if not isinstance(record, list):
                    ret["comment"] = (
                        "{} record information must be specified as a list of"
                        " dictionaries!".format(record_str)
                    )
                    return ret
                # Bug fix: the previous implementation called sorted() on
                # lists of dicts (TypeError on Python 3 with >1 element) and
                # then list.get() (AttributeError). Compare the lists as
                # unordered collections of record dicts instead.
                if not _record_lists_equal(record, rec_set[record_str]):
                    ret["changes"] = {"new": {record_str: record}}

        if not ret["changes"]:
            ret["result"] = True
            ret["comment"] = "Record set {} is already present.".format(name)
            return ret

        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Record set {} would be updated.".format(name)
            return ret

    else:
        # Lookup failed, so this is a create.
        ret["changes"] = {
            "old": {},
            "new": {
                "name": name,
                "zone_name": zone_name,
                "resource_group": resource_group,
                "record_type": record_type,
                "etag": etag,
                "metadata": metadata,
                "ttl": ttl,
            },
        }
        for record_str, record in record_params.items():
            if record:
                ret["changes"]["new"][record_str] = record

        if __opts__["test"]:
            ret["comment"] = "Record set {} would be created.".format(name)
            ret["result"] = None
            return ret

    rec_set_kwargs = kwargs.copy()
    rec_set_kwargs.update(connection_auth)

    rec_set = __salt__["azurearm_dns.record_set_create_or_update"](
        name=name,
        zone_name=zone_name,
        resource_group=resource_group,
        record_type=record_type,
        if_match=if_match,
        if_none_match=if_none_match,
        etag=etag,
        ttl=ttl,
        metadata=metadata,
        arecords=arecords,
        aaaa_records=aaaa_records,
        mx_records=mx_records,
        ns_records=ns_records,
        ptr_records=ptr_records,
        srv_records=srv_records,
        txt_records=txt_records,
        cname_record=cname_record,
        soa_record=soa_record,
        caa_records=caa_records,
        **rec_set_kwargs
    )

    if "error" not in rec_set:
        ret["result"] = True
        ret["comment"] = "Record set {} has been created.".format(name)
        return ret

    ret["comment"] = "Failed to create record set {}! ({})".format(
        name, rec_set.get("error")
    )
    return ret


def _record_lists_equal(local_records, remote_records):
    """Compare configured records against remote records, ignoring order.

    Returns True when the lists have the same length and every local record
    dict has a distinct matching remote record per _record_values_equal.
    """
    if len(local_records) != len(remote_records):
        return False
    unmatched = list(remote_records)
    for local in local_records:
        for idx, remote in enumerate(unmatched):
            if _record_values_equal(local, remote):
                del unmatched[idx]
                break
        else:
            return False
    return True


def _record_values_equal(local, remote):
    """Return True if every key of ``local`` matches ``remote``.

    String values are compared case-insensitively; keys present only in the
    remote record are ignored (server-populated fields).
    """
    if not isinstance(remote, dict):
        return False
    for key, local_val in local.items():
        remote_val = remote.get(key)
        if isinstance(local_val, str):
            local_val = local_val.lower()
        if isinstance(remote_val, str):
            remote_val = remote_val.lower()
        if local_val != remote_val:
            return False
    return True
@_deprecation_message
def record_set_absent(name, zone_name, resource_group, connection_auth=None):
    """
    .. versionadded:: 3000

    Ensure a record set does not exist in the DNS zone.

    :param name:
        Name of the record set.

    :param zone_name:
        Name of the DNS zone.

    :param resource_group:
        The resource group assigned to the DNS zone.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    if not isinstance(connection_auth, dict):
        ret[
            "comment"
        ] = "Connection information must be specified via connection_auth dictionary!"
        return ret

    rec_set = __salt__["azurearm_dns.record_set_get"](
        name, zone_name, resource_group, azurearm_log_level="info", **connection_auth
    )

    if "error" in rec_set:
        # Lookup failed, so the record set is already gone -- nothing to do.
        ret["result"] = True
        ret["comment"] = "Record set {} was not found in zone {}.".format(
            name, zone_name
        )
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Record set {} would be deleted.".format(name)
        ret["changes"] = {"old": rec_set, "new": {}}
        return ret

    deleted = __salt__["azurearm_dns.record_set_delete"](
        name, zone_name, resource_group, **connection_auth
    )

    if not deleted:
        ret["comment"] = "Failed to delete record set {}!".format(name)
        return ret

    ret["result"] = True
    ret["comment"] = "Record set {} has been deleted.".format(name)
    ret["changes"] = {"old": rec_set, "new": {}}
    return ret
__virtualname__ = "keystone_domain"


def __virtual__():
    """
    Load this state module only when the keystoneng execution module is
    available (it requires the shade python module).
    """
    if "keystoneng.domain_get" not in __salt__:
        return (
            False,
            "The keystoneng execution module failed to load: shade python module is not"
            " available",
        )
    return __virtualname__
def present(name, auth=None, **kwargs):
    """
    Ensure domain exists and is up-to-date

    name
        Name of the domain

    enabled
        Boolean to control if domain is enabled

    description
        An arbitrary description of the domain
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    kwargs = __utils__["args.clean_kwargs"](**kwargs)

    __salt__["keystoneng.setup_clouds"](auth)

    domain = __salt__["keystoneng.domain_get"](name=name)

    if not domain:
        # Domain is missing entirely: create it (or report in test mode).
        if __opts__["test"]:
            ret["result"] = None
            ret["changes"] = kwargs
            ret["comment"] = "Domain {} will be created.".format(name)
        else:
            kwargs["name"] = name
            created = __salt__["keystoneng.domain_create"](**kwargs)
            ret["changes"] = created
            ret["comment"] = "Created domain"
        return ret

    changes = __salt__["keystoneng.compare_changes"](domain, **kwargs)
    if not changes:
        # Already up to date.
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["changes"] = changes
        ret["comment"] = "Domain {} will be updated.".format(name)
        return ret

    kwargs["domain_id"] = domain.id
    __salt__["keystoneng.domain_update"](**kwargs)
    ret["changes"].update(changes)
    ret["comment"] = "Updated domain"
    return ret
def absent(name, auth=None):
    """
    Ensure domain does not exist

    name
        Name of the domain
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    __salt__["keystoneng.setup_clouds"](auth)

    domain = __salt__["keystoneng.domain_get"](name=name)
    if not domain:
        # Already absent; nothing to change.
        return ret

    if __opts__["test"] is True:
        ret["result"] = None
        ret["changes"] = {"name": name}
        ret["comment"] = "Domain {} will be deleted.".format(name)
        return ret

    # NOTE(review): the full domain object (not its name) is passed as the
    # ``name`` argument -- presumably keystoneng.domain_delete accepts an
    # object here; confirm before changing.
    __salt__["keystoneng.domain_delete"](name=domain)
    ret["changes"]["id"] = domain.id
    ret["comment"] = "Deleted domain"
    return ret
# Functions in this module that are usable as monitoring states.
__monitor__ = [
    "loadavg",
    "process",
]
def loadavg(name, maximum=None, minimum=None):
    """
    Return the current load average for the specified minion. Available values
    for name are `1-min`, `5-min` and `15-min`. `minimum` and `maximum` values
    should be passed in as strings.
    """
    # Monitoring state, no changes will be made so no test interface needed
    ret = {
        "name": name,
        "result": False,
        "comment": "",
        "changes": {},
        "data": {},  # Data field for monitoring state
    }
    data = __salt__["status.loadavg"]()
    if name not in data:
        ret["result"] = False
        ret["comment"] += "Requested load average {} not available ".format(name)
        return ret
    # Bug fix: min/max are documented as strings, so a plain
    # ``minimum >= maximum`` compared them lexicographically (e.g. "9" >= "15"
    # is True). Compare numerically instead.
    if minimum and maximum and float(minimum) >= float(maximum):
        ret["comment"] += "Min must be less than max"
    if ret["comment"]:
        return ret
    cap = float(data[name])
    ret["data"] = data[name]
    if minimum:
        if cap < float(minimum):
            ret["comment"] = "Load avg is below minimum of {} at {}".format(
                minimum, cap
            )
            return ret
    if maximum:
        if cap > float(maximum):
            ret["comment"] = "Load avg above maximum of {} at {}".format(maximum, cap)
            return ret
    ret["comment"] = "Load avg in acceptable range"
    ret["result"] = True
    return ret
def process(name):
    """
    Return whether the specified signature is found in the process tree. This
    differs slightly from the services states, in that it may refer to a
    process that is not managed via the init system.
    """
    # Monitoring state, no changes will be made so no test interface needed
    ret = {
        "name": name,
        "result": False,
        "comment": "",
        "changes": {},
        "data": {},  # Data field for monitoring state
    }
    pid_data = __salt__["status.pid"](name)
    if pid_data:
        ret["result"] = True
        ret["data"] = pid_data
        ret["comment"] += 'Process signature "{}" was found '.format(name)
    else:
        ret["result"] = False
        ret["comment"] += 'Process signature "{}" not found '.format(name)
    return ret
def __virtual__():
    """
    Only load if the victorops module is available in __salt__
    """
    if "victorops.create_event" not in __salt__:
        return (False, "victorops module could not be loaded")
    return "victorops"
def create_event(name, message_type, routing_key="everyone", **kwargs):
    """
    Create an event on the VictorOps service.

    .. code-block:: yaml

        webserver-warning-message:
          victorops.create_event:
            - message_type: 'CRITICAL'
            - entity_id: 'webserver/diskspace'
            - state_message: 'Webserver diskspace is low.'

    Required parameters:

    name
        A short description of the event.
    message_type
        One of: INFO, WARNING, ACKNOWLEDGEMENT, CRITICAL, RECOVERY.

    Optional parameters:

    routing_key
        The key for where messages should be routed. By default, sent to
        the 'everyone' route.
    entity_id
        The name of the alerting entity. If not provided, a random name
        will be assigned.
    timestamp
        Timestamp of the alert in seconds since epoch. Defaults to the time
        the alert is received at VictorOps.
    timestamp_fmt
        The date format for the timestamp parameter.
    state_start_time
        The time this entity entered its current state (seconds since epoch).
    state_start_time_fmt
        The date format for the state_start_time parameter.
    state_message
        Any additional status information from the alert item.
    entity_is_host
        Used within VictorOps to select the appropriate display format for
        the incident.
    entity_display_name
        Used within VictorOps to display a human-readable name for the entity.
    ack_message
        A user entered comment for the acknowledgment.
    ack_author
        The user that acknowledged the incident.
    """
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}
    if __opts__["test"]:
        # result stays None in test mode
        ret["comment"] = "Need to create event: {}".format(name)
        return ret
    res = __salt__["victorops.create_event"](
        message_type=message_type, routing_key=routing_key, **kwargs
    )
    if res["result"] != "success":
        ret["result"] = False
        ret["comment"] = "Failed to create event: {}".format(res["message"])
    else:
        ret["result"] = True
        ret["comment"] = "Created event: {} for entity {}".format(
            name, res["entity_id"]
        )
    return ret
import logging
log = logging.getLogger(__name__)
def __virtual__():
    """
    Load only when the libcloud_loadbalancer execution module is available.
    """
    if "libcloud_loadbalancer.list_balancers" not in __salt__:
        return (False, "libcloud_loadbalancer module could not be loaded")
    return True
def state_result(result, message, name, changes=None):
    """
    Assemble a standard Salt state return dictionary.

    :param result: the state's success flag
    :param message: human-readable comment
    :param name: the state/resource name
    :param changes: optional dict of changes (defaults to empty)
    """
    return {
        "result": result,
        "comment": message,
        "name": name,
        "changes": changes if changes is not None else {},
    }
def balancer_present(
    name, port, protocol, profile, algorithm=None, members=None, **libcloud_kwargs
):
    """
    Ensures a load balancer is present.

    :param name: Load Balancer name
    :type name: ``str``

    :param port: Port the load balancer should listen on, defaults to 80
    :type port: ``str``

    :param protocol: Loadbalancer protocol, defaults to http.
    :type protocol: ``str``

    :param profile: The profile key
    :type profile: ``str``

    :param algorithm: Load balancing algorithm, defaults to ROUND_ROBIN. See
        the Algorithm type in the Libcloud documentation for a full listing.
    :type algorithm: ``str``

    :param members: An optional list of members to create on deployment
    :type members: ``list`` of ``dict`` (ip, port)
    """
    existing = __salt__["libcloud_loadbalancer.list_balancers"](profile)
    if any(bal["name"] == name for bal in existing):
        return state_result(True, "Balancer already exists", name)
    # Translate the member dicts into the ip/port shape the module expects.
    starting_members = None
    if members is not None:
        starting_members = [{"ip": m["ip"], "port": m["port"]} for m in members]
    balancer = __salt__["libcloud_loadbalancer.create_balancer"](
        name,
        port,
        protocol,
        profile,
        algorithm=algorithm,
        members=starting_members,
        **libcloud_kwargs
    )
    return state_result(True, "Created new load balancer", name, balancer)
def balancer_absent(name, profile, **libcloud_kwargs):
    """
    Ensures a load balancer is absent.

    :param name: Load Balancer name
    :type name: ``str``

    :param profile: The profile key
    :type profile: ``str``
    """
    existing = __salt__["libcloud_loadbalancer.list_balancers"](profile)
    matches = [bal for bal in existing if bal["name"] == name]
    if not matches:
        return state_result(True, "Balancer already absent", name)
    outcome = __salt__["libcloud_loadbalancer.destroy_balancer"](
        matches[0]["id"], profile, **libcloud_kwargs
    )
    return state_result(outcome, "Deleted load balancer", name)
def member_present(ip, port, balancer_id, profile, **libcloud_kwargs):
    """
    Ensure a load balancer member is present.

    :param ip: IP address for the new member
    :type ip: ``str``

    :param port: Port for the new member
    :type port: ``int``

    :param balancer_id: id of a load balancer you want to attach the member to
    :type balancer_id: ``str``

    :param profile: The profile key
    :type profile: ``str``
    """
    current = __salt__["libcloud_loadbalancer.list_balancer_members"](
        balancer_id, profile
    )
    if any(m["ip"] == ip and m["port"] == port for m in current):
        return state_result(True, "Member already present", balancer_id)
    member = __salt__["libcloud_loadbalancer.balancer_attach_member"](
        balancer_id, ip, port, profile, **libcloud_kwargs
    )
    return state_result(
        True,
        "Member added to balancer, id: {}".format(member["id"]),
        balancer_id,
        member,
    )
def member_absent(ip, port, balancer_id, profile, **libcloud_kwargs):
    """
    Ensure a load balancer member is absent, based on IP and Port.

    :param ip: IP address for the member
    :type ip: ``str``

    :param port: Port for the member
    :type port: ``int``

    :param balancer_id: id of a load balancer you want to detach the member from
    :type balancer_id: ``str``

    :param profile: The profile key
    :type profile: ``str``
    """
    current = __salt__["libcloud_loadbalancer.list_balancer_members"](
        balancer_id, profile
    )
    for existing in current:
        if existing["ip"] != ip or existing["port"] != port:
            continue
        outcome = __salt__["libcloud_loadbalancer.balancer_detach_member"](
            balancer_id, existing["id"], profile, **libcloud_kwargs
        )
        return state_result(outcome, "Member removed", balancer_id)
    return state_result(True, "Member already absent", balancer_id)
import collections
def __virtual__():
    """
    Load only when the mssql execution module is present.
    """
    if "mssql.version" not in __salt__:
        return (False, "mssql module could not be loaded")
    return True
def _normalize_options(options):
if type(options) in [dict, collections.OrderedDict]:
return ["{}={}".format(k, v) for k, v in options.items()]
if type(options) is list and (not options or type(options[0]) is str):
return options
# Invalid options
if type(options) is not list or type(options[0]) not in [
dict,
collections.OrderedDict,
]:
return []
return [o for d in options for o in _normalize_options(d)]
def present(
    name, password=None, domain=None, server_roles=None, options=None, **kwargs
):
    """
    Checks existence of the named login.
    If not present, creates the login with the specified roles and options.

    name
        The name of the login to manage

    password
        Creates a SQL Server authentication login.
        Since hashed passwords are varbinary values, if the
        new_login_password is 'long', it will be considered
        to be HASHED.

    domain
        Creates a Windows authentication login.
        Needs to be NetBIOS domain or hostname

    server_roles
        Add this login to all the server roles in the list

    options
        Can be a list of strings, a dictionary, or a list of dictionaries
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    # Exactly one of password (SQL auth) or domain (Windows auth) must be set.
    if bool(password) == bool(domain):
        ret["result"] = False
        # Fixed grammar of the user-facing error ("specifies" -> "specified").
        ret["comment"] = "One and only one of password and domain should be specified"
        return ret
    if __salt__["mssql.login_exists"](name, domain=domain, **kwargs):
        ret[
            "comment"
        ] = "Login {} is already present (Not going to try to set its password)".format(
            name
        )
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Login {} is set to be added".format(name)
        return ret
    login_created = __salt__["mssql.login_create"](
        name,
        new_login_password=password,
        new_login_domain=domain,
        new_login_roles=server_roles,
        new_login_options=_normalize_options(options),
        **kwargs
    )
    # mssql.login_create returns True on success or an error string on
    # failure; non-empty strings are truthy, so compare against True.
    if login_created is not True:
        ret["result"] = False
        ret["comment"] = "Login {} failed to be added: {}".format(name, login_created)
        return ret
    ret["comment"] = "Login {} has been added. ".format(name)
    ret["changes"][name] = "Present"
    return ret
def absent(name, **kwargs):
    """
    Ensure that the named login is absent.

    name
        The name of the login to remove
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    if not __salt__["mssql.login_exists"](name):
        ret["comment"] = "Login {} is not present".format(name)
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Login {} is set to be removed".format(name)
        return ret
    if not __salt__["mssql.login_remove"](name, **kwargs):
        ret["result"] = False
        ret["comment"] = "Login {} failed to be removed".format(name)
        return ret
    ret["comment"] = "Login {} has been removed".format(name)
    ret["changes"][name] = "Absent"
    return ret
def __virtual__():
    """
    Load only if the influxdb execution module is available.
    """
    if "influxdb.db_exists" not in __salt__:
        return (False, "influxdb module could not be loaded")
    return "influxdb_continuous_query"
def present(
    name, database, query, resample_time=None, coverage_period=None, **client_args
):
    """
    Ensure that the given continuous query is present.

    name
        Name of the continuous query to create.

    database
        Database to create the continuous query on.

    query
        The query content.

    resample_time : None
        Duration between continuous query resampling.

    coverage_period : None
        Duration specifying time period per sample.
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "continuous query {} is already present".format(name),
    }
    exists = __salt__["influxdb.continuous_query_exists"](
        name=name, database=database, **client_args
    )
    if exists:
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = " {} is absent and will be created".format(name)
        return ret
    created = __salt__["influxdb.create_continuous_query"](
        database, name, query, resample_time, coverage_period
    )
    if not created:
        ret["result"] = False
        ret["comment"] = "Failed to create continuous query {}".format(name)
        return ret
    ret["comment"] = "continuous query {} has been created".format(name)
    ret["changes"][name] = "Present"
    return ret
def absent(name, database, **client_args):
    """
    Ensure that the given continuous query is absent.

    name
        Name of the continuous query to remove.

    database
        Name of the database that the continuous query was defined on.
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "continuous query {} is not present".format(name),
    }
    if not __salt__["influxdb.continuous_query_exists"](database, name, **client_args):
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "continuous query {} is present and needs to be removed".format(
            name
        )
        return ret
    if __salt__["influxdb.drop_continuous_query"](database, name, **client_args):
        ret["comment"] = "continuous query {} has been removed".format(name)
        ret["changes"][name] = "Absent"
        return ret
    ret["result"] = False
    ret["comment"] = "Failed to remove continuous query {}".format(name)
    return ret
import logging
import xml.etree.ElementTree as ET
import salt.utils.compat
import salt.utils.json
log = logging.getLogger(__name__)
__virtualname__ = "boto_cfn"
def __virtual__():
    """
    Only load if the elementtree xml library and boto are available.
    """
    if "boto_cfn.exists" not in __salt__:
        return (
            False,
            "Cannot load {} state: boto_cfn module unavailable".format(__virtualname__),
        )
    return True
def present(
    name,
    template_body=None,
    template_url=None,
    parameters=None,
    notification_arns=None,
    disable_rollback=None,
    timeout_in_minutes=None,
    capabilities=None,
    tags=None,
    on_failure=None,
    stack_policy_body=None,
    stack_policy_url=None,
    use_previous_template=None,
    stack_policy_during_update_body=None,
    stack_policy_during_update_url=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Ensure cloud formation stack is present.

    name (string) - Name of the stack.

    template_body (string) – Structure containing the template body. Can also be loaded from a file by using salt://.

    template_url (string) – Location of file containing the template body. The URL must point to a template located in
    an S3 bucket in the same region as the stack.

    parameters (list) – A list of key/value tuples that specify input parameters for the stack. A 3-tuple (key, value,
    bool) may be used to specify the UsePreviousValue option.

    notification_arns (list) – The Simple Notification Service (SNS) topic ARNs to publish stack related events.
    You can find your SNS topic ARNs using the `SNS_console`_ or your Command Line Interface (CLI).

    disable_rollback (bool) – Indicates whether or not to rollback on failure.

    timeout_in_minutes (integer) – The amount of time that can pass before the stack status becomes CREATE_FAILED; if
    DisableRollback is not set or is set to False, the stack will be rolled back.

    capabilities (list) – The list of capabilities you want to allow in the stack. Currently, the only valid capability
    is ‘CAPABILITY_IAM’.

    tags (dict) – A set of user-defined Tags to associate with this stack, represented by key/value pairs. Tags defined
    for the stack are propagated to EC2 resources that are created as part of the stack. A maximum number of 10 tags can
    be specified.

    on_failure (string) – Determines what action will be taken if stack creation fails. This must be one of:
    DO_NOTHING, ROLLBACK, or DELETE. You can specify either OnFailure or DisableRollback, but not both.

    stack_policy_body (string) – Structure containing the stack policy body. Can also be loaded from a file by using
    salt://.

    stack_policy_url (string) – Location of a file containing the stack policy. The URL must point to a policy
    (max size: 16KB) located in an S3 bucket in the same region as the stack. If you pass StackPolicyBody and
    StackPolicyURL, only StackPolicyBody is used.

    use_previous_template (boolean) – Used only when templates are not the same. Set to True to use the previous
    template instead of uploading a new one via TemplateBody or TemplateURL.

    stack_policy_during_update_body (string) – Used only when templates are not the same. Structure containing the
    temporary overriding stack policy body. If you pass StackPolicyDuringUpdateBody and StackPolicyDuringUpdateURL,
    only StackPolicyDuringUpdateBody is used. Can also be loaded from a file by using salt://.

    stack_policy_during_update_url (string) – Used only when templates are not the same. Location of a file containing
    the temporary overriding stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in
    the same region as the stack. If you pass StackPolicyDuringUpdateBody and StackPolicyDuringUpdateURL, only
    StackPolicyDuringUpdateBody is used.

    region (string) - Region to connect to.

    key (string) - Secret key to be used.

    keyid (string) - Access key to be used.

    profile (dict) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key
    and keyid.

    .. _`SNS_console`: https://console.aws.amazon.com/sns
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # Resolve any salt:// paths into file contents; on a fetch error
    # _get_template returns a state-style error dict instead of a string.
    template_body = _get_template(template_body, name)
    stack_policy_body = _get_template(stack_policy_body, name)
    stack_policy_during_update_body = _get_template(
        stack_policy_during_update_body, name
    )
    # Propagate any error dict produced by _get_template directly as the
    # state result.
    for i in [template_body, stack_policy_body, stack_policy_during_update_body]:
        if isinstance(i, dict):
            return i
    # _validate returns True on success, or a (code, message) tuple parsed
    # from the boto error response on failure.
    _valid = _validate(template_body, template_url, region, key, keyid, profile)
    log.debug("Validate is : %s.", _valid)
    if _valid is not True:
        code, message = _valid
        ret["result"] = False
        ret["comment"] = "Template could not be validated.\n{} \n{}".format(
            code, message
        )
        return ret
    log.debug("Template %s is valid.", name)
    if __salt__["boto_cfn.exists"](name, region, key, keyid, profile):
        # Stack already exists: compare the deployed template with the desired
        # one to decide whether an update is required.
        template = __salt__["boto_cfn.get_template"](name, region, key, keyid, profile)
        template = template["GetTemplateResponse"]["GetTemplateResult"][
            "TemplateBody"
        ].encode("ascii", "ignore")
        template = salt.utils.json.loads(template)
        _template_body = salt.utils.json.loads(template_body)
        # salt.utils.compat.cmp returns 0 when both structures are equal.
        compare = salt.utils.compat.cmp(template, _template_body)
        if compare != 0:
            log.debug("Templates are not the same. Compare value is %s", compare)
            # At this point we should be able to run update safely since we already validated the template
            if __opts__["test"]:
                ret["comment"] = "Stack {} is set to be updated.".format(name)
                ret["result"] = None
                return ret
            updated = __salt__["boto_cfn.update_stack"](
                name,
                template_body,
                template_url,
                parameters,
                notification_arns,
                disable_rollback,
                timeout_in_minutes,
                capabilities,
                tags,
                use_previous_template,
                stack_policy_during_update_body,
                stack_policy_during_update_url,
                stack_policy_body,
                stack_policy_url,
                region,
                key,
                keyid,
                profile,
            )
            # boto_cfn.update_stack returns the raw boto error string on
            # failure; any other value is the successful update result.
            if isinstance(updated, str):
                code, message = _get_error(updated)
                log.debug("Update error is %s and message is %s", code, message)
                ret["result"] = False
                ret["comment"] = "Stack {} could not be updated.\n{} \n{}.".format(
                    name, code, message
                )
                return ret
            ret["comment"] = "Cloud formation template {} has been updated.".format(
                name
            )
            ret["changes"]["new"] = updated
            return ret
        # Templates match: nothing to change.
        ret["comment"] = "Stack {} exists.".format(name)
        ret["changes"] = {}
        return ret
    if __opts__["test"]:
        ret["comment"] = "Stack {} is set to be created.".format(name)
        ret["result"] = None
        return ret
    created = __salt__["boto_cfn.create"](
        name,
        template_body,
        template_url,
        parameters,
        notification_arns,
        disable_rollback,
        timeout_in_minutes,
        capabilities,
        tags,
        on_failure,
        stack_policy_body,
        stack_policy_url,
        region,
        key,
        keyid,
        profile,
    )
    if created:
        ret["comment"] = "Stack {} was created.".format(name)
        ret["changes"]["new"] = created
        return ret
    ret["result"] = False
    return ret
def absent(name, region=None, key=None, keyid=None, profile=None):
    """
    Ensure cloud formation stack is absent.

    name (string) – The name of the stack to delete.

    region (string) - Region to connect to.

    key (string) - Secret key to be used.

    keyid (string) - Access key to be used.

    profile (dict) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key
    and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    if not __salt__["boto_cfn.exists"](name, region, key, keyid, profile):
        ret["comment"] = "Stack {} does not exist.".format(name)
        ret["changes"] = {}
        return ret
    if __opts__["test"]:
        ret["comment"] = "Stack {} is set to be deleted.".format(name)
        ret["result"] = None
        return ret
    deleted = __salt__["boto_cfn.delete"](name, region, key, keyid, profile)
    # boto_cfn.delete returns the raw boto error response string on failure.
    if isinstance(deleted, str):
        code, message = _get_error(deleted)
        ret["comment"] = "Stack {} could not be deleted.\n{}\n{}".format(
            name, code, message
        )
        ret["result"] = False
        ret["changes"] = {}
        return ret
    if deleted:
        ret["comment"] = "Stack {} was deleted.".format(name)
        ret["changes"]["deleted"] = name
        return ret
    # Bug fix: previously a falsy, non-string delete result fell off the end
    # of the function and returned None, which is an invalid state return.
    ret["result"] = False
    ret["comment"] = "Stack {} failed to be deleted.".format(name)
    return ret
def _get_template(template, name):
# Checks if template is a file in salt defined by salt://.
ret = {"name": name, "result": True, "comment": "", "changes": {}}
if template is not None and "salt://" in template:
try:
return __salt__["cp.get_file_str"](template)
except OSError as e:
log.debug(e)
ret["comment"] = "File {} not found.".format(template)
ret["result"] = False
return ret
return template
def _validate(
    template_body=None,
    template_url=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Validate template syntax via boto_cfn. Returns True when the template is
    valid, otherwise a (code, message) tuple parsed from the boto error.
    """
    validate = __salt__["boto_cfn.validate_template"](
        template_body, template_url, region, key, keyid, profile
    )
    log.debug("Validate result is %s.", validate)
    # A string result is the raw boto error response.
    if not isinstance(validate, str):
        return True
    code, message = _get_error(validate)
    log.debug("Validate error is %s and message is %s.", code, message)
    return code, message
def _get_error(error):
# Converts boto exception to string that can be used to output error.
error = "\n".join(error.split("\n")[1:])
error = ET.fromstring(error)
code = error[0][1].text
message = error[0][2].text
return code, message | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/boto_cfn.py | 0.614394 | 0.216405 | boto_cfn.py | pypi |
def __virtual__():
    """
    Only load if the ``aws`` CLI is available on the minion.
    """
    if not __salt__["cmd.has_exec"]("aws"):
        return (False, "aws command not found")
    return "aws_sqs"
def exists(name, region, user=None, opts=False):
    """
    Ensure the SQS queue exists.

    name
        Name of the SQS queue.

    region
        Region to create the queue

    user
        Name of the user performing the SQS operations

    opts
        Include additional arguments and options to the aws command line
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    if __salt__["aws_sqs.queue_exists"](name, region, opts, user):
        ret["comment"] = "{} exists in {}".format(name, region)
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "AWS SQS queue {} is set to be created".format(name)
        return ret
    created = __salt__["aws_sqs.create_queue"](name, region, opts, user)
    if created["retcode"] == 0:
        ret["changes"]["new"] = created["stdout"]
    else:
        ret["result"] = False
        ret["comment"] = created["stderr"]
    return ret
def absent(name, region, user=None, opts=False):
    """
    Remove the named SQS queue if it exists.

    name
        Name of the SQS queue.

    region
        Region to remove the queue from

    user
        Name of the user performing the SQS operations

    opts
        Include additional arguments and options to the aws command line
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    if not __salt__["aws_sqs.queue_exists"](name, region, opts, user):
        ret["comment"] = "{} does not exist in {}".format(name, region)
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "AWS SQS queue {} is set to be removed".format(name)
        return ret
    removed = __salt__["aws_sqs.delete_queue"](name, region, opts, user)
    if removed["retcode"] == 0:
        ret["changes"]["removed"] = removed["stdout"]
    else:
        ret["result"] = False
        ret["comment"] = removed["stderr"]
    return ret
def license_present(name):
    """
    Ensures that the specified PowerPath license key is present
    on the host.

    name
        The license key to ensure is present
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}
    if not __salt__["powerpath.has_powerpath"]():
        ret["comment"] = "PowerPath is not installed."
        return ret
    installed_keys = [
        entry["key"] for entry in __salt__["powerpath.list_licenses"]()
    ]
    if name in installed_keys:
        ret["result"] = True
        ret["comment"] = "License key {} already present".format(name)
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "License key {} is set to be added".format(name)
        return ret
    data = __salt__["powerpath.add_license"](name)
    ret["comment"] = data["output"]
    if data["result"]:
        ret["changes"] = {name: "added"}
        ret["result"] = True
    else:
        ret["result"] = False
    return ret
def license_absent(name):
    """
    Ensures that the specified PowerPath license key is absent
    on the host.

    name
        The license key to ensure is absent
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}
    if not __salt__["powerpath.has_powerpath"]():
        ret["comment"] = "PowerPath is not installed."
        return ret
    installed_keys = [
        entry["key"] for entry in __salt__["powerpath.list_licenses"]()
    ]
    if name not in installed_keys:
        ret["result"] = True
        ret["comment"] = "License key {} not present".format(name)
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "License key {} is set to be removed".format(name)
        return ret
    data = __salt__["powerpath.remove_license"](name)
    ret["comment"] = data["output"]
    if data["result"]:
        ret["changes"] = {name: "removed"}
        ret["result"] = True
    else:
        ret["result"] = False
    return ret
import salt.exceptions
import salt.utils.path
def __virtual__():
    """
    Ensure the racadm command is installed.
    """
    if not salt.utils.path.which("racadm"):
        return (False, "racadm command not found")
    return True
def present(name, password, permission):
    """
    Ensure the user exists on the Dell DRAC.

    name:
        The users username

    password
        The password used to authenticate

    permission
        The permissions that should be assigned to a user
    """
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}
    users = __salt__["drac.list_users"]()
    if __opts__["test"]:
        if name in users:
            ret["comment"] = "`{}` already exists".format(name)
        else:
            ret["comment"] = "`{}` will be created".format(name)
            ret["changes"] = {name: "will be created"}
        return ret
    if name in users:
        ret["comment"] = "`{}` already exists".format(name)
        return ret
    if __salt__["drac.create_user"](name, password, permission, users):
        ret["comment"] = "`{}` user created".format(name)
        ret["changes"] = {name: "new user created"}
    else:
        ret["comment"] = "Unable to create user"
        ret["result"] = False
    return ret
def absent(name):
    """
    Ensure a user does not exist on the Dell DRAC.

    name:
        The users username
    """
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}
    users = __salt__["drac.list_users"]()
    if __opts__["test"]:
        if name in users:
            ret["comment"] = "`{}` is set to be deleted".format(name)
            ret["changes"] = {name: "will be deleted"}
        else:
            ret["comment"] = "`{}` does not exist".format(name)
        return ret
    if name not in users:
        ret["comment"] = "`{}` does not exist".format(name)
        return ret
    if __salt__["drac.delete_user"](name, users[name]["index"]):
        ret["comment"] = "`{}` deleted".format(name)
        ret["changes"] = {name: "deleted"}
    else:
        ret["comment"] = "Unable to delete user"
        ret["result"] = False
    return ret
def network(ip, netmask, gateway):
    """
    Ensure the DRAC network settings are consistent.

    ip
        The IPv4 address the DRAC should use.

    netmask
        The IPv4 subnet mask.

    gateway
        The IPv4 default gateway.
    """
    ret = {"name": ip, "result": True, "changes": {}, "comment": ""}
    current_network = __salt__["drac.network_info"]()
    # Record pending changes by comparing each desired value against the
    # currently-configured IPv4 settings. (Also removes the unused
    # new_network local from the original implementation.)
    desired = [
        ("IP Address", "IP Address", ip),
        ("Subnet Mask", "Netmask", netmask),
        ("Gateway", "Gateway", gateway),
    ]
    for current_key, change_key, new_value in desired:
        old_value = current_network["IPv4 settings"][current_key]
        if new_value != old_value:
            ret["changes"][change_key] = {"Old": old_value, "New": new_value}
    if __opts__["test"]:
        ret["result"] = None
        return ret
    # set_network is invoked unconditionally so the configuration is
    # (re)applied even when it already matches; only the changes dict
    # reflects actual deltas.
    if __salt__["drac.set_network"](ip, netmask, gateway):
        if not ret["changes"]:
            ret["comment"] = "Network is in the desired state"
        return ret
    ret["result"] = False
    ret["comment"] = "unable to configure network"
    return ret
import logging
import salt.utils.data
import salt.utils.platform
import salt.utils.win_update
log = logging.getLogger(__name__)
__virtualname__ = "wua"
def __virtual__():
    """
    Load only on Windows hosts that have the PyWin32 libraries available.
    """
    if salt.utils.platform.is_windows():
        if salt.utils.win_update.HAS_PYWIN32:
            return __virtualname__
        return False, "WUA: Requires PyWin32 libraries"
    return False, "WUA: Only available on Window systems"
def installed(name, updates=None):
    """
    Ensure Microsoft Updates are installed. Updates will be downloaded if
    needed.

    Args:

        name (str):
            The identifier of a single update to install.

        updates (list):
            A list of identifiers for updates to be installed. Overrides
            ``name``. Default is None.

    .. note:: Identifiers can be the GUID, the KB number, or any part of the
       Title of the Microsoft update. GUIDs and KBs are the preferred method
       to ensure you're installing the correct update.

    .. warning:: Using a partial KB number or a partial Title could result in
       more than one update being installed.

    Returns:
        dict: A dictionary containing the results of the update. There are
            three keys under changes. `installed` is a list of updates that
            were successfully installed. `failed` is a list of updates that
            failed to install. `superseded` is a list of updates that were
            not installed because they were superseded by another update.

    CLI Example:

    .. code-block:: yaml

        # using a GUID
        install_update:
          wua.installed:
            - name: 28cf1b09-2b1a-458c-9bd1-971d1b26b211

        # using a KB
        install_update:
          wua.installed:
            - name: KB3194343

        # using the full Title
        install_update:
          wua.installed:
            - name: Security Update for Adobe Flash Player for Windows 10 Version 1607 (for x64-based Systems) (KB3194343)

        # Install multiple updates
        install_updates:
          wua.installed:
            - updates:
              - KB3194343
              - 28cf1b09-2b1a-458c-9bd1-971d1b26b211
    """
    # A single identifier may be passed either as name or as a bare string
    # in updates; normalize everything the WUA search accepts.
    if isinstance(updates, str):
        updates = [updates]
    if not updates:
        updates = name
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    wua = salt.utils.win_update.WindowsUpdateAgent()
    # Search for updates
    install_list = wua.search(updates)
    # No updates found
    if install_list.count() == 0:
        ret["comment"] = "No updates found"
        return ret
    # List of updates to download
    download = salt.utils.win_update.Updates()
    for item in install_list.updates:
        if not salt.utils.data.is_true(item.IsDownloaded):
            download.updates.Add(item)
    # List of updates to install
    install = salt.utils.win_update.Updates()
    installed_updates = []
    for item in install_list.updates:
        if not salt.utils.data.is_true(item.IsInstalled):
            install.updates.Add(item)
        else:
            # Already installed: collect KB ids for the comment below.
            installed_updates.extend("KB" + kb for kb in item.KBArticleIDs)
    if install.count() == 0:
        ret["comment"] = "Updates already installed: "
        ret["comment"] += "\n - ".join(installed_updates)
        return ret
    # Return comment of changes if test.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Updates will be installed:"
        for update in install.updates:
            ret["comment"] += "\n"
            ret["comment"] += ": ".join([update.Identity.UpdateID, update.Title])
        return ret
    # Download updates
    wua.download(download)
    # Install updates
    wua.install(install)
    # Refresh windows update info
    wua.refresh()
    post_info = wua.updates().list()
    # superseded_updates is a list of updates that the WUA first requested to be
    # installed but became ineligible for installation because they were
    # superseded
    superseded_updates = {}
    failed_updates = {}
    # NOTE: installed_updates is intentionally rebound from the list above to
    # a dict keyed by update id for the verification pass.
    installed_updates = {}
    # Verify the installation
    installed_items = install.list()
    for item in installed_items:
        if item not in post_info:
            # Update (item) was not installed for valid reason
            superseded_updates[item] = {
                "Title": installed_items[item]["Title"],
                "KBs": installed_items[item]["KBs"],
            }
        else:
            if not salt.utils.data.is_true(post_info[item]["Installed"]):
                failed_updates[item] = {
                    "Title": post_info[item]["Title"],
                    "KBs": post_info[item]["KBs"],
                }
            else:
                installed_updates[item] = {
                    "Title": post_info[item]["Title"],
                    "NeedsReboot": post_info[item]["NeedsReboot"],
                    "KBs": post_info[item]["KBs"],
                }
    comments = []
    if installed_updates:
        comments.append("Updates installed successfully")
        ret["changes"]["installed"] = installed_updates
    if failed_updates:
        comments.append("Some updates failed to install")
        ret["changes"]["failed"] = failed_updates
        ret["result"] = False
    # Add the list of updates not installed to the return
    if superseded_updates:
        comments.append("Some updates were superseded")
        ret["changes"]["superseded"] = superseded_updates
    ret["comment"] = "\n".join(comments)
    return ret
def removed(name, updates=None):
    """
    Ensure Microsoft Updates are uninstalled.

    name (str):
        The identifier of a single update to uninstall. Ignored when
        ``updates`` is passed.

    updates (list):
        A list of identifiers for updates to be removed. Overrides ``name``.
        Default is None.

    .. note:: Identifiers can be the GUID, the KB number, or any part of the
       Title of the Microsoft update. GUIDs and KBs are the preferred method
       to ensure you're uninstalling the correct update.

    .. warning:: Using a partial KB number or a partial Title could result in
       more than one update being removed.

    Returns:
        dict: A state-style return dictionary. ``changes`` may contain a
        ``removed`` dict of updates that were successfully removed and a
        ``failed`` dict of updates that could not be removed.

    CLI Example:

    .. code-block:: yaml

        # using a KB
        uninstall_update:
          wua.removed:
            - name: KB3194343

        # Remove multiple updates
        uninstall_updates:
          wua.removed:
            - updates:
              - KB3194343
              - 28cf1b09-2b1a-458c-9bd1-971d1b26b211
    """
    # Normalize the requested identifiers into something searchable.
    if isinstance(updates, str):
        updates = [updates]
    if not updates:
        updates = name

    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    agent = salt.utils.win_update.WindowsUpdateAgent()

    # Look the identifiers up against the Windows Update Agent.
    found = agent.search(updates)
    if found.count() == 0:
        ret["comment"] = "No updates found"
        return ret

    # Split the matches into those still installed and those already gone.
    to_uninstall = salt.utils.win_update.Updates()
    already_removed = []
    for update in found.updates:
        if salt.utils.data.is_true(update.IsInstalled):
            to_uninstall.updates.Add(update)
        else:
            already_removed.extend("KB" + kb for kb in update.KBArticleIDs)

    if to_uninstall.count() == 0:
        ret["comment"] = "Updates already removed: "
        ret["comment"] += "\n - ".join(already_removed)
        return ret

    # In test mode only report what would be uninstalled.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Updates will be removed:"
        for update in to_uninstall.updates:
            ret["comment"] += "\n"
            ret["comment"] += ": ".join([update.Identity.UpdateID, update.Title])
        return ret

    # Uninstall, then refresh the agent state so the result can be verified.
    agent.uninstall(to_uninstall)
    agent.refresh()
    post_info = agent.updates().list()

    failed_updates = {}
    removed_updates = {}
    for item in to_uninstall.list():
        if salt.utils.data.is_true(post_info[item]["Installed"]):
            # Still reported as installed: the uninstall did not take effect.
            failed_updates[item] = {
                "Title": post_info[item]["Title"],
                "KBs": post_info[item]["KBs"],
            }
        else:
            removed_updates[item] = {
                "Title": post_info[item]["Title"],
                "NeedsReboot": post_info[item]["NeedsReboot"],
                "KBs": post_info[item]["KBs"],
            }

    if removed_updates:
        ret["comment"] = "Updates removed successfully"
        ret["changes"]["removed"] = removed_updates
    if failed_updates:
        ret["comment"] = "Some updates failed to uninstall"
        ret["changes"]["failed"] = failed_updates
        ret["result"] = False
    return ret
def uptodate(
    name,
    software=True,
    drivers=False,
    skip_hidden=False,
    skip_mandatory=False,
    skip_reboot=True,
    categories=None,
    severities=None,
):
    """
    Ensure Microsoft Updates that match the passed criteria are installed.
    Updates will be downloaded if needed.

    This state allows you to update a system without specifying a specific
    update to apply. All matching updates will be installed.

    Args:

        name (str):
            The name has no functional value and is only used as a tracking
            reference.

        software (bool):
            Include software updates in the results (default is True).

        drivers (bool):
            Include driver updates in the results (default is False).

        skip_hidden (bool):
            Skip updates that have been hidden. Default is False.

        skip_mandatory (bool):
            Skip mandatory updates. Default is False.

        skip_reboot (bool):
            Skip updates that require a reboot. Default is True.

        categories (list):
            Specify the categories to list (e.g. ``Critical Updates``,
            ``Definition Updates``, ``Drivers``, ``Feature Packs``,
            ``Security Updates``, ``Update Rollups``, ``Updates``,
            ``Windows Defender``). All categories returned by default.

        severities (list):
            Specify the severities to include (``Critical``, ``Important``).
            All severities returned by default.

    Returns:
        dict: A state-style return dictionary. ``changes`` may contain
        ``installed`` (successfully installed), ``failed`` (failed to
        install) and ``superseded`` (skipped because another update
        superseded them).

    CLI Example:

    .. code-block:: yaml

        # Update the system using the state defaults
        update_system:
          wua.uptodate

        # Apply all critical updates
        update_critical:
          wua.uptodate:
            - severities:
              - Critical
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    agent = salt.utils.win_update.WindowsUpdateAgent()

    available = agent.available(
        skip_hidden=skip_hidden,
        skip_installed=True,
        skip_mandatory=skip_mandatory,
        skip_reboot=skip_reboot,
        software=software,
        drivers=drivers,
        categories=categories,
        severities=severities,
    )
    if available.count() == 0:
        ret["comment"] = "No updates found"
        return ret

    # Re-search by identifier to get full update objects to operate on.
    matched = agent.search(list(available.list().keys()))

    # Queue anything not yet downloaded.
    to_download = salt.utils.win_update.Updates()
    for update in matched.updates:
        if not salt.utils.data.is_true(update.IsDownloaded):
            to_download.updates.Add(update)

    # Queue anything not yet installed.
    to_install = salt.utils.win_update.Updates()
    for update in matched.updates:
        if not salt.utils.data.is_true(update.IsInstalled):
            to_install.updates.Add(update)

    # In test mode only report what would be installed.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Updates will be installed:"
        for update in to_install.updates:
            ret["comment"] += "\n"
            ret["comment"] += ": ".join([update.Identity.UpdateID, update.Title])
        return ret

    agent.download(to_download)
    agent.install(to_install)

    # Refresh the agent state so the post-install result can be verified.
    agent.refresh()
    post_info = agent.updates().list()

    # Updates that were queued for install but are absent from the
    # post-install listing were superseded by other updates.
    superseded_updates = {}
    failed_updates = {}
    installed_updates = {}
    requested = to_install.list()
    for item in requested:
        if item not in post_info:
            superseded_updates[item] = {
                "Title": requested[item]["Title"],
                "KBs": requested[item]["KBs"],
            }
        elif not salt.utils.data.is_true(post_info[item]["Installed"]):
            failed_updates[item] = {
                "Title": post_info[item]["Title"],
                "KBs": post_info[item]["KBs"],
            }
        else:
            installed_updates[item] = {
                "Title": post_info[item]["Title"],
                "NeedsReboot": post_info[item]["NeedsReboot"],
                "KBs": post_info[item]["KBs"],
            }

    comments = []
    if installed_updates:
        comments.append("Updates installed successfully")
        ret["changes"]["installed"] = installed_updates
    if failed_updates:
        comments.append("Some updates failed to install")
        ret["changes"]["failed"] = failed_updates
        ret["result"] = False
    if superseded_updates:
        comments.append("Some updates were superseded")
        ret["changes"]["superseded"] = superseded_updates
    ret["comment"] = "\n".join(comments)
    return ret
import requests
def __virtual__():
    """Only load if grafana v2.0 is configured."""
    version = __salt__["config.get"]("grafana_version", 1)
    if version != 2:
        return (False, "Not configured for grafana_version 2")
    return True
def present(
    name,
    type,
    url,
    access="proxy",
    user="",
    password="",
    database="",
    basic_auth=False,
    basic_auth_user="",
    basic_auth_password="",
    is_default=False,
    json_data=None,
    profile="grafana",
):
    """
    Ensure that a data source is present.

    name
        Name of the data source.

    type
        Which type of data source it is ('graphite', 'influxdb' etc.).

    url
        The URL to the data source API.

    access
        Access mode of the data source. Default: 'proxy'

    user
        Optional - user to authenticate with the data source

    password
        Optional - password to authenticate with the data source

    database
        Optional - database used by the data source

    basic_auth
        Optional - set to True to use HTTP basic auth to authenticate with the
        data source.

    basic_auth_user
        Optional - HTTP basic auth username.

    basic_auth_password
        Optional - HTTP basic auth password.

    is_default
        Default: False

    json_data
        Optional - additional JSON data attached to the data source.

    profile
        Configuration profile used to connect to the Grafana instance.
        Default is 'grafana'.
    """
    if isinstance(profile, str):
        profile = __salt__["config.option"](profile)

    ret = {"name": name, "result": None, "comment": None, "changes": {}}
    datasource = _get_datasource(profile, name)
    data = _get_json_data(
        name,
        type,
        url,
        access,
        user,
        password,
        database,
        basic_auth,
        basic_auth_user,
        basic_auth_password,
        is_default,
        json_data,
    )

    if datasource:
        # Compute the diff before touching the API so that no request is
        # issued when nothing changed, and so test mode can report the
        # pending changes without applying them.
        diff = _diff(datasource, data)
        if not diff["new"] and not diff["old"]:
            ret["result"] = True
            ret["comment"] = "Data source {} already up-to-date".format(name)
            return ret
        if __opts__["test"]:
            ret["comment"] = "Data source {} would be updated".format(name)
            ret["changes"] = diff
            return ret
        requests.put(
            _get_url(profile, datasource["id"]),
            data,
            headers=_get_headers(profile),
            timeout=profile.get("grafana_timeout", 3),
        )
        ret["result"] = True
        ret["changes"] = diff
        ret["comment"] = "Data source {} updated".format(name)
    else:
        if __opts__["test"]:
            ret["comment"] = "New data source {} would be added".format(name)
            ret["changes"] = data
            return ret
        requests.post(
            "{}/api/datasources".format(profile["grafana_url"]),
            data,
            headers=_get_headers(profile),
            timeout=profile.get("grafana_timeout", 3),
        )
        ret["result"] = True
        ret["comment"] = "New data source {} added".format(name)
        ret["changes"] = data
    return ret
def absent(name, profile="grafana"):
    """
    Ensure that a data source is absent.

    name
        Name of the data source to remove.

    profile
        Configuration profile used to connect to the Grafana instance.
        Default is 'grafana'.
    """
    if isinstance(profile, str):
        profile = __salt__["config.option"](profile)

    # 'name' must be present in the return dict for the state outputter.
    ret = {"name": name, "result": None, "comment": None, "changes": {}}
    datasource = _get_datasource(profile, name)

    if not datasource:
        ret["result"] = True
        ret["comment"] = "Data source {} already absent".format(name)
        return ret

    # Honor test mode: report the pending deletion without performing it.
    if __opts__["test"]:
        ret["comment"] = "Data source {} would be deleted".format(name)
        return ret

    requests.delete(
        _get_url(profile, datasource["id"]),
        headers=_get_headers(profile),
        timeout=profile.get("grafana_timeout", 3),
    )

    ret["result"] = True
    ret["comment"] = "Data source {} was deleted".format(name)
    return ret
def _get_url(profile, datasource_id):
return "{}/api/datasources/{}".format(profile["grafana_url"], datasource_id)
def _get_datasource(profile, name):
    """Fetch the datasource named *name* from Grafana; None when absent."""
    response = requests.get(
        "{}/api/datasources".format(profile["grafana_url"]),
        headers=_get_headers(profile),
        timeout=profile.get("grafana_timeout", 3),
    )
    for datasource in response.json():
        if datasource["name"] == name:
            return datasource
    return None
def _get_headers(profile):
return {
"Accept": "application/json",
"Authorization": "Bearer {}".format(profile["grafana_token"]),
}
def _get_json_data(
name,
type,
url,
access="proxy",
user="",
password="",
database="",
basic_auth=False,
basic_auth_user="",
basic_auth_password="",
is_default=False,
json_data=None,
):
return {
"name": name,
"type": type,
"url": url,
"access": access,
"user": user,
"password": password,
"database": database,
"basicAuth": basic_auth,
"basicAuthUser": basic_auth_user,
"basicAuthPassword": basic_auth_password,
"isDefault": is_default,
"jsonData": json_data,
}
def _diff(old, new):
old_keys = old.keys()
old = old.copy()
new = new.copy()
for key in old_keys:
if key == "id" or key == "orgId":
del old[key]
elif key not in new.keys():
del old[key]
elif old[key] == new[key]:
del old[key]
del new[key]
return {"old": old, "new": new} | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/grafana_datasource.py | 0.657978 | 0.290993 | grafana_datasource.py | pypi |
import re
def __virtual__():
    """Load only when the Chef execution module is available."""
    if "chef.client" not in __salt__:
        return (False, "chef module could not be loaded")
    return True
def client(name, **kwargs):
    """
    Run ``chef-client`` via the ``chef.client`` execution module.

    name
        Unique identifier for the state. Does not affect the Chef run.

    All remaining keyword arguments are forwarded unchanged to
    ``chef.client``. Supported options include: ``server`` (chef server
    URL), ``client_key``, ``config``, ``config-file-jail``, ``environment``,
    ``group``, ``json-attributes``, ``localmode``, ``log_level`` (debug,
    info, warn, error, fatal), ``logfile``, ``node-name``,
    ``override-runlist``, ``pid``, ``run-lock-timeout``, ``runlist``,
    ``user`` and ``validation_key``.
    """
    return _run(name, "chef.client", kwargs)
def solo(name, **kwargs):
    """
    Run ``chef-solo`` via the ``chef.solo`` execution module.

    name
        Unique identifier for the state. Does not affect the Chef run.

    All remaining keyword arguments are forwarded unchanged to
    ``chef.solo``. Supported options include: ``config``, ``environment``,
    ``group``, ``json-attributes``, ``log_level`` (debug, info, warn, error,
    fatal), ``logfile``, ``node-name``, ``override-runlist``, ``recipe-url``,
    ``run-lock-timeout`` and ``user``.
    """
    return _run(name, "chef.solo", kwargs)
def _run(name, mod, kwargs):
    """Invoke the execution module *mod* and translate its output into a state return."""
    ret = {"name": name, "changes": {}, "result": None, "comment": ""}
    result = __salt__[mod](whyrun=__opts__["test"], **kwargs)

    if result["retcode"] != 0:
        ret["result"] = False
        ret["comment"] = "\n".join([result["stdout"], result["stderr"]])
        return ret

    if _has_changes(result["stdout"]):
        # Record the chef summary line when the run changed something.
        ret["changes"]["summary"] = _summary(result["stdout"])
        ret["result"] = None if __opts__["test"] else True
    else:
        ret["result"] = True
    return ret
def _summary(stdout):
return stdout.splitlines()[-1]
def _has_changes(stdout):
    """Return True when the chef run summary reports at least one updated resource.

    Matches the trailing "<Chef Client|Chef Infra Client> finished, N/M ..."
    or "Infra Phase complete, N/M ..." summary line. Returns False instead of
    raising ``AttributeError`` when the summary line is not recognized
    (``re.search`` yields ``None`` on no match).
    """
    regex = re.search(
        r"(Chef Client finished|Chef Infra Client finished|Infra Phase complete), (\d+)",
        _summary(stdout),
        re.IGNORECASE,
    )
    return bool(regex) and int(regex.group(2)) > 0
import logging
import salt.exceptions
# Get Logging Started
log = logging.getLogger(__name__)
LOGIN_DETAILS = {}
def __virtual__():
return "esxdatacenter"
def mod_init(low):
    """State-module init hook; nothing to set up, so always succeed."""
    return True
def datacenter_configured(name):
    """
    Makes sure a datacenter exists.

    If the state is run by an ``esxdatacenter`` minion, the name of the
    datacenter is retrieved from the proxy details, otherwise the datacenter
    has the same name as the state.

    Supported proxies: esxdatacenter

    name:
        Datacenter name. Ignored if the proxytype is ``esxdatacenter``.
    """
    proxy_type = __salt__["vsphere.get_proxy_type"]()
    if proxy_type == "esxdatacenter":
        dc_name = __salt__["esxdatacenter.get_details"]()["datacenter"]
    else:
        dc_name = name
    log.info("Running datacenter_configured for datacenter '%s'", dc_name)

    ret = {"name": name, "changes": {}, "result": None, "comment": "Default"}
    messages = []
    si = None
    try:
        si = __salt__["vsphere.get_service_instance_via_proxy"]()
        found = __salt__["vsphere.list_datacenters_via_proxy"](
            datacenter_names=[dc_name], service_instance=si
        )
        if found:
            messages.append(
                "Datacenter '{}' already exists. Nothing to be done.".format(dc_name)
            )
            log.info(messages[-1])
        elif __opts__["test"]:
            messages.append("State will create datacenter '{}'.".format(dc_name))
        else:
            log.debug("Creating datacenter '%s'", dc_name)
            __salt__["vsphere.create_datacenter"](dc_name, si)
            messages.append("Created datacenter '{}'.".format(dc_name))
            log.info(messages[-1])
            ret["changes"].update({"new": {"name": dc_name}})
        __salt__["vsphere.disconnect"](si)
        ret["comment"] = "\n".join(messages)
        ret["result"] = None if __opts__["test"] and ret["changes"] else True
        return ret
    except salt.exceptions.CommandExecutionError as exc:
        log.error("Error: %s", exc)
        # Always release the service instance, even on failure.
        if si:
            __salt__["vsphere.disconnect"](si)
        ret.update(
            {"result": None if __opts__["test"] else False, "comment": str(exc)}
        )
        return ret
__virtualname__ = "debconf"
def __virtual__():
    """Load only on Debian-family systems where the debconf module loaded."""
    if __grains__["os_family"] != "Debian":
        return (False, "debconf state only runs on Debian systems")
    if "debconf.show" not in __salt__:
        return (False, "debconf module could not be loaded")
    return __virtualname__
def set_file(name, source, template=None, context=None, defaults=None, **kwargs):
    """
    Set debconf selections from a file or a template.

    source
        The location of the file containing the package selections.

    template
        If set, the named templating engine is used to render the selections
        file; currently jinja, mako, and wempy are supported.

    context
        Overrides default context variables passed to the template.

    defaults
        Default context passed to the template.

    .. code-block:: yaml

        <state_id>:
          debconf.set_file:
            - source: salt://pathto/pkg.selections

        <state_id>:
          debconf.set_file:
            - source: salt://pathto/pkg.selections.jinja2
            - template: jinja
            - context:
                some_value: "false"
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    context = {} if context is None else context
    if not isinstance(context, dict):
        ret["result"] = False
        ret["comment"] = "Context must be formed as a dict"
        return ret

    defaults = {} if defaults is None else defaults
    if not isinstance(defaults, dict):
        ret["result"] = False
        ret["comment"] = "Defaults must be formed as a dict"
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Debconf selections would have been set."
        return ret

    if template:
        applied = __salt__["debconf.set_template"](
            source, template, context, defaults, **kwargs
        )
    else:
        applied = __salt__["debconf.set_file"](source, **kwargs)

    if applied:
        ret["comment"] = "Debconf selections were set."
    else:
        ret["result"] = False
        ret["comment"] = "Unable to set debconf selections from file."
    return ret
def set(name, data, **kwargs):
    """
    Set debconf selections.

    name
        The package name to set answers for.

    data
        Mapping of debconf questions to ``{'type': <type>, 'value': <value>}``
        dicts describing the pre-seeded answer for each question.

    .. code-block:: yaml

        <state_id>:
          debconf.set:
            - name: <name>
            - data:
                <question>: {'type': <type>, 'value': <value>}
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    current = __salt__["debconf.show"](name)

    for question, args in data.items():
        # debconf stores booleans as the literal strings 'true'/'false';
        # str(True)/str(False) would be silently ignored and later
        # overridden by a dpkg-reconfigure, so normalize them first.
        if args["type"] == "boolean":
            args["value"] = "true" if args["value"] else "false"

        answer = [question, args["type"], str(args["value"])]
        if current is not None and answer in current:
            if not ret["comment"]:
                ret["comment"] = "Unchanged answers: "
            ret["comment"] += "{} ".format(question)
            continue

        if __opts__["test"]:
            ret["result"] = None
            ret["changes"][question] = "New value: {}".format(args["value"])
        elif __salt__["debconf.set"](name, question, args["type"], args["value"]):
            if args["type"] == "password":
                # Never echo secrets back into the state changes.
                ret["changes"][question] = "(password hidden)"
            else:
                ret["changes"][question] = "{}".format(args["value"])
        else:
            ret["result"] = False
            ret["comment"] = "Some settings failed to be applied."
            ret["changes"][question] = "Failed to set!"

    if not ret["changes"]:
        ret["comment"] = "All specified answers are already set"
    return ret
r"""
Manage the Windows registry
===========================
Many python developers think of registry keys as if they were python keys in a
dictionary which is not the case. The windows registry is broken down into the
following components:
Hives
-----
This is the top level of the registry. They all begin with HKEY.
- HKEY_CLASSES_ROOT (HKCR)
- HKEY_CURRENT_USER(HKCU)
- HKEY_LOCAL MACHINE (HKLM)
- HKEY_USER (HKU)
- HKEY_CURRENT_CONFIG
Keys
----
Hives contain keys. These are basically the folders beneath the hives. They can
contain any number of subkeys.
When passing the hive\key values they must be quoted correctly depending on the
backslashes being used (``\`` vs ``\\``). The way backslashes are handled in
the state file is different from the way they are handled when working on the
CLI. The following are valid methods of passing the hive\key:
Using single backslashes:
HKLM\SOFTWARE\Python
'HKLM\SOFTWARE\Python'
Using double backslashes:
"HKLM\\SOFTWARE\\Python"
Values or Entries
-----------------
Values or Entries are the name/data pairs beneath the keys and subkeys. All keys
have a default name/data pair. The name is ``(Default)`` with a displayed value
of ``(value not set)``. The actual value is Null.
Example
-------
The following example is taken from the windows startup portion of the registry:
.. code-block:: text
[HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Run]
"RTHDVCPL"="\"C:\\Program Files\\Realtek\\Audio\\HDA\\RtkNGUI64.exe\" -s"
"NvBackend"="\"C:\\Program Files (x86)\\NVIDIA Corporation\\Update Core\\NvBackend.exe\""
"BTMTrayAgent"="rundll32.exe \"C:\\Program Files (x86)\\Intel\\Bluetooth\\btmshellex.dll\",TrayApp"
In this example these are the values for each:
Hive:
``HKEY_LOCAL_MACHINE``
Key and subkeys:
``SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run``
Value:
- There are 3 value names: ``RTHDVCPL``, ``NvBackend``, and ``BTMTrayAgent``
- Each value name has a corresponding value
"""
import logging
import salt.utils.stringutils
log = logging.getLogger(__name__)
def __virtual__():
    """
    Load this state if the reg module exists
    """
    # Every util function this module relies on must be loadable.
    required = (
        "reg.read_value",
        "reg.set_value",
        "reg.delete_value",
        "reg.delete_key_recursive",
    )
    for util in required:
        if util not in __utils__:
            return (
                False,
                "reg state module failed to load: missing util function: {}".format(
                    util
                ),
            )
    return "reg"
def _parse_key(key):
"""
split the hive from the key
"""
splt = key.split("\\")
hive = splt.pop(0)
key = "\\".join(splt)
return hive, key
def present(
    name,
    vname=None,
    vdata=None,
    vtype="REG_SZ",
    use_32bit_registry=False,
    win_owner=None,
    win_perms=None,
    win_deny_perms=None,
    win_inheritance=True,
    win_perms_reset=False,
):
    r"""
    Ensure a registry key or value is present.

    Args:

        name (str):
            Full path of the key, including the hive, e.g.
            ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``. Valid hives are
            HKEY_CURRENT_USER (HKCU), HKEY_LOCAL_MACHINE (HKLM) and
            HKEY_USERS (HKU).

        vname (str):
            The name of the value to create beneath the key. If not passed,
            the ``(Default)`` value is set.

        vdata (str, int, list, bytes):
            The data to store. The expected Python type depends on ``vtype``:
            REG_BINARY -> bytes (string data is converted automatically; the
            yaml ``!!binary`` tag may be used for raw bytes), REG_DWORD and
            REG_QWORD -> int, REG_EXPAND_SZ and REG_SZ -> str,
            REG_MULTI_SZ -> list of str. The ``(Default)`` value is always
            REG_SZ. Optional; without it the key is created with no
            item/value pairs.

        vtype (str):
            The registry value type: REG_BINARY, REG_DWORD, REG_EXPAND_SZ,
            REG_MULTI_SZ, REG_QWORD or REG_SZ (default).

        use_32bit_registry (bool):
            Use the 32bit portion of the registry on 64bit Windows. 32bit
            Windows ignores this parameter. Default is False.

        win_owner (str):
            Owner of the registry key (ownership applies to the key, not to
            value/data pairs). Defaults to the account Salt runs as.

            .. versionadded:: 2019.2.0

        win_perms (dict):
            Grant permissions per account with optional propagation, e.g.
            ``{'Administrators': {'perms': 'full_control', 'applies_to':
            'this_key_subkeys'}}``. Basic perms: full_control, read, write.
            Advanced perms (as a list): delete, query_value, set_value,
            create_subkey, enum_subkeys, notify, create_link, read_control,
            write_dac, write_owner. ``applies_to`` is one of this_key_only,
            this_key_subkeys (default), subkeys_only.

            .. versionadded:: 2019.2.0

        win_deny_perms (dict):
            Deny permissions, same shape as ``win_perms``. Deny always takes
            precedence over grant.

            .. versionadded:: 2019.2.0

        win_inheritance (bool):
            ``True`` (default) to inherit permissions from the parent key.

            .. versionadded:: 2019.2.0

        win_perms_reset (bool):
            ``True`` to replace the existing DACL with the settings defined
            here; ``False`` (default) to append to it.

            .. versionadded:: 2019.2.0

    Returns:
        dict: A dictionary showing the results of the registry operation.

    Example:

    .. code-block:: yaml

        HKEY_CURRENT_USER\\SOFTWARE\\Salt:
          reg.present:
            - vname: version
            - vdata: 2016.3.1
    """
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}

    hive, key = _parse_key(name)
    obj_name = "\\".join([hive, key])
    obj_type = "registry32" if use_32bit_registry else "registry"

    current = __utils__["reg.read_value"](
        hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry
    )

    # Cast vdata to the Python type matching vtype so it can be compared
    # against what the registry currently holds.
    vdata_decoded = __utils__["reg.cast_vdata"](vdata=vdata, vtype=vtype)

    display_vname = (
        salt.utils.stringutils.to_unicode(vname, "utf-8") if vname else "(Default)"
    )

    # Value already present: only the permissions may need attention.
    # Both vdata and success are checked because vdata can be None.
    if current["success"] and vdata_decoded == current["vdata"]:
        ret["comment"] = "{} in {} is already present".format(
            display_vname, salt.utils.stringutils.to_unicode(name, "utf-8")
        )
        return __utils__["dacl.check_perms"](
            obj_name=obj_name,
            obj_type=obj_type,
            ret=ret,
            owner=win_owner,
            grant_perms=win_perms,
            deny_perms=win_deny_perms,
            inheritance=win_inheritance,
            reset=win_perms_reset,
        )

    add_change = {
        "Key": r"{}\{}".format(hive, key),
        "Entry": "{}".format(display_vname),
        "Value": vdata_decoded,
        "Owner": win_owner,
        "Perms": {"Grant": win_perms, "Deny": win_deny_perms},
        "Inheritance": win_inheritance,
    }

    # In test mode only report the pending change.
    if __opts__["test"]:
        ret["result"] = None
        ret["changes"] = {"reg": {"Will add": add_change}}
        return ret

    ret["result"] = __utils__["reg.set_value"](
        hive=hive,
        key=key,
        vname=vname,
        vdata=vdata,
        vtype=vtype,
        use_32bit_registry=use_32bit_registry,
    )

    if not ret["result"]:
        ret["changes"] = {}
        ret["comment"] = r"Failed to add {} to {}\{}".format(vname, hive, key)
        return ret

    ret["changes"] = {"reg": {"Added": add_change}}
    ret["comment"] = r"Added {} to {}\{}".format(vname, hive, key)
    # The value was written; now apply ownership/permissions to the key.
    return __utils__["dacl.check_perms"](
        obj_name=obj_name,
        obj_type=obj_type,
        ret=ret,
        owner=win_owner,
        grant_perms=win_perms,
        deny_perms=win_deny_perms,
        inheritance=win_inheritance,
        reset=win_perms_reset,
    )
def absent(name, vname=None, use_32bit_registry=False):
    r"""
    Ensure that a registry value is not present. Use ``key_absent`` to
    remove an entire key instead.

    Args:

        name (str):
            Full path of the registry key, hive included, e.g.
            ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``. Supported hives:

            - HKEY_CURRENT_USER or HKCU
            - HKEY_LOCAL_MACHINE or HKLM
            - HKEY_USERS or HKU

        vname (str):
            Name of the value underneath the key. When omitted, the
            ``(Default)`` value is targeted.

        use_32bit_registry (bool):
            Operate on the 32bit portion of the registry. Only meaningful
            on 64bit Windows; 32bit Windows ignores it. Default is False.

    Returns:
        dict: A dictionary showing the results of the registry operation.

    CLI Example:

    .. code-block:: yaml

        'HKEY_CURRENT_USER\\SOFTWARE\\Salt':
          reg.absent
            - vname: version

    The example above deletes the value named ``version`` from the
    ``SOFTWARE\\Salt`` key of ``HKEY_CURRENT_USER``; without ``vname`` the
    ``(Default)`` value would be removed instead.
    """
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}
    hive, key = _parse_key(name)

    # Nothing to do when the value is missing or was never set.
    probe = __utils__["reg.read_value"](
        hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry
    )
    if not probe["success"] or probe["vdata"] == "(value not set)":
        ret["comment"] = "{} is already absent".format(name)
        return ret

    pending = {
        "Key": r"{}\{}".format(hive, key),
        "Entry": "{}".format(vname if vname else "(Default)"),
    }

    # Dry run: report what would happen without touching the registry.
    if __opts__["test"]:
        ret["result"] = None
        ret["changes"] = {"reg": {"Will remove": pending}}
        return ret

    # Remove the value for real.
    ret["result"] = __utils__["reg.delete_value"](
        hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry
    )
    if ret["result"]:
        ret["changes"] = {"reg": {"Removed": pending}}
        ret["comment"] = r"Removed {} from {}".format(key, hive)
    else:
        ret["changes"] = {}
        ret["comment"] = r"Failed to remove {} from {}".format(key, hive)
    return ret
def key_absent(name, use_32bit_registry=False):
    r"""
    .. versionadded:: 2015.5.4
    Ensure a registry key is removed. This will remove the key, subkeys, and all
    value entries.
    Args:
        name (str):
            A string representing the full path to the key to be removed to
            include the hive and the keypath. The hive can be any of the
            following:
            - HKEY_LOCAL_MACHINE or HKLM
            - HKEY_CURRENT_USER or HKCU
            - HKEY_USER or HKU
        use_32bit_registry (bool):
            Use the 32bit portion of the registry. Applies only to 64bit
            windows. 32bit Windows will ignore this parameter. Default is False.
    Returns:
        dict: A dictionary showing the results of the registry operation.
    CLI Example:
    The following example will delete the ``SOFTWARE\DeleteMe`` key in the
    ``HKEY_LOCAL_MACHINE`` hive including all its subkeys and value pairs.
    .. code-block:: yaml
        remove_key_demo:
          reg.key_absent:
            - name: HKEY_CURRENT_USER\SOFTWARE\DeleteMe
    In the above example the path is interpreted as follows:
    - ``HKEY_CURRENT_USER`` is the hive
    - ``SOFTWARE\DeleteMe`` is the key
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    hive, key = _parse_key(name)
    # If the key cannot be read it does not exist: desired state reached.
    if not __utils__["reg.read_value"](
        hive=hive, key=key, use_32bit_registry=use_32bit_registry
    )["success"]:
        ret["comment"] = "{} is already absent".format(name)
        return ret
    # Record the pending change up front; it is reported both for the dry
    # run and for the successful real run.
    ret["changes"] = {"reg": {"Removed": {"Key": r"{}\{}".format(hive, key)}}}
    # Check for test option
    if __opts__["test"]:
        ret["result"] = None
        return ret
    # Delete the key recursively (subkeys and value entries included)
    __utils__["reg.delete_key_recursive"](
        hive=hive, key=key, use_32bit_registry=use_32bit_registry
    )
    # Verify removal by reading the key back; a successful read means the
    # delete did not take effect.
    if __utils__["reg.read_value"](
        hive=hive, key=key, use_32bit_registry=use_32bit_registry
    )["success"]:
        ret["result"] = False
        ret["changes"] = {}
        ret["comment"] = "Failed to remove registry key {}".format(name)
    return ret
def __virtual__():
    """
    Load this state only when the wordpress execution module is available.
    """
    if "wordpress.show_plugin" not in __salt__:
        return (False, "wordpress module could not be loaded")
    return True
def installed(name, user, admin_user, admin_password, admin_email, title, url):
    """
    Run the initial setup of wordpress

    name
        path to the wordpress installation

    user
        user that owns the files for the wordpress installation

    admin_user
        username for wordpress website administrator user

    admin_password
        password for wordpress website administrator user

    admin_email
        email for wordpress website administrator user

    title
        title for the wordpress website

    url
        url for the wordpress website

    .. code-block:: yaml

        /var/www/html:
          wordpress.installed:
            - title: Daniel's Awesome Blog
            - user: apache
            - admin_user: dwallace
            - admin_email: dwallace@example.com
            - admin_password: password123
            - url: https://blog.dwallace.com
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}

    # Already set up: nothing to do.
    if __salt__["wordpress.is_installed"](name, user):
        ret["result"] = True
        ret["comment"] = "Wordpress is already installed: {}".format(name)
        return ret

    # Dry run: only announce the pending installation.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Wordpress will be installed: {}".format(name)
        return ret

    outcome = __salt__["wordpress.install"](
        name, user, admin_user, admin_password, admin_email, title, url
    )
    if outcome:
        ret["result"] = True
        ret["comment"] = "Wordpress Installed: {}".format(name)
        ret["changes"] = {"new": outcome}
    else:
        ret["comment"] = "Failed to install wordpress: {}".format(name)
    return ret
def activated(name, path, user):
    """
    Activate wordpress plugins

    name
        name of plugin to activate

    path
        path to wordpress installation

    user
        user who should own the files in the wordpress installation

    .. code-block:: yaml

        HyperDB:
          wordpress.activated:
            - path: /var/www/html
            - user: apache
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    before = __salt__["wordpress.show_plugin"](name, path, user)

    # Already active: report success without touching anything.
    if before["status"] == "active":
        ret["result"] = True
        ret["comment"] = "Plugin already activated: {}".format(name)
        return ret

    # Dry run: only announce the pending activation.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Plugin will be activated: {}".format(name)
        return ret

    resp = __salt__["wordpress.activate"](name, path, user)
    if resp is True or resp is None:
        # True means it was just activated; None that it already was.
        ret["result"] = True
        ret["comment"] = (
            "Plugin activated: {}" if resp is True else "Plugin already activated: {}"
        ).format(name)
        ret["changes"] = {
            "old": before,
            "new": __salt__["wordpress.show_plugin"](name, path, user),
        }
    else:
        ret["comment"] = "Plugin failed to activate: {}".format(name)
    return ret
def deactivated(name, path, user):
    """
    Deactivate wordpress plugins
    name
        name of plugin to deactivate
    path
        path to wordpress installation
    user
        user who should own the files in the wordpress installation
    .. code-block:: yaml
        HyperDB:
          wordpress.deactivated:
            - path: /var/www/html
            - user: apache
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    # Snapshot of the plugin state before acting; reused as "old" below.
    check = __salt__["wordpress.show_plugin"](name, path, user)
    if check["status"] == "inactive":
        # Already in desired state: nothing to change.
        ret["result"] = True
        ret["comment"] = "Plugin already deactivated: {}".format(name)
        return ret
    elif __opts__["test"]:
        # Dry run: only announce the pending deactivation.
        ret["result"] = None
        ret["comment"] = "Plugin will be deactivated: {}".format(name)
        return ret
    resp = __salt__["wordpress.deactivate"](name, path, user)
    # True: the plugin was just deactivated by this call.
    if resp is True:
        ret["result"] = True
        ret["comment"] = "Plugin deactivated: {}".format(name)
        ret["changes"] = {
            "old": check,
            "new": __salt__["wordpress.show_plugin"](name, path, user),
        }
    # None: treated as "was already inactive" — still a success.
    elif resp is None:
        ret["result"] = True
        ret["comment"] = "Plugin already deactivated: {}".format(name)
        ret["changes"] = {
            "old": check,
            "new": __salt__["wordpress.show_plugin"](name, path, user),
        }
    else:
        ret["comment"] = "Plugin failed to deactivate: {}".format(name)
    return ret
def __virtual__():
    """
    Load this state only when the mssql execution module is available.
    """
    if "mssql.version" not in __salt__:
        return (False, "mssql module could not be loaded")
    return True
def present(name, owner=None, grants=None, **kwargs):
    """
    Ensure that the named role is present with the specified options

    name
        The name of the role to manage

    owner
        Adds owner using AUTHORIZATION option

    grants
        Can only be a list of strings
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    # An existing role is left untouched; its grants are not reconciled.
    if __salt__["mssql.role_exists"](name, **kwargs):
        ret[
            "comment"
        ] = "Role {} is already present (Not going to try to set its grants)".format(
            name
        )
        return ret

    # Dry run: only announce the pending creation.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Role {} is set to be added".format(name)
        return ret

    created = __salt__["mssql.role_create"](name, owner=owner, grants=grants, **kwargs)
    # mssql.role_create returns True on success or an error string on
    # failure; error strings are truthy, so compare identity against True.
    if created is not True:
        ret["result"] = False
        ret["comment"] += "Role {} failed to be created: {}".format(name, created)
        return ret

    ret["comment"] += "Role {} has been added".format(name)
    ret["changes"][name] = "Present"
    return ret
def absent(name, **kwargs):
    """
    Ensure that the named role is absent

    name
        The name of the role to remove

    kwargs
        Optional connection arguments forwarded to the mssql module.
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    # Forward the connection kwargs so the existence check targets the same
    # server/database as the removal below (previously they were dropped,
    # unlike in present() and mssql.role_remove()).
    if not __salt__["mssql.role_exists"](name, **kwargs):
        ret["comment"] = "Role {} is not present".format(name)
        return ret
    # Dry run: only announce the pending removal.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Role {} is set to be removed".format(name)
        return ret
    if __salt__["mssql.role_remove"](name, **kwargs):
        ret["comment"] = "Role {} has been removed".format(name)
        ret["changes"][name] = "Absent"
        return ret
    # Fall through: removal failed.
    ret["result"] = False
    ret["comment"] = "Role {} failed to be removed".format(name)
    return ret
def __virtual__():
    """
    Load this state only when the Open vSwitch execution module is available.
    """
    if "openvswitch.bridge_create" not in __salt__:
        return (False, "openvswitch module could not be loaded")
    return True
def present(name):
    """
    Ensures that the named bridge exists, eventually creates it.

    Args:
        name: The name of the bridge.
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # Pre-built messages so the dry run and the real run report the same
    # wording.
    comment_bridge_created = "Bridge {} created.".format(name)
    comment_bridge_notcreated = "Unable to create bridge: {}.".format(name)
    comment_bridge_exists = "Bridge {} already exists.".format(name)
    changes_bridge_created = {
        name: {
            "old": "Bridge {} does not exist.".format(name),
            "new": "Bridge {} created".format(name),
        }
    }

    # Already there: succeed without changes, in test mode and real mode
    # alike.
    if __salt__["openvswitch.bridge_exists"](name):
        ret["result"] = True
        ret["comment"] = comment_bridge_exists
        return ret

    # Dry run: only announce the pending creation.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = comment_bridge_created
        return ret

    if __salt__["openvswitch.bridge_create"](name):
        ret["result"] = True
        ret["comment"] = comment_bridge_created
        ret["changes"] = changes_bridge_created
    else:
        ret["result"] = False
        ret["comment"] = comment_bridge_notcreated
    return ret
def absent(name):
    """
    Ensures that the named bridge does not exist, eventually deletes it.

    Args:
        name: The name of the bridge.

    Returns:
        dict: Standard state return (name/result/changes/comment).
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}
    # Comment and change messages, pre-built so the dry run and the real
    # run report identical wording.
    comment_bridge_deleted = "Bridge {} deleted.".format(name)
    comment_bridge_notdeleted = "Unable to delete bridge: {}.".format(name)
    comment_bridge_notexists = "Bridge {} does not exist.".format(name)
    changes_bridge_deleted = {
        name: {
            "old": "Bridge {} exists.".format(name),
            "new": "Bridge {} deleted.".format(name),
        }
    }
    bridge_exists = __salt__["openvswitch.bridge_exists"](name)
    # Dry run, test=true mode: only report what would happen.
    if __opts__["test"]:
        if not bridge_exists:
            ret["result"] = True
            ret["comment"] = comment_bridge_notexists
        else:
            ret["result"] = None
            ret["comment"] = comment_bridge_deleted
        return ret
    if not bridge_exists:
        # Already absent: nothing to do.
        ret["result"] = True
        ret["comment"] = comment_bridge_notexists
    else:
        bridge_delete = __salt__["openvswitch.bridge_delete"](name)
        if bridge_delete:
            ret["result"] = True
            ret["comment"] = comment_bridge_deleted
            ret["changes"] = changes_bridge_deleted
        else:
            ret["result"] = False
            ret["comment"] = comment_bridge_notdeleted
    return ret
import logging
from datetime import datetime
from salt.utils.odict import OrderedDict

log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = "zfs"
# Compare modifiers for zfs.schedule_snapshot: each dict zeroes out the
# datetime fields finer than the named granularity, so timestamps can be
# bucketed per hour/day/month/year via datetime.replace(**comp_*).
comp_hour = {"minute": 0}
comp_day = {"minute": 0, "hour": 0}
comp_month = {"minute": 0, "hour": 0, "day": 1}
comp_year = {"minute": 0, "hour": 0, "day": 1, "month": 1}
def __virtual__():
    """
    Provides zfs state
    """
    if __grains__.get("zfs_support"):
        return __virtualname__
    return False, "The zfs state cannot be loaded: zfs not supported"
def _absent(name, dataset_type, force=False, recursive=False):
    """
    Shared implementation behind the *_absent states.

    name : string
        name of dataset
    dataset_type : string [filesystem, volume, snapshot, or bookmark]
        type of dataset to remove
    force : boolean
        try harder to destroy the dataset
    recursive : boolean
        also destroy all the child datasets
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    dataset_type = dataset_type.lower()
    log.debug("zfs.%s_absent::%s::config::force = %s", dataset_type, name, force)
    log.debug(
        "zfs.%s_absent::%s::config::recursive = %s", dataset_type, name, recursive
    )

    # Nothing with this name and type exists: already in desired state.
    if not __salt__["zfs.exists"](name, **{"type": dataset_type}):
        ret["comment"] = "{} {} is absent".format(dataset_type, name)
        return ret

    # The dataset exists: destroy it (or pretend to under test=True).
    if __opts__["test"]:
        mod_res = OrderedDict([("destroyed", True)])
    else:
        mod_res = __salt__["zfs.destroy"](
            name, **{"force": force, "recursive": recursive}
        )

    ret["result"] = mod_res["destroyed"]
    if ret["result"]:
        ret["changes"][name] = "destroyed"
        ret["comment"] = "{} {} was destroyed".format(
            dataset_type,
            name,
        )
    else:
        ret["comment"] = "failed to destroy {} {}".format(
            dataset_type,
            name,
        )
        if "error" in mod_res:
            ret["comment"] = mod_res["error"]
    return ret
def filesystem_absent(name, force=False, recursive=False):
    """
    ensure filesystem is absent on the system

    name : string
        name of filesystem
    force : boolean
        try harder to destroy the dataset (zfs destroy -f)
    recursive : boolean
        also destroy all the child datasets (zfs destroy -r)

    .. warning::

        If a volume with ``name`` exists, this state will succeed without
        destroying the volume specified by ``name``. This module is dataset type sensitive.

    """
    # Reject malformed dataset names up front; delegate the real work.
    if __utils__["zfs.is_dataset"](name):
        return _absent(name, "filesystem", force, recursive)
    return {
        "name": name,
        "changes": {},
        "result": False,
        "comment": "invalid dataset name: {}".format(name),
    }
def volume_absent(name, force=False, recursive=False):
    """
    ensure volume is absent on the system

    name : string
        name of volume
    force : boolean
        try harder to destroy the dataset (zfs destroy -f)
    recursive : boolean
        also destroy all the child datasets (zfs destroy -r)

    .. warning::

        If a filesystem with ``name`` exists, this state will succeed without
        destroying the filesystem specified by ``name``. This module is dataset type sensitive.

    """
    # Reject malformed dataset names up front; delegate the real work.
    if __utils__["zfs.is_dataset"](name):
        return _absent(name, "volume", force, recursive)
    return {
        "name": name,
        "changes": {},
        "result": False,
        "comment": "invalid dataset name: {}".format(name),
    }
def snapshot_absent(name, force=False, recursive=False):
    """
    ensure snapshot is absent on the system

    name : string
        name of snapshot
    force : boolean
        try harder to destroy the dataset (zfs destroy -f)
    recursive : boolean
        also destroy all the child datasets (zfs destroy -r)
    """
    # Reject malformed snapshot names up front; delegate the real work.
    if __utils__["zfs.is_snapshot"](name):
        return _absent(name, "snapshot", force, recursive)
    return {
        "name": name,
        "changes": {},
        "result": False,
        "comment": "invalid snapshot name: {}".format(name),
    }
def bookmark_absent(name, force=False, recursive=False):
    """
    ensure bookmark is absent on the system

    name : string
        name of bookmark
    force : boolean
        try harder to destroy the dataset (zfs destroy -f)
    recursive : boolean
        also destroy all the child datasets (zfs destroy -r)
    """
    # Reject malformed bookmark names up front; delegate the real work.
    if __utils__["zfs.is_bookmark"](name):
        return _absent(name, "bookmark", force, recursive)
    return {
        "name": name,
        "changes": {},
        "result": False,
        "comment": "invalid bookmark name: {}".format(name),
    }
def hold_absent(name, snapshot, recursive=False):
    """
    ensure hold is absent on the system

    name : string
        name of hold (the hold tag)
    snapshot : string
        name of snapshot
    recursive : boolean
        recursively releases a hold with the given tag on the snapshots of all descendent file systems.
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    ## log configuration
    log.debug("zfs.hold_absent::%s::config::snapshot = %s", name, snapshot)
    log.debug("zfs.hold_absent::%s::config::recursive = %s", name, recursive)
    ## check we have a snapshot/tag name
    if not __utils__["zfs.is_snapshot"](snapshot):
        ret["result"] = False
        ret["comment"] = "invalid snapshot name: {}".format(snapshot)
        return ret
    # A tag must not look like a snapshot or bookmark; "error" is reserved
    # because zfs.holds uses that key to report failures (see below).
    if (
        __utils__["zfs.is_snapshot"](name)
        or __utils__["zfs.is_bookmark"](name)
        or name == "error"
    ):
        ret["result"] = False
        ret["comment"] = "invalid tag name: {}".format(name)
        return ret
    ## release hold if required
    holds = __salt__["zfs.holds"](snapshot)
    if name in holds:
        ## NOTE: hold found for snapshot, release it
        if not __opts__["test"]:
            mod_res = __salt__["zfs.release"](
                name, snapshot, **{"recursive": recursive}
            )
        else:
            # test=True: pretend the release succeeded.
            mod_res = OrderedDict([("released", True)])
        ret["result"] = mod_res["released"]
        if ret["result"]:
            ret["changes"] = {snapshot: {name: "released"}}
            ret["comment"] = "hold {} released".format(
                name,
            )
        else:
            ret["comment"] = "failed to release hold {}".format(
                name,
            )
            if "error" in mod_res:
                ret["comment"] = mod_res["error"]
    elif "error" in holds:
        ## NOTE: zfs.holds itself failed; surface its error message
        ret["result"] = False
        ret["comment"] = holds["error"]
    else:
        ## NOTE: no hold found with name for snapshot
        ret["comment"] = "hold {} is absent".format(
            name,
        )
    return ret
def hold_present(name, snapshot, recursive=False):
    """
    ensure hold is present on the system

    name : string
        name of hold (the hold tag)
    snapshot : string
        name of snapshot
    recursive : boolean
        recursively add hold with the given tag on the snapshots of all descendent file systems.
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    log.debug("zfs.hold_present::%s::config::snapshot = %s", name, snapshot)
    log.debug("zfs.hold_present::%s::config::recursive = %s", name, recursive)

    # Validate the snapshot first, then make sure the tag itself does not
    # look like a snapshot/bookmark (or the reserved word "error").
    if not __utils__["zfs.is_snapshot"](snapshot):
        ret["result"] = False
        ret["comment"] = "invalid snapshot name: {}".format(snapshot)
        return ret
    if (
        __utils__["zfs.is_snapshot"](name)
        or __utils__["zfs.is_bookmark"](name)
        or name == "error"
    ):
        ret["result"] = False
        ret["comment"] = "invalid tag name: {}".format(name)
        return ret

    # Hold already present: nothing to change.
    if name in __salt__["zfs.holds"](snapshot):
        ret["comment"] = "hold {} is present for {}".format(
            name,
            snapshot,
        )
        return ret

    # Place the hold (or pretend to under test=True).
    if __opts__["test"]:
        mod_res = OrderedDict([("held", True)])
    else:
        mod_res = __salt__["zfs.hold"](name, snapshot, **{"recursive": recursive})
    ret["result"] = mod_res["held"]
    if ret["result"]:
        ret["changes"] = OrderedDict([(snapshot, OrderedDict([(name, "held")]))])
        ret["comment"] = "hold {} added to {}".format(name, snapshot)
    else:
        ret["comment"] = "failed to add hold {} to {}".format(name, snapshot)
        if "error" in mod_res:
            ret["comment"] = mod_res["error"]
    return ret
def _dataset_present(
    dataset_type,
    name,
    properties,
    volume_size=None,
    sparse=False,
    create_parent=False,
    cloned_from=None,
):
    """
    internal handler for filesystem_present/volume_present
    dataset_type : string
        volume or filesystem
    name : string
        name of volume
    volume_size : string
        size of volume
    sparse : boolean
        create sparse volume
    create_parent : boolean
        creates all the non-existing parent datasets.
        any property specified on the command line using the -o option is ignored.
    cloned_from : string
        name of snapshot to clone
    properties : dict
        additional zfs properties (-o)
    .. note::
        ``cloned_from`` is only use if the volume does not exist yet,
        when ``cloned_from`` is set after the volume exists it will be ignored.
    .. note::
        Properties do not get cloned, if you specify the properties in the state file
        they will be applied on a subsequent run.
        ``volume_size`` is considered a property, so the volume's size will be
        corrected when the properties get updated if it differs from the
        original volume.
        The sparse parameter is ignored when using ``cloned_from``.
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    ## fallback dataset_type to filesystem if out of range
    if dataset_type not in ["filesystem", "volume"]:
        dataset_type = "filesystem"
    ## ensure properties are zfs values
    if properties is None:
        properties = {}
    properties = __utils__["zfs.from_auto_dict"](properties)
    if volume_size:
        ## NOTE: add volsize to properties so it is diffed/updated like any
        ##       other zfs property
        volume_size = __utils__["zfs.from_size"](volume_size)
        properties.update({"volsize": volume_size})
    # The sorted isn't necessary for proper behavior, but it helps for the unit
    # tests.
    propnames = ",".join(sorted(properties.keys()))
    ## log configuration
    log.debug(
        "zfs.%s_present::%s::config::volume_size = %s", dataset_type, name, volume_size
    )
    log.debug("zfs.%s_present::%s::config::sparse = %s", dataset_type, name, sparse)
    log.debug(
        "zfs.%s_present::%s::config::create_parent = %s",
        dataset_type,
        name,
        create_parent,
    )
    log.debug(
        "zfs.%s_present::%s::config::cloned_from = %s", dataset_type, name, cloned_from
    )
    log.debug(
        "zfs.%s_present::%s::config::properties = %s", dataset_type, name, properties
    )
    ## check we have valid filesystem name/volume name/clone snapshot
    if not __utils__["zfs.is_dataset"](name):
        ret["result"] = False
        ret["comment"] = "invalid dataset name: {}".format(name)
        return ret
    if cloned_from and not __utils__["zfs.is_snapshot"](cloned_from):
        ret["result"] = False
        ret["comment"] = "{} is not a snapshot".format(cloned_from)
        return ret
    ## ensure dataset is in correct state
    ## NOTE: update the dataset
    exists = __salt__["zfs.exists"](name, **{"type": dataset_type})
    if exists and len(properties) == 0:
        # Dataset exists and no properties were requested: nothing to do.
        ret["comment"] = "{} {} is uptodate".format(dataset_type, name)
    elif exists and len(properties) > 0:
        ## NOTE: fetch current volume properties
        properties_current = __salt__["zfs.get"](
            name,
            properties=propnames,
            type=dataset_type,
            fields="value",
            depth=0,
            parsable=True,
        ).get(name, OrderedDict())
        ## NOTE: build list of properties to update
        properties_update = []
        for prop in properties:
            ## NOTE: skip unexisting properties
            if prop not in properties_current:
                log.warning(
                    "zfs.%s_present::%s::update - unknown property: %s",
                    dataset_type,
                    name,
                    prop,
                )
                continue
            ## NOTE: compare current and wanted value
            if properties_current[prop]["value"] != properties[prop]:
                properties_update.append(prop)
        ## NOTE: update pool properties, one zfs.set call per property so a
        ##       single failure does not abort the rest
        for prop in properties_update:
            if not __opts__["test"]:
                mod_res = __salt__["zfs.set"](name, **{prop: properties[prop]})
            else:
                mod_res = OrderedDict([("set", True)])
            if mod_res["set"]:
                if name not in ret["changes"]:
                    ret["changes"][name] = {}
                ret["changes"][name][prop] = properties[prop]
            else:
                ret["result"] = False
                if ret["comment"] == "":
                    ret["comment"] = "The following properties were not updated:"
                ret["comment"] = "{} {}".format(ret["comment"], prop)
        ## NOTE: update comment
        if ret["result"] and name in ret["changes"]:
            ret["comment"] = "{} {} was updated".format(dataset_type, name)
        elif ret["result"]:
            ret["comment"] = "{} {} is uptodate".format(dataset_type, name)
        else:
            ret["comment"] = "{} {} failed to be updated".format(dataset_type, name)
    ## NOTE: create or clone the dataset
    elif not exists:
        mod_res_action = "cloned" if cloned_from else "created"
        if __opts__["test"]:
            ## NOTE: pretend to create/clone
            mod_res = OrderedDict([(mod_res_action, True)])
        elif cloned_from:
            ## NOTE: add volsize to properties
            if volume_size:
                properties["volsize"] = volume_size
            ## NOTE: clone the dataset
            mod_res = __salt__["zfs.clone"](
                cloned_from,
                name,
                **{"create_parent": create_parent, "properties": properties}
            )
        else:
            ## NOTE: create the dataset
            mod_res = __salt__["zfs.create"](
                name,
                **{
                    "create_parent": create_parent,
                    "properties": properties,
                    "volume_size": volume_size,
                    "sparse": sparse,
                }
            )
        ret["result"] = mod_res[mod_res_action]
        if ret["result"]:
            ret["changes"][name] = mod_res_action
            if properties:
                ret["changes"][name] = properties
            ret["comment"] = "{} {} was {}".format(
                dataset_type,
                name,
                mod_res_action,
            )
        else:
            # strip the trailing "d": "created" -> "create", "cloned" -> "clone"
            ret["comment"] = "failed to {} {} {}".format(
                mod_res_action[:-1],
                dataset_type,
                name,
            )
            if "error" in mod_res:
                ret["comment"] = mod_res["error"]
    return ret
def filesystem_present(name, create_parent=False, properties=None, cloned_from=None):
    """
    ensure filesystem exists and has properties set

    name : string
        name of filesystem
    create_parent : boolean
        creates all the non-existing parent datasets.
        any property specified on the command line using the -o option is ignored.
    cloned_from : string
        name of snapshot to clone
    properties : dict
        additional zfs properties (-o)

    .. note::
        ``cloned_from`` is only used while the filesystem does not exist yet;
        once it exists the value is ignored.

    .. note::
        Properties do not get cloned; properties given in the state file
        are applied on a subsequent run.

    """
    # Thin wrapper: the shared handler does all of the work.
    return _dataset_present(
        "filesystem",
        name,
        properties,
        create_parent=create_parent,
        cloned_from=cloned_from,
    )
def volume_present(
    name,
    volume_size,
    sparse=False,
    create_parent=False,
    properties=None,
    cloned_from=None,
):
    """
    ensure volume exists and has properties set

    name : string
        name of volume
    volume_size : string
        size of volume
    sparse : boolean
        create sparse volume
    create_parent : boolean
        creates all the non-existing parent datasets.
        any property specified on the command line using the -o option is ignored.
    cloned_from : string
        name of snapshot to clone
    properties : dict
        additional zfs properties (-o)

    .. note::
        ``cloned_from`` is only used while the volume does not exist yet;
        once it exists the value is ignored.

    .. note::
        Properties do not get cloned; properties given in the state file
        are applied on a subsequent run.

        ``volume_size`` is treated as a property, so the volume's size is
        corrected during a property update when it differs from the
        original volume.

        The sparse parameter is ignored when using ``cloned_from``.

    """
    # Thin wrapper: the shared handler folds volume_size into the
    # dataset properties (as "volsize").
    return _dataset_present(
        "volume",
        name,
        properties,
        volume_size,
        sparse=sparse,
        create_parent=create_parent,
        cloned_from=cloned_from,
    )
def bookmark_present(name, snapshot):
    """
    ensure bookmark exists

    name : string
        name of bookmark
    snapshot : string
        name of snapshot to bookmark
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    ## log configuration
    log.debug("zfs.bookmark_present::%s::config::snapshot = %s", name, snapshot)
    ## check we have valid snapshot/bookmark name
    if not __utils__["zfs.is_snapshot"](snapshot):
        ret["result"] = False
        # BUGFIX: report the offending snapshot, not the bookmark name.
        ret["comment"] = "invalid snapshot name: {}".format(snapshot)
        return ret
    if "#" not in name and "/" not in name:
        ## NOTE: simple bookmark name given; derive the full bookmark path
        ##       from the snapshot's dataset:
        ##       pool/fs@snap + bm --> pool/fs#bm
        name = "{}#{}".format(snapshot[: snapshot.index("@")], name)
        ret["name"] = name
    if not __utils__["zfs.is_bookmark"](name):
        ret["result"] = False
        ret["comment"] = "invalid bookmark name: {}".format(name)
        return ret
    ## ensure bookmark exists
    if not __salt__["zfs.exists"](name, **{"type": "bookmark"}):
        ## NOTE: bookmark the snapshot (or pretend to under test=True)
        if not __opts__["test"]:
            mod_res = __salt__["zfs.bookmark"](snapshot, name)
        else:
            mod_res = OrderedDict([("bookmarked", True)])
        ret["result"] = mod_res["bookmarked"]
        if ret["result"]:
            ret["changes"][name] = snapshot
            ret["comment"] = "{} bookmarked as {}".format(snapshot, name)
        else:
            ret["comment"] = "failed to bookmark {}".format(snapshot)
            if "error" in mod_res:
                ret["comment"] = mod_res["error"]
    else:
        ## NOTE: bookmark already exists
        ret["comment"] = "bookmark is present"
    return ret
def snapshot_present(name, recursive=False, properties=None):
    """
    ensure snapshot exists and has properties set

    name : string
        name of snapshot
    recursive : boolean
        recursively create snapshots of all descendent datasets
    properties : dict
        additional zfs properties (-o)

    .. note:
        Properties are only set at creation time
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    log.debug("zfs.snapshot_present::%s::config::recursive = %s", name, recursive)
    log.debug("zfs.snapshot_present::%s::config::properties = %s", name, properties)

    # Normalize property values to their zfs representations.
    if properties:
        properties = __utils__["zfs.from_auto_dict"](properties)

    if not __utils__["zfs.is_snapshot"](name):
        ret["result"] = False
        ret["comment"] = "invalid snapshot name: {}".format(name)
        return ret

    # An existing snapshot is left alone; properties are not reconciled.
    if __salt__["zfs.exists"](name, **{"type": "snapshot"}):
        ret["comment"] = "snapshot is present"
        return ret

    # Create the snapshot (or pretend to under test=True).
    if __opts__["test"]:
        mod_res = OrderedDict([("snapshotted", True)])
    else:
        mod_res = __salt__["zfs.snapshot"](
            name, **{"recursive": recursive, "properties": properties}
        )
    ret["result"] = mod_res["snapshotted"]
    if ret["result"]:
        ret["changes"][name] = properties if properties else "snapshotted"
        ret["comment"] = "snapshot {} was created".format(name)
    else:
        ret["comment"] = "failed to create snapshot {}".format(name)
        if "error" in mod_res:
            ret["comment"] = mod_res["error"]
    return ret
def promoted(name):
    """
    ensure a dataset is not a clone

    name : string
        name of fileset or volume

    .. warning::

        only one dataset can be the origin,
        if you promote a clone the original will now point to the promoted dataset

    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    if not __utils__["zfs.is_dataset"](name):
        ret["result"] = False
        ret["comment"] = "invalid dataset name: {}".format(name)
        return ret

    if not __salt__["zfs.exists"](name, **{"type": "filesystem,volume"}):
        ret["result"] = False
        ret["comment"] = "dataset {} does not exist".format(name)
        return ret

    # A dataset whose origin property is "-" is not a clone: already done.
    origin = __salt__["zfs.get"](
        name, **{"properties": "origin", "fields": "value", "parsable": True}
    )[name]["origin"]["value"]
    if origin == "-":
        ret["comment"] = "{} already promoted".format(name)
        return ret

    # Promote the clone (or pretend to under test=True).
    if __opts__["test"]:
        mod_res = OrderedDict([("promoted", True)])
    else:
        mod_res = __salt__["zfs.promote"](name)
    ret["result"] = mod_res["promoted"]
    if ret["result"]:
        ret["changes"][name] = "promoted"
        ret["comment"] = "{} promoted".format(name)
    else:
        ret["comment"] = "failed to promote {}".format(name)
        if "error" in mod_res:
            ret["comment"] = mod_res["error"]
    return ret
def _schedule_snapshot_retrieve(dataset, prefix, snapshots):
    """
    Update snapshots dict with current snapshots

    dataset: string
        name of filesystem or volume
    prefix : string
        prefix for the snapshots
        e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm'
    snapshots : OrderedDict
        preseeded OrderedDict with configuration
    """
    ## NOTE: retrieve all direct (depth=1) snapshots for the dataset,
    ##       sorted so that older snapshots come first
    for snap in sorted(
        __salt__["zfs.list"](
            dataset, **{"recursive": True, "depth": 1, "type": "snapshot"}
        ).keys()
    ):
        ## NOTE: we only want the actual snapshot name
        ## myzpool/data@zbck-20171201_000248 -> zbck-20171201_000248
        snap_name = snap[snap.index("@") + 1 :]

        ## NOTE: skip snapshots that do not match our prefix
        if not snap_name.startswith("{}-".format(prefix)):
            continue

        ## NOTE: retrieve the holds for this snapshot
        snap_holds = __salt__["zfs.holds"](snap)

        ## NOTE: a snapshot without any holds is eligible for pruning
        if not snap_holds:
            snapshots["_prunable"].append(snap)

        ## NOTE: bucket the snapshot under each hold from our schedule;
        ##       holds placed by anything else are skipped
        for hold in snap_holds:
            if hold in snapshots["_schedule"].keys():
                snapshots[hold].append(snap)

    return snapshots
def _schedule_snapshot_prepare(dataset, prefix, snapshots):
    """
    Update snapshots dict with info for a new snapshot

    dataset: string
        name of filesystem or volume
    prefix : string
        prefix for the snapshots
        e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm'
    snapshots : OrderedDict
        preseeded OrderedDict with configuration
    """
    ## NOTE: generate new snapshot name: <dataset>@<prefix>-<timestamp>
    snapshot_create_name = "{dataset}@{prefix}-{timestamp}".format(
        dataset=dataset,
        prefix=prefix,
        timestamp=datetime.now().strftime("%Y%m%d_%H%M%S"),
    )

    ## NOTE: figure out if we need to create the snapshot;
    ##       comparison happens at minute granularity, so seconds are zeroed
    timestamp_now = datetime.now().replace(second=0, microsecond=0)
    snapshots["_create"][snapshot_create_name] = []
    for hold, hold_count in snapshots["_schedule"].items():
        ## NOTE: skip hold if we don't keep snapshots for it
        if hold_count == 0:
            continue

        ## NOTE: figure out if we need the current hold on the new snapshot
        if snapshots[hold]:
            ## NOTE: extract datetime from the newest existing snapshot's name
            timestamp = datetime.strptime(
                snapshots[hold][-1],
                "{}@{}-%Y%m%d_%H%M%S".format(dataset, prefix),
            ).replace(second=0, microsecond=0)

            ## NOTE: compare now vs. the newest snapshot, masking fields below
            ##       the hold's granularity. comp_hour/comp_day/comp_month/
            ##       comp_year are module-level dicts of datetime fields to
            ##       reset — defined elsewhere in this module (not visible
            ##       here); presumably e.g. comp_hour zeroes minute.
            if hold == "minute" and timestamp_now <= timestamp:
                continue
            elif hold == "hour" and timestamp_now.replace(
                **comp_hour
            ) <= timestamp.replace(**comp_hour):
                continue
            elif hold == "day" and timestamp_now.replace(
                **comp_day
            ) <= timestamp.replace(**comp_day):
                continue
            elif hold == "month" and timestamp_now.replace(
                **comp_month
            ) <= timestamp.replace(**comp_month):
                continue
            elif hold == "year" and timestamp_now.replace(
                **comp_year
            ) <= timestamp.replace(**comp_year):
                continue

        ## NOTE: this hold is due on the new snapshot
        snapshots["_create"][snapshot_create_name].append(hold)

    return snapshots
def scheduled_snapshot(name, prefix, recursive=True, schedule=None):
    """
    maintain a set of snapshots based on a schedule

    name : string
        name of filesystem or volume
    prefix : string
        prefix for the snapshots
        e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm'
    recursive : boolean
        create snapshots for all children also
    schedule : dict
        dict holding the schedule, the following keys are available (minute, hour,
        day, month, and year) by default all are set to 0 the value indicated the
        number of snapshots of that type to keep around.

    .. warning::
        snapshots will only be created and pruned every time the state runs.
        a schedule must be setup to automatically run the state. this means that if
        you run the state daily the hourly snapshot will only be made once per day!

    .. versionchanged:: 2018.3.0
        switched to localtime from gmtime so times now take into account timezones.
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    ## initialize defaults
    # FIX: 'hold not in schedule' below raised TypeError for the default None
    schedule = {} if schedule is None else schedule
    schedule_holds = ["minute", "hour", "day", "month", "year"]
    snapshots = OrderedDict(
        [("_create", OrderedDict()), ("_prunable", []), ("_schedule", OrderedDict())]
    )

    ## strict configuration validation
    ## NOTE: we need a valid dataset
    if not __utils__["zfs.is_dataset"](name):
        ret["result"] = False
        ret["comment"] = "invalid dataset name: {}".format(name)

    if not __salt__["zfs.exists"](name, **{"type": "filesystem,volume"}):
        ret["comment"] = "dataset {} does not exist".format(name)
        ret["result"] = False

    ## NOTE: prefix must be 4 or longer
    if not prefix or len(prefix) < 4:
        ret["comment"] = "prefix ({}) must be at least 4 long".format(prefix)
        ret["result"] = False

    ## NOTE: validate schedule; missing holds default to 0 kept snapshots
    total_count = 0
    for hold in schedule_holds:
        snapshots[hold] = []
        if hold not in schedule:
            snapshots["_schedule"][hold] = 0
        elif isinstance(schedule[hold], int):
            snapshots["_schedule"][hold] = schedule[hold]
        else:
            ret["result"] = False
            ret["comment"] = "schedule value for {} is not an integer".format(
                hold,
            )
            break
        total_count += snapshots["_schedule"][hold]
    if ret["result"] and total_count == 0:
        ret["result"] = False
        ret["comment"] = "schedule is not valid, you need to keep atleast 1 snapshot"

    ## NOTE: return if configuration is not valid
    if not ret["result"]:
        return ret

    ## retrieve existing snapshots
    snapshots = _schedule_snapshot_retrieve(name, prefix, snapshots)

    ## prepare snapshot
    snapshots = _schedule_snapshot_prepare(name, prefix, snapshots)

    ## log configuration
    log.debug("zfs.scheduled_snapshot::%s::config::recursive = %s", name, recursive)
    log.debug("zfs.scheduled_snapshot::%s::config::prefix = %s", name, prefix)
    log.debug("zfs.scheduled_snapshot::%s::snapshots = %s", name, snapshots)

    ## create snapshot(s)
    for snapshot_name, snapshot_holds in snapshots["_create"].items():
        ## NOTE: skip if new snapshot has no holds
        if not snapshot_holds:
            continue

        ## NOTE: create snapshot
        if not __opts__["test"]:
            mod_res = __salt__["zfs.snapshot"](
                snapshot_name, **{"recursive": recursive}
            )
        else:
            mod_res = OrderedDict([("snapshotted", True)])

        if not mod_res["snapshotted"]:
            ret["result"] = False
            ret["comment"] = "error creating snapshot ({})".format(snapshot_name)
        else:
            ## NOTE: create holds (if we have a snapshot)
            for hold in snapshot_holds:
                if not __opts__["test"]:
                    mod_res = __salt__["zfs.hold"](
                        hold, snapshot_name, **{"recursive": recursive}
                    )
                else:
                    mod_res = OrderedDict([("held", True)])

                if not mod_res["held"]:
                    ret["result"] = False
                    ret["comment"] = "error adding hold ({}) to snapshot ({})".format(
                        hold,
                        snapshot_name,
                    )
                    break

                snapshots[hold].append(snapshot_name)

            if ret["result"]:
                ret["comment"] = "scheduled snapshots updated"
                if "created" not in ret["changes"]:
                    ret["changes"]["created"] = []
                ret["changes"]["created"].append(snapshot_name)

    ## prune hold(s)
    for hold, hold_count in snapshots["_schedule"].items():
        while ret["result"] and len(snapshots[hold]) > hold_count:
            ## NOTE: pop oldest snapshot
            snapshot_name = snapshots[hold].pop(0)

            ## NOTE: release hold for snapshot
            if not __opts__["test"]:
                mod_res = __salt__["zfs.release"](
                    hold, snapshot_name, **{"recursive": recursive}
                )
            else:
                mod_res = OrderedDict([("released", True)])

            if not mod_res["released"]:
                ret["result"] = False
                # FIX: message previously claimed we failed *adding* a hold
                ret["comment"] = "error releasing hold ({}) from snapshot ({})".format(
                    hold,
                    snapshot_name,
                )

            ## NOTE: once no holds remain the snapshot becomes prunable
            if not __salt__["zfs.holds"](snapshot_name):
                snapshots["_prunable"].append(snapshot_name)

    ## prune snapshot(s)
    for snapshot_name in snapshots["_prunable"]:
        ## NOTE: destroy snapshot
        if not __opts__["test"]:
            mod_res = __salt__["zfs.destroy"](snapshot_name, **{"recursive": recursive})
        else:
            mod_res = OrderedDict([("destroyed", True)])

        if not mod_res["destroyed"]:
            ret["result"] = False
            # FIX: "({1})" with a single positional argument raised IndexError
            ret["comment"] = "error pruning snapshot ({})".format(
                snapshot_name,
            )
            break

    if ret["result"] and snapshots["_prunable"]:
        ret["comment"] = "scheduled snapshots updated"
        ret["changes"]["pruned"] = snapshots["_prunable"]

    if ret["result"] and not ret["changes"]:
        ret["comment"] = "scheduled snapshots are up to date"

    return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
import logging
import salt.utils.data
import salt.utils.platform
log = logging.getLogger(__name__)
__virtualname__ = "powercfg"
def __virtual__():
    """
    Only work on Windows
    """
    if salt.utils.platform.is_windows():
        return __virtualname__
    return False, "PowerCFG: Module only works on Windows"
def set_timeout(name, value, power="ac", scheme=None):
    """
    Set the sleep timeouts of specific items such as disk, monitor, etc.

    Args:

        name (str):
            The setting to change, can be one of the following:

            - ``monitor``
            - ``disk``
            - ``standby``
            - ``hibernate``

        value (int):
            The amount of time in minutes before the item will timeout

        power (str):
            Set the value for AC or DC power. Default is ``ac``. Valid options
            are:

            - ``ac`` (AC Power)
            - ``dc`` (Battery)

        scheme (str):
            The scheme to use, leave as ``None`` to use the current. Default is
            ``None``. This can be the GUID or the Alias for the Scheme. Known
            Aliases are:

            - ``SCHEME_BALANCED`` - Balanced
            - ``SCHEME_MAX`` - Power saver
            - ``SCHEME_MIN`` - High performance

    CLI Example:

    .. code-block:: yaml

        # Set monitor timeout to 30 minutes on Battery
        monitor:
          powercfg.set_timeout:
            - value: 30
            - power: dc

        # Set disk timeout to 10 minutes on AC Power
        disk:
          powercfg.set_timeout:
            - value: 10
            - power: ac
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    # Validate name values
    name = name.lower()
    if name not in ["monitor", "disk", "standby", "hibernate"]:
        ret["result"] = False
        ret["comment"] = '"{}" is not a valid setting'.format(name)
        log.debug(ret["comment"])
        return ret

    # Validate power values
    power = power.lower()
    if power not in ["ac", "dc"]:
        ret["result"] = False
        ret["comment"] = '"{}" is not a power type'.format(power)
        log.debug(ret["comment"])
        return ret

    # Get current settings; keys are power types ("ac"/"dc")
    old = __salt__["powercfg.get_{}_timeout".format(name)](scheme=scheme)

    # Already at the desired value -> nothing to do
    if old[power] == value:
        ret["comment"] = "{} timeout on {} power is already set to {}".format(
            name.capitalize(), power.upper(), value
        )
        return ret
    else:
        ret["comment"] = "{} timeout on {} power will be set to {}".format(
            name.capitalize(), power.upper(), value
        )

    # Check for test=True
    if __opts__["test"]:
        ret["result"] = None
        return ret

    # Set the timeout value
    __salt__["powercfg.set_{}_timeout".format(name)](
        timeout=value, power=power, scheme=scheme
    )

    # Re-read the setting to verify the change actually took effect
    new = __salt__["powercfg.get_{}_timeout".format(name)](scheme=scheme)

    changes = salt.utils.data.compare_dicts(old, new)
    if changes:
        ret["changes"] = {name: changes}
        ret["comment"] = "{} timeout on {} power set to {}".format(
            name.capitalize(), power.upper(), value
        )
        log.debug(ret["comment"])
    else:
        ret["changes"] = {}
        ret["comment"] = "Failed to set {} timeout on {} power to {}".format(
            name, power.upper(), value
        )
        log.debug(ret["comment"])
        ret["result"] = False

    return ret
def __virtual__():
    """
    Only load if the postgres module is present
    """
    if "postgres.privileges_grant" not in __salt__:
        return (
            False,
            "Unable to load postgres module. Make sure `postgres.bins_dir` is set.",
        )
    return True
def present(
    name,
    object_name,
    object_type,
    privileges=None,
    grant_option=None,
    prepend="public",
    maintenance_db=None,
    user=None,
    db_password=None,
    db_host=None,
    db_port=None,
    db_user=None,
):
    """
    Grant the requested privilege(s) on the specified object to a role

    name
        Name of the role to which privileges should be granted

    object_name
        Name of the object on which the grant is to be performed.
        'ALL' may be used for objects of type 'table' or 'sequence'.

    object_type
        The object type, which can be one of the following:

        - table
        - sequence
        - schema
        - tablespace
        - language
        - database
        - group
        - function

        View permissions should specify `object_type: table`.

    privileges
        List of privileges to grant, from the list below:

        - INSERT
        - CREATE
        - TRUNCATE
        - CONNECT
        - TRIGGER
        - SELECT
        - USAGE
        - TEMPORARY
        - UPDATE
        - EXECUTE
        - REFERENCES
        - DELETE
        - ALL

        :note: privileges should not be set when granting group membership

    grant_option
        If grant_option is set to True, the recipient of the privilege can
        in turn grant it to others

    prepend
        Table and Sequence object types live under a schema so this should be
        provided if the object is not under the default `public` schema

    maintenance_db
        The name of the database in which the language is to be installed

    user
        System user all operations should be performed on behalf of

    db_user
        database username if different from config or default

    db_password
        user password if any password for a specified user

    db_host
        Database host if different from config or default

    db_port
        Database port if different from config or default
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "The requested privilege(s) are already set",
    }

    # The postgres execution module expects a comma-separated string
    privileges = ",".join(privileges) if privileges else None

    # Map state arguments onto the postgres module's keyword names
    kwargs = {
        "privileges": privileges,
        "grant_option": grant_option,
        "prepend": prepend,
        "maintenance_db": maintenance_db,
        "runas": user,
        "host": db_host,
        "user": db_user,
        "port": db_port,
        "password": db_password,
    }

    # Only act when the privileges are not already in place
    if not __salt__["postgres.has_privileges"](
        name, object_name, object_type, **kwargs
    ):
        # For group grants the "privilege" shown to the user is the group name
        _privs = object_name if object_type == "group" else privileges
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "The privilege(s): {} are set to be granted to {}".format(
                _privs, name
            )
            return ret

        if __salt__["postgres.privileges_grant"](
            name, object_name, object_type, **kwargs
        ):
            ret["comment"] = "The privilege(s): {} have been granted to {}".format(
                _privs, name
            )
            ret["changes"][name] = "Present"
        else:
            ret["comment"] = "Failed to grant privilege(s): {} to {}".format(
                _privs, name
            )
            ret["result"] = False

    return ret
def absent(
    name,
    object_name,
    object_type,
    privileges=None,
    prepend="public",
    maintenance_db=None,
    user=None,
    db_password=None,
    db_host=None,
    db_port=None,
    db_user=None,
):
    """
    Revoke the requested privilege(s) on the specificed object(s)

    name
        Name of the role whose privileges should be revoked

    object_name
        Name of the object on which the revoke is to be performed

    object_type
        The object type, which can be one of the following:

        - table
        - sequence
        - schema
        - tablespace
        - language
        - database
        - group
        - function

        View permissions should specify `object_type: table`.

    privileges
        Comma separated list of privileges to revoke, from the list below:

        - INSERT
        - CREATE
        - TRUNCATE
        - CONNECT
        - TRIGGER
        - SELECT
        - USAGE
        - TEMPORARY
        - UPDATE
        - EXECUTE
        - REFERENCES
        - DELETE
        - ALL

        :note: privileges should not be set when revoking group membership

    prepend
        Table and Sequence object types live under a schema so this should be
        provided if the object is not under the default `public` schema

    maintenance_db
        The name of the database in which the language is to be installed

    user
        System user all operations should be performed on behalf of

    db_user
        database username if different from config or default

    db_password
        user password if any password for a specified user

    db_host
        Database host if different from config or default

    db_port
        Database port if different from config or default
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "The requested privilege(s) are not set so cannot be revoked",
    }

    # The postgres execution module expects a comma-separated string
    priv_csv = ",".join(privileges) if privileges else None

    # Keyword arguments shared by has_privileges and privileges_revoke
    call_kwargs = {
        "privileges": priv_csv,
        "prepend": prepend,
        "maintenance_db": maintenance_db,
        "runas": user,
        "host": db_host,
        "user": db_user,
        "port": db_port,
        "password": db_password,
    }

    # Nothing to revoke if the privileges are not currently held
    if not __salt__["postgres.has_privileges"](
        name, object_name, object_type, **call_kwargs
    ):
        return ret

    # For group membership the "privilege" reported is the group name
    target = object_name if object_type == "group" else priv_csv

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "The privilege(s): {} are set to be revoked from {}".format(
            target, name
        )
        return ret

    if __salt__["postgres.privileges_revoke"](
        name, object_name, object_type, **call_kwargs
    ):
        ret["comment"] = "The privilege(s): {} have been revoked from {}".format(
            target, name
        )
        ret["changes"][name] = "Absent"
    else:
        ret["comment"] = "Failed to revoke privilege(s): {} from {}".format(
            target, name
        )
        ret["result"] = False

    return ret
def __virtual__():
    """
    Only make these states available if Zabbix module is available.
    """
    if "zabbix.usermacro_create" not in __salt__:
        return (False, "zabbix module could not be loaded")
    return True
def present(name, value, hostid=None, **kwargs):
    """
    Creates a new usermacro.

    :param name: name of the usermacro
    :param value: value of the usermacro
    :param hostid: id's of the hosts to apply the usermacro on, if missing a global usermacro is assumed.
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    .. code-block:: yaml

        override host usermacro:
            zabbix_usermacro.present:
                - name: '{$SNMP_COMMUNITY}'
                - value: 'public'
                - hostid: 21
    """
    # Forward only the explicit Zabbix connection overrides from kwargs
    connection_args = {}
    if "_connection_user" in kwargs:
        connection_args["_connection_user"] = kwargs["_connection_user"]
    if "_connection_password" in kwargs:
        connection_args["_connection_password"] = kwargs["_connection_password"]
    if "_connection_url" in kwargs:
        connection_args["_connection_url"] = kwargs["_connection_url"]

    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # Comment and change messages (host-scoped vs. global macro wording)
    if hostid:
        comment_usermacro_created = "Usermacro {} created on hostid {}.".format(
            name, hostid
        )
        comment_usermacro_updated = "Usermacro {} updated on hostid {}.".format(
            name, hostid
        )
        comment_usermacro_notcreated = (
            "Unable to create usermacro: {} on hostid {}. ".format(name, hostid)
        )
        comment_usermacro_exists = "Usermacro {} already exists on hostid {}.".format(
            name, hostid
        )
        changes_usermacro_created = {
            name: {
                "old": "Usermacro {} does not exist on hostid {}.".format(name, hostid),
                "new": "Usermacro {} created on hostid {}.".format(name, hostid),
            }
        }
    else:
        comment_usermacro_created = "Usermacro {} created.".format(name)
        comment_usermacro_updated = "Usermacro {} updated.".format(name)
        comment_usermacro_notcreated = "Unable to create usermacro: {}. ".format(name)
        comment_usermacro_exists = "Usermacro {} already exists.".format(name)
        changes_usermacro_created = {
            name: {
                "old": "Usermacro {} does not exist.".format(name),
                "new": "Usermacro {} created.".format(name),
            }
        }

    # Zabbix API expects script parameters as a string of arguments separated by newline characters
    # NOTE(review): 'exec_params' is never passed on to any zabbix.usermacro_*
    # call below; this block looks copied from another zabbix state — confirm intent.
    if "exec_params" in kwargs:
        if isinstance(kwargs["exec_params"], list):
            kwargs["exec_params"] = "\n".join(kwargs["exec_params"]) + "\n"
        else:
            kwargs["exec_params"] = str(kwargs["exec_params"]) + "\n"

    # Look the macro up host-scoped or globally, depending on hostid
    if hostid:
        usermacro_exists = __salt__["zabbix.usermacro_get"](
            name, hostids=hostid, **connection_args
        )
    else:
        usermacro_exists = __salt__["zabbix.usermacro_get"](
            name, globalmacro=True, **connection_args
        )

    if usermacro_exists:
        usermacroobj = usermacro_exists[0]
        # Host macros and global macros carry different id field names
        if hostid:
            usermacroid = int(usermacroobj["hostmacroid"])
        else:
            usermacroid = int(usermacroobj["globalmacroid"])
        update_value = False
        # Compare as strings; the API returns values as strings
        if str(value) != usermacroobj["value"]:
            update_value = True

    # Dry run, test=true mode
    if __opts__["test"]:
        if usermacro_exists:
            if update_value:
                ret["result"] = None
                ret["comment"] = comment_usermacro_updated
            else:
                ret["result"] = True
                ret["comment"] = comment_usermacro_exists
        else:
            ret["result"] = None
            ret["comment"] = comment_usermacro_created
        return ret

    error = []

    if usermacro_exists:
        if update_value:
            ret["result"] = True
            ret["comment"] = comment_usermacro_updated

            if hostid:
                updated_value = __salt__["zabbix.usermacro_update"](
                    usermacroid, value=value, **connection_args
                )
            else:
                updated_value = __salt__["zabbix.usermacro_updateglobal"](
                    usermacroid, value=value, **connection_args
                )
            # On success the module returns an int id; anything else is an error dict
            if not isinstance(updated_value, int):
                if "error" in updated_value:
                    error.append(updated_value["error"])
                else:
                    ret["changes"]["value"] = value
        else:
            ret["result"] = True
            ret["comment"] = comment_usermacro_exists
    else:
        if hostid:
            usermacro_create = __salt__["zabbix.usermacro_create"](
                name, value, hostid, **connection_args
            )
        else:
            usermacro_create = __salt__["zabbix.usermacro_createglobal"](
                name, value, **connection_args
            )

        if "error" not in usermacro_create:
            ret["result"] = True
            ret["comment"] = comment_usermacro_created
            ret["changes"] = changes_usermacro_created
        else:
            ret["result"] = False
            ret["comment"] = comment_usermacro_notcreated + str(
                usermacro_create["error"]
            )

    # error detected
    if error:
        ret["changes"] = {}
        ret["result"] = False
        ret["comment"] = str(error)

    return ret
def absent(name, hostid=None, **kwargs):
    """
    Ensures that the usermacro does not exist, eventually deletes the usermacro.

    :param name: name of the usermacro
    :param hostid: id's of the hosts to apply the usermacro on, if missing a global usermacro is assumed.
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    .. code-block:: yaml

        delete_usermacro:
            zabbix_usermacro.absent:
                - name: '{$SNMP_COMMUNITY}'
    """
    # Forward only the explicit Zabbix connection overrides from kwargs
    connection_args = {}
    if "_connection_user" in kwargs:
        connection_args["_connection_user"] = kwargs["_connection_user"]
    if "_connection_password" in kwargs:
        connection_args["_connection_password"] = kwargs["_connection_password"]
    if "_connection_url" in kwargs:
        connection_args["_connection_url"] = kwargs["_connection_url"]

    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # Comment and change messages (host-scoped vs. global macro wording)
    if hostid:
        comment_usermacro_deleted = "Usermacro {} deleted from hostid {}.".format(
            name, hostid
        )
        comment_usermacro_notdeleted = (
            "Unable to delete usermacro: {} from hostid {}.".format(name, hostid)
        )
        comment_usermacro_notexists = (
            "Usermacro {} does not exist on hostid {}.".format(name, hostid)
        )
        changes_usermacro_deleted = {
            name: {
                "old": "Usermacro {} exists on hostid {}.".format(name, hostid),
                "new": "Usermacro {} deleted from {}.".format(name, hostid),
            }
        }
    else:
        comment_usermacro_deleted = "Usermacro {} deleted.".format(name)
        comment_usermacro_notdeleted = "Unable to delete usermacro: {}.".format(name)
        comment_usermacro_notexists = "Usermacro {} does not exist.".format(name)
        changes_usermacro_deleted = {
            name: {
                "old": "Usermacro {} exists.".format(name),
                "new": "Usermacro {} deleted.".format(name),
            }
        }

    if hostid:
        usermacro_exists = __salt__["zabbix.usermacro_get"](
            name, hostids=hostid, **connection_args
        )
    else:
        usermacro_exists = __salt__["zabbix.usermacro_get"](
            name, globalmacro=True, **connection_args
        )

    # Dry run, test=true mode
    if __opts__["test"]:
        if not usermacro_exists:
            ret["result"] = True
            ret["comment"] = comment_usermacro_notexists
        else:
            ret["result"] = None
            ret["comment"] = comment_usermacro_deleted
        return ret

    if not usermacro_exists:
        ret["result"] = True
        ret["comment"] = comment_usermacro_notexists
    else:
        try:
            if hostid:
                usermacroid = usermacro_exists[0]["hostmacroid"]
                usermacro_delete = __salt__["zabbix.usermacro_delete"](
                    usermacroid, **connection_args
                )
            else:
                usermacroid = usermacro_exists[0]["globalmacroid"]
                usermacro_delete = __salt__["zabbix.usermacro_deleteglobal"](
                    usermacroid, **connection_args
                )
        except KeyError:
            usermacro_delete = False

        if usermacro_delete and "error" not in usermacro_delete:
            ret["result"] = True
            ret["comment"] = comment_usermacro_deleted
            ret["changes"] = changes_usermacro_deleted
        else:
            ret["result"] = False
            # FIX: usermacro_delete can be False (KeyError fallback above);
            # the old code then crashed on usermacro_delete["error"].
            if isinstance(usermacro_delete, dict) and "error" in usermacro_delete:
                ret["comment"] = comment_usermacro_notdeleted + str(
                    usermacro_delete["error"]
                )
            else:
                ret["comment"] = comment_usermacro_notdeleted

    return ret
from salt.exceptions import CommandExecutionError, CommandNotFoundError
def __virtual__():
    """
    Only load if the bower module is available in __salt__
    """
    if "bower.list" not in __salt__:
        return (False, "bower module could not be loaded")
    return "bower"
def installed(name, dir, pkgs=None, user=None, env=None):
    """
    Verify that the given package is installed and is at the correct version
    (if specified).

    .. code-block:: yaml

        underscore:
          bower.installed:
            - dir: /path/to/project
            - user: someuser

        jquery#2.0:
          bower.installed:
            - dir: /path/to/project

    name
        The package to install
    dir
        The target directory in which to install the package
    pkgs
        A list of packages to install with a single Bower invocation;
        specifying this argument will ignore the ``name`` argument
    user
        The user to run Bower with
    env
        A list of environment variables to be set prior to execution. The
        format is the same as the :py:func:`cmd.run <salt.states.cmd.run>`.
        state function.
    """
    ret = {"name": name, "result": None, "comment": "", "changes": {}}

    pkg_list = pkgs if pkgs is not None else [name]

    try:
        installed_pkgs = __salt__["bower.list"](dir=dir, runas=user, env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret["result"] = False
        ret["comment"] = "Error looking up '{}': {}".format(name, err)
        return ret

    pkgs_satisfied = []
    pkgs_to_install = []
    for pkg in pkg_list:
        # Requests may carry an explicit version: 'jquery#2.0'
        pkg_name, _, pkg_ver = pkg.partition("#")
        pkg_name = pkg_name.strip()

        if pkg_name not in installed_pkgs:
            pkgs_to_install.append(pkg)
            continue

        installed_pkg = installed_pkgs[pkg_name]
        installed_pkg_ver = installed_pkg.get("pkgMeta").get("version")
        installed_name_ver = "{}#{}".format(pkg_name, installed_pkg_ver)

        # If given an explicit version check the installed version matches.
        if pkg_ver and installed_pkg_ver != pkg_ver:
            pkgs_to_install.append(pkg)
        else:
            pkgs_satisfied.append(installed_name_ver)

    if __opts__["test"]:
        ret["result"] = None
        comment_msg = []
        if pkgs_to_install:
            comment_msg.append(
                "Bower package(s) '{}' are set to be installed".format(
                    ", ".join(pkgs_to_install)
                )
            )
            ret["changes"] = {"old": [], "new": pkgs_to_install}
        if pkgs_satisfied:
            comment_msg.append(
                "Package(s) '{}' satisfied by {}".format(
                    ", ".join(pkg_list), ", ".join(pkgs_satisfied)
                )
            )
        ret["comment"] = ". ".join(comment_msg)
        return ret

    if not pkgs_to_install:
        ret["result"] = True
        ret["comment"] = "Package(s) '{}' satisfied by {}".format(
            ", ".join(pkg_list), ", ".join(pkgs_satisfied)
        )
        return ret

    try:
        cmd_args = {
            "pkg": None,
            "dir": dir,
            "pkgs": None,
            "runas": user,
            "env": env,
        }
        if pkgs is not None:
            cmd_args["pkgs"] = pkgs
        else:
            # FIX: pass the full requested spec (keeps any '#version' suffix)
            # instead of the version-stripped variable leaked from the loop above
            cmd_args["pkg"] = name
        call = __salt__["bower.install"](**cmd_args)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret["result"] = False
        ret["comment"] = "Error installing '{}': {}".format(", ".join(pkg_list), err)
        return ret

    if call:
        ret["result"] = True
        ret["changes"] = {"old": [], "new": pkgs_to_install}
        ret["comment"] = "Package(s) '{}' successfully installed".format(
            ", ".join(pkgs_to_install)
        )
    else:
        ret["result"] = False
        ret["comment"] = "Could not install package(s) '{}'".format(", ".join(pkg_list))

    return ret
def removed(name, dir, user=None):
    """
    Verify that the given package is not installed.

    dir
        The target directory in which to install the package
    user
        The user to run Bower with
    """
    ret = {"name": name, "result": None, "comment": "", "changes": {}}

    # A failed lookup is a hard error
    try:
        current = __salt__["bower.list"](dir=dir, runas=user)
    except (CommandExecutionError, CommandNotFoundError) as err:
        ret["result"] = False
        ret["comment"] = "Error removing '{}': {}".format(name, err)
        return ret

    if name not in current:
        ret["result"] = True
        ret["comment"] = "Package '{}' is not installed".format(name)
        return ret

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Package '{}' is set to be removed".format(name)
        return ret

    try:
        uninstalled = __salt__["bower.uninstall"](pkg=name, dir=dir, runas=user)
    except (CommandExecutionError, CommandNotFoundError) as err:
        ret["result"] = False
        ret["comment"] = "Error removing '{}': {}".format(name, err)
        return ret

    if uninstalled:
        ret["result"] = True
        ret["changes"] = {name: "Removed"}
        ret["comment"] = "Package '{}' was successfully removed".format(name)
    else:
        ret["result"] = False
        ret["comment"] = "Error removing '{}'".format(name)

    return ret
def bootstrap(name, user=None):
    """
    Bootstraps a frontend distribution.

    Will execute 'bower install' on the specified directory.

    user
        The user to run Bower with
    """
    ret = {"name": name, "result": None, "comment": "", "changes": {}}

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Directory '{}' is set to be bootstrapped".format(name)
        return ret

    try:
        install_res = __salt__["bower.install"](pkg=None, dir=name, runas=user)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret["result"] = False
        ret["comment"] = "Error bootstrapping '{}': {}".format(name, err)
        return ret

    # A falsy result means bower had nothing to install
    ret["result"] = True
    if install_res:
        ret["changes"] = {name: "Bootstrapped"}
        ret["comment"] = "Directory was successfully bootstrapped"
    else:
        ret["comment"] = "Directory is already bootstrapped"

    return ret
def pruned(name, user=None, env=None):
    """
    .. versionadded:: 2017.7.0

    Cleans up local bower_components directory.

    Will execute 'bower prune' on the specified directory (param: name)

    user
        The user to run Bower with
    """
    ret = {"name": name, "result": None, "comment": "", "changes": {}}

    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Directory '{}' is set to be pruned".format(name)
        return ret

    try:
        pruned_pkgs = __salt__["bower.prune"](dir=name, runas=user, env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret["result"] = False
        ret["comment"] = "Error pruning '{}': {}".format(name, err)
        return ret

    ret["result"] = True
    if pruned_pkgs:
        ret["comment"] = "Directory '{}' was successfully pruned".format(name)
        ret["changes"] = {"old": [], "new": pruned_pkgs}
    else:
        ret["comment"] = "No packages were pruned from directory '{}'".format(name)

    return ret
import logging
__virtualname__ = "nxos"
__virtual_aliases__ = ("nxos_upgrade",)
log = logging.getLogger(__name__)
def __virtual__():
    # Always available; returns the virtual name ("nxos") under which
    # these states are loaded (aliased as nxos_upgrade as well).
    return __virtualname__
def image_running(name, system_image, kickstart_image=None, issu=True, **kwargs):
    """
    Ensure the NX-OS system image is running on the device.

    name
        Name of the salt state task

    system_image
        Name of the system image file on bootflash:

    kickstart_image
        Name of the kickstart image file on bootflash:
        This is not needed if the system_image is a combined system and
        kickstart image
        Default: None

    issu
        Ensure the correct system is running on the device using an in service
        software upgrade, or force a disruptive upgrade by setting the option
        to False.
        Default: False

    timeout
        Timeout in seconds for long running 'install all' upgrade command.
        Default: 900

    Examples:

    .. code-block:: yaml

        upgrade_software_image_n9k:
          nxos.image_running:
            - name: Ensure nxos.7.0.3.I7.5a.bin is running
            - system_image: nxos.7.0.3.I7.5a.bin
            - issu: True

        upgrade_software_image_n7k:
          nxos.image_running:
            - name: Ensure n7000-s2-kickstart.8.0.1.bin is running
            - kickstart_image: n7000-s2-kickstart.8.0.1.bin
            - system_image: n7000-s2-dk9.8.0.1.bin
            - issu: False
    """
    ret = {"name": name, "result": False, "changes": {}, "comment": ""}

    # Only pass kickstart_image along when one was supplied
    upgrade_args = {"system_image": system_image, "issu": issu}
    if kickstart_image is not None:
        upgrade_args["kickstart_image"] = kickstart_image
    upgrade = __salt__["nxos.upgrade"](**upgrade_args, **kwargs)

    if upgrade["upgrade_in_progress"]:
        ret["result"] = upgrade["upgrade_in_progress"]
        ret["changes"] = upgrade["module_data"]
        ret["comment"] = "NX-OS Device Now Being Upgraded - See Change Details Below"
    elif upgrade["succeeded"]:
        ret["result"] = upgrade["succeeded"]
        ret["comment"] = "NX-OS Device Running Image: {}".format(_version_info())
    else:
        ret["comment"] = "Upgrade Failed: {}.".format(upgrade["error_data"])

    return ret
def _version_info():
    """
    Helper method to return the version of the image the device is running.

    Combined-image platforms report the version under the ``NXOS`` grain key,
    while split kickstart/system platforms report it under ``kickstart``.
    """
    # Hoist the repeated grains lookup.
    software = __grains__["nxos"]["software"]
    if "NXOS" in software:
        return software["NXOS"]
    elif "kickstart" in software:
        return software["kickstart"]
    else:
        # Fixed typo in user-visible fallback message ("sofware" -> "software").
        return "Unable to detect software version"
from salt.utils.dictdiffer import deep_diff
def __virtual__():
    """Only load if grafana4 module is available"""
    if "grafana4.get_datasource" not in __salt__:
        return (False, "grafana4 module could not be loaded")
    return True
def present(
    name,
    type,
    url,
    access=None,
    user=None,
    password=None,
    database=None,
    basic_auth=None,
    basic_auth_user=None,
    basic_auth_password=None,
    tls_auth=None,
    json_data=None,
    is_default=None,
    with_credentials=None,
    type_logo_url=None,
    orgname=None,
    profile="grafana",
):
    """
    Ensure that a data source is present.
    name
        Name of the data source.
    type
        Type of the datasource ('graphite', 'influxdb' etc.).
    access
        Use proxy or direct. Default: proxy
    url
        The URL to the data source API.
    user
        Optional - user to authenticate with the data source.
    password
        Optional - password to authenticate with the data source.
    database
        Optional - database to use with the data source.
    basic_auth
        Optional - set to True to use HTTP basic auth to authenticate with the
        data source.
    basic_auth_user
        Optional - HTTP basic auth username.
    basic_auth_password
        Optional - HTTP basic auth password.
    tls_auth
        Optional - TLS authentication configuration, passed to Grafana as
        ``tlsAuth``.
    json_data
        Optional - additional json data to post (eg. "timeInterval").
    is_default
        Optional - set data source as default.
    with_credentials
        Optional - Whether credentials such as cookies or auth headers should
        be sent with cross-site requests.
    type_logo_url
        Optional - Logo to use for this datasource.
    orgname
        Name of the organization in which the data source should be present.
    profile
        Configuration profile used to connect to the Grafana instance.
        Default is 'grafana'.
    """
    # A string profile names a config option holding the connection dict.
    if isinstance(profile, str):
        profile = __salt__["config.option"](profile)
    ret = {"name": name, "result": None, "comment": None, "changes": {}}
    datasource = __salt__["grafana4.get_datasource"](name, orgname, profile)
    # Merge explicit arguments with the existing definition: arguments left
    # as None fall back to whatever Grafana currently stores (see
    # _get_json_data).
    data = _get_json_data(
        name=name,
        type=type,
        url=url,
        access=access,
        user=user,
        password=password,
        database=database,
        basicAuth=basic_auth,
        basicAuthUser=basic_auth_user,
        basicAuthPassword=basic_auth_password,
        tlsAuth=tls_auth,
        jsonData=json_data,
        isDefault=is_default,
        withCredentials=with_credentials,
        typeLogoUrl=type_logo_url,
        defaults=datasource,
    )
    # Datasource does not exist yet: create it (or just report in test mode).
    if not datasource:
        if __opts__["test"]:
            ret["comment"] = "Datasource {} will be created".format(name)
            return ret
        __salt__["grafana4.create_datasource"](profile=profile, **data)
        datasource = __salt__["grafana4.get_datasource"](name, profile=profile)
        ret["result"] = True
        ret["comment"] = "New data source {} added".format(name)
        ret["changes"] = data
        return ret
    # At this stage, the datasource exists; however, the object provided by
    # Grafana may lack some null keys compared to our "data" dict:
    for key in data:
        if key not in datasource:
            datasource[key] = None
    if data == datasource:
        ret["comment"] = "Data source {} already up-to-date".format(name)
        return ret
    if __opts__["test"]:
        ret["comment"] = "Datasource {} will be updated".format(name)
        return ret
    __salt__["grafana4.update_datasource"](datasource["id"], profile=profile, **data)
    ret["result"] = True
    # id/orgId/readOnly are server-managed fields; keep them out of the diff.
    ret["changes"] = deep_diff(datasource, data, ignore=["id", "orgId", "readOnly"])
    ret["comment"] = "Data source {} updated".format(name)
    return ret
def absent(name, orgname=None, profile="grafana"):
    """
    Ensure that a data source is absent.

    name
        Name of the data source to remove.

    orgname
        Name of the organization from which the data source should be absent.

    profile
        Configuration profile used to connect to the Grafana instance.
        Default is 'grafana'.
    """
    if isinstance(profile, str):
        profile = __salt__["config.option"](profile)

    ret = {"name": name, "result": None, "comment": None, "changes": {}}
    existing = __salt__["grafana4.get_datasource"](name, orgname, profile)

    # Nothing to do when the datasource is already gone.
    if not existing:
        ret["result"] = True
        ret["comment"] = "Data source {} already absent".format(name)
        return ret

    if __opts__["test"]:
        ret["comment"] = "Datasource {} will be deleted".format(name)
        return ret

    __salt__["grafana4.delete_datasource"](existing["id"], profile=profile)
    ret["result"] = True
    ret["changes"][name] = "Absent"
    ret["comment"] = "Data source {} was deleted".format(name)
    return ret
def _get_json_data(defaults=None, **kwargs):
if defaults is None:
defaults = {}
for k, v in kwargs.items():
if v is None:
kwargs[k] = defaults.get(k)
return kwargs | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/states/grafana4_datasource.py | 0.606265 | 0.275714 | grafana4_datasource.py | pypi |
def __virtual__():
"""
Check if we have peer access ?
"""
return True
def _send_command(cmd, worker, lbn, target, profile="default", tgt_type="glob"):
    """
    Publish a modjk command to the load balancer minions.

    The minion needs to be able to publish the commands to the load balancer.

    cmd:
        worker_stop - won't get any traffic from the lbn
        worker_activate - activate the worker
        worker_disable - will get traffic only for current sessions

    Returns a dict with:
        code    - True only when every answering minion returned success
        msg     - human readable status message
        minions - the answering minions (or only the failing ones on error)
    """
    ret = {
        "code": False,
        "msg": "OK",
        "minions": [],
    }
    # Send the command to target
    func = "modjk.{}".format(cmd)
    args = [worker, lbn, profile]
    response = __salt__["publish.publish"](target, func, args, tgt_type)
    # Collect errors and the list of affected minions
    errors = []
    minions = []
    for minion in response:
        minions.append(minion)
        if not response[minion]:
            errors.append(minion)
    # Parse the response
    if not response:
        ret["msg"] = "no servers answered the published command {}".format(cmd)
        return ret
    elif errors:
        ret["msg"] = "the following minions return False"
        ret["minions"] = errors
        return ret
    else:
        ret["code"] = True
        # Fixed typo in user-visible message ("commad" -> "command").
        ret["msg"] = "the command was published successfully"
        ret["minions"] = minions
        return ret
def _worker_status(target, worker, activation, profile="default", tgt_type="glob"):
    """
    Check whether *worker* is in the *activation* state on the targeted load
    balancers.

    Returns a dictionary with:

    result - False if no server returned from the published command
    errors - list of servers that couldn't find the worker
    wrong_state - list of servers where the worker was in the wrong state
    (not activation)
    """
    ret = {
        "result": True,
        "errors": [],
        "wrong_state": [],
    }

    args = [worker, profile]
    status = __salt__["publish.publish"](target, "modjk.worker_status", args, tgt_type)

    # Nobody answered the publish call at all.
    if not status:
        ret["result"] = False
        return ret

    # Sort each balancer into "could not find the worker" vs "wrong state".
    for balancer, worker_info in status.items():
        if not worker_info:
            ret["errors"].append(balancer)
        elif worker_info["activation"] != activation:
            ret["wrong_state"].append(balancer)
    return ret
def _talk2modjk(name, lbn, target, action, profile="default", tgt_type="glob"):
    """
    Common implementation behind the stop/disable/activate states.
    """
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}
    action_map = {
        "worker_stop": "STP",
        "worker_disable": "DIS",
        "worker_activate": "ACT",
    }

    # Find out which balancers (if any) still need the action applied.
    worker_state = _worker_status(target, name, action_map[action], profile, tgt_type)
    if not worker_state["result"]:
        ret["result"] = False
        ret["comment"] = "no servers answered the published command modjk.worker_status"
        return ret
    if worker_state["errors"]:
        ret["result"] = False
        ret[
            "comment"
        ] = "the following balancers could not find the worker {}: {}".format(
            name, worker_state["errors"]
        )
        return ret

    wrong_state = worker_state["wrong_state"]
    if not wrong_state:
        ret[
            "comment"
        ] = "the worker is in the desired activation state on all the balancers"
        return ret

    ret["comment"] = "the action {} will be sent to the balancers {}".format(
        action, wrong_state
    )
    ret["changes"] = {action: wrong_state}
    if __opts__["test"]:
        ret["result"] = None
        return ret

    # Publish the action to the balancers that are in the wrong state.
    response = _send_command(action, name, lbn, target, profile, tgt_type)
    ret["comment"] = response["msg"]
    ret["result"] = response["code"]
    return ret
def stop(name, lbn, target, profile="default", tgt_type="glob"):
    """
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Stop the named worker from the lbn load balancers at the targeted minions
    The worker won't get any traffic from the lbn

    Example:

    .. code-block:: yaml

        disable-before-deploy:
          modjk_worker.stop:
            - name: {{ grains['id'] }}
            - lbn: application
            - target: 'roles:balancer'
            - tgt_type: grain
    """
    action = "worker_stop"
    return _talk2modjk(name, lbn, target, action, profile, tgt_type)
def activate(name, lbn, target, profile="default", tgt_type="glob"):
    """
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Activate the named worker from the lbn load balancers at the targeted
    minions

    Example:

    .. code-block:: yaml

        disable-before-deploy:
          modjk_worker.activate:
            - name: {{ grains['id'] }}
            - lbn: application
            - target: 'roles:balancer'
            - tgt_type: grain
    """
    action = "worker_activate"
    return _talk2modjk(name, lbn, target, action, profile, tgt_type)
def disable(name, lbn, target, profile="default", tgt_type="glob"):
    """
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Disable the named worker from the lbn load balancers at the targeted
    minions. The worker will get traffic only for current sessions and won't
    get new ones.

    Example:

    .. code-block:: yaml

        disable-before-deploy:
          modjk_worker.disable:
            - name: {{ grains['id'] }}
            - lbn: application
            - target: 'roles:balancer'
            - tgt_type: grain
    """
    action = "worker_disable"
    return _talk2modjk(name, lbn, target, action, profile, tgt_type)
import logging
import salt.utils.path
from salt.exceptions import CommandExecutionError
from salt.output import nested
log = logging.getLogger(__name__)
class ForwardingMapping:
    """
    Represents a port forwarding statement mapping a local port to a remote
    port for a specific protocol (TCP or UDP)
    """

    def __init__(self, srcport, destport, protocol, destaddr):
        self.srcport = srcport
        self.destport = destport
        self.protocol = protocol
        self.destaddr = destaddr

    def _key(self):
        # Tuple of the attributes that define this mapping's identity.
        return (self.srcport, self.destport, self.protocol, self.destaddr)

    def __eq__(self, other):
        # Return NotImplemented for foreign types instead of raising
        # AttributeError (the previous behavior when compared against e.g.
        # an int or a string).
        if not isinstance(other, ForwardingMapping):
            return NotImplemented
        return self._key() == other._key()

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    # hash is needed for set operations
    def __hash__(self):
        return (
            hash(self.srcport)
            ^ hash(self.destport)
            ^ hash(self.protocol)
            ^ hash(self.destaddr)
        )

    def todict(self):
        """
        Returns a pretty dictionary meant for command line output.
        """
        return {
            "Source port": self.srcport,
            "Destination port": self.destport,
            "Protocol": self.protocol,
            "Destination address": self.destaddr,
        }
def _parse_forward(mapping):
    """
    Parses a port forwarding statement in the form used by this state:

        from_port:to_port:protocol[:destination]

    and returns a ForwardingMapping object
    """
    # Split on at most three colons so a destination address that itself
    # contains colons (an IPv6 address) stays intact instead of raising a
    # ValueError during tuple unpacking.
    parts = mapping.split(":", 3)
    if len(parts) == 4:
        (srcport, destport, protocol, destaddr) = parts
    else:
        (srcport, destport, protocol) = parts
        destaddr = ""
    return ForwardingMapping(srcport, destport, protocol, destaddr)
def __virtual__():
    """
    Ensure the firewall-cmd is available
    """
    if not salt.utils.path.which("firewall-cmd"):
        return (
            False,
            "firewall-cmd is not available, firewalld is probably not installed.",
        )
    return True
def present(
    name,
    block_icmp=None,
    prune_block_icmp=False,
    default=None,
    masquerade=False,
    ports=None,
    prune_ports=False,
    port_fwd=None,
    prune_port_fwd=False,
    services=None,
    prune_services=False,
    interfaces=None,
    prune_interfaces=False,
    sources=None,
    prune_sources=False,
    rich_rules=None,
    prune_rich_rules=False,
):
    """
    Ensure a zone has specific attributes.
    name
        The zone to modify.
    default : None
        Set this zone as the default zone if ``True``.
    masquerade : False
        Enable or disable masquerade for a zone.
    block_icmp : None
        List of ICMP types to block in the zone.
    prune_block_icmp : False
        If ``True``, remove all but the specified block_icmp from the zone.
    ports : None
        List of ports to add to the zone.
    prune_ports : False
        If ``True``, remove all but the specified ports from the zone.
    port_fwd : None
        List of port forwards to add to the zone.
    prune_port_fwd : False
        If ``True``, remove all but the specified port_fwd from the zone.
    services : None
        List of services to add to the zone.
    prune_services : False
        If ``True``, remove all but the specified services from the zone.
        .. note:: Currently defaults to True for compatibility, but will be changed to False in a future release.
    interfaces : None
        List of interfaces to add to the zone.
    prune_interfaces : False
        If ``True``, remove all but the specified interfaces from the zone.
    sources : None
        List of sources to add to the zone.
    prune_sources : False
        If ``True``, remove all but the specified sources from the zone.
    rich_rules : None
        List of rich rules to add to the zone.
    prune_rich_rules : False
        If ``True``, remove all but the specified rich rules from the zone.
    """
    # All the real work happens in _present; keyword arguments keep the long
    # parameter list explicit at the call site.
    ret = _present(
        name,
        block_icmp=block_icmp,
        prune_block_icmp=prune_block_icmp,
        default=default,
        masquerade=masquerade,
        ports=ports,
        prune_ports=prune_ports,
        port_fwd=port_fwd,
        prune_port_fwd=prune_port_fwd,
        services=services,
        prune_services=prune_services,
        interfaces=interfaces,
        prune_interfaces=prune_interfaces,
        sources=sources,
        prune_sources=prune_sources,
        rich_rules=rich_rules,
        prune_rich_rules=prune_rich_rules,
    )

    # Reload firewalld service on changes
    if ret["changes"]:
        __salt__["firewalld.reload_rules"]()

    return ret
def service(name, ports=None, protocols=None):
    """
    Ensure the service exists and encompasses the specified ports and
    protocols.

    name
        Name of the firewalld service to manage (created if missing).

    ports
        List of ports the service should expose; ports not listed are removed.

    protocols
        List of protocols the service should include; others are removed.

    .. versionadded:: 2016.11.0
    """
    ret = {"name": name, "result": False, "changes": {}, "comment": ""}
    # Create the service definition if it does not exist yet; the reload is
    # deferred until all changes have been applied below.
    if name not in __salt__["firewalld.get_services"]():
        __salt__["firewalld.new_service"](name, restart=False)
    ports = ports or []
    try:
        _current_ports = __salt__["firewalld.get_service_ports"](name)
    except CommandExecutionError as err:
        ret["comment"] = "Error: {}".format(err)
        return ret
    # Reconcile ports: add what is missing, remove what is no longer listed.
    new_ports = set(ports) - set(_current_ports)
    old_ports = set(_current_ports) - set(ports)
    for port in new_ports:
        if not __opts__["test"]:
            try:
                __salt__["firewalld.add_service_port"](name, port)
            except CommandExecutionError as err:
                ret["comment"] = "Error: {}".format(err)
                return ret
    for port in old_ports:
        if not __opts__["test"]:
            try:
                __salt__["firewalld.remove_service_port"](name, port)
            except CommandExecutionError as err:
                ret["comment"] = "Error: {}".format(err)
                return ret
    if new_ports or old_ports:
        ret["changes"].update({"ports": {"old": _current_ports, "new": ports}})
    protocols = protocols or []
    try:
        _current_protocols = __salt__["firewalld.get_service_protocols"](name)
    except CommandExecutionError as err:
        ret["comment"] = "Error: {}".format(err)
        return ret
    # Reconcile protocols the same way as ports.
    new_protocols = set(protocols) - set(_current_protocols)
    old_protocols = set(_current_protocols) - set(protocols)
    for protocol in new_protocols:
        if not __opts__["test"]:
            try:
                __salt__["firewalld.add_service_protocol"](name, protocol)
            except CommandExecutionError as err:
                ret["comment"] = "Error: {}".format(err)
                return ret
    for protocol in old_protocols:
        if not __opts__["test"]:
            try:
                __salt__["firewalld.remove_service_protocol"](name, protocol)
            except CommandExecutionError as err:
                ret["comment"] = "Error: {}".format(err)
                return ret
    if new_protocols or old_protocols:
        ret["changes"].update(
            {"protocols": {"old": _current_protocols, "new": protocols}}
        )
    # Apply the accumulated permanent changes to the running configuration.
    if ret["changes"] != {}:
        __salt__["firewalld.reload_rules"]()
    ret["result"] = True
    if ret["changes"] == {}:
        ret["comment"] = "'{}' is already in the desired state.".format(name)
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Configuration for '{}' will change.".format(name)
        return ret
    ret["comment"] = "'{}' was configured.".format(name)
    return ret
def _present(
    name,
    block_icmp=None,
    prune_block_icmp=False,
    default=None,
    masquerade=False,
    ports=None,
    prune_ports=False,
    port_fwd=None,
    prune_port_fwd=False,
    services=None,
    # TODO: prune_services=False in future release
    # prune_services=False,
    prune_services=None,
    interfaces=None,
    prune_interfaces=False,
    sources=None,
    prune_sources=False,
    rich_rules=None,
    prune_rich_rules=False,
):
    """
    Ensure a zone has specific attributes.

    Backend for :py:func:`present`. All modifications are made against the
    permanent firewalld configuration and collected in ``ret["changes"]``;
    this helper never reloads firewalld itself (the caller does that).

    Each attribute family (ICMP blocks, ports, port forwards, services,
    interfaces, sources, rich rules) follows the same pattern: read the
    current permanent configuration, add the missing entries, optionally
    remove unlisted entries when the matching ``prune_*`` flag is set, and
    record an old/new pair in the changes dict.
    """
    ret = {"name": name, "result": False, "changes": {}, "comment": ""}
    # --- Zone existence ---
    try:
        zones = __salt__["firewalld.get_zones"](permanent=True)
    except CommandExecutionError as err:
        ret["comment"] = "Error: {}".format(err)
        return ret
    if name not in zones:
        if not __opts__["test"]:
            try:
                __salt__["firewalld.new_zone"](name)
            except CommandExecutionError as err:
                ret["comment"] = "Error: {}".format(err)
                return ret
        ret["changes"].update({name: {"old": zones, "new": name}})
    # --- ICMP blocks ---
    if block_icmp or prune_block_icmp:
        block_icmp = block_icmp or []
        new_icmp_types = []
        old_icmp_types = []
        try:
            _current_icmp_blocks = __salt__["firewalld.list_icmp_block"](
                name, permanent=True
            )
        except CommandExecutionError as err:
            ret["comment"] = "Error: {}".format(err)
            return ret
        if block_icmp:
            try:
                _valid_icmp_types = __salt__["firewalld.get_icmp_types"](permanent=True)
            except CommandExecutionError as err:
                ret["comment"] = "Error: {}".format(err)
                return ret
            # log errors for invalid ICMP types in block_icmp input
            for icmp_type in set(block_icmp) - set(_valid_icmp_types):
                log.error("%s is an invalid ICMP type", icmp_type)
                block_icmp.remove(icmp_type)
            new_icmp_types = set(block_icmp) - set(_current_icmp_blocks)
            for icmp_type in new_icmp_types:
                if not __opts__["test"]:
                    try:
                        __salt__["firewalld.block_icmp"](
                            name, icmp_type, permanent=True
                        )
                    except CommandExecutionError as err:
                        ret["comment"] = "Error: {}".format(err)
                        return ret
        if prune_block_icmp:
            old_icmp_types = set(_current_icmp_blocks) - set(block_icmp)
            for icmp_type in old_icmp_types:
                # no need to check against _valid_icmp_types here, because all
                # elements in old_icmp_types are guaranteed to be in
                # _current_icmp_blocks, whose elements are inherently valid
                if not __opts__["test"]:
                    try:
                        __salt__["firewalld.allow_icmp"](
                            name, icmp_type, permanent=True
                        )
                    except CommandExecutionError as err:
                        ret["comment"] = "Error: {}".format(err)
                        return ret
        if new_icmp_types or old_icmp_types:
            # If we're not pruning, include current items in new output so it's clear
            # that they're still present
            if not prune_block_icmp:
                block_icmp = list(new_icmp_types | set(_current_icmp_blocks))
            ret["changes"].update(
                {"icmp_types": {"old": _current_icmp_blocks, "new": block_icmp}}
            )
    # --- Default zone ---
    # that's the only parameter that can't be permanent or runtime, it's
    # directly both
    if default:
        try:
            default_zone = __salt__["firewalld.default_zone"]()
        except CommandExecutionError as err:
            ret["comment"] = "Error: {}".format(err)
            return ret
        if name != default_zone:
            if not __opts__["test"]:
                try:
                    __salt__["firewalld.set_default_zone"](name)
                except CommandExecutionError as err:
                    ret["comment"] = "Error: {}".format(err)
                    return ret
            ret["changes"].update({"default": {"old": default_zone, "new": name}})
    # --- Masquerade (toggled to match the boolean flag) ---
    try:
        masquerade_ret = __salt__["firewalld.get_masquerade"](name, permanent=True)
    except CommandExecutionError as err:
        ret["comment"] = "Error: {}".format(err)
        return ret
    if masquerade and not masquerade_ret:
        if not __opts__["test"]:
            try:
                __salt__["firewalld.add_masquerade"](name, permanent=True)
            except CommandExecutionError as err:
                ret["comment"] = "Error: {}".format(err)
                return ret
        ret["changes"].update(
            {"masquerade": {"old": "", "new": "Masquerading successfully set."}}
        )
    elif not masquerade and masquerade_ret:
        if not __opts__["test"]:
            try:
                __salt__["firewalld.remove_masquerade"](name, permanent=True)
            except CommandExecutionError as err:
                ret["comment"] = "Error: {}".format(err)
                return ret
        ret["changes"].update(
            {"masquerade": {"old": "", "new": "Masquerading successfully disabled."}}
        )
    # --- Ports ---
    if ports or prune_ports:
        ports = ports or []
        try:
            _current_ports = __salt__["firewalld.list_ports"](name, permanent=True)
        except CommandExecutionError as err:
            ret["comment"] = "Error: {}".format(err)
            return ret
        new_ports = set(ports) - set(_current_ports)
        old_ports = []
        for port in new_ports:
            if not __opts__["test"]:
                try:
                    __salt__["firewalld.add_port"](
                        name, port, permanent=True, force_masquerade=False
                    )
                except CommandExecutionError as err:
                    ret["comment"] = "Error: {}".format(err)
                    return ret
        if prune_ports:
            old_ports = set(_current_ports) - set(ports)
            for port in old_ports:
                if not __opts__["test"]:
                    try:
                        __salt__["firewalld.remove_port"](name, port, permanent=True)
                    except CommandExecutionError as err:
                        ret["comment"] = "Error: {}".format(err)
                        return ret
        if new_ports or old_ports:
            # If we're not pruning, include current items in new output so it's clear
            # that they're still present
            if not prune_ports:
                ports = list(new_ports | set(_current_ports))
            ret["changes"].update({"ports": {"old": _current_ports, "new": ports}})
    # --- Port forwards (compared as ForwardingMapping objects) ---
    if port_fwd or prune_port_fwd:
        port_fwd = port_fwd or []
        try:
            _current_port_fwd = __salt__["firewalld.list_port_fwd"](
                name, permanent=True
            )
        except CommandExecutionError as err:
            ret["comment"] = "Error: {}".format(err)
            return ret
        port_fwd = [_parse_forward(fwd) for fwd in port_fwd]
        _current_port_fwd = [
            ForwardingMapping(
                srcport=fwd["Source port"],
                destport=fwd["Destination port"],
                protocol=fwd["Protocol"],
                destaddr=fwd["Destination address"],
            )
            for fwd in _current_port_fwd
        ]
        new_port_fwd = set(port_fwd) - set(_current_port_fwd)
        old_port_fwd = []
        for fwd in new_port_fwd:
            if not __opts__["test"]:
                try:
                    __salt__["firewalld.add_port_fwd"](
                        name,
                        fwd.srcport,
                        fwd.destport,
                        fwd.protocol,
                        fwd.destaddr,
                        permanent=True,
                        force_masquerade=False,
                    )
                except CommandExecutionError as err:
                    ret["comment"] = "Error: {}".format(err)
                    return ret
        if prune_port_fwd:
            old_port_fwd = set(_current_port_fwd) - set(port_fwd)
            for fwd in old_port_fwd:
                if not __opts__["test"]:
                    try:
                        __salt__["firewalld.remove_port_fwd"](
                            name,
                            fwd.srcport,
                            fwd.destport,
                            fwd.protocol,
                            fwd.destaddr,
                            permanent=True,
                        )
                    except CommandExecutionError as err:
                        ret["comment"] = "Error: {}".format(err)
                        return ret
        if new_port_fwd or old_port_fwd:
            # If we're not pruning, include current items in new output so it's clear
            # that they're still present
            if not prune_port_fwd:
                port_fwd = list(new_port_fwd | set(_current_port_fwd))
            ret["changes"].update(
                {
                    "port_fwd": {
                        "old": [fwd.todict() for fwd in _current_port_fwd],
                        "new": [fwd.todict() for fwd in port_fwd],
                    }
                }
            )
    # --- Services ---
    if services or prune_services:
        services = services or []
        try:
            _current_services = __salt__["firewalld.list_services"](
                name, permanent=True
            )
        except CommandExecutionError as err:
            ret["comment"] = "Error: {}".format(err)
            return ret
        new_services = set(services) - set(_current_services)
        old_services = []
        for new_service in new_services:
            if not __opts__["test"]:
                try:
                    __salt__["firewalld.add_service"](new_service, name, permanent=True)
                except CommandExecutionError as err:
                    ret["comment"] = "Error: {}".format(err)
                    return ret
        if prune_services:
            old_services = set(_current_services) - set(services)
            for old_service in old_services:
                if not __opts__["test"]:
                    try:
                        __salt__["firewalld.remove_service"](
                            old_service, name, permanent=True
                        )
                    except CommandExecutionError as err:
                        ret["comment"] = "Error: {}".format(err)
                        return ret
        if new_services or old_services:
            # If we're not pruning, include current items in new output so it's clear
            # that they're still present
            if not prune_services:
                services = list(new_services | set(_current_services))
            ret["changes"].update(
                {"services": {"old": _current_services, "new": services}}
            )
    # --- Interfaces ---
    if interfaces or prune_interfaces:
        interfaces = interfaces or []
        try:
            _current_interfaces = __salt__["firewalld.get_interfaces"](
                name, permanent=True
            )
        except CommandExecutionError as err:
            ret["comment"] = "Error: {}".format(err)
            return ret
        new_interfaces = set(interfaces) - set(_current_interfaces)
        old_interfaces = []
        for interface in new_interfaces:
            if not __opts__["test"]:
                try:
                    __salt__["firewalld.add_interface"](name, interface, permanent=True)
                except CommandExecutionError as err:
                    ret["comment"] = "Error: {}".format(err)
                    return ret
        if prune_interfaces:
            old_interfaces = set(_current_interfaces) - set(interfaces)
            for interface in old_interfaces:
                if not __opts__["test"]:
                    try:
                        __salt__["firewalld.remove_interface"](
                            name, interface, permanent=True
                        )
                    except CommandExecutionError as err:
                        ret["comment"] = "Error: {}".format(err)
                        return ret
        if new_interfaces or old_interfaces:
            # If we're not pruning, include current items in new output so it's clear
            # that they're still present
            if not prune_interfaces:
                interfaces = list(new_interfaces | set(_current_interfaces))
            ret["changes"].update(
                {"interfaces": {"old": _current_interfaces, "new": interfaces}}
            )
    # --- Sources ---
    if sources or prune_sources:
        sources = sources or []
        try:
            _current_sources = __salt__["firewalld.get_sources"](name, permanent=True)
        except CommandExecutionError as err:
            ret["comment"] = "Error: {}".format(err)
            return ret
        new_sources = set(sources) - set(_current_sources)
        old_sources = []
        for source in new_sources:
            if not __opts__["test"]:
                try:
                    __salt__["firewalld.add_source"](name, source, permanent=True)
                except CommandExecutionError as err:
                    ret["comment"] = "Error: {}".format(err)
                    return ret
        if prune_sources:
            old_sources = set(_current_sources) - set(sources)
            for source in old_sources:
                if not __opts__["test"]:
                    try:
                        __salt__["firewalld.remove_source"](
                            name, source, permanent=True
                        )
                    except CommandExecutionError as err:
                        ret["comment"] = "Error: {}".format(err)
                        return ret
        if new_sources or old_sources:
            # If we're not pruning, include current items in new output so it's clear
            # that they're still present
            if not prune_sources:
                sources = list(new_sources | set(_current_sources))
            ret["changes"].update(
                {"sources": {"old": _current_sources, "new": sources}}
            )
    # --- Rich rules ---
    if rich_rules or prune_rich_rules:
        rich_rules = rich_rules or []
        try:
            _current_rich_rules = __salt__["firewalld.get_rich_rules"](
                name, permanent=True
            )
        except CommandExecutionError as err:
            ret["comment"] = "Error: {}".format(err)
            return ret
        new_rich_rules = set(rich_rules) - set(_current_rich_rules)
        old_rich_rules = []
        for rich_rule in new_rich_rules:
            if not __opts__["test"]:
                try:
                    __salt__["firewalld.add_rich_rule"](name, rich_rule, permanent=True)
                except CommandExecutionError as err:
                    ret["comment"] = "Error: {}".format(err)
                    return ret
        if prune_rich_rules:
            old_rich_rules = set(_current_rich_rules) - set(rich_rules)
            for rich_rule in old_rich_rules:
                if not __opts__["test"]:
                    try:
                        __salt__["firewalld.remove_rich_rule"](
                            name, rich_rule, permanent=True
                        )
                    except CommandExecutionError as err:
                        ret["comment"] = "Error: {}".format(err)
                        return ret
        if new_rich_rules or old_rich_rules:
            # If we're not pruning, include current items in new output so it's clear
            # that they're still present
            if not prune_rich_rules:
                rich_rules = list(new_rich_rules | set(_current_rich_rules))
            ret["changes"].update(
                {"rich_rules": {"old": _current_rich_rules, "new": rich_rules}}
            )
    # No changes
    if ret["changes"] == {}:
        ret["result"] = True
        ret["comment"] = "'{}' is already in the desired state.".format(name)
        return ret
    # test=True and changes predicted
    if __opts__["test"]:
        ret["result"] = None
        # build comment string
        nested.__opts__ = __opts__
        comment = []
        comment.append("Configuration for '{}' will change:".format(name))
        comment.append(nested.output(ret["changes"]).rstrip())
        ret["comment"] = "\n".join(comment)
        ret["changes"] = {}
        return ret
    # Changes were made successfully
    ret["result"] = True
    ret["comment"] = "'{}' was configured.".format(name)
    return ret
import pathlib
import re
__virtualname__ = "idem"
def __virtual__():
    """
    Only load when the idem utility module is available on this minion.
    """
    if "idem.hub" not in __utils__:
        return False, "idem is not available"
    return __virtualname__
def _get_refs(sources, tree):
"""
Determine where the sls sources are
"""
sls_sources = []
SLSs = []
if tree:
sls_sources.append("file://{}".format(tree))
for sls in sources:
path = pathlib.Path(sls)
if path.is_file():
ref = str(path.stem if path.suffix == ".sls" else path.name)
SLSs.append(ref)
implied = "file://{}".format(path.parent)
if implied not in sls_sources:
sls_sources.append(implied)
else:
SLSs.append(sls)
return sls_sources, SLSs
def _get_low_data(low_data):
"""
Get salt-style low data from an idem state name
"""
# state_|-id_|-name_|-function
match = re.match(r"(.+)_\|-(.+)_\|-(.+)_\|-(.+)", low_data)
return {
"state": match.group(1),
"__id__": match.group(2),
"name": match.group(3),
"fun": match.group(4),
}
def state(
    name,
    sls,
    acct_file=None,
    acct_key=None,
    acct_profile=None,
    cache_dir=None,
    render=None,
    runtime=None,
    source_dir=None,
    test=False,
):
    """
    Execute an idem sls file through a salt state

    sls
        A list of idem sls files or sources

    acct_file
        Path to the acct file used in generating idem ctx parameters.
        Defaults to the value in the ACCT_FILE environment variable.

    acct_key
        Key used to decrypt the acct file.
        Defaults to the value in the ACCT_KEY environment variable.

    acct_profile
        Name of the profile to add to idem's ctx.acct parameter
        Defaults to the value in the ACCT_PROFILE environment variable.

    cache_dir
        The location to use for the cache directory

    render
        The render pipe to use, this allows for the language to be specified (jinja|yaml)

    runtime
        Select which execution runtime to use (serial|parallel)

    source_dir
        The directory containing sls files

    .. code-block:: yaml

        cheese:
          idem.state:
            - runtime: parallel
            - sls:
              - idem_state.sls
              - sls_source

    :maturity: new
    :depends: acct, pop, pop-config, idem
    :platform: all
    """
    # The idem hub carries all idem plugins and configuration (hub.OPT).
    hub = __utils__["idem.hub"]()

    if isinstance(sls, str):
        sls = [sls]

    # Resolve file paths / bare refs into sls sources and run targets.
    sls_sources, SLSs = _get_refs(sls, source_dir or hub.OPT.idem.tree)

    # Explicit arguments win; otherwise fall back to idem/acct configuration.
    coro = hub.idem.state.apply(
        name=name,
        sls_sources=sls_sources,
        render=render or hub.OPT.idem.render,
        runtime=runtime or hub.OPT.idem.runtime,
        subs=["states"],
        cache_dir=cache_dir or hub.OPT.idem.cache_dir,
        sls=SLSs,
        test=test,
        acct_file=acct_file or hub.OPT.acct.acct_file,
        acct_key=acct_key or hub.OPT.acct.acct_key,
        acct_profile=acct_profile or hub.OPT.acct.acct_profile or "default",
    )
    # idem's apply is a coroutine; drive it to completion on the hub's loop.
    hub.pop.Loop.run_until_complete(coro)

    errors = hub.idem.RUNS[name]["errors"]
    success = not errors

    # Convert each idem chunk result into a salt-style sub-state return.
    running = []
    for idem_name, idem_return in hub.idem.RUNS[name]["running"].items():
        standardized_idem_return = {
            "name": idem_return["name"],
            "changes": idem_return["changes"],
            "result": idem_return["result"],
            "comment": idem_return.get("comment"),
            "low": _get_low_data(idem_name),
        }
        running.append(standardized_idem_return)

    return {
        "name": name,
        "result": success,
        "comment": "Ran {} idem states".format(len(running)) if success else errors,
        "changes": {},
        "sub_state_run": running,
    }
import logging
import os
import os.path
import salt.utils.data
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only load if boto is available.
    """
    if "boto_cloudtrail.exists" not in __salt__:
        return (False, "boto_cloudtrail module could not be loaded")
    return "boto_cloudtrail"
def present(
    name,
    Name,
    S3BucketName,
    S3KeyPrefix=None,
    SnsTopicName=None,
    IncludeGlobalServiceEvents=True,
    IsMultiRegionTrail=None,
    EnableLogFileValidation=False,
    CloudWatchLogsLogGroupArn=None,
    CloudWatchLogsRoleArn=None,
    KmsKeyId=None,
    LoggingEnabled=True,
    Tags=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Ensure trail exists.
    name
        The name of the state definition
    Name
        Name of the trail.
    S3BucketName
        Specifies the name of the Amazon S3 bucket designated for publishing log
        files.
    S3KeyPrefix
        Specifies the Amazon S3 key prefix that comes after the name of the
        bucket you have designated for log file delivery.
    SnsTopicName
        Specifies the name of the Amazon SNS topic defined for notification of
        log file delivery. The maximum length is 256 characters.
    IncludeGlobalServiceEvents
        Specifies whether the trail is publishing events from global services
        such as IAM to the log files.
    IsMultiRegionTrail
        Passed through unchanged to the boto_cloudtrail create/update calls.
    EnableLogFileValidation
        Specifies whether log file integrity validation is enabled. The default
        is false.
    CloudWatchLogsLogGroupArn
        Specifies a log group name using an Amazon Resource Name (ARN), a unique
        identifier that represents the log group to which CloudTrail logs will
        be delivered. Not required unless you specify CloudWatchLogsRoleArn.
    CloudWatchLogsRoleArn
        Specifies the role for the CloudWatch Logs endpoint to assume to write
        to a user's log group.
    KmsKeyId
        Specifies the KMS key ID to use to encrypt the logs delivered by
        CloudTrail. The value can be a an alias name prefixed by "alias/", a
        fully specified ARN to an alias, a fully specified ARN to a key, or a
        globally unique identifier.
    LoggingEnabled
        Whether logging should be enabled for the trail
    Tags
        A dictionary of tags that should be set on the trail
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    """
    ret = {"name": Name, "result": True, "comment": "", "changes": {}}
    r = __salt__["boto_cloudtrail.exists"](
        Name=Name, region=region, key=key, keyid=keyid, profile=profile
    )
    # Could not even determine existence - fail early.
    if "error" in r:
        ret["result"] = False
        ret["comment"] = "Failed to create trail: {}.".format(r["error"]["message"])
        return ret
    # --- Trail does not exist yet: create it, then optionally start logging
    # --- and apply tags, returning immediately afterwards.
    if not r.get("exists"):
        if __opts__["test"]:
            ret["comment"] = "CloudTrail {} is set to be created.".format(Name)
            ret["result"] = None
            return ret
        r = __salt__["boto_cloudtrail.create"](
            Name=Name,
            S3BucketName=S3BucketName,
            S3KeyPrefix=S3KeyPrefix,
            SnsTopicName=SnsTopicName,
            IncludeGlobalServiceEvents=IncludeGlobalServiceEvents,
            IsMultiRegionTrail=IsMultiRegionTrail,
            EnableLogFileValidation=EnableLogFileValidation,
            CloudWatchLogsLogGroupArn=CloudWatchLogsLogGroupArn,
            CloudWatchLogsRoleArn=CloudWatchLogsRoleArn,
            KmsKeyId=KmsKeyId,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        if not r.get("created"):
            ret["result"] = False
            ret["comment"] = "Failed to create trail: {}.".format(r["error"]["message"])
            return ret
        _describe = __salt__["boto_cloudtrail.describe"](
            Name, region=region, key=key, keyid=keyid, profile=profile
        )
        ret["changes"]["old"] = {"trail": None}
        ret["changes"]["new"] = _describe
        ret["comment"] = "CloudTrail {} created.".format(Name)
        # Logging is a separate API call from trail creation.
        if LoggingEnabled:
            r = __salt__["boto_cloudtrail.start_logging"](
                Name=Name, region=region, key=key, keyid=keyid, profile=profile
            )
            if "error" in r:
                ret["result"] = False
                ret["comment"] = "Failed to create trail: {}.".format(
                    r["error"]["message"]
                )
                ret["changes"] = {}
                return ret
            ret["changes"]["new"]["trail"]["LoggingEnabled"] = True
        else:
            ret["changes"]["new"]["trail"]["LoggingEnabled"] = False
        if bool(Tags):
            r = __salt__["boto_cloudtrail.add_tags"](
                Name=Name, region=region, key=key, keyid=keyid, profile=profile, **Tags
            )
            if not r.get("tagged"):
                ret["result"] = False
                ret["comment"] = "Failed to create trail: {}.".format(
                    r["error"]["message"]
                )
                ret["changes"] = {}
                return ret
            ret["changes"]["new"]["trail"]["Tags"] = Tags
        return ret
    ret["comment"] = os.linesep.join(
        [ret["comment"], "CloudTrail {} is present.".format(Name)]
    )
    ret["changes"] = {}
    # --- Trail exists: diff the live configuration against the requested one.
    _describe = __salt__["boto_cloudtrail.describe"](
        Name=Name, region=region, key=key, keyid=keyid, profile=profile
    )
    if "error" in _describe:
        ret["result"] = False
        ret["comment"] = "Failed to update trail: {}.".format(
            _describe["error"]["message"]
        )
        ret["changes"] = {}
        return ret
    _describe = _describe.get("trail")
    # Logging status lives in the trail *status*, not its description.
    r = __salt__["boto_cloudtrail.status"](
        Name=Name, region=region, key=key, keyid=keyid, profile=profile
    )
    _describe["LoggingEnabled"] = r.get("trail", {}).get("IsLogging", False)
    need_update = False
    # Maps this state's argument names (keys, read via locals()) to the
    # corresponding keys in the describe() output (values).
    bucket_vars = {
        "S3BucketName": "S3BucketName",
        "S3KeyPrefix": "S3KeyPrefix",
        "SnsTopicName": "SnsTopicName",
        "IncludeGlobalServiceEvents": "IncludeGlobalServiceEvents",
        "IsMultiRegionTrail": "IsMultiRegionTrail",
        "EnableLogFileValidation": "LogFileValidationEnabled",
        "CloudWatchLogsLogGroupArn": "CloudWatchLogsLogGroupArn",
        "CloudWatchLogsRoleArn": "CloudWatchLogsRoleArn",
        "KmsKeyId": "KmsKeyId",
        "LoggingEnabled": "LoggingEnabled",
    }
    for invar, outvar in bucket_vars.items():
        # NOTE(review): assumes describe() always returns every key in
        # bucket_vars; a missing key here would raise KeyError - confirm.
        if _describe[outvar] != locals()[invar]:
            need_update = True
            ret["changes"].setdefault("new", {})[invar] = locals()[invar]
            ret["changes"].setdefault("old", {})[invar] = _describe[outvar]
    r = __salt__["boto_cloudtrail.list_tags"](
        Name=Name, region=region, key=key, keyid=keyid, profile=profile
    )
    _describe["Tags"] = r.get("tags", {})
    # NOTE(review): Tags may be None here (the default); verify
    # salt.utils.data.compare_dicts tolerates a None argument.
    tagchange = salt.utils.data.compare_dicts(_describe["Tags"], Tags)
    if bool(tagchange):
        need_update = True
        ret["changes"].setdefault("new", {})["Tags"] = Tags
        ret["changes"].setdefault("old", {})["Tags"] = _describe["Tags"]
    if need_update:
        if __opts__["test"]:
            msg = "CloudTrail {} set to be modified.".format(Name)
            ret["comment"] = msg
            ret["result"] = None
            return ret
        ret["comment"] = os.linesep.join([ret["comment"], "CloudTrail to be modified"])
        r = __salt__["boto_cloudtrail.update"](
            Name=Name,
            S3BucketName=S3BucketName,
            S3KeyPrefix=S3KeyPrefix,
            SnsTopicName=SnsTopicName,
            IncludeGlobalServiceEvents=IncludeGlobalServiceEvents,
            IsMultiRegionTrail=IsMultiRegionTrail,
            EnableLogFileValidation=EnableLogFileValidation,
            CloudWatchLogsLogGroupArn=CloudWatchLogsLogGroupArn,
            CloudWatchLogsRoleArn=CloudWatchLogsRoleArn,
            KmsKeyId=KmsKeyId,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
        if not r.get("updated"):
            ret["result"] = False
            ret["comment"] = "Failed to update trail: {}.".format(r["error"]["message"])
            ret["changes"] = {}
            return ret
        # Reconcile the logging state after the config update.
        if LoggingEnabled:
            r = __salt__["boto_cloudtrail.start_logging"](
                Name=Name, region=region, key=key, keyid=keyid, profile=profile
            )
            if not r.get("started"):
                ret["result"] = False
                ret["comment"] = "Failed to update trail: {}.".format(
                    r["error"]["message"]
                )
                ret["changes"] = {}
                return ret
        else:
            r = __salt__["boto_cloudtrail.stop_logging"](
                Name=Name, region=region, key=key, keyid=keyid, profile=profile
            )
            if not r.get("stopped"):
                ret["result"] = False
                ret["comment"] = "Failed to update trail: {}.".format(
                    r["error"]["message"]
                )
                ret["changes"] = {}
                return ret
        # Apply tag adds/removes derived from the earlier compare_dicts diff.
        # NOTE(review): the add_tags/remove_tags results are not checked for
        # errors here - failures are silently ignored.
        if bool(tagchange):
            adds = {}
            removes = {}
            for k, diff in tagchange.items():
                if diff.get("new", "") != "":
                    # there's an update for this key
                    adds[k] = Tags[k]
                elif diff.get("old", "") != "":
                    removes[k] = _describe["Tags"][k]
            if bool(adds):
                r = __salt__["boto_cloudtrail.add_tags"](
                    Name=Name,
                    region=region,
                    key=key,
                    keyid=keyid,
                    profile=profile,
                    **adds
                )
            if bool(removes):
                r = __salt__["boto_cloudtrail.remove_tags"](
                    Name=Name,
                    region=region,
                    key=key,
                    keyid=keyid,
                    profile=profile,
                    **removes
                )
    return ret
def absent(name, Name, region=None, key=None, keyid=None, profile=None):
    """
    Ensure trail with passed properties is absent.
    name
        The name of the state definition.
    Name
        Name of the trail.
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    """
    ret = {"name": Name, "changes": {}, "result": True, "comment": ""}
    exists_ret = __salt__["boto_cloudtrail.exists"](
        Name, region=region, key=key, keyid=keyid, profile=profile
    )
    # Existence check failed outright - report the error and stop.
    if "error" in exists_ret:
        ret["result"] = False
        ret["comment"] = "Failed to delete trail: {}.".format(
            exists_ret["error"]["message"]
        )
        return ret
    # Nothing to do when the trail is already gone.
    if exists_ret and not exists_ret["exists"]:
        ret["comment"] = "CloudTrail {} does not exist.".format(Name)
        return ret
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "CloudTrail {} is set to be removed.".format(Name)
        return ret
    delete_ret = __salt__["boto_cloudtrail.delete"](
        Name, region=region, key=key, keyid=keyid, profile=profile
    )
    if not delete_ret["deleted"]:
        ret["result"] = False
        ret["comment"] = "Failed to delete trail: {}.".format(
            delete_ret["error"]["message"]
        )
        return ret
    ret["changes"] = {"old": {"trail": Name}, "new": {"trail": None}}
    ret["comment"] = "CloudTrail {} deleted.".format(Name)
    return ret
import logging
from ctypes import (
CDLL,
CFUNCTYPE,
POINTER,
Structure,
c_char,
c_char_p,
c_int,
c_uint,
c_void_p,
cast,
pointer,
sizeof,
)
from ctypes.util import find_library
import salt.utils.user
log = logging.getLogger(__name__)
try:
    LIBC = CDLL(find_library("c"))
    # void *calloc(size_t nmemb, size_t size)
    CALLOC = LIBC.calloc
    CALLOC.restype = c_void_p
    CALLOC.argtypes = [c_uint, c_uint]
    # char *strdup(const char *s)
    STRDUP = LIBC.strdup
    # Fixed: was misspelled "argstypes", which silently set an unused
    # attribute and left the prototype undeclared.
    STRDUP.argtypes = [c_char_p]
    STRDUP.restype = POINTER(c_char)  # NOT c_char_p !!!!
except Exception:  # pylint: disable=broad-except
    log.trace("Failed to load libc using ctypes", exc_info=True)
    HAS_LIBC = False
else:
    HAS_LIBC = True
# Various constants
PAM_PROMPT_ECHO_OFF = 1
PAM_PROMPT_ECHO_ON = 2
PAM_ERROR_MSG = 3
PAM_TEXT_INFO = 4
class PamHandle(Structure):
    """
    Wrapper class for pam_handle_t
    """
    # Opaque pointer that pam_start() fills in.
    _fields_ = [("handle", c_void_p)]
    def __init__(self):
        Structure.__init__(self)
        # Start with a NULL handle; PAM populates it on pam_start().
        self.handle = 0
class PamMessage(Structure):
    """
    ctypes mirror of the C ``struct pam_message`` passed to the PAM
    conversation callback.
    """
    _fields_ = [
        ("msg_style", c_int),
        ("msg", c_char_p),
    ]
    def __repr__(self):
        return f"<PamMessage {self.msg_style} '{self.msg}'>"
class PamResponse(Structure):
    """
    ctypes mirror of the C ``struct pam_response`` returned from the PAM
    conversation callback.
    """
    _fields_ = [
        ("resp", c_char_p),
        ("resp_retcode", c_int),
    ]
    def __repr__(self):
        return f"<PamResponse {self.resp_retcode} '{self.resp}'>"
CONV_FUNC = CFUNCTYPE(
c_int, c_int, POINTER(POINTER(PamMessage)), POINTER(POINTER(PamResponse)), c_void_p
)
class PamConv(Structure):
    """
    Wrapper class for pam_conv structure
    """
    # conv: conversation callback PAM invokes for prompts;
    # appdata_ptr: opaque application data handed back to the callback
    # (unused by this module).
    _fields_ = [("conv", CONV_FUNC), ("appdata_ptr", c_void_p)]
try:
    LIBPAM = CDLL(find_library("pam"))
    # int pam_start(const char *service, const char *user,
    #               const struct pam_conv *conv, pam_handle_t **pamh)
    PAM_START = LIBPAM.pam_start
    PAM_START.restype = c_int
    PAM_START.argtypes = [c_char_p, c_char_p, POINTER(PamConv), POINTER(PamHandle)]
    # int pam_authenticate(pam_handle_t *pamh, int flags)
    PAM_AUTHENTICATE = LIBPAM.pam_authenticate
    PAM_AUTHENTICATE.restype = c_int
    PAM_AUTHENTICATE.argtypes = [PamHandle, c_int]
    # int pam_acct_mgmt(pam_handle_t *pamh, int flags)
    PAM_ACCT_MGMT = LIBPAM.pam_acct_mgmt
    PAM_ACCT_MGMT.restype = c_int
    PAM_ACCT_MGMT.argtypes = [PamHandle, c_int]
    # int pam_end(pam_handle_t *pamh, int pam_status)
    PAM_END = LIBPAM.pam_end
    PAM_END.restype = c_int
    PAM_END.argtypes = [PamHandle, c_int]
except Exception:  # pylint: disable=broad-except
    log.trace("Failed to load pam using ctypes", exc_info=True)
    HAS_PAM = False
else:
    HAS_PAM = True
def __virtual__():
    """
    Load only where both libc and libpam could be bound via ctypes.
    """
    if not HAS_LIBC:
        return False
    return HAS_PAM
def authenticate(username, password):
    """
    Returns True if the given username and password authenticate for the
    given service. Returns False otherwise
    ``username``: the username to authenticate
    ``password``: the password in plain text
    """
    # PAM service name (matches a file under /etc/pam.d/); defaults to "login".
    service = __opts__.get("auth.pam.service", "login")
    # The libc/libpam entry points take C strings - encode everything to bytes.
    if isinstance(username, str):
        username = username.encode(__salt_system_encoding__)
    if isinstance(password, str):
        password = password.encode(__salt_system_encoding__)
    if isinstance(service, str):
        service = service.encode(__salt_system_encoding__)
    @CONV_FUNC
    def my_conv(n_messages, messages, p_response, app_data):
        """
        Simple conversation function that responds to any
        prompt where the echo is off with the supplied password
        """
        # Create an array of n_messages response objects
        # NOTE(review): the CALLOC'd response array and the STRDUP'd password
        # are presumably freed by PAM after the conversation - confirm against
        # the Linux-PAM conversation contract.
        addr = CALLOC(n_messages, sizeof(PamResponse))
        p_response[0] = cast(addr, POINTER(PamResponse))
        for i in range(n_messages):
            # Only answer hidden-input prompts (e.g. "Password:"); all other
            # message styles are left with the zeroed response from calloc.
            if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
                pw_copy = STRDUP(password)
                p_response.contents[i].resp = cast(pw_copy, c_char_p)
                p_response.contents[i].resp_retcode = 0
        return 0
    handle = PamHandle()
    conv = PamConv(my_conv, 0)
    retval = PAM_START(service, username, pointer(conv), pointer(handle))
    if retval != 0:
        # TODO: This is not an authentication error, something
        # has gone wrong starting up PAM
        PAM_END(handle, retval)
        return False
    # 0 == PAM_SUCCESS; only run account management if authentication passed.
    retval = PAM_AUTHENTICATE(handle, 0)
    if retval == 0:
        retval = PAM_ACCT_MGMT(handle, 0)
    PAM_END(handle, 0)
    return retval == 0
def auth(username, password, **kwargs):
    """
    Salt eauth entry point: authenticate *username*/*password* via PAM.
    Extra keyword arguments are accepted for interface compatibility and
    ignored.
    """
    return authenticate(username=username, password=password)
def groups(username, *args, **kwargs):
    """
    Retrieve groups for a given user for this auth provider
    (backed by the system group database).
    Extra positional/keyword arguments are accepted for interface
    compatibility and ignored.
    """
    group_list = salt.utils.user.get_group_list(username)
    return group_list
import getpass
import logging
import random
import time
from collections.abc import Iterable, Mapping
import salt.channel.client
import salt.config
import salt.exceptions
import salt.loader
import salt.payload
import salt.utils.args
import salt.utils.dictupdate
import salt.utils.files
import salt.utils.minions
import salt.utils.user
import salt.utils.versions
import salt.utils.zeromq
log = logging.getLogger(__name__)
AUTH_INTERNAL_KEYWORDS = frozenset(
[
"client",
"cmd",
"eauth",
"fun",
"gather_job_timeout",
"kwarg",
"match",
"metadata",
"print_event",
"raw",
"yield_pub_data",
]
)
class LoadAuth:
    """
    Wrap the authentication system to handle peripheral components
    """
    def __init__(self, opts, ckminions=None):
        self.opts = opts
        # Slowest observed failed-auth duration in seconds; time_auth() pads
        # every failure to roughly this long to blunt timing attacks.
        self.max_fail = 1.0
        self.auth = salt.loader.auth(opts)
        self.tokens = salt.loader.eauth_tokens(opts)
        self.ckminions = ckminions or salt.utils.minions.CkMinions(opts)
    def load_name(self, load):
        """
        Return the primary name associated with the load, if an empty string
        is returned then the load does not match the function
        """
        if "eauth" not in load:
            return ""
        fstr = "{}.auth".format(load["eauth"])
        if fstr not in self.auth:
            return ""
        try:
            # The first positional argument of the eauth module's auth()
            # function is treated as the primary name (usually the username).
            pname_arg = salt.utils.args.arg_lookup(self.auth[fstr])["args"][0]
            return load[pname_arg]
        except IndexError:
            return ""
    def __auth_call(self, load):
        """
        Return the token and set the cache data for use
        Do not call this directly! Use the time_auth method to overcome timing
        attacks
        """
        if "eauth" not in load:
            return False
        fstr = "{}.auth".format(load["eauth"])
        if fstr not in self.auth:
            return False
        # When making auth calls, only username, password, auth, and token
        # are valid, so we strip anything else out.
        _valid = ["username", "password", "eauth", "token"]
        _load = {key: value for (key, value) in load.items() if key in _valid}
        fcall = salt.utils.args.format_call(
            self.auth[fstr], _load, expected_extra_kws=AUTH_INTERNAL_KEYWORDS
        )
        try:
            if "kwargs" in fcall:
                return self.auth[fstr](*fcall["args"], **fcall["kwargs"])
            else:
                return self.auth[fstr](*fcall["args"])
        except Exception as e:  # pylint: disable=broad-except
            log.debug("Authentication module threw %s", e)
            return False
    def time_auth(self, load):
        """
        Make sure that all failures happen in the same amount of time
        """
        start = time.time()
        ret = self.__auth_call(load)
        if ret:
            return ret
        # Failure path: remember the slowest failure seen so far, then sleep
        # until roughly max_fail (+/- 25% jitter) has elapsed so callers can't
        # distinguish failure modes by response latency.
        f_time = time.time() - start
        if f_time > self.max_fail:
            self.max_fail = f_time
        deviation = self.max_fail / 4
        r_time = random.SystemRandom().uniform(
            self.max_fail - deviation, self.max_fail + deviation
        )
        while start + r_time > time.time():
            time.sleep(0.001)
        return False
    def __get_acl(self, load):
        """
        Returns ACL for a specific user.
        Returns None if eauth doesn't provide any for the user. I. e. None means: use acl declared
        in master config.
        """
        if "eauth" not in load:
            return None
        # An explicit eauth_acl_module overrides the eauth module's own acl.
        mod = self.opts["eauth_acl_module"]
        if not mod:
            mod = load["eauth"]
        fstr = "{}.acl".format(mod)
        if fstr not in self.auth:
            return None
        fcall = salt.utils.args.format_call(
            self.auth[fstr], load, expected_extra_kws=AUTH_INTERNAL_KEYWORDS
        )
        try:
            return self.auth[fstr](*fcall["args"], **fcall["kwargs"])
        except Exception as e:  # pylint: disable=broad-except
            log.debug("Authentication module threw %s", e)
            return None
    def __process_acl(self, load, auth_list):
        """
        Allows eauth module to modify the access list right before it'll be applied to the request.
        For example ldap auth module expands entries
        """
        if "eauth" not in load:
            return auth_list
        fstr = "{}.process_acl".format(load["eauth"])
        if fstr not in self.auth:
            return auth_list
        try:
            return self.auth[fstr](auth_list, self.opts)
        except Exception as e:  # pylint: disable=broad-except
            log.debug("Authentication module threw %s", e)
            return auth_list
    def get_groups(self, load):
        """
        Read in a load and return the groups a user is a member of
        by asking the appropriate provider
        """
        if "eauth" not in load:
            return False
        fstr = "{}.groups".format(load["eauth"])
        if fstr not in self.auth:
            return False
        fcall = salt.utils.args.format_call(
            self.auth[fstr], load, expected_extra_kws=AUTH_INTERNAL_KEYWORDS
        )
        try:
            return self.auth[fstr](*fcall["args"], **fcall["kwargs"])
        except IndexError:
            return False
        except Exception:  # pylint: disable=broad-except
            return None
    def _allow_custom_expire(self, load):
        """
        Return bool if requesting user is allowed to set custom expire
        """
        expire_override = self.opts.get("token_expire_user_override", False)
        if expire_override is True:
            return True
        # Alternatively the option can be a mapping of eauth name ->
        # whitelist of usernames allowed to override the expiry.
        if isinstance(expire_override, Mapping):
            expire_whitelist = expire_override.get(load["eauth"], [])
            if isinstance(expire_whitelist, Iterable):
                if load.get("username") in expire_whitelist:
                    return True
        return False
    def mk_token(self, load):
        """
        Run time_auth and create a token. Return an empty dict on failure,
        otherwise return the stored token data.
        """
        if not self.authenticate_eauth(load):
            return {}
        if self._allow_custom_expire(load):
            token_expire = load.pop("token_expire", self.opts["token_expire"])
        else:
            # Strip any caller-supplied expiry the user isn't allowed to set.
            _ = load.pop("token_expire", None)
            token_expire = self.opts["token_expire"]
        tdata = {
            "start": time.time(),
            "expire": time.time() + token_expire,
            "name": self.load_name(load),
            "eauth": load["eauth"],
        }
        if self.opts["keep_acl_in_token"]:
            acl_ret = self.__get_acl(load)
            tdata["auth_list"] = acl_ret
        groups = self.get_groups(load)
        if groups:
            tdata["groups"] = groups
        return self.tokens["{}.mk_token".format(self.opts["eauth_tokens"])](
            self.opts, tdata
        )
    def get_tok(self, tok):
        """
        Return the stored token data, or an empty dict if the token is not
        valid. Invalid/expired/broken tokens are removed from storage.
        """
        tdata = {}
        try:
            tdata = self.tokens["{}.get_token".format(self.opts["eauth_tokens"])](
                self.opts, tok
            )
        except salt.exceptions.SaltDeserializationError:
            log.warning("Failed to load token %r - removing broken/empty file.", tok)
            rm_tok = True
        else:
            if not tdata:
                return {}
            rm_tok = False
        if tdata.get("expire", 0) < time.time():
            # If expire isn't present in the token it's invalid and needs
            # to be removed. Also, if it's present and has expired - in
            # other words, the expiration is before right now, it should
            # be removed.
            rm_tok = True
        if rm_tok:
            self.rm_token(tok)
            return {}
        return tdata
    def list_tokens(self):
        """
        List all tokens in eauth_tokn storage.
        """
        return self.tokens["{}.list_tokens".format(self.opts["eauth_tokens"])](
            self.opts
        )
    def rm_token(self, tok):
        """
        Remove the given token from token storage.
        """
        self.tokens["{}.rm_token".format(self.opts["eauth_tokens"])](self.opts, tok)
    def authenticate_token(self, load):
        """
        Authenticate a user by the token specified in load.
        Return the token object or False if auth failed.
        """
        token = self.get_tok(load["token"])
        # Bail if the token is empty or if the eauth type specified is not allowed
        if not token or token["eauth"] not in self.opts["external_auth"]:
            log.warning('Authentication failure of type "token" occurred.')
            return False
        return token
    def authenticate_eauth(self, load):
        """
        Authenticate a user by the external auth module specified in load.
        Return True on success or False on failure.
        """
        if "eauth" not in load:
            log.warning('Authentication failure of type "eauth" occurred.')
            return False
        if load["eauth"] not in self.opts["external_auth"]:
            log.warning('The eauth system "%s" is not enabled', load["eauth"])
            log.warning('Authentication failure of type "eauth" occurred.')
            return False
        # Perform the actual authentication. If we fail here, do not
        # continue.
        if not self.time_auth(load):
            log.warning('Authentication failure of type "eauth" occurred.')
            return False
        return True
    def authenticate_key(self, load, key):
        """
        Authenticate a user by the key passed in load.
        Return the effective user id (name) if it's different from the specified one (for sudo).
        If the effective user id is the same as the passed one, return True on success or False on
        failure.
        """
        error_msg = 'Authentication failure of type "user" occurred.'
        auth_key = load.pop("key", None)
        if auth_key is None:
            log.warning(error_msg)
            return False
        if "user" in load:
            auth_user = AuthUser(load["user"])
            if auth_user.is_sudo():
                # If someone sudos check to make sure there is no ACL's around their username
                if auth_key != key[self.opts.get("user", "root")]:
                    log.warning(error_msg)
                    return False
                return auth_user.sudo_name()
            elif (
                load["user"] == self.opts.get("user", "root") or load["user"] == "root"
            ):
                # Claims to be the master user (or literal root) - must match
                # the master user's key exactly.
                if auth_key != key[self.opts.get("user", "root")]:
                    log.warning(
                        "Master runs as %r, but user in payload is %r",
                        self.opts.get("user", "root"),
                        load["user"],
                    )
                    log.warning(error_msg)
                    return False
            elif auth_user.is_running_user():
                if auth_key != key.get(load["user"]):
                    log.warning(error_msg)
                    return False
            elif auth_key == key.get("root"):
                # Holding the root key is accepted regardless of username.
                pass
            else:
                if load["user"] in key:
                    # User is authorised, check key and check perms
                    if auth_key != key[load["user"]]:
                        log.warning(error_msg)
                        return False
                    return load["user"]
                else:
                    log.warning(error_msg)
                    return False
        else:
            # No username supplied: the key must match the current process
            # user's key.
            if auth_key != key[salt.utils.user.get_user()]:
                log.warning(error_msg)
                return False
        return True
    def get_auth_list(self, load, token=None):
        """
        Retrieve access list for the user specified in load.
        The list is built by eauth module or from master eauth configuration.
        Return None if current configuration doesn't provide any ACL for the user. Return an empty
        list if the user has no rights to execute anything on this master and returns non-empty list
        if user is allowed to execute particular functions.
        """
        # Get auth list from token
        if token and self.opts["keep_acl_in_token"] and "auth_list" in token:
            return token["auth_list"]
        # Get acl from eauth module.
        auth_list = self.__get_acl(load)
        if auth_list is not None:
            return auth_list
        eauth = token["eauth"] if token else load["eauth"]
        if eauth not in self.opts["external_auth"]:
            # No matching module is allowed in config
            log.debug('The eauth system "%s" is not enabled', eauth)
            log.warning("Authorization failure occurred.")
            return None
        if token:
            name = token["name"]
            groups = token.get("groups")
        else:
            name = self.load_name(load)  # The username we are attempting to auth with
            groups = self.get_groups(load)  # The groups this user belongs to
        eauth_config = self.opts["external_auth"][eauth]
        if not eauth_config:
            log.debug('eauth "%s" configuration is empty', eauth)
        if not groups:
            groups = []
        # We now have an authenticated session and it is time to determine
        # what the user has access to.
        auth_list = self.ckminions.fill_auth_list(eauth_config, name, groups)
        auth_list = self.__process_acl(load, auth_list)
        log.trace("Compiled auth_list: %s", auth_list)
        return auth_list
    def check_authentication(self, load, auth_type, key=None, show_username=False):
        """
        .. versionadded:: 2018.3.0
        Go through various checks to see if the token/eauth/user can be authenticated.
        Returns a dictionary containing the following keys:
        - auth_list
        - username
        - error
        If an error is encountered, return immediately with the relevant error dictionary
        as authentication has failed. Otherwise, return the username and valid auth_list.
        """
        auth_list = []
        username = load.get("username", "UNKNOWN")
        ret = {"auth_list": auth_list, "username": username, "error": {}}
        # Authenticate
        if auth_type == "token":
            token = self.authenticate_token(load)
            if not token:
                ret["error"] = {
                    "name": "TokenAuthenticationError",
                    "message": 'Authentication failure of type "token" occurred.',
                }
                return ret
            # Update username for token
            username = token["name"]
            ret["username"] = username
            auth_list = self.get_auth_list(load, token=token)
        elif auth_type == "eauth":
            if not self.authenticate_eauth(load):
                ret["error"] = {
                    "name": "EauthAuthenticationError",
                    "message": 'Authentication failure of type "eauth" occurred for user {}.'.format(
                        username
                    ),
                }
                return ret
            auth_list = self.get_auth_list(load)
        elif auth_type == "user":
            auth_ret = self.authenticate_key(load, key)
            msg = 'Authentication failure of type "user" occurred'
            if not auth_ret:  # auth_ret can be a boolean or the effective user id
                if show_username:
                    msg = "{} for user {}.".format(msg, username)
                ret["error"] = {"name": "UserAuthenticationError", "message": msg}
                return ret
            # Verify that the caller has root on master
            if auth_ret is not True:
                if AuthUser(load["user"]).is_sudo():
                    if not self.opts["sudo_acl"] or not self.opts["publisher_acl"]:
                        auth_ret = True
            if auth_ret is not True:
                # A named (non-root) effective user: restrict to the entries
                # matching them in publisher_acl.
                # Avoid a circular import
                import salt.utils.master
                auth_list = salt.utils.master.get_values_of_matching_keys(
                    self.opts["publisher_acl"], auth_ret
                )
                if not auth_list:
                    ret["error"] = {"name": "UserAuthenticationError", "message": msg}
                    return ret
        else:
            ret["error"] = {
                "name": "SaltInvocationError",
                "message": "Authentication type not supported.",
            }
            return ret
        # Authentication checks passed
        ret["auth_list"] = auth_list
        return ret
class Resolver:
    """
    The class used to resolve options for the command line and for generic
    interactive interfaces
    """
    def __init__(self, opts):
        self.opts = opts
        self.auth = salt.loader.auth(opts)
    def _send_token_request(self, load):
        # Send the load to the master's clear-crypt request channel and
        # return whatever the master replies with.
        master_uri = "tcp://{}:{}".format(
            salt.utils.zeromq.ip_bracket(self.opts["interface"]),
            str(self.opts["ret_port"]),
        )
        with salt.channel.client.ReqChannel.factory(
            self.opts, crypt="clear", master_uri=master_uri
        ) as channel:
            return channel.send(load)
    def cli(self, eauth):
        """
        Execute the CLI options to fill in the extra data needed for the
        defined eauth system
        """
        ret = {}
        if not eauth:
            print("External authentication system has not been specified")
            return ret
        fstr = "{}.auth".format(eauth)
        if fstr not in self.auth:
            print(
                'The specified external authentication system "{}" is not available'.format(
                    eauth
                )
            )
            print(
                "Available eauth types: {}".format(
                    ", ".join(sorted(k[:-5] for k in self.auth if k.endswith(".auth")))
                )
            )
            return ret
        # Prompt for every argument the eauth module's auth() function takes,
        # preferring values already present in opts; anything that looks like
        # a password is read without echo.
        args = salt.utils.args.arg_lookup(self.auth[fstr])
        for arg in args["args"]:
            if arg in self.opts:
                ret[arg] = self.opts[arg]
            elif arg.startswith("pass"):
                ret[arg] = getpass.getpass("{}: ".format(arg))
            else:
                ret[arg] = input("{}: ".format(arg))
        for kwarg, default in list(args["kwargs"].items()):
            if kwarg in self.opts:
                # Fixed: previously stored under the literal key "kwarg",
                # discarding the real keyword name (and clobbering the entry
                # on each iteration). Store under the keyword's own name,
                # matching the else branch below.
                ret[kwarg] = self.opts[kwarg]
            else:
                ret[kwarg] = input("{} [{}]: ".format(kwarg, default))
        # Use current user if empty
        if "username" in ret and not ret["username"]:
            ret["username"] = salt.utils.user.get_user()
        return ret
    def token_cli(self, eauth, load):
        """
        Create the token from the CLI and request the correct data to
        authenticate via the passed authentication mechanism
        """
        load["cmd"] = "mk_token"
        load["eauth"] = eauth
        tdata = self._send_token_request(load)
        if "token" not in tdata:
            return tdata
        # Best-effort: cache the token locally with owner-only permissions.
        try:
            with salt.utils.files.set_umask(0o177):
                with salt.utils.files.fopen(self.opts["token_file"], "w+") as fp_:
                    fp_.write(tdata["token"])
        except OSError:
            pass
        return tdata
    def mk_token(self, load):
        """
        Request a token from the master
        """
        load["cmd"] = "mk_token"
        tdata = self._send_token_request(load)
        return tdata
    def get_token(self, token):
        """
        Request the data of an existing token from the master
        """
        load = {}
        load["token"] = token
        load["cmd"] = "get_token"
        tdata = self._send_token_request(load)
        return tdata
class AuthUser:
    """
    Represents a user requesting authentication to the salt master.
    """
    def __init__(self, user):
        """
        Store the username (a string) this object represents.
        """
        self.user = user
    def is_sudo(self):
        """
        Return True when the represented username carries the 'sudo_'
        prefix, i.e. the caller is running with sudo.
        """
        return self.user.startswith("sudo_")
    def is_running_user(self):
        """
        Return True when the represented user is the same user running
        this process.
        """
        return salt.utils.user.get_user() == self.user
    def sudo_name(self):
        """
        Return the username of the sudoer, i.e. the part after the first
        underscore of the 'sudo_' prefix.
        """
        parts = self.user.split("_", 1)
        return parts[-1]
import salt.ext.tornado.httpserver
import salt.ext.tornado.ioloop
import salt.ext.tornado.web
import salt.utils.event
def start(address=None, port=5000, ssl_crt=None, ssl_key=None):
    """
    Api to listen for webhooks to send to the reactor.
    Implement the webhook behavior in an engine.
    :py:class:`rest_cherrypy Webhook docs <salt.netapi.rest_cherrypy.app.Webhook>`
    Unlike the rest_cherrypy Webhook, this is only an unauthenticated webhook
    endpoint. If an authenticated webhook endpoint is needed, use the salt-api
    webhook which runs on the master and authenticates through eauth.
    .. note: This is really meant to be used on the minion, because salt-api
        needs to be run on the master for use with eauth.
    .. warning:: Unauthenticated endpoint
        This engine sends webhook calls to the event stream. If the engine is
        running on a minion with `file_client: local` the event is sent to the
        minion event stream. Otherwise it is sent to the master event stream.
    Example Config
    .. code-block:: yaml
        engines:
          - webhook: {}
    .. code-block:: yaml
        engines:
          - webhook:
              port: 8000
              address: 10.128.1.145
              ssl_crt: /etc/pki/tls/certs/localhost.crt
              ssl_key: /etc/pki/tls/certs/localhost.key
    .. note: For making an unsigned key, use the following command
        `salt-call --local tls.create_self_signed_cert`
    """
    # On the master, fire straight onto the master event bus; otherwise fall
    # back to the minion-side event.send executor inside fire() below.
    if __opts__.get("__role") == "master":
        fire_master = salt.utils.event.get_master_event(
            __opts__, __opts__["sock_dir"]
        ).fire_event
    else:
        fire_master = None
    def fire(tag, msg):
        """
        How to fire the event
        """
        if fire_master:
            fire_master(msg, tag)
        else:
            __salt__["event.send"](tag, msg)
    class WebHook(
        salt.ext.tornado.web.RequestHandler
    ):  # pylint: disable=abstract-method
        # Handles POST /<tag>: forwards the request headers and raw body to
        # the event bus under salt/engines/hook/<tag>.
        def post(self, tag):  # pylint: disable=arguments-differ
            body = self.request.body
            headers = self.request.headers
            payload = {
                "headers": headers if isinstance(headers, dict) else dict(headers),
                "body": body,
            }
            fire("salt/engines/hook/" + tag, payload)
    # Route every path to the handler; the whole path becomes the event tag
    # suffix via the capture group.
    application = salt.ext.tornado.web.Application([(r"/(.*)", WebHook)])
    ssl_options = None
    # Serve TLS only when both the certificate and the key were supplied.
    if all([ssl_crt, ssl_key]):
        ssl_options = {"certfile": ssl_crt, "keyfile": ssl_key}
    io_loop = salt.ext.tornado.ioloop.IOLoop(make_current=False)
    io_loop.make_current()
    http_server = salt.ext.tornado.httpserver.HTTPServer(
        application, ssl_options=ssl_options
    )
    http_server.listen(port, address=address)
    # Blocks forever servicing webhook requests.
    io_loop.start()
import fnmatch
import salt.utils.event
import salt.utils.http
import salt.utils.json
_HEADERS = {"Content-Type": "application/json"}
def _logstash(url, data):
    """
    POST *data* (JSON-encoded) to the logstash HTTP endpoint at *url* and
    return the decoded query result.
    """
    payload = salt.utils.json.dumps(data)
    return salt.utils.http.query(
        url,
        "POST",
        header_dict=_HEADERS,
        data=payload,
        decode=True,
        status=True,
        opts=__opts__,
    )
def start(url, funs=None, tags=None):
    """
    Listen to salt events and forward them to logstash.

    url
        The Logstash HTTP endpoint.

    funs: ``None``
        Optional list of function names compared against the ``fun`` field of
        each event's data. Events whose ``fun`` is not in the list are
        dropped; events without a ``fun`` field are always forwarded.

    tags: ``None``
        Optional list of shell-style patterns matched against the event tag;
        only events matching at least one pattern are forwarded.

    By default (both options unset) every event is submitted to Logstash.
    """
    # The master's id conventionally ends in "_master"; pick the matching bus.
    instance = "master" if __opts__.get("id").endswith("_master") else "minion"

    with salt.utils.event.get_event(
        instance,
        sock_dir=__opts__["sock_dir"],
        opts=__opts__,
    ) as event_bus:
        while True:
            event = event_bus.get_event(full=True)
            if not event:
                continue
            publish = True
            if tags and isinstance(tags, list):
                publish = any(fnmatch.fnmatch(event["tag"], pat) for pat in tags)
            if funs and "fun" in event["data"]:
                if event["data"]["fun"] not in funs:
                    publish = False
            if publish:
                _logstash(url, event["data"])
import logging
import shlex
import subprocess
import time
import salt.loader
import salt.utils.event
import salt.utils.process
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
def _read_stdout(proc):
    """Yield raw lines from *proc*'s stdout until EOF (an empty bytes read)."""
    while True:
        line = proc.stdout.readline()
        if line == b"":
            break
        yield line
def _get_serializer(output):
    """Return the loaded Salt serializer named *output*.

    Raises CommandExecutionError when no serializer of that name exists.
    """
    serializers = salt.loader.serializers(__opts__)
    serializer = getattr(serializers, output, None)
    if serializer is None:
        raise CommandExecutionError(
            "Unknown serializer `{}` found for output option".format(output)
        )
    return serializer
def start(cmd, output="json", interval=1):
    """
    Parse stdout of a command and generate an event.

    The script engine scrapes stdout of the given script and fires an event
    based on the presence of the 'tag' key and its value. If a 'data' obj is
    available, it is fired along with the tag.

    Example: given ``{"tag": "lots/of/tacos", "data": {"toppings": "cilantro"}}``
    on stdout, the event 'lots/of/tacos' is fired on the event bus with the
    data obj as is.

    :param cmd: The command to execute
    :param output: How to deserialize stdout of the script
    :param interval: How often (seconds) to re-execute the script
    """
    try:
        cmd = shlex.split(cmd)
    except AttributeError:
        # Non-string input (e.g. an int from YAML); coerce to str first.
        cmd = shlex.split(str(cmd))
    log.debug("script engine using command %s", cmd)

    serializer = _get_serializer(output)

    if __opts__.get("__role") == "master":
        fire_master = salt.utils.event.get_master_event(
            __opts__, __opts__["sock_dir"]
        ).fire_event
    else:
        fire_master = __salt__["event.send"]

    while True:
        # Initialize so the finally block is safe even if Popen() raises.
        proc = None
        try:
            proc = subprocess.Popen(
                cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
            )
            log.debug("Starting script with pid %d", proc.pid)

            for raw_event in _read_stdout(proc):
                log.debug(raw_event)
                event = serializer.deserialize(raw_event)
                tag = event.get("tag", None)
                data = event.get("data", {})

                if data and "id" not in data:
                    data["id"] = __opts__["id"]

                if tag:
                    log.info("script engine firing event with tag %s", tag)
                    fire_master(tag=tag, data=data)

            log.debug("Closing script with pid %d", proc.pid)
            proc.stdout.close()
            rc = proc.wait()
            if rc:
                raise subprocess.CalledProcessError(rc, cmd)
        except subprocess.CalledProcessError as e:
            log.error(e)
        finally:
            # BUG FIX: the original tested ``proc.poll is None`` -- the bound
            # method object, which is never None -- so a still-running child
            # was never terminated. Call poll() and also guard against Popen
            # having failed before ``proc`` was bound.
            if proc is not None and proc.poll() is None:
                proc.terminate()
        time.sleep(interval)
import logging
import salt.utils.event
log = logging.getLogger(__name__)
def send(tag, data=None):
    """
    Send an event with the given tag and data.

    This is useful for sending events directly to the master from the shell
    with salt-run. It is also quite useful for sending events in orchestration
    states where the ``fire_event`` requisite isn't sufficient because it does
    not support sending custom data with the event.

    Note that event tags will *not* be namespaced like events sent with the
    ``fire_event`` requisite! Whereas events produced from ``fire_event`` are
    prefixed with ``salt/state_result/<jid>/<minion_id>/<name>``, events sent
    using this runner module will have no such prefix. Make sure your reactors
    don't expect a prefix!

    :param tag: the tag to send with the event
    :param data: an optional dictionary of data to send with the event

    CLI Example:

    .. code-block:: bash

        salt-run event.send my/custom/event '{"foo": "bar"}'

    Orchestration Example:

    .. code-block:: yaml

        send_success_event:
          salt.runner:
            - name: event.send
            - tag: my_event/success
            - data:
                foo: bar
    """
    data = data or {}
    # Use the context manager so the event bus connection is torn down after
    # firing, instead of leaking it (matches usage elsewhere in the codebase,
    # e.g. the http_logstash engine).
    with salt.utils.event.get_master_event(
        __opts__, __opts__["sock_dir"], listen=False
    ) as event:
        return event.fire_event(data, tag)
import logging
import salt.utils.json
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.utils.mattermost
# pylint: enable=import-error,no-name-in-module
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
__virtualname__ = "mattermost"
def __virtual__():
    """
    Return virtual name of the module.

    :return: The virtual name of the module.
    """
    # Always loadable; network access is deferred to the individual functions.
    return __virtualname__
def _get_hook():
    """Return the configured Mattermost hook string.

    Checks ``mattermost.hook`` then ``mattermost:hook``; raises
    SaltInvocationError when neither is set.
    """
    for key in ("mattermost.hook", "mattermost:hook"):
        hook = __salt__["config.get"](key)
        if hook:
            return hook
    raise SaltInvocationError("No Mattermost Hook found")
def _get_api_url():
    """Return the configured Mattermost API URL string.

    Checks ``mattermost.api_url`` then ``mattermost:api_url``; raises
    SaltInvocationError when neither is set.
    """
    for key in ("mattermost.api_url", "mattermost:api_url"):
        api_url = __salt__["config.get"](key)
        if api_url:
            return api_url
    raise SaltInvocationError("No Mattermost API URL found")
def _get_channel():
    """Return the configured Mattermost channel (may be falsy if unset)."""
    return __salt__["config.get"]("mattermost.channel") or __salt__["config.get"](
        "mattermost:channel"
    )
def _get_username():
    """Return the configured Mattermost username (may be falsy if unset)."""
    return __salt__["config.get"]("mattermost.username") or __salt__["config.get"](
        "mattermost:username"
    )
def post_message(message, channel=None, username=None, api_url=None, hook=None):
    """
    Send a message to a Mattermost channel.

    :param message: The message to send to the Mattermost channel.
    :param channel: The channel name, either will work.
    :param username: The username of the poster.
    :param api_url: The Mattermost api url, if not specified in the configuration.
    :param hook: The Mattermost hook, if not specified in the configuration.
    :return: Boolean if message was sent successfully.

    CLI Example:

    .. code-block:: bash

        salt-run mattermost.post_message message='Build is done'
    """
    if not api_url:
        api_url = _get_api_url()
    if not hook:
        hook = _get_hook()
    if not username:
        username = _get_username()
    if not channel:
        channel = _get_channel()
    if not message:
        # BUG FIX: previously only logged and fell through, which raised a
        # TypeError below when concatenating a None message. Fail early.
        log.error("message is a required option.")
        return False

    parameters = {}
    if channel:
        parameters["channel"] = channel
    if username:
        parameters["username"] = username
    parameters["text"] = "```" + message + "```"  # pre-formatted, fixed-width text
    log.debug("Parameters: %s", parameters)

    data = salt.utils.json.dumps(parameters)
    result = salt.utils.mattermost.query(
        api_url=api_url, hook=hook, data="payload={}".format(data)
    )
    if result:
        return True
    return result
def post_event(event, channel=None, username=None, api_url=None, hook=None):
    """
    Send an event to a Mattermost channel.

    :param event: The event to send to the Mattermost channel.
    :param channel: The channel name, either will work.
    :param username: The username of the poster.
    :param api_url: The Mattermost api url, if not specified in the configuration.
    :param hook: The Mattermost hook, if not specified in the configuration.
    :return: Boolean if message was sent successfully.
    """
    if not api_url:
        api_url = _get_api_url()
    if not hook:
        hook = _get_hook()
    if not username:
        username = _get_username()
    if not channel:
        channel = _get_channel()
    if not event:
        # BUG FIX: previously only logged and fell through, which raised a
        # TypeError below when subscripting a None event. Fail early.
        log.error("message is a required option.")
        return False

    log.debug("Event: %s", event)
    log.debug("Event data: %s", event["data"])
    # Flatten the event into a plain-text "key: value" listing for the post.
    message = "tag: {}\r\n".format(event["tag"])
    for key, value in event["data"].items():
        message += "{}: {}\r\n".format(key, value)

    result = post_message(
        message, channel=channel, username=username, api_url=api_url, hook=hook
    )
    return bool(result)
import salt.utils.nacl
__virtualname__ = "nacl"
def __virtual__():
    # libnacl is not usable under FIPS mode, so refuse to load there.
    if __opts__["fips_mode"] is True:
        return False, "nacl runner not available in FIPS mode"
    # Otherwise defer to the shared dependency check for libnacl.
    return salt.utils.nacl.check_requirements()
def keygen(sk_file=None, pk_file=None, **kwargs):
    """
    Use libnacl to generate a keypair.

    With no ``sk_file``, a keypair is returned directly. With only
    ``sk_file`` given, ``pk_file`` defaults to the same name with a ``.pub``
    suffix. If ``sk_file`` already exists but ``pk_file`` does not, the
    public key is generated from the existing secret key.

    CLI Examples:

    .. code-block:: bash

        salt-run nacl.keygen
        salt-run nacl.keygen sk_file=/etc/salt/pki/master/nacl
        salt-run nacl.keygen sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub
    """
    kwargs.update(opts=__opts__)
    return salt.utils.nacl.keygen(sk_file, pk_file, **kwargs)
def enc(data, **kwargs):
    """
    Encrypt *data*; alias to ``{box_type}_encrypt``.

    box_type: secretbox, sealedbox(default)
    """
    kwargs.update(opts=__opts__)
    return salt.utils.nacl.enc(data, **kwargs)
def enc_file(name, out=None, **kwargs):
    """
    Encrypt the file *name* and return its encrypted contents.

    An optional output file may be given with ``out``. ``name`` can be a
    local file, or (outside ``salt-run``) a url like ``salt://``, ``https://``.

    CLI Examples:

    .. code-block:: bash

        salt-run nacl.enc_file name=/tmp/id_rsa
        salt-run nacl.enc_file name=/tmp/id_rsa box_type=secretbox \
            sk_file=/etc/salt/pki/master/nacl.pub
    """
    kwargs.update(opts=__opts__)
    return salt.utils.nacl.enc_file(name, out, **kwargs)
def dec(data, **kwargs):
    """
    Decrypt *data*; alias to ``{box_type}_decrypt``.

    box_type: secretbox, sealedbox(default)
    """
    kwargs.update(opts=__opts__)
    return salt.utils.nacl.dec(data, **kwargs)
def dec_file(name, out=None, **kwargs):
    """
    Decrypt the file *name* and return its decrypted contents.

    An optional output file may be given with ``out``. ``name`` can be a
    local file, or (outside ``salt-run``) a url like ``salt://``, ``https://``.

    CLI Examples:

    .. code-block:: bash

        salt-run nacl.dec_file name=/tmp/id_rsa.nacl
        salt-run nacl.dec_file name=/tmp/id_rsa.nacl box_type=secretbox \
            sk_file=/etc/salt/pki/master/nacl.pub
    """
    kwargs.update(opts=__opts__)
    return salt.utils.nacl.dec_file(name, out, **kwargs)
def sealedbox_encrypt(data, **kwargs):
    """
    Encrypt *data* with a public key generated by ``nacl.keygen``.

    Only the holder of the secret key can decrypt, via
    ``nacl.sealedbox_decrypt``.

    CLI Examples:

    .. code-block:: bash

        salt-run nacl.sealedbox_encrypt datatoenc
    """
    kwargs.update(opts=__opts__)
    return salt.utils.nacl.sealedbox_encrypt(data, **kwargs)
def sealedbox_decrypt(data, **kwargs):
    """
    Decrypt *data* (produced by ``nacl.sealedbox_encrypt``) using the
    secret key.

    CLI Examples:

    .. code-block:: bash

        salt-run nacl.sealedbox_decrypt pEXHQM6cuaF7A=
        salt-run nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl
        salt-run nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo='
    """
    kwargs.update(opts=__opts__)
    return salt.utils.nacl.sealedbox_decrypt(data, **kwargs)
def secretbox_encrypt(data, **kwargs):
    """
    Encrypt *data* with a secret key generated by ``nacl.keygen``.

    The same secret key decrypts the data via ``nacl.secretbox_decrypt``.

    CLI Examples:

    .. code-block:: bash

        salt-run nacl.secretbox_encrypt datatoenc
        salt-run nacl.secretbox_encrypt datatoenc sk_file=/etc/salt/pki/master/nacl
        salt-run nacl.secretbox_encrypt datatoenc sk='YmFkcGFzcwo='
    """
    kwargs.update(opts=__opts__)
    return salt.utils.nacl.secretbox_encrypt(data, **kwargs)
def secretbox_decrypt(data, **kwargs):
    """
    Decrypt *data* (produced by ``nacl.secretbox_encrypt``) using the secret
    key generated by ``nacl.keygen``.

    CLI Examples:

    .. code-block:: bash

        salt-run nacl.secretbox_decrypt pEXHQM6cuaF7A=
        salt-run nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl
        salt-run nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo='
    """
    kwargs.update(opts=__opts__)
    return salt.utils.nacl.secretbox_decrypt(data, **kwargs)
import copy
import logging
import salt.client
import salt.loader
import salt.pillar
import salt.utils.args
from salt.exceptions import SaltClientError
log = logging.getLogger(__name__)
def cmd(fun, *args, **kwargs):
    """
    Execute ``fun`` with the given ``args`` and ``kwargs`` on the master.
    Parameter ``fun`` should be the string :ref:`name <all-salt.modules>` of
    the execution module function to call.

    .. versionchanged:: 2018.3.0
        Added ``with_pillar`` argument

    .. note::
        Execution modules are loaded *every time* this function is called,
        and custom execution modules must be synced to the master with
        :py:func:`salt-run saltutil.sync_modules
        <salt.runners.saltutil.sync_modules>` before they are available.

    with_pillar : False
        If ``True``, pillar data will be compiled for the master. The default
        master ``id`` in the pillar top file is ``<hostname>_master`` unless
        overridden in the master config.

    CLI Example:

    .. code-block:: bash

        salt-run salt.cmd test.ping
        salt-run salt.cmd test.arg 1 2 3 a=1
        salt-run salt.cmd mymod.myfunc with_pillar=True
    """
    log.debug("Called salt.cmd runner with minion function %s", fun)

    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    with_pillar = kwargs.pop("with_pillar", False)

    # Build a private copy of the master opts with freshly-loaded grains.
    opts = copy.deepcopy(__opts__)
    opts["grains"] = salt.loader.grains(opts)
    if not with_pillar:
        opts["pillar"] = {}
    else:
        opts["pillar"] = salt.pillar.get_pillar(
            opts,
            opts["grains"],
            opts["id"],
            saltenv=opts["saltenv"],
            pillarenv=opts.get("pillarenv"),
        ).compile_pillar()

    functions = salt.loader.minion_mods(
        opts, utils=salt.loader.utils(opts), context=__context__
    )
    if fun not in functions:
        return "'{}' is not available.".format(fun)
    return functions[fun](*args, **kwargs)
def execute(
    tgt,
    fun,
    arg=(),
    timeout=None,
    tgt_type="glob",
    ret="",
    jid="",
    kwarg=None,
    **kwargs
):
    """
    .. versionadded:: 2017.7.0

    Execute ``fun`` on all minions matched by ``tgt`` and ``tgt_type``.
    Parameter ``fun`` is the name of the execution module function to call.

    This function is mainly a helper for other runner modules, so a runner
    can execute a function on arbitrary groups of minions without redundant
    client-handling code:

    .. code-block:: python

        ret1 = __salt__['salt.execute']('*', 'mod.fun')
        ret2 = __salt__['salt.execute']('my_nodegroup', 'mod2.fun2', tgt_type='nodegroup')

    It can also be used to schedule jobs directly on the master, e.g. with a
    ``schedule`` entry whose ``function`` is ``salt.execute``.

    Returns the aggregated minion returns, or ``{}`` on client error.
    """
    with salt.client.get_local_client(__opts__["conf_file"]) as client:
        try:
            return client.cmd(
                tgt,
                fun,
                arg=arg,
                # Fall back to the master's configured timeout when unset.
                timeout=timeout or __opts__["timeout"],
                tgt_type=tgt_type,
                ret=ret,
                jid=jid,
                kwarg=kwarg,
                **kwargs
            )
        except SaltClientError as err:
            log.error("Error while executing %s on %s (%s)", fun, tgt, tgt_type)
            log.error(err)
    return {}
import logging
import salt.utils.http
log = logging.getLogger(__name__)
def query(url, output=True, **kwargs):
    """
    Query a resource and decode the return data.

    Passes through all the parameters described in the
    :py:func:`utils.http.query function <salt.utils.http.query>`.

    CLI Example:

    .. code-block:: bash

        salt-run http.query http://somelink.com/
        salt-run http.query http://somelink.com/ method=POST \
            params='key1=val1&key2=val2'
        salt-run http.query http://somelink.com/ method=POST \
            data='<xml>somecontent</xml>'
    """
    if output is not True:
        log.warning("Output option has been deprecated. Please use --quiet.")
    # Runner queries originate from the master unless the caller says otherwise.
    kwargs.setdefault("node", "master")

    opts = __opts__.copy()
    opts.update(kwargs.pop("opts", {}))
    return salt.utils.http.query(url=url, opts=opts, **kwargs)
def update_ca_bundle(target=None, source=None, merge_files=None):
    """
    Update the local CA bundle file from a URL

    .. versionadded:: 2015.5.0

    CLI Example:

    .. code-block:: bash

        salt-run http.update_ca_bundle
        salt-run http.update_ca_bundle target=/path/to/cacerts.pem
        salt-run http.update_ca_bundle source=https://example.com/cacerts.pem

    If the ``target`` is not specified, it will be pulled from the ``ca_cert``
    configuration variable available to the master. If it cannot be found there,
    it will be placed at ``<<FILE_ROOTS>>/cacerts.pem``.

    If the ``source`` is not specified, it will be pulled from the
    ``ca_cert_url`` configuration variable available to the master. If it cannot
    be found, it will be downloaded from the cURL website, using an http (not
    https) URL. USING THE DEFAULT URL SHOULD BE AVOIDED!

    ``merge_files`` may also be specified, which includes a string or list of
    strings representing a file or files to be appended to the end of the CA
    bundle, once it is downloaded.

    CLI Example:

    .. code-block:: bash

        salt-run http.update_ca_bundle merge_files=/path/to/mycert.pem
    """
    # All defaulting logic (ca_cert / ca_cert_url lookups) lives in the util.
    return salt.utils.http.update_ca_bundle(target, source, __opts__, merge_files)
import salt.utils.functools
import salt.utils.json
import salt.utils.pagerduty
import salt.utils.yaml
def __virtual__():
    """
    No dependencies outside of what Salt itself requires
    """
    # Always loadable; the HTTP work happens in salt.utils.pagerduty.
    return True
def list_services(profile=None, api_key=None):
    """
    List services belonging to this account

    CLI Example:

        salt-run pagerduty.list_services my-pagerduty-account
    """
    profile_dict = __salt__["config.option"](profile)
    return salt.utils.pagerduty.list_items(
        "services", "name", profile_dict, api_key, opts=__opts__
    )
def list_incidents(profile=None, api_key=None):
    """
    List incidents belonging to this account

    CLI Example:

        salt-run pagerduty.list_incidents my-pagerduty-account
    """
    profile_dict = __salt__["config.option"](profile)
    return salt.utils.pagerduty.list_items(
        "incidents", "id", profile_dict, api_key, opts=__opts__
    )
def list_users(profile=None, api_key=None):
    """
    List users belonging to this account

    CLI Example:

        salt-run pagerduty.list_users my-pagerduty-account
    """
    profile_dict = __salt__["config.option"](profile)
    return salt.utils.pagerduty.list_items(
        "users", "id", profile_dict, api_key, opts=__opts__
    )
def list_schedules(profile=None, api_key=None):
    """
    List schedules belonging to this account

    CLI Example:

        salt-run pagerduty.list_schedules my-pagerduty-account
    """
    profile_dict = __salt__["config.option"](profile)
    return salt.utils.pagerduty.list_items(
        "schedules", "id", profile_dict, api_key, opts=__opts__
    )
def list_windows(profile=None, api_key=None):
    """
    List maintenance windows belonging to this account

    CLI Example:

        salt-run pagerduty.list_windows my-pagerduty-account
        salt-run pagerduty.list_maintenance_windows my-pagerduty-account
    """
    profile_dict = __salt__["config.option"](profile)
    return salt.utils.pagerduty.list_items(
        "maintenance_windows", "id", profile_dict, api_key, opts=__opts__
    )


# Longer name kept as an alias for API consistency
list_maintenance_windows = salt.utils.functools.alias_function(
    list_windows, "list_maintenance_windows"
)
def list_policies(profile=None, api_key=None):
    """
    List escalation policies belonging to this account

    CLI Example:

        salt-run pagerduty.list_policies my-pagerduty-account
        salt-run pagerduty.list_escalation_policies my-pagerduty-account
    """
    profile_dict = __salt__["config.option"](profile)
    return salt.utils.pagerduty.list_items(
        "escalation_policies", "id", profile_dict, api_key, opts=__opts__
    )


# Longer name kept as an alias for API consistency
list_escalation_policies = salt.utils.functools.alias_function(
    list_policies, "list_escalation_policies"
)
def create_event(
    service_key=None, description=None, details=None, incident_key=None, profile=None
):
    """
    Create an event in PagerDuty. Designed for use in states.

    CLI Example:

    .. code-block:: yaml

        salt-run pagerduty.create_event <service_key> <description> <details> \
        profile=my-pagerduty-account

    The following parameters are required:

    service_key
        This key can be found by using pagerduty.list_services.

    description
        This is a short description of the event.

    details
        This can be a more detailed description of the event; a YAML string
        is parsed, and a plain string is wrapped as ``{"details": ...}``.

    profile
        This refers to the configuration profile to use to connect to the
        PagerDuty service.
    """
    trigger_url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"

    if isinstance(details, str):
        details = salt.utils.yaml.safe_load(details)
        # YAML may still yield a bare scalar; normalize it to a mapping.
        if isinstance(details, str):
            details = {"details": details}

    response = salt.utils.pagerduty.query(
        method="POST",
        profile_dict=__salt__["config.option"](profile),
        api_key=service_key,
        data={
            "service_key": service_key,
            "incident_key": incident_key,
            "event_type": "trigger",
            "description": description,
            "details": details,
        },
        url=trigger_url,
        opts=__opts__,
    )
    return salt.utils.json.loads(response)
import fnmatch
import logging
import os
import salt.cache
import salt.config
import salt.fileserver.gitfs
import salt.payload
import salt.pillar.git_pillar
import salt.runners.winrepo
import salt.utils.args
import salt.utils.gitfs
import salt.utils.master
from salt.exceptions import SaltInvocationError
from salt.fileserver import clear_lock as _clear_lock
log = logging.getLogger(__name__)
__func_alias__ = {
"list_": "list",
}
def grains(tgt, tgt_type="glob", **kwargs):
    """
    Return cached grains of the targeted minions.

    tgt
        Target to match minion ids. Required to display cached grains
        (since 2017.7.5/2018.3.0).

    tgt_type
        The type of targeting to use for matching, such as ``glob``,
        ``list``, etc. (Renamed from ``expr_form`` in 2017.7.0.)

    CLI Example:

    .. code-block:: bash

        salt-run cache.grains '*'
    """
    util = salt.utils.master.MasterPillarUtil(
        tgt, tgt_type, use_cached_grains=True, grains_fallback=False, opts=__opts__
    )
    return util.get_minion_grains()
def pillar(tgt=None, tgt_type="glob", **kwargs):
    """
    Return cached pillars of the targeted minions if ``tgt`` is set;
    otherwise return cached pillars for all minions.

    (``expr_form`` was renamed to ``tgt_type`` in 2017.7.0.)

    CLI Example:

    .. code-block:: bash

        salt-run cache.pillar
    """
    util = salt.utils.master.MasterPillarUtil(
        tgt,
        tgt_type,
        use_cached_grains=True,
        grains_fallback=False,
        use_cached_pillar=True,
        pillar_fallback=False,
        opts=__opts__,
    )
    return util.get_minion_pillar()
def mine(tgt=None, tgt_type="glob", **kwargs):
    """
    Return cached mine data of the targeted minions.

    (``expr_form`` was renamed to ``tgt_type`` in 2017.7.0.)

    CLI Example:

    .. code-block:: bash

        salt-run cache.mine
    """
    util = salt.utils.master.MasterPillarUtil(
        tgt,
        tgt_type,
        use_cached_grains=False,
        grains_fallback=False,
        use_cached_pillar=False,
        pillar_fallback=False,
        opts=__opts__,
    )
    return util.get_cached_mine_data()
def _clear_cache(
    tgt=None,
    tgt_type="glob",
    clear_pillar_flag=False,
    clear_grains_flag=False,
    clear_mine_flag=False,
    clear_mine_func_flag=None,
):
    """Clear cached data/files for the targeted minions.

    Returns False when no target is given; otherwise the result of
    ``clear_cached_minion_data``.
    """
    # Refuse to clear anything without an explicit target.
    if tgt is None:
        return False
    util = salt.utils.master.MasterPillarUtil(
        tgt,
        tgt_type,
        use_cached_grains=True,
        grains_fallback=False,
        use_cached_pillar=True,
        pillar_fallback=False,
        opts=__opts__,
    )
    return util.clear_cached_minion_data(
        clear_pillar=clear_pillar_flag,
        clear_grains=clear_grains_flag,
        clear_mine=clear_mine_flag,
        clear_mine_func=clear_mine_func_flag,
    )
def clear_pillar(tgt=None, tgt_type="glob"):
    """
    Clear the cached pillar data of the targeted minions.

    (``expr_form`` was renamed to ``tgt_type`` in 2017.7.0.)

    CLI Example:

    .. code-block:: bash

        salt-run cache.clear_pillar
    """
    return _clear_cache(tgt, tgt_type, clear_pillar_flag=True)
def clear_grains(tgt=None, tgt_type="glob"):
    """
    Clear the cached grains data of the targeted minions.

    (``expr_form`` was renamed to ``tgt_type`` in 2017.7.0.)

    CLI Example:

    .. code-block:: bash

        salt-run cache.clear_grains
    """
    return _clear_cache(tgt, tgt_type, clear_grains_flag=True)
def clear_mine(tgt=None, tgt_type="glob"):
    """
    Clear the cached mine data of the targeted minions.

    (``expr_form`` was renamed to ``tgt_type`` in 2017.7.0.)

    CLI Example:

    .. code-block:: bash

        salt-run cache.clear_mine
    """
    return _clear_cache(tgt, tgt_type, clear_mine_flag=True)
def clear_mine_func(tgt=None, tgt_type="glob", clear_mine_func_flag=None):
    """
    Clear the cached mine function data of the targeted minions.

    (``expr_form`` was renamed to ``tgt_type`` in 2017.7.0.)

    CLI Example:

    .. code-block:: bash

        salt-run cache.clear_mine_func tgt='*' clear_mine_func_flag='network.interfaces'
    """
    return _clear_cache(tgt, tgt_type, clear_mine_func_flag=clear_mine_func_flag)
def clear_all(tgt=None, tgt_type="glob"):
    """
    Clear the cached pillar, grains, and mine data of the targeted minions.

    (``expr_form`` was renamed to ``tgt_type`` in 2017.7.0.)

    CLI Example:

    .. code-block:: bash

        salt-run cache.clear_all
    """
    return _clear_cache(
        tgt,
        tgt_type,
        clear_pillar_flag=True,
        clear_grains_flag=True,
        clear_mine_flag=True,
    )
def clear_git_lock(role, remote=None, **kwargs):
    """
    .. versionadded:: 2015.8.2

    Remove the update locks for Salt components (gitfs, git_pillar, winrepo)
    which use gitfs backend code from salt.utils.gitfs.

    .. note::
        Running :py:func:`cache.clear_all <salt.runners.cache.clear_all>` will
        not include this function as it does for pillar, grains, and mine.
        Additionally, executing this function with a ``role`` of ``gitfs`` is
        equivalent to running ``salt-run fileserver.clear_lock backend=git``.

    role
        Which type of lock to remove (``gitfs``, ``git_pillar``, or
        ``winrepo``)

    remote
        If specified, then any remotes which contain the passed string will
        have their lock cleared. For example, a ``remote`` value of **github**
        will remove the lock from all github.com remotes.

    type : update,checkout,mountpoint
        The types of lock to clear. Can be one or more of ``update``,
        ``checkout``, and ``mountpoint``, and can be passed either as a
        comma-separated or Python list.

        .. versionadded:: 2015.8.8
        .. versionchanged:: 2018.3.0
            ``mountpoint`` lock type added

    CLI Examples:

    .. code-block:: bash

        salt-run cache.clear_git_lock gitfs
        salt-run cache.clear_git_lock git_pillar
        salt-run cache.clear_git_lock git_pillar type=update
        salt-run cache.clear_git_lock git_pillar type=update,checkout
        salt-run cache.clear_git_lock git_pillar type='["update", "mountpoint"]'
    """
    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    # Accept both comma-separated strings and lists for the lock types.
    type_ = salt.utils.args.split_input(
        kwargs.pop("type", ["update", "checkout", "mountpoint"])
    )
    if kwargs:
        salt.utils.args.invalid_kwargs(kwargs)

    if role == "gitfs":
        git_objects = [
            salt.utils.gitfs.GitFS(
                __opts__,
                __opts__["gitfs_remotes"],
                per_remote_overrides=salt.fileserver.gitfs.PER_REMOTE_OVERRIDES,
                per_remote_only=salt.fileserver.gitfs.PER_REMOTE_ONLY,
            )
        ]
    elif role == "git_pillar":
        # Build one GitPillar object per configured git ext_pillar entry.
        git_objects = []
        for ext_pillar in __opts__["ext_pillar"]:
            key = next(iter(ext_pillar))
            if key == "git":
                if not isinstance(ext_pillar["git"], list):
                    # Malformed entry; skip rather than fail the whole run.
                    continue
                obj = salt.utils.gitfs.GitPillar(
                    __opts__,
                    ext_pillar["git"],
                    per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
                    per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
                    global_only=salt.pillar.git_pillar.GLOBAL_ONLY,
                )
                git_objects.append(obj)
    elif role == "winrepo":
        winrepo_dir = __opts__["winrepo_dir"]
        winrepo_remotes = __opts__["winrepo_remotes"]

        git_objects = []
        # winrepo has legacy and "ng" remote/dir pairs; cover both.
        for remotes, base_dir in (
            (winrepo_remotes, winrepo_dir),
            (__opts__["winrepo_remotes_ng"], __opts__["winrepo_dir_ng"]),
        ):
            obj = salt.utils.gitfs.WinRepo(
                __opts__,
                remotes,
                per_remote_overrides=salt.runners.winrepo.PER_REMOTE_OVERRIDES,
                per_remote_only=salt.runners.winrepo.PER_REMOTE_ONLY,
                global_only=salt.runners.winrepo.GLOBAL_ONLY,
                cache_root=base_dir,
            )
            git_objects.append(obj)
    else:
        raise SaltInvocationError("Invalid role '{}'".format(role))

    ret = {}
    for obj in git_objects:
        for lock_type in type_:
            cleared, errors = _clear_lock(
                obj.clear_lock, role, remote=remote, lock_type=lock_type
            )
            if cleared:
                ret.setdefault("cleared", []).extend(cleared)
            if errors:
                ret.setdefault("errors", []).extend(errors)
    if not ret:
        return "No locks were removed"
    return ret
def cloud(tgt, provider=None):
    """
    Return cloud cache data for target.

    .. note:: Only works with glob matching

    tgt
        Glob Target to match minion ids

    provider
        Cloud Provider

    CLI Example:

    .. code-block:: bash

        salt-run cache.cloud 'salt*'
        salt-run cache.cloud glance.example.org provider=openstack
    """
    if not isinstance(tgt, str):
        return {}

    cloud_opts = salt.config.cloud_config(
        os.path.join(os.path.dirname(__opts__["conf_file"]), "cloud")
    )
    # Without update_cachedir there is no cloud cache to read from.
    if not cloud_opts.get("update_cachedir"):
        return {}

    cloud_cache = __utils__["cloud.list_cache_nodes_full"](
        opts=cloud_opts, provider=provider
    )
    if cloud_cache is None:
        return {}

    matches = {}
    for driver, providers in cloud_cache.items():
        # NOTE: renamed from ``provider`` to avoid shadowing the parameter.
        for prov_name, servers in providers.items():
            for name, data in servers.items():
                if fnmatch.fnmatch(name, tgt):
                    matches[name] = data
                    matches[name]["provider"] = prov_name
    return matches
def store(bank, key, data, cachedir=None):
    """
    Store data in the specified cache bank under the given key.

    CLI Example:

    .. code-block:: bash

        salt-run cache.store mycache mykey 'The time has come the walrus said'
    """
    if cachedir is None:
        cachedir = __opts__["cachedir"]

    try:
        cache = salt.cache.Cache(__opts__, cachedir=cachedir)
    except TypeError:
        # Cache implementations that do not accept ``cachedir``.
        cache = salt.cache.Cache(__opts__)
    return cache.store(bank, key, data)
def list_(bank, cachedir=None):
    """
    List the entries stored in the specified cache bank.

    CLI Example:

    .. code-block:: bash

        salt-run cache.list cloud/active/ec2/myec2 cachedir=/var/cache/salt/
    """
    target_dir = __opts__["cachedir"] if cachedir is None else cachedir
    try:
        cache_obj = salt.cache.Cache(__opts__, cachedir=target_dir)
    except TypeError:
        # Fall back for cache drivers whose constructor lacks ``cachedir``
        cache_obj = salt.cache.Cache(__opts__)
    return cache_obj.list(bank)
def fetch(bank, key, cachedir=None):
    """
    Fetch the data stored under ``key`` in a salt.cache bank.

    CLI Example:

    .. code-block:: bash

        salt-run cache.fetch cloud/active/ec2/myec2 myminion cachedir=/var/cache/salt/
    """
    target_dir = __opts__["cachedir"] if cachedir is None else cachedir
    try:
        cache_obj = salt.cache.Cache(__opts__, cachedir=target_dir)
    except TypeError:
        # Fall back for cache drivers whose constructor lacks ``cachedir``
        cache_obj = salt.cache.Cache(__opts__)
    return cache_obj.fetch(bank, key)
def flush(bank, key=None, cachedir=None):
    """
    Remove the key from the cache bank with all the key content. If no key is
    specified, remove the entire bank with all keys and sub-banks inside.

    CLI Examples:

    .. code-block:: bash

        salt-run cache.flush cloud/active/ec2/myec2 cachedir=/var/cache/salt/
        salt-run cache.flush cloud/active/ec2/myec2 myminion cachedir=/var/cache/salt/
    """
    target_dir = __opts__["cachedir"] if cachedir is None else cachedir
    try:
        cache_obj = salt.cache.Cache(__opts__, cachedir=target_dir)
    except TypeError:
        # Fall back for cache drivers whose constructor lacks ``cachedir``
        cache_obj = salt.cache.Cache(__opts__)
    return cache_obj.flush(bank, key)
import salt.fileserver
def envs(backend=None, sources=False):
    """
    List the environments available from the master's fileserver. With no
    ``backend`` argument, environments from every configured backend are
    returned.

    backend
        Restrict the lookup to a subset of the enabled fileserver backends.

        .. versionchanged:: 2015.5.0
            Backends may be passed as a comma-separated list. When *all*
            entries are prefixed with a minus sign (``-``) they are excluded
            from the enabled backends; in a mixed list the minus-prefixed
            entries are disregarded.

    sources
        Also report the source of each environment.

    CLI Example:

    .. code-block:: bash

        salt-run fileserver.envs
        salt-run fileserver.envs backend=roots,git
        salt-run fileserver.envs git
    """
    fs = salt.fileserver.Fileserver(__opts__)
    return sorted(fs.envs(back=backend, sources=sources))
def clear_file_list_cache(saltenv=None, backend=None):
    """
    .. versionadded:: 2016.11.0

    The Salt fileserver caches the files/directories/symlinks for each
    fileserver backend and environment as they are requested. This is done to
    help the fileserver scale better. Without this caching, when
    hundreds/thousands of minions simultaneously ask the master what files are
    available, this would cause the master's CPU load to spike as it obtains
    the same information separately for each minion.

    saltenv
        By default, this runner will clear the file list caches for all
        environments. This argument allows for a list of environments to be
        passed, to clear more selectively. This list can be passed either as a
        comma-separated string, or a Python list.

    backend
        Similar to the ``saltenv`` parameter, this argument will restrict the
        cache clearing to specific fileserver backends (the default behavior is
        to clear from all enabled fileserver backends). This list can be passed
        either as a comma-separated string, or a Python list.

    .. note::
        The maximum age for the cached file lists (i.e. the age at which the
        cache will be disregarded and rebuilt) is defined by the
        :conf_master:`fileserver_list_cache_time` configuration parameter.

    Since the ability to clear these caches is often required by users writing
    custom runners which add/remove files, this runner can easily be called
    from within a custom runner using any of the following examples:

    .. code-block:: python

        # Clear all file list caches
        __salt__['fileserver.clear_file_list_cache']()

        # Clear just the 'base' saltenv file list caches
        __salt__['fileserver.clear_file_list_cache'](saltenv='base')

        # Clear just the 'base' saltenv file list caches from just the 'roots'
        # fileserver backend
        __salt__['fileserver.clear_file_list_cache'](saltenv='base', backend='roots')

        # Clear all file list caches from the 'roots' fileserver backend
        __salt__['fileserver.clear_file_list_cache'](backend='roots')

    .. note::
        In runners, the ``__salt__`` dictionary will likely be renamed to
        ``__runner__`` in a future Salt release to distinguish runner functions
        from remote execution functions. See `this GitHub issue`_ for
        discussion/updates on this.

    .. _`this GitHub issue`: https://github.com/saltstack/salt/issues/34958

    If using Salt's Python API (not a runner), the following examples are
    equivalent to the ones above:

    .. code-block:: python

        import salt.config
        import salt.runner

        opts = salt.config.master_config('/etc/salt/master')
        opts['fun'] = 'fileserver.clear_file_list_cache'

        # Clear all file list_caches
        opts['arg'] = []  # No arguments
        runner = salt.runner.Runner(opts)
        cleared = runner.run()

        # Clear just the 'base' saltenv file list caches
        opts['arg'] = ['base', None]
        runner = salt.runner.Runner(opts)
        cleared = runner.run()

        # Clear just the 'base' saltenv file list caches from just the 'roots'
        # fileserver backend
        opts['arg'] = ['base', 'roots']
        runner = salt.runner.Runner(opts)
        cleared = runner.run()

        # Clear all file list caches from the 'roots' fileserver backend
        opts['arg'] = [None, 'roots']
        runner = salt.runner.Runner(opts)
        cleared = runner.run()

    This function will return a dictionary showing a list of environments which
    were cleared for each backend. An empty return dictionary means that no
    changes were made.

    CLI Examples:

    .. code-block:: bash

        # Clear all file list caches
        salt-run fileserver.clear_file_list_cache

        # Clear just the 'base' saltenv file list caches
        salt-run fileserver.clear_file_list_cache saltenv=base

        # Clear just the 'base' saltenv file list caches from just the 'roots'
        # fileserver backend
        salt-run fileserver.clear_file_list_cache saltenv=base backend=roots

        # Clear all file list caches from the 'roots' fileserver backend
        salt-run fileserver.clear_file_list_cache backend=roots
    """
    fileserver = salt.fileserver.Fileserver(__opts__)
    load = {"saltenv": saltenv, "fsbackend": backend}
    return fileserver.clear_file_list_cache(load=load)
def file_list(saltenv="base", backend=None):
    """
    Return a list of files from the salt fileserver

    saltenv : base
        The salt fileserver environment to be listed

    backend
        Narrow fileserver backends to a subset of the enabled ones. If all
        passed backends start with a minus sign (``-``), then these backends
        will be excluded from the enabled backends. However, if there is a mix
        of backends with and without a minus sign (ex:
        ``backend=-roots,git``) then the ones starting with a minus sign will
        be disregarded.

        .. versionadded:: 2015.5.0

    .. note::
        Keep in mind that executing this function spawns a new process,
        separate from the master. This means that if the fileserver
        configuration has been changed in some way since the master has been
        restarted (e.g. if :conf_master:`fileserver_backend`,
        :conf_master:`gitfs_remotes`, :conf_master:`hgfs_remotes`, etc. have
        been updated), then the results of this runner will not accurately
        reflect what files are available to minions.

        When in doubt, use :py:func:`cp.list_master
        <salt.modules.cp.list_master>` to see what files the minion can see,
        and always remember to restart the salt-master daemon when updating
        the fileserver configuration.

    CLI Examples:

    .. code-block:: bash

        salt-run fileserver.file_list
        salt-run fileserver.file_list saltenv=prod
        salt-run fileserver.file_list saltenv=dev backend=git
        salt-run fileserver.file_list base hg,roots
        salt-run fileserver.file_list -git
    """
    fileserver = salt.fileserver.Fileserver(__opts__)
    load = {"saltenv": saltenv, "fsbackend": backend}
    return fileserver.file_list(load=load)
def symlink_list(saltenv="base", backend=None):
    """
    Return a list of symlinked files and dirs

    saltenv : base
        The salt fileserver environment to be listed

    backend
        Narrow fileserver backends to a subset of the enabled ones. If all
        passed backends start with a minus sign (``-``), then these backends
        will be excluded from the enabled backends. However, if there is a mix
        of backends with and without a minus sign (ex:
        ``backend=-roots,git``) then the ones starting with a minus sign will
        be disregarded.

        .. versionadded:: 2015.5.0

    .. note::
        Keep in mind that executing this function spawns a new process,
        separate from the master. This means that if the fileserver
        configuration has been changed in some way since the master has been
        restarted (e.g. if :conf_master:`fileserver_backend`,
        :conf_master:`gitfs_remotes`, :conf_master:`hgfs_remotes`, etc. have
        been updated), then the results of this runner will not accurately
        reflect what symlinks are available to minions.

        When in doubt, use :py:func:`cp.list_master_symlinks
        <salt.modules.cp.list_master_symlinks>` to see what symlinks the minion
        can see, and always remember to restart the salt-master daemon when
        updating the fileserver configuration.

    CLI Example:

    .. code-block:: bash

        salt-run fileserver.symlink_list
        salt-run fileserver.symlink_list saltenv=prod
        salt-run fileserver.symlink_list saltenv=dev backend=git
        salt-run fileserver.symlink_list base hg,roots
        salt-run fileserver.symlink_list -git
    """
    fileserver = salt.fileserver.Fileserver(__opts__)
    load = {"saltenv": saltenv, "fsbackend": backend}
    return fileserver.symlink_list(load=load)
def dir_list(saltenv="base", backend=None):
    """
    Return a list of directories in the given environment

    saltenv : base
        The salt fileserver environment to be listed

    backend
        Narrow fileserver backends to a subset of the enabled ones. If all
        passed backends start with a minus sign (``-``), then these backends
        will be excluded from the enabled backends. However, if there is a mix
        of backends with and without a minus sign (ex:
        ``backend=-roots,git``) then the ones starting with a minus sign will
        be disregarded.

        .. versionadded:: 2015.5.0

    .. note::
        Keep in mind that executing this function spawns a new process,
        separate from the master. This means that if the fileserver
        configuration has been changed in some way since the master has been
        restarted (e.g. if :conf_master:`fileserver_backend`,
        :conf_master:`gitfs_remotes`, :conf_master:`hgfs_remotes`, etc. have
        been updated), then the results of this runner will not accurately
        reflect what dirs are available to minions.

        When in doubt, use :py:func:`cp.list_master_dirs
        <salt.modules.cp.list_master_dirs>` to see what dirs the minion can see,
        and always remember to restart the salt-master daemon when updating
        the fileserver configuration.

    CLI Example:

    .. code-block:: bash

        salt-run fileserver.dir_list
        salt-run fileserver.dir_list saltenv=prod
        salt-run fileserver.dir_list saltenv=dev backend=git
        salt-run fileserver.dir_list base hg,roots
        salt-run fileserver.dir_list -git
    """
    fileserver = salt.fileserver.Fileserver(__opts__)
    load = {"saltenv": saltenv, "fsbackend": backend}
    return fileserver.dir_list(load=load)
def empty_dir_list(saltenv="base", backend=None):
    """
    .. versionadded:: 2015.5.0

    Return the empty directories present in the given environment.

    saltenv : base
        The fileserver environment to inspect.

    backend
        Restrict the listing to a subset of the enabled backends. Accepts a
        comma-separated list; when every entry is prefixed with a minus sign
        (``-``) those backends are excluded, while a mixed list ignores the
        minus-prefixed entries.

    .. note::
        Some backends (such as :mod:`git <salt.fileserver.gitfs>` and
        :mod:`hg <salt.fileserver.hgfs>`) do not support empty directories,
        so ``backend=git`` or ``backend=hg`` yields an empty list.

    CLI Example:

    .. code-block:: bash

        salt-run fileserver.empty_dir_list
        salt-run fileserver.empty_dir_list saltenv=prod
        salt-run fileserver.empty_dir_list backend=roots
    """
    payload = {"saltenv": saltenv, "fsbackend": backend}
    return salt.fileserver.Fileserver(__opts__).file_list_emptydirs(load=payload)
def update(backend=None, **kwargs):
    """
    Refresh the fileserver cache. With no ``backend`` argument, every
    configured backend is updated.

    backend
        Restrict the update to a subset of the enabled backends.

        .. versionchanged:: 2015.5.0
            Backends may be passed as a comma-separated list. When *all*
            entries are prefixed with a minus sign (``-``) they are excluded
            from the enabled backends; in a mixed list the minus-prefixed
            entries are disregarded.

    kwargs
        Extra arguments passed through to the backend(s). See example below.

    CLI Example:

    .. code-block:: bash

        salt-run fileserver.update
        salt-run fileserver.update backend=roots,git
        salt-run fileserver.update backend=git remotes=myrepo,yourrepo
    """
    salt.fileserver.Fileserver(__opts__).update(back=backend, **kwargs)
    return True
def clear_cache(backend=None):
    """
    .. versionadded:: 2015.5.0

    Clear the fileserver cache for the VCS fileserver backends (:mod:`git
    <salt.fileserver.gitfs>`, :mod:`hg <salt.fileserver.hgfs>`, :mod:`svn
    <salt.fileserver.svnfs>`). With no arguments, every enabled VCS backend
    is cleared; the ``backend`` argument narrows the operation.

    backend
        Only clear the cache for the specified backend(s). When all passed
        backends start with a minus sign (``-``) they are excluded from the
        enabled backends; in a mixed list the minus-prefixed entries are
        disregarded.

    CLI Example:

    .. code-block:: bash

        salt-run fileserver.clear_cache
        salt-run fileserver.clear_cache backend=git,hg
        salt-run fileserver.clear_cache hg
        salt-run fileserver.clear_cache -roots
    """
    cleared, errors = salt.fileserver.Fileserver(__opts__).clear_cache(back=backend)
    result = {
        label: data
        for label, data in (("cleared", cleared), ("errors", errors))
        if data
    }
    # Preserve the historical string return when nothing happened
    return result or "No cache was cleared"
def clear_lock(backend=None, remote=None):
    """
    .. versionadded:: 2015.5.0

    Remove fileserver update locks from the VCS fileserver backends
    (:mod:`git <salt.fileserver.gitfs>`, :mod:`hg <salt.fileserver.hgfs>`,
    :mod:`svn <salt.fileserver.svnfs>`). This should only be necessary when a
    fileserver update was interrupted and a remote is no longer updating
    (which produces a warning in the master's log). With no arguments, all
    update locks on all enabled VCS backends are removed.

    backend
        Only clear the update lock for the specified backend(s).

    remote
        If specified, only remotes containing this string have their lock
        cleared; e.g. ``remote=github`` matches all github.com remotes.

    CLI Example:

    .. code-block:: bash

        salt-run fileserver.clear_lock
        salt-run fileserver.clear_lock backend=git,hg
        salt-run fileserver.clear_lock backend=git remote=github
        salt-run fileserver.clear_lock remote=bitbucket
    """
    cleared, errors = salt.fileserver.Fileserver(__opts__).clear_lock(
        back=backend, remote=remote
    )
    result = {
        label: data
        for label, data in (("cleared", cleared), ("errors", errors))
        if data
    }
    # Preserve the historical string return when nothing happened
    return result or "No locks were removed"
def lock(backend=None, remote=None):
    """
    .. versionadded:: 2015.5.0

    Set a fileserver update lock for the VCS fileserver backends (:mod:`git
    <salt.fileserver.gitfs>`, :mod:`hg <salt.fileserver.hgfs>`, :mod:`svn
    <salt.fileserver.svnfs>`).

    .. note::
        This will only operate on enabled backends (those configured in
        :conf_master:`fileserver_backend`).

    backend
        Only set the update lock for the specified backend(s).

    remote
        If not None, only remotes containing this string are locked; e.g. a
        ``remote`` value of ``*github.com*`` targets all github.com remotes.

    CLI Example:

    .. code-block:: bash

        salt-run fileserver.lock
        salt-run fileserver.lock backend=git,hg
        salt-run fileserver.lock backend=git remote='*github.com*'
        salt-run fileserver.lock remote=bitbucket
    """
    locked, errors = salt.fileserver.Fileserver(__opts__).lock(
        back=backend, remote=remote
    )
    result = {
        label: data
        for label, data in (("locked", locked), ("errors", errors))
        if data
    }
    # Preserve the historical string return when nothing happened
    return result or "No locks were set"
import logging
import salt.pillar
import salt.utils.minions
log = logging.getLogger(__name__)
def show_top(minion=None, saltenv="base"):
    """
    Return the compiled pillar top data for a single minion. When no minion
    id is given, the first minion found in the master's cache is used.

    CLI Example:

    .. code-block:: bash

        salt-run pillar.show_top
    """
    minion_id, grains, _ = salt.utils.minions.get_minion_data(minion, __opts__)
    top, errors = salt.pillar.Pillar(__opts__, grains, minion_id, saltenv).get_top()
    if errors:
        # Surface rendering errors on the event bus before returning them
        __jid_event__.fire_event({"data": errors, "outputter": "nested"}, "progress")
        return errors
    return top
def show_pillar(minion="*", **kwargs):
    """
    Return the compiled pillar of a specific minion, or the globally
    available pillar when no minion id is given (this assumes no minion has
    the id ``*``). A ``pillarenv`` keyword may be supplied to limit the
    compilation to a specific pillar branch of git, and ``saltenv`` selects
    the salt environment; any other keyword arguments are merged into the
    minion's grains before compiling.

    CLI Example:

    shows minion specific pillar:

    .. code-block:: bash

        salt-run pillar.show_pillar 'www.example.com'

    shows global pillar:

    .. code-block:: bash

        salt-run pillar.show_pillar

    shows global pillar for 'dev' pillar environment:
    (note that not specifying pillarenv will merge all pillar environments
    using the master config option pillar_source_merging_strategy.)

    .. code-block:: bash

        salt-run pillar.show_pillar 'pillarenv=dev'

    shows global pillar for 'dev' pillar environment and specific pillarenv = dev:

    .. code-block:: bash

        salt-run pillar.show_pillar 'saltenv=dev' 'pillarenv=dev'

    API Example:

    .. code-block:: python

        import salt.config
        import salt.runner

        opts = salt.config.master_config('/etc/salt/master')
        runner = salt.runner.RunnerClient(opts)
        pillar = runner.cmd('pillar.show_pillar', [])
        print(pillar)
    """
    id_, grains, _ = salt.utils.minions.get_minion_data(minion, __opts__)
    if grains is None:
        grains = {"fqdn": minion}

    # ``saltenv``/``pillarenv`` are control arguments; anything else is
    # treated as extra grain data for the compilation.
    saltenv = kwargs.pop("saltenv", "base")
    pillarenv = kwargs.pop("pillarenv", None)
    grains.update(kwargs)

    pillar = salt.pillar.Pillar(__opts__, grains, id_, saltenv, pillarenv=pillarenv)
    return pillar.compile_pillar()
def clear_pillar_cache(minion="*", **kwargs):
    """
    Clears the cached values when using pillar_cache

    .. versionadded:: 3003

    minion
        Target expression matching the minion id(s) whose pillar cache
        should be cleared.

    kwargs
        ``pillarenv`` and ``saltenv`` are honored; any remaining keyword
        arguments are merged into each minion's grains before compiling.

    CLI Example:

    Clears the pillar cache for a specific minion:

    .. code-block:: bash

        salt-run pillar.clear_pillar_cache 'minion'
    """
    if not __opts__.get("pillar_cache"):
        log.info("The pillar_cache is set to False or not enabled.")
        return False

    ckminions = salt.utils.minions.CkMinions(__opts__)
    ret = ckminions.check_minions(minion)

    pillarenv = kwargs.pop("pillarenv", None)
    saltenv = kwargs.pop("saltenv", "base")

    pillar_cache = {}
    for tgt in ret.get("minions", []):
        id_, grains, _ = salt.utils.minions.get_minion_data(tgt, __opts__)
        # Guard against minions with no cached grains *before* merging the
        # extra kwargs; the previous order raised TypeError on None grains.
        # Use the matched minion id (tgt) for the fallback fqdn, not the
        # target expression.
        if grains is None:
            grains = {"fqdn": tgt}
        for key in kwargs:
            grains[key] = kwargs[key]

        pillar = salt.pillar.PillarCache(
            __opts__, grains, id_, saltenv, pillarenv=pillarenv
        )

        pillar.clear_pillar()

        if __opts__.get("pillar_cache_backend") == "memory":
            _pillar_cache = pillar.cache
        else:
            _pillar_cache = pillar.cache._dict

        if tgt in _pillar_cache and _pillar_cache[tgt]:
            pillar_cache[tgt] = _pillar_cache.get(tgt).get(pillarenv)

    return pillar_cache
def show_pillar_cache(minion="*", **kwargs):
    """
    Shows the cached values in pillar_cache

    .. versionadded:: 3003

    minion
        Target expression matching the minion id(s) whose pillar cache
        should be shown.

    kwargs
        ``pillarenv`` and ``saltenv`` are honored; any remaining keyword
        arguments are merged into each minion's grains before compiling.

    CLI Example:

    Shows the pillar cache for a specific minion:

    .. code-block:: bash

        salt-run pillar.show_pillar_cache 'minion'
    """
    if not __opts__.get("pillar_cache"):
        log.info("The pillar_cache is set to False or not enabled.")
        return False

    ckminions = salt.utils.minions.CkMinions(__opts__)
    ret = ckminions.check_minions(minion)

    pillarenv = kwargs.pop("pillarenv", None)
    saltenv = kwargs.pop("saltenv", "base")

    pillar_cache = {}
    for tgt in ret.get("minions", []):
        id_, grains, _ = salt.utils.minions.get_minion_data(tgt, __opts__)
        # Guard against minions with no cached grains *before* merging the
        # extra kwargs; the previous order raised TypeError on None grains.
        # Use the matched minion id (tgt) for the fallback fqdn, not the
        # target expression.
        if grains is None:
            grains = {"fqdn": tgt}
        for key in kwargs:
            grains[key] = kwargs[key]

        pillar = salt.pillar.PillarCache(
            __opts__, grains, id_, saltenv, pillarenv=pillarenv
        )

        if __opts__.get("pillar_cache_backend") == "memory":
            _pillar_cache = pillar.cache
        else:
            _pillar_cache = pillar.cache._dict

        if tgt in _pillar_cache and _pillar_cache[tgt]:
            pillar_cache[tgt] = _pillar_cache[tgt].get(pillarenv)

    return pillar_cache
import logging
import salt.loader
import salt.utils.event
import salt.utils.functools
import salt.utils.jid
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
def pause(jid, state_id=None, duration=None):
    """
    Instruct a running state (identified by its jid) to pause when it
    reaches the given state id. An optional ``duration`` in seconds limits
    how long the run stays paused.
    """
    master_minion = salt.minion.MasterMinion(__opts__)
    master_minion.functions["state.pause"](jid, state_id, duration)
# Backwards-compatible alias: expose ``pause`` under its historical name.
set_pause = salt.utils.functools.alias_function(pause, "set_pause")
def resume(jid, state_id=None):
    """
    Remove a pause previously set on the given jid so the state run can
    continue, optionally scoped to a single state id.
    """
    master_minion = salt.minion.MasterMinion(__opts__)
    master_minion.functions["state.resume"](jid, state_id)
# Backwards-compatible alias: expose ``resume`` under its historical name.
rm_pause = salt.utils.functools.alias_function(resume, "rm_pause")
def soft_kill(jid, state_id=None):
    """
    Instruct a running state (identified by its jid) to exit safely before
    executing the given state id. Without ``state_id``, the run exits safely
    at the beginning of its next state.
    """
    master_minion = salt.minion.MasterMinion(__opts__)
    master_minion.functions["state.soft_kill"](jid, state_id)
def orchestrate(
    mods,
    saltenv="base",
    test=None,
    exclude=None,
    pillar=None,
    pillarenv=None,
    pillar_enc=None,
    orchestration_jid=None,
):
    """
    .. versionadded:: 0.17.0

    Execute a state run from the master, used as a powerful orchestration
    system.

    .. seealso:: More Orchestrate documentation

        * :ref:`Full Orchestrate Tutorial <orchestrate-runner>`
        * :py:mod:`Docs for the master-side state module <salt.states.saltmod>`

    CLI Examples:

    .. code-block:: bash

        salt-run state.orchestrate webserver
        salt-run state.orchestrate webserver saltenv=dev test=True
        salt-run state.orchestrate webserver saltenv=dev pillarenv=aws

    .. versionchanged:: 2014.1.1
        Runner renamed from ``state.sls`` to ``state.orchestrate``

    .. versionchanged:: 2014.7.0
        Runner uses the pillar variable

    .. versionchanged:: 2017.5
        Runner uses the pillar_enc variable that allows renderers to render
        the pillar. This is usable when supplying the contents of a file as
        pillar, and the file contains gpg-encrypted entries.

    .. seealso:: GPG renderer documentation

    CLI Examples:

    .. code-block:: bash

        salt-run state.orchestrate webserver pillar_enc=gpg pillar="$(cat somefile.json)"
    """
    if pillar is not None and not isinstance(pillar, dict):
        raise SaltInvocationError("Pillar data must be formatted as a dictionary")

    # Orchestration states are rendered and executed locally on the master
    __opts__["file_client"] = "local"
    minion = salt.minion.MasterMinion(__opts__)

    # Fall back to the master config for the environments when unspecified
    if pillarenv is None:
        pillarenv = __opts__.get("pillarenv")
    if saltenv is None:
        saltenv = __opts__.get("saltenv")
    if orchestration_jid is None:
        orchestration_jid = salt.utils.jid.gen_jid(__opts__)

    running = minion.functions["state.sls"](
        mods,
        test,
        exclude,
        pillar=pillar,
        saltenv=saltenv,
        pillarenv=pillarenv,
        pillar_enc=pillar_enc,
        __pub_jid=orchestration_jid,
        orchestration_jid=orchestration_jid,
    )
    ret = {"data": {minion.opts["id"]: running}, "outputter": "highstate"}
    ret["retcode"] = 0 if __utils__["state.check_result"](ret["data"]) else 1
    return ret
# Aliases for orchestrate runner
# ``state.orch`` and ``state.sls`` both dispatch to :py:func:`orchestrate`.
orch = salt.utils.functools.alias_function(orchestrate, "orch")
sls = salt.utils.functools.alias_function(orchestrate, "sls")
def orchestrate_single(fun, name, test=None, queue=False, pillar=None, **kwargs):
    """
    Execute a single state orchestration routine

    .. versionadded:: 2015.5.0

    fun
        The state function to execute (e.g. ``salt.wheel``).
    name
        The ``name`` argument for the state function.
    test
        Run the state in test (dry-run) mode.
    queue
        Queue the run instead of failing when another state run is in
        progress.
    pillar
        Additional pillar data, as a dictionary.

    CLI Example:

    .. code-block:: bash

        salt-run state.orchestrate_single fun=salt.wheel name=key.list_all
    """
    if pillar is not None and not isinstance(pillar, dict):
        raise SaltInvocationError("Pillar data must be formatted as a dictionary")
    __opts__["file_client"] = "local"
    minion = salt.minion.MasterMinion(__opts__)
    # Forward the caller's test/queue values; these were previously
    # hard-coded to ``test=None, queue=False``, silently discarding the
    # arguments accepted by this runner.
    running = minion.functions["state.single"](
        fun, name, test=test, queue=queue, pillar=pillar, **kwargs
    )
    ret = {minion.opts["id"]: running}
    __jid_event__.fire_event({"data": ret, "outputter": "highstate"}, "progress")
    return ret
def orchestrate_high(data, test=None, queue=False, pillar=None, **kwargs):
    """
    Execute a single state orchestration routine

    .. versionadded:: 2015.5.0

    data
        The highdata structure to execute.
    test
        Run the states in test (dry-run) mode.
    queue
        Queue the run instead of failing when another state run is in
        progress.
    pillar
        Additional pillar data, as a dictionary.

    CLI Example:

    .. code-block:: bash

        salt-run state.orchestrate_high '{
            stage_one:
                {salt.state: [{tgt: "db*"}, {sls: postgres_setup}]},
            stage_two:
                {salt.state: [{tgt: "web*"}, {sls: apache_setup}, {
                    require: [{salt: stage_one}],
                }]},
            }'
    """
    if pillar is not None and not isinstance(pillar, dict):
        raise SaltInvocationError("Pillar data must be formatted as a dictionary")
    __opts__["file_client"] = "local"
    minion = salt.minion.MasterMinion(__opts__)
    # Forward the caller's test/queue values; these were previously
    # hard-coded to ``test=None, queue=False``, silently discarding the
    # arguments accepted by this runner.
    running = minion.functions["state.high"](
        data, test=test, queue=queue, pillar=pillar, **kwargs
    )
    ret = {minion.opts["id"]: running}
    __jid_event__.fire_event({"data": ret, "outputter": "highstate"}, "progress")
    return ret
def orchestrate_show_sls(
    mods,
    saltenv="base",
    test=None,
    queue=False,
    pillar=None,
    pillarenv=None,
    pillar_enc=None,
):
    """
    Render the given sls file (or list of sls files) with the master minion
    and return the resulting state data. Note that the master minion appends
    a ``_master`` suffix to its minion id.

    .. seealso:: The state.show_sls module function

    CLI Example:

    .. code-block:: bash

        salt-run state.orch_show_sls my-orch-formula.my-orch-state 'pillar={ nodegroup: ng1 }'
    """
    if pillar is not None and not isinstance(pillar, dict):
        raise SaltInvocationError("Pillar data must be formatted as a dictionary")

    __opts__["file_client"] = "local"
    minion = salt.minion.MasterMinion(__opts__)
    rendered = minion.functions["state.show_sls"](
        mods,
        test,
        queue,
        pillar=pillar,
        pillarenv=pillarenv,
        pillar_enc=pillar_enc,
        saltenv=saltenv,
    )
    return {minion.opts["id"]: rendered}
# ``state.orch_show_sls`` is the short CLI alias for orchestrate_show_sls.
orch_show_sls = salt.utils.functools.alias_function(
    orchestrate_show_sls, "orch_show_sls"
)
def event(
    tagmatch="*", count=-1, quiet=False, sock_dir=None, pretty=False, node="master"
):
    r"""
    Watch Salt's event bus and block until the given tag is matched

    .. versionadded:: 2014.7.0

    .. versionchanged:: 2019.2.0
        ``tagmatch`` can now be either a glob or regular expression.

    Useful for utilizing Salt's event bus from shell scripts or for taking
    simple actions directly from the CLI. Enable debug logging to see
    ignored events.

    :param tagmatch: the event is written to stdout for each tag that matches
        this glob or regular expression.
    :param count: this number is decremented for each event that matches the
        ``tagmatch`` parameter; pass ``-1`` to listen forever.
    :param quiet: do not print to stdout; just block
    :param sock_dir: path to the Salt master's event socket file.
    :param pretty: Output the JSON all on a single line if ``False`` (useful
        for shell tools); pretty-print the JSON output if ``True``.
    :param node: Watch the minion-side or master-side event bus.

        .. versionadded:: 2016.3.0

    CLI Examples:

    .. code-block:: bash

        # Reboot a minion and run highstate when it comes back online
        salt 'jerry' system.reboot && \
        salt-run state.event 'salt/minion/jerry/start' count=1 quiet=True && \
        salt 'jerry' state.highstate

        # Reboot multiple minions and run highstate when all are back online
        salt -L 'kevin,stewart,dave' system.reboot && \
        salt-run state.event 'salt/minion/*/start' count=3 quiet=True && \
        salt -L 'kevin,stewart,dave' state.highstate

        # Watch the event bus forever in a shell while-loop.
        salt-run state.event | while read -r tag data; do
            echo $tag
            echo $data | jq --color-output .
        done

    .. seealso::

        See :blob:`tests/eventlisten.sh` for an example of usage within a shell
        script.
    """
    # Load the raw execution module so the listener runs in this process
    return salt.loader.raw_mod(__opts__, "state", None)["state.event"](
        tagmatch=tagmatch,
        count=count,
        quiet=quiet,
        sock_dir=sock_dir,
        pretty=pretty,
        node=node,
    )
import logging
import salt.pillar.git_pillar
import salt.utils.gitfs
from salt.exceptions import SaltRunnerError
log = logging.getLogger(__name__)
def update(branch=None, repo=None):
    """
    .. versionadded:: 2014.1.0

    Fetch one or all configured git_pillar remotes.

    .. versionchanged:: 2015.8.4
        This runner supports the :ref:`git_pillar configuration schema
        <git-pillar-configuration>` introduced in 2015.8.0. ``branch`` and
        ``repo`` may be omitted to update every git_pillar remote, and the
        return data is a dictionary whose values are ``True`` only when new
        commits were fetched.

    .. versionchanged:: 2018.3.0
        A remote's return value is ``None`` when no changes were fetched;
        ``False`` is reserved for errors.

    .. versionchanged:: 3001
        The repo parameter also matches against the repo name.

    .. note::
        This performs a ``git fetch`` only and will *not* fast-forward the
        git_pillar cachedir on the master. Salt fetches git_pillar repos on
        its own roughly every 60 seconds (or whatever
        :conf_master:`loop_interval` is set to), so a repo may already have
        been fetched between a push and this call. When in doubt, refresh
        pillar data with :py:func:`saltutil.refresh_pillar
        <salt.modules.saltutil.refresh_pillar>` and inspect the result via
        :py:func:`pillar.item <salt.modules.pillar.item>`.

    CLI Example:

    .. code-block:: bash

        # Update specific branch and repo
        salt-run git_pillar.update branch='branch' repo='https://foo.com/bar.git'
        # Update specific repo, by name
        salt-run git_pillar.update repo=myrepo
        # Update all repos
        salt-run git_pillar.update
        # Run with debug logging
        salt-run git_pillar.update -l debug
    """
    results = {}
    for ext_pillar in __opts__.get("ext_pillar", []):
        if next(iter(ext_pillar)) != "git":
            continue
        pillar = salt.utils.gitfs.GitPillar(
            __opts__,
            ext_pillar["git"],
            per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
            per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
            global_only=salt.pillar.git_pillar.GLOBAL_ONLY,
        )
        for remote in pillar.remotes:
            # Apply the branch/repo filters, when given
            if branch is not None and branch != remote.branch:
                continue
            if repo is not None and repo not in (
                remote.url,
                getattr(remote, "name", None),
            ):
                continue
            try:
                fetched = remote.fetch()
            except Exception as exc:  # pylint: disable=broad-except
                log.error(
                    "Exception '%s' caught while fetching git_pillar remote '%s'",
                    exc,
                    remote.id,
                    exc_info_on_loglevel=logging.DEBUG,
                )
                fetched = False
            finally:
                # Always release the update lock, even on failure
                remote.clear_lock()
            results[remote.id] = fetched

    if not results:
        if branch is None and repo is None:
            raise SaltRunnerError("No git_pillar remotes are configured")
        raise SaltRunnerError("Specified git branch/repo not found in ext_pillar config")
    return results
import salt.utils.sdb
# Expose ``set_`` on the CLI as ``sdb.set`` ("set" shadows the Python builtin,
# hence the trailing underscore on the function name).
__func_alias__ = {
    "set_": "set",
}
def get(uri):
    """
    Look up *uri* in the configured SDB backend.

    The uri must take the form ``sdb://<profile>/<key>``; anything that does
    not start with ``sdb://`` is handed back unchanged by the underlying
    helper.

    CLI Example:

    .. code-block:: bash

        salt-run sdb.get sdb://mymemcached/foo
    """
    # __opts__ and __utils__ are injected by the salt loader.
    result = salt.utils.sdb.sdb_get(uri, __opts__, __utils__)
    return result
def set_(uri, value):
    """
    Store *value* under the SDB *uri* (``sdb://<profile>/<key>``).

    Returns ``False`` when the uri does not start with ``sdb://`` or when the
    backend reports that the write did not succeed.

    CLI Example:

    .. code-block:: bash

        salt-run sdb.set sdb://mymemcached/foo bar
    """
    # Exposed as ``sdb.set`` via the module-level __func_alias__.
    outcome = salt.utils.sdb.sdb_set(uri, value, __opts__, __utils__)
    return outcome
def delete(uri):
    """
    Remove the value stored under the SDB *uri* (``sdb://<profile>/<key>``).

    Returns ``False`` when the uri does not start with ``sdb://`` or when the
    backend reports that the delete did not succeed.

    CLI Example:

    .. code-block:: bash

        salt-run sdb.delete sdb://mymemcached/foo
    """
    outcome = salt.utils.sdb.sdb_delete(uri, __opts__, __utils__)
    return outcome
def get_or_set_hash(
    uri, length=8, chars="abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)"
):
    """
    Perform a one-time generation of a random string and persist it to sdb;
    on later calls the already-stored value is returned instead.

    Useful for generating passwords or keys shared by multiple minions that
    need to live in one central place.

    CLI Example:

    .. code-block:: bash

        salt-run sdb.get_or_set_hash 'SECRET_KEY' 50

    .. warning::

        This function could return strings which may contain characters which
        are reserved as directives by the YAML parser, such as strings
        beginning with ``%``. To avoid issues when using the output of this
        function in an SLS file containing YAML+Jinja, surround the call with
        single quotes.
    """
    stored = salt.utils.sdb.sdb_get_or_set_hash(uri, __opts__, length, chars, __utils__)
    return stored
import ast
import logging
import re
import salt.loader
import salt.utils.beacons
try:
from pyroute2 import IPDB
IP = IPDB()
HAS_PYROUTE2 = True
except ImportError:
IP = None
HAS_PYROUTE2 = False
log = logging.getLogger(__name__)
__virtualname__ = "network_settings"
# Interface attributes (as exposed by pyroute2's IPDB) that the beacon knows
# how to watch; only these names are accepted in the beacon configuration.
ATTRS = [
    "family",
    "txqlen",
    "ipdb_scope",
    "index",
    "operstate",
    "group",
    "carrier_changes",
    "ipaddr",
    "neighbours",
    "ifname",
    "promiscuity",
    "linkmode",
    "broadcast",
    "address",
    "num_tx_queues",
    "ipdb_priority",
    "kind",
    "qdisc",
    "mtu",
    "num_rx_queues",
    "carrier",
    "flags",
    "ifi_type",
    "ports",
]
# Snapshot of every interface's watched attributes from the previous beacon
# run; keyed by interface name and used to compute deltas.
LAST_STATS = {}
class Hashabledict(dict):
    """
    Dictionary subclass that can be stored in a set.

    The hash is computed from the (key, value) pairs sorted by key, so two
    Hashabledicts with equal contents hash identically.
    """

    def __hash__(self):
        ordered_items = sorted(self.items())
        return hash(tuple(ordered_items))
def __virtual__():
    """
    Only load the beacon when the pyroute2 bindings imported successfully.
    """
    if not HAS_PYROUTE2:
        err_msg = "pyroute2 library is missing"
        log.error("Unable to load %s beacon: %s", __virtualname__, err_msg)
        return False, err_msg
    return __virtualname__
def validate(config):
    """
    Validate the beacon configuration.

    Returns a ``(valid, message)`` tuple.
    """
    if not isinstance(config, list):
        return False, "Configuration for network_settings beacon must be a list."
    config = salt.utils.beacons.list_to_dict(config)
    interfaces = config.get("interfaces", {})
    if isinstance(interfaces, list):
        # Old syntax
        return (
            False,
            "interfaces section for network_settings beacon must be a dictionary.",
        )
    for item in interfaces:
        attrs = config["interfaces"][item]
        if not isinstance(attrs, dict):
            return (
                False,
                "Interface attributes for network_settings beacon"
                " must be a dictionary.",
            )
        if any(attr not in ATTRS for attr in attrs):
            return False, "Invalid attributes in beacon configuration."
    return True, "Valid beacon configuration"
def _copy_interfaces_info(interfaces):
    """
    Snapshot the watched attributes (those listed in ATTRS) of every
    interface.

    Each interface maps to a set of single-entry Hashabledicts; values are
    stored as their ``repr()`` so that otherwise-unhashable attribute values
    can live in a set and later be recovered with ``ast.literal_eval``.
    """
    snapshot = {}
    for name in interfaces:
        iface_attrs = interfaces[name]
        copied = set()
        for attr in ATTRS:
            if attr in iface_attrs:
                entry = Hashabledict()
                entry[attr] = repr(iface_attrs[attr])
                copied.add(entry)
        snapshot[name] = copied
    return snapshot
def beacon(config):
    """
    Watch for changes on network settings

    By default, the beacon will emit when there is a value change on one of the
    settings on watch. The config also support the onvalue parameter for each
    setting, which instruct the beacon to only emit if the setting changed to
    the value defined.

    Example Config

    .. code-block:: yaml

        beacons:
          network_settings:
            - interfaces:
                eth0:
                  ipaddr:
                  promiscuity:
                    onvalue: 1
                eth1:
                  linkmode:

    The config above will check for value changes on eth0 ipaddr and eth1 linkmode. It will also
    emit if the promiscuity value changes to 1.

    Beacon items can use the * wildcard to make a definition apply to several interfaces. For
    example an eth* would apply to all ethernet interfaces.

    Setting the argument coalesce = True will combine all the beacon results on a single event.
    The example below shows how to trigger coalesced results:

    .. code-block:: yaml

        beacons:
          network_settings:
            - coalesce: True
            - interfaces:
                eth0:
                  ipaddr:
                  promiscuity:
    """
    _config = salt.utils.beacons.list_to_dict(config)
    ret = []
    interfaces = []
    expanded_config = {"interfaces": {}}
    global LAST_STATS
    coalesce = False
    # Snapshot the current attributes of every interface known to IPDB.
    _stats = _copy_interfaces_info(IP.by_name)
    if not LAST_STATS:
        # First invocation: prime the baseline so no spurious events fire.
        LAST_STATS = _stats
    if "coalesce" in _config and _config["coalesce"]:
        coalesce = True
    changes = {}
    log.debug("_stats %s", _stats)
    # Get list of interfaces included in config that are registered in the
    # system, including interfaces defined by wildcards (eth*, wlan*)
    for interface_config in _config.get("interfaces", {}):
        if interface_config in _stats:
            interfaces.append(interface_config)
        else:
            # No direct match, try with * wildcard regexp
            for interface_stat in _stats:
                match = re.search(interface_config, interface_stat)
                if match:
                    interfaces.append(interface_stat)
                    expanded_config["interfaces"][interface_stat] = _config[
                        "interfaces"
                    ][interface_config]
    # NOTE(review): expanded_config always contains the "interfaces" key, so
    # this condition is always true; it was presumably meant to test
    # expanded_config["interfaces"] — confirm upstream before changing.
    if expanded_config:
        _config["interfaces"].update(expanded_config["interfaces"])
        # config updated so update config
        # NOTE(review): re-parsing the raw ``config`` here appears to discard
        # the wildcard expansion merged just above — TODO confirm intent.
        _config = salt.utils.beacons.list_to_dict(config)
    log.debug("interfaces %s", interfaces)
    for interface in interfaces:
        _send_event = False
        # Set difference against the previous snapshot yields the attribute
        # entries that changed since the last run.
        _diff_stats = _stats[interface] - LAST_STATS[interface]
        _ret_diff = {}
        interface_config = _config["interfaces"][interface]
        log.debug("_diff_stats %s", _diff_stats)
        if _diff_stats:
            _diff_stats_dict = {}
            LAST_STATS[interface] = _stats[interface]
            for item in _diff_stats:
                _diff_stats_dict.update(item)
            for attr in interface_config:
                if attr in _diff_stats_dict:
                    config_value = None
                    if interface_config[attr] and "onvalue" in interface_config[attr]:
                        config_value = interface_config[attr]["onvalue"]
                    # Values were stored as repr() strings; recover them.
                    new_value = ast.literal_eval(_diff_stats_dict[attr])
                    # Emit on any change, or only when the new value matches
                    # the configured "onvalue".
                    if not config_value or config_value == new_value:
                        _send_event = True
                        _ret_diff[attr] = new_value
        if _send_event:
            if coalesce:
                changes[interface] = _ret_diff
            else:
                ret.append(
                    {"tag": interface, "interface": interface, "change": _ret_diff}
                )
    if coalesce and changes:
        # Refresh grains so the single coalesced event reflects current state.
        grains_info = salt.loader.grains(__opts__, True)
        __grains__.update(grains_info)
        ret.append({"tag": "result", "changes": changes})
    return ret
import atexit
import logging
import select
import time
import salt.utils.beacons
import salt.utils.stringutils
try:
import pybonjour
HAS_PYBONJOUR = True
except ImportError:
HAS_PYBONJOUR = False
log = logging.getLogger(__name__)
__virtualname__ = "bonjour_announce"
# Grains snapshot from the previous beacon invocation, used to detect changes.
LAST_GRAINS = {}
# Handle of the currently registered bonjour service (DNSServiceRef);
# None until the first registration.
SD_REF = None
def __virtual__():
    """
    Only load the beacon when the pybonjour bindings imported successfully.
    """
    if not HAS_PYBONJOUR:
        err_msg = "pybonjour library is missing."
        log.error("Unable to load %s beacon: %s", __virtualname__, err_msg)
        return False, err_msg
    return __virtualname__
def _close_sd_ref():
    """
    Close and clear the module-level SD_REF service handle, if one is open.

    Registered via atexit so the bonjour registration is torn down cleanly
    when the process exits.
    """
    global SD_REF
    if not SD_REF:
        return
    SD_REF.close()
    SD_REF = None
def _register_callback(
    sdRef, flags, errorCode, name, regtype, domain
):  # pylint: disable=unused-argument
    """
    DNSServiceRegister completion callback: log failures, ignore successes.
    """
    if errorCode == pybonjour.kDNSServiceErr_NoError:
        return
    log.error("Bonjour registration failed with error code %s", errorCode)
def validate(config):
    """
    Validate the beacon configuration.

    Returns a ``(valid, message)`` tuple.
    """
    # Check the type BEFORE handing the value to list_to_dict(); the helper
    # assumes a list, so calling it first would raise on e.g. a dict config
    # and mask the intended validation error.
    if not isinstance(config, list):
        return False, "Configuration for bonjour_announce beacon must be a list."
    _config = salt.utils.beacons.list_to_dict(config)
    if not all(x in _config for x in ("servicetype", "port", "txt")):
        return (
            False,
            "Configuration for bonjour_announce beacon must contain servicetype, port and txt items.",
        )
    return True, "Valid beacon configuration."
def _enforce_txt_record_maxlen(key, value):
"""
Enforces the TXT record maximum length of 255 characters.
TXT record length includes key, value, and '='.
:param str key: Key of the TXT record
:param str value: Value of the TXT record
:rtype: str
:return: The value of the TXT record. It may be truncated if it exceeds
the maximum permitted length. In case of truncation, '...' is
appended to indicate that the entire value is not present.
"""
# Add 1 for '=' separator between key and value
if len(key) + len(value) + 1 > 255:
# 255 - 3 ('...') - 1 ('=') = 251
return value[: 251 - len(key)] + "..."
return value
def beacon(config):
    """
    Broadcast values via zeroconf

    If the announced values are static, it is advised to set run_once: True
    (do not poll) on the beacon configuration.

    The following are required configuration settings:

    - ``servicetype`` - The service type to announce
    - ``port`` - The port of the service to announce
    - ``txt`` - The TXT record of the service being announced as a dict. Grains
      can be used to define TXT values using one of following two formats:

      - ``grains.<grain_name>``
      - ``grains.<grain_name>[i]`` where i is an integer representing the
        index of the grain to use. If the grain is not a list, the index is
        ignored.

    The following are optional configuration settings:

    - ``servicename`` - Set the name of the service. Will use the hostname from
      the minion's ``host`` grain if this value is not set.
    - ``reset_on_change`` - If ``True`` and there is a change in TXT records
      detected, it will stop announcing the service and then restart announcing
      the service. This interruption in service announcement may be desirable
      if the client relies on changes in the browse records to update its cache
      of TXT records. Defaults to ``False``.
    - ``reset_wait`` - The number of seconds to wait after announcement stops
      announcing and before it restarts announcing in the case where there is a
      change in TXT records detected and ``reset_on_change`` is ``True``.
      Defaults to ``0``.
    - ``copy_grains`` - If ``True``, Salt will copy the grains passed into the
      beacon when it backs them up to check for changes on the next iteration.
      Normally, instead of copy, it would use straight value assignment. This
      will allow detection of changes to grains where the grains are modified
      in-place instead of completely replaced. In-place grains changes are not
      currently done in the main Salt code but may be done due to a custom
      plug-in. Defaults to ``False``.

    Example Config

    .. code-block:: yaml

        beacons:
          bonjour_announce:
            - run_once: True
            - servicetype: _demo._tcp
            - port: 1234
            - txt:
                ProdName: grains.productname
                SerialNo: grains.serialnumber
                Comments: 'this is a test'
    """
    ret = []
    changes = {}
    txt = {}
    global LAST_GRAINS
    global SD_REF
    config = salt.utils.beacons.list_to_dict(config)
    if "servicename" in config:
        servicename = config["servicename"]
    else:
        servicename = __grains__["host"]
    # Check for hostname change
    if LAST_GRAINS and LAST_GRAINS["host"] != servicename:
        changes["servicename"] = servicename
    if LAST_GRAINS and config.get("reset_on_change", False):
        # Check for IP address change in the case when we reset on change
        if LAST_GRAINS.get("ipv4", []) != __grains__.get("ipv4", []):
            changes["ipv4"] = __grains__.get("ipv4", [])
        if LAST_GRAINS.get("ipv6", []) != __grains__.get("ipv6", []):
            changes["ipv6"] = __grains__.get("ipv6", [])
    # Build the TXT record, resolving "grains.<name>[i]" references and
    # recording which entries changed since the last run.
    for item in config["txt"]:
        changes_key = "txt." + salt.utils.stringutils.to_unicode(item)
        if config["txt"][item].startswith("grains."):
            grain = config["txt"][item][7:]
            grain_index = None
            square_bracket = grain.find("[")
            if square_bracket != -1 and grain[-1] == "]":
                # Optional "[i]" suffix selects one element of a list grain.
                grain_index = int(grain[square_bracket + 1 : -1])
                grain = grain[:square_bracket]
            grain_value = __grains__.get(grain, "")
            if isinstance(grain_value, list):
                if grain_index is not None:
                    grain_value = grain_value[grain_index]
                else:
                    grain_value = ",".join(grain_value)
            txt[item] = _enforce_txt_record_maxlen(item, grain_value)
            if LAST_GRAINS and (
                LAST_GRAINS.get(grain, "") != __grains__.get(grain, "")
            ):
                changes[changes_key] = txt[item]
        else:
            txt[item] = _enforce_txt_record_maxlen(item, config["txt"][item])
            if not LAST_GRAINS:
                changes[changes_key] = txt[item]
    if changes:
        txt_record = pybonjour.TXTRecord(items=txt)
        if not LAST_GRAINS:
            # First run: register the service and report the full state.
            changes["servicename"] = servicename
            changes["servicetype"] = config["servicetype"]
            changes["port"] = config["port"]
            changes["ipv4"] = __grains__.get("ipv4", [])
            changes["ipv6"] = __grains__.get("ipv6", [])
            SD_REF = pybonjour.DNSServiceRegister(
                name=servicename,
                regtype=config["servicetype"],
                port=config["port"],
                txtRecord=txt_record,
                callBack=_register_callback,
            )
            atexit.register(_close_sd_ref)
            # Wait for the registration result and let pybonjour invoke the
            # callback.
            ready = select.select([SD_REF], [], [])
            if SD_REF in ready[0]:
                pybonjour.DNSServiceProcessResult(SD_REF)
        elif config.get("reset_on_change", False) or "servicename" in changes:
            # A change in 'servicename' requires a reset because we can only
            # directly update TXT records
            SD_REF.close()
            SD_REF = None
            reset_wait = config.get("reset_wait", 0)
            if reset_wait > 0:
                time.sleep(reset_wait)
            SD_REF = pybonjour.DNSServiceRegister(
                name=servicename,
                regtype=config["servicetype"],
                port=config["port"],
                txtRecord=txt_record,
                callBack=_register_callback,
            )
            ready = select.select([SD_REF], [], [])
            if SD_REF in ready[0]:
                pybonjour.DNSServiceProcessResult(SD_REF)
        else:
            # Only TXT data changed: update the existing registration in place.
            txt_record_raw = str(txt_record).encode("utf-8")
            pybonjour.DNSServiceUpdateRecord(
                SD_REF, RecordRef=None, flags=0, rdata=txt_record_raw
            )
        ret.append({"tag": "result", "changes": changes})
    # Back up the grains for the next iteration's change detection.
    if config.get("copy_grains", False):
        LAST_GRAINS = __grains__.copy()
    else:
        LAST_GRAINS = __grains__
    return ret
import logging
import re
import salt.utils.beacons
log = logging.getLogger(__name__)
__virtualname__ = "sensehat"
def __virtual__():
    """
    Only load the beacon when the sensehat execution module is available.
    """
    if "sensehat.get_pressure" not in __salt__:
        err_msg = "sensehat.get_pressure is missing."
        log.error("Unable to load %s beacon: %s", __virtualname__, err_msg)
        return False, err_msg
    return __virtualname__
def validate(config):
    """
    Validate the beacon configuration.

    Returns a ``(valid, message)`` tuple.
    """
    # Configuration for sensehat beacon should be a list
    if not isinstance(config, list):
        return False, "Configuration for sensehat beacon must be a list."
    _config = salt.utils.beacons.list_to_dict(config)
    if "sensors" not in _config:
        return False, "Configuration for sensehat beacon requires sensors."
    return True, "Valid beacon configuration"
def beacon(config):
    """
    Monitor the temperature, humidity and pressure using the SenseHat sensors.

    You can either specify a threshold for each value and only emit a beacon
    if it is exceeded or define a range and emit a beacon when the value is
    out of range.

    Units:

    * humidity: percent
    * temperature: degrees Celsius
    * temperature_from_pressure: degrees Celsius
    * pressure: Millibars

    .. code-block:: yaml

        beacons:
          sensehat:
            - sensors:
                humidity: 70%
                temperature: [20, 40]
                temperature_from_pressure: 40
                pressure: 1500
    """
    ret = []
    # Lower bounds used when the config provides only a single (max) value.
    min_default = {"humidity": "0", "pressure": "0", "temperature": "-273.15"}
    config = salt.utils.beacons.list_to_dict(config)
    for sensor in config.get("sensors", {}):
        sensor_function = "sensehat.get_{}".format(sensor)
        if sensor_function not in __salt__:
            # Fixed typo in the log message (was "meassuring").
            log.error("No sensor for measuring %s. Skipping.", sensor)
            continue
        sensor_config = config["sensors"][sensor]
        if isinstance(sensor_config, list):
            # Two-element list: explicit [min, max] range.
            sensor_min = str(sensor_config[0])
            sensor_max = str(sensor_config[1])
        else:
            # Single value: treat it as the max with a sensible default min.
            sensor_min = min_default.get(sensor, "0")
            sensor_max = str(sensor_config)
        # Humidity may be given as e.g. "70%"; strip the percent sign.
        if isinstance(sensor_min, str) and "%" in sensor_min:
            sensor_min = sensor_min.replace("%", "")
        if isinstance(sensor_max, str) and "%" in sensor_max:
            sensor_max = sensor_max.replace("%", "")
        sensor_min = float(sensor_min)
        sensor_max = float(sensor_max)
        current_value = __salt__[sensor_function]()
        if not sensor_min <= current_value <= sensor_max:
            ret.append({"tag": "sensehat/{}".format(sensor), sensor: current_value})
    return ret
import logging
import sys
from collections.abc import Iterable, Mapping, Sequence
log = logging.getLogger(__name__)
def is_non_string_iterable(obj):
    """
    Return True if obj is iterable and not a string, False otherwise.

    Python 2/3 compatible way to test for non-string iterables (in Python 3
    basestring collapses to (str, bytes)).
    """
    if isinstance(obj, str):
        return False
    return isinstance(obj, Iterable)
def is_non_string_sequence(obj):
    """
    Return True if obj is a sequence and not a string, False otherwise.

    Python 2/3 compatible way to test for non-string sequences (in Python 3
    basestring collapses to (str, bytes)).
    """
    if isinstance(obj, str):
        return False
    return isinstance(obj, Sequence)
def extract_masters(opts, masters="master", port=None, raise_if_empty=True):
    """
    Parses opts and generates a list of master (host,port) addresses.

    By default looks for list of masters in opts['master'] and uses
    opts['master_port'] as the default port when otherwise not provided.

    Use the opts key given by masters for the masters list, default is 'master'
    If parameter port is not None then uses the default port given by port

    Returns a list of host address dicts of the form

    [
        {
            'external': (host,port),
            'internal': (host, port)
        },
        ...
    ]

    When only one address is provided it is assigned to the external address field
    When not provided the internal address field is set to None.

    For a given master the syntax options are as follows:

    hostname [port]

    external: hostname [port]
    [internal: hostaddress [port]]

    Where the hostname string could be either an FQDN or host address
    in dotted number notation.
        master.example.com
        10.0.2.110

    And the hostadress is in dotted number notation

    The space delimited port is optional and if not provided a default is used.
    The internal address is optional and if not provided is set to None

    Examples showing the YAML in /etc/salt/master conf file:

    1) Single host name string (fqdn or dotted address)
        a)
            master: me.example.com
        b)
            master: localhost
        c)
            master: 10.0.2.205

    2) Single host name string with port
        a)
            master: me.example.com 4506
        b)
            master: 10.0.2.205 4510

    3) Single master with external and optional internal host addresses for nat
       in a dict

        master:
            external: me.example.com 4506
            internal: 10.0.2.100 4506

    3) One or host host names with optional ports in a list

        master:
            - me.example.com 4506
            - you.example.com 4510
            - 8.8.8.8
            - they.example.com 4506
            - 8.8.4.4 4506

    4) One or more host name with external and optional internal host addresses
       for Nat  in a list of dicts

        master:
            -
                external: me.example.com 4506
                internal: 10.0.2.100 4506

            -
                external: you.example.com 4506
                internal: 10.0.2.101 4506

            -
                external: we.example.com

            - they.example.com
    """
    if port is not None:
        master_port = opts.get(port)
    else:
        master_port = opts.get("master_port")
    try:
        master_port = int(master_port)
    except (TypeError, ValueError):
        # int(None) raises TypeError (not ValueError) when the option is
        # missing entirely; catch both so we fall through to the explicit
        # error below instead of crashing with an unhandled TypeError.
        master_port = None
    if not master_port:
        emsg = "Invalid or missing opts['master_port']."
        log.error(emsg)
        raise ValueError(emsg)
    entries = opts.get(masters, [])
    if not entries:
        emsg = "Invalid or missing opts['{}'].".format(masters)
        log.error(emsg)
        if raise_if_empty:
            raise ValueError(emsg)
    hostages = []
    # extract candidate hostage (hostname dict) from entries
    if is_non_string_sequence(entries):  # multiple master addresses provided
        for entry in entries:
            if isinstance(entry, Mapping):  # mapping
                external = entry.get("external", "")
                internal = entry.get("internal", "")
                hostages.append(dict(external=external, internal=internal))
            elif isinstance(entry, str):  # string
                external = entry
                internal = ""
                hostages.append(dict(external=external, internal=internal))
    elif isinstance(entries, Mapping):  # mapping
        external = entries.get("external", "")
        internal = entries.get("internal", "")
        hostages.append(dict(external=external, internal=internal))
    elif isinstance(entries, str):  # string
        external = entries
        internal = ""
        hostages.append(dict(external=external, internal=internal))
    # now parse each hostname string for host and optional port
    masters = []
    for hostage in hostages:
        external = hostage["external"]
        internal = hostage["internal"]
        if external:
            external = parse_hostname(external, master_port)
            if not external:
                continue  # must have a valid external host address
            internal = parse_hostname(internal, master_port)
            masters.append(dict(external=external, internal=internal))
    return masters
def parse_hostname(hostname, default_port):
    """
    Parse a hostname string into a (host, port) tuple.

    The expected form is ``"host port"`` with a space delimited port; when no
    port is present *default_port* is used. As a foolishness guard a single
    colon delimited ``host:port`` is also recognised, although this is
    strongly discouraged since IPv6 addresses contain colons (a valid IPv6
    address has at least two colons and is therefore never split on one).

    Returns None when anything about the input is invalid.
    """
    try:
        text = hostname.strip()
    except AttributeError:
        # Not a string (e.g. None)
        return None
    host, _, port = text.rpartition(" ")
    if not port:
        # Completely empty input
        return None
    if not host:
        # No space separated port: the whole string is the host
        host, port = port, default_port
    if host.count(":") == 1:
        # Possibly a colon delimited port (never IPv6, which has >= 2 colons)
        host, _, port = host.rpartition(":")
        if not host:
            # Colon but no host, so invalid
            return None
        if not port:
            # Colon but no port, so use the default
            port = default_port
    host = host.strip()
    try:
        port = int(port)
    except ValueError:
        return None
    return (host, port)
import salt.utils.json
__virtualname__ = "redis"
def __virtual__():
    """
    Only load if the redis module is in __salt__
    """
    return __virtualname__ if "redis.get_key" in __salt__ else False
def ext_pillar(minion_id, pillar, function, **kwargs):
    """
    Dispatch to the module-level function named by *function* to pull pillar
    data out of redis; unknown or private names yield no data.
    """
    # Refuse private names and anything not defined in this module.
    if function.startswith("_") or function not in globals():
        return {}
    handler = globals()[function]
    return handler(minion_id, pillar, **kwargs)
def key_value(minion_id, pillar, pillar_key="redis_pillar"):  # pylint: disable=W0613
    """
    Fetch the redis key named after the minion id and convert it to pillar
    data according to its redis type: str for strings, dict for hashes and
    list for lists, sets and sorted sets. Unhandled types yield no data.

    pillar_key
        Pillar key to return data into
    """
    key_type = __salt__["redis.key_type"](minion_id)
    if key_type == "string":
        return {pillar_key: __salt__["redis.get_key"](minion_id)}
    if key_type == "hash":
        return {pillar_key: __salt__["redis.hgetall"](minion_id)}
    if key_type == "list":
        length = __salt__["redis.llen"](minion_id)
        if not length:
            return {}
        return {pillar_key: __salt__["redis.lrange"](minion_id, 0, length - 1)}
    if key_type == "set":
        return {pillar_key: __salt__["redis.smembers"](minion_id)}
    if key_type == "zset":
        cardinality = __salt__["redis.zcard"](minion_id)
        if not cardinality:
            return {}
        return {pillar_key: __salt__["redis.zrange"](minion_id, 0, cardinality - 1)}
    # Return nothing for unhandled types
    return {}
def key_json(minion_id, pillar, pillar_key=None):  # pylint: disable=W0613
    """
    Pulls a string from redis and deserializes it from json. When the
    deserialized value is a dict and no pillar_key is given, it is merged
    directly into the top level of the pillar.

    pillar_key
        Pillar key to return data into
    """
    raw = __salt__["redis.get_key"](minion_id)
    # Non-existent keys contribute nothing.
    if not raw:
        return {}
    data = salt.utils.json.loads(raw)
    if pillar_key:
        return {pillar_key: data}
    if isinstance(data, dict):
        # Top-level dict merges straight into the pillar.
        return data
    return {"redis_pillar": data}
r"""
Use remote Mercurial repository as a Pillar source.
.. versionadded:: 2015.8.0
The module depends on the ``hglib`` python module being available.
This is the same requirement as for hgfs\_ so should not pose any extra
hurdles.
This external Pillar source can be configured in the master config file as such:
.. code-block:: yaml
ext_pillar:
- hg: ssh://hg@example.co/user/repo
"""
import copy
import hashlib
import logging
import os
import salt.pillar
import salt.utils.stringutils
try:
import hglib
except ImportError:
hglib = None
log = logging.getLogger(__name__)
__virtualname__ = "hg"
def __virtual__():
    """
    Only load if hglib is available and an hg ext_pillar is configured.
    """
    # Each ext_pillar entry is a single-key dict; look for an "hg" one.
    # (Dropped the pointless list copies around the generator and any().)
    ext_pillar_sources = __opts__.get("ext_pillar", [])
    if not any("hg" in x for x in ext_pillar_sources):
        return False
    if not hglib:
        log.error("hglib not present")
        return False
    return __virtualname__
def __init__(__opts__):
    """
    Initialise.

    This is called every time a minion calls this external pillar.
    No per-call setup is currently required, so the body is intentionally
    empty.
    """
def ext_pillar(minion_id, pillar, repo, branch="default", root=None):
    """
    Extract pillar from an hg repository
    """
    with Repo(repo) as hg_repo:
        hg_repo.update(branch)
        # Map the hg "default" branch onto the salt "base" environment.
        envname = "base" if branch == "default" else branch
        if root:
            path = os.path.normpath(os.path.join(hg_repo.working_dir, root))
        else:
            path = hg_repo.working_dir
        # Point the pillar roots for this environment at the checkout, then
        # compile without ext pillars to avoid recursing into ourselves.
        opts = copy.deepcopy(__opts__)
        opts["pillar_roots"][envname] = [path]
        compiler = salt.pillar.Pillar(opts, __grains__, minion_id, envname)
        return compiler.compile_pillar(ext=False)
def update(repo_uri):
    """
    Execute an hg pull on all the repos
    """
    with Repo(repo_uri) as hg_repo:
        hg_repo.pull()
class Repo:
    """
    Context-manager wrapper around a locally cached clone of a remote hg
    (mercurial) repository used as a Pillar source.
    """

    def __init__(self, repo_uri):
        """Initialize a hg repo (or open it if it already exists)"""
        self.repo_uri = repo_uri
        # Cache clones under a hash of the uri so distinct repos never collide.
        cachedir = os.path.join(__opts__["cachedir"], "hg_pillar")
        hash_type = getattr(hashlib, __opts__.get("hash_type", "md5"))
        repo_hash = hash_type(salt.utils.stringutils.to_bytes(repo_uri)).hexdigest()
        self.working_dir = os.path.join(cachedir, repo_hash)
        if os.path.isdir(self.working_dir):
            self.repo = hglib.open(self.working_dir)
        else:
            self.repo = hglib.clone(repo_uri, self.working_dir)
            self.repo.open()

    def pull(self):
        log.debug("Updating hg repo from hg_pillar module (pull)")
        self.repo.pull()

    def update(self, branch="default"):
        """
        Ensure we are using the latest revision in the hg repository
        """
        self.pull()
        log.debug("Updating hg repo from hg_pillar module (update)")
        self.repo.update(branch, clean=True)

    def close(self):
        """
        Cleanup mercurial command server
        """
        self.repo.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
import logging
import re
try:
import pymongo
HAS_PYMONGO = True
except ImportError:
HAS_PYMONGO = False
# Connection defaults; each key can be overridden via the matching option in
# the master config (mongo.host, mongo.port, mongo.user, mongo.password,
# mongo.db).
__opts__ = {
    "mongo.db": "salt",
    "mongo.host": "salt",
    "mongo.password": "",
    "mongo.port": 27017,
    "mongo.user": "",
}
def __virtual__():
    """
    Only load when the pymongo bindings imported successfully.
    """
    return "mongo" if HAS_PYMONGO else False
# Set up logging
log = logging.getLogger(__name__)
def ext_pillar(
    minion_id,
    pillar,  # pylint: disable=W0613
    collection="pillar",
    id_field="_id",
    re_pattern=None,
    re_replace="",
    fields=None,
):
    """
    Connect to a mongo database and read per-node pillar information.

    Parameters:
        * `collection`: The mongodb collection to read data from. Defaults to
          ``'pillar'``.
        * `id_field`: The field in the collection that represents an individual
          minion id. Defaults to ``'_id'``.
        * `re_pattern`: If your naming convention in the collection is shorter
          than the minion id, you can use this to trim the name.
          `re_pattern` will be used to match the name, and `re_replace` will
          be used to replace it. Backrefs are supported as they are in the
          Python standard library. If ``None``, no mangling of the name will
          be performed - the collection will be searched with the entire
          minion id. Defaults to ``None``.
        * `re_replace`: Use as the replacement value in node ids matched with
          `re_pattern`. Defaults to ''. Feel free to use backreferences here.
        * `fields`: The specific fields in the document to use for the pillar
          data. If ``None``, will use the entire document. If using the
          entire document, the ``_id`` field will be converted to string. Be
          careful with other fields in the document as they must be string
          serializable. Defaults to ``None``.
    """
    host = __opts__["mongo.host"]
    port = __opts__["mongo.port"]
    log.info("connecting to %s:%s for mongo ext_pillar", host, port)
    client = pymongo.MongoClient(host, port)
    log.debug("using database '%s'", __opts__["mongo.db"])
    db = client[__opts__["mongo.db"]]
    user = __opts__.get("mongo.user")
    password = __opts__.get("mongo.password")
    if user and password:
        log.debug("authenticating as '%s'", user)
        db.authenticate(user, password)
    # Do the regex string replacement on the minion id
    if re_pattern:
        minion_id = re.sub(re_pattern, re_replace, minion_id)
    log.info(
        "ext_pillar.mongo: looking up pillar def for {'%s': '%s'} in mongo",
        id_field,
        minion_id,
    )
    result = db[collection].find_one({id_field: minion_id}, projection=fields)
    if not result:
        # If we can't find the minion the database it's not necessarily an
        # error.
        log.debug("ext_pillar.mongo: no document found in collection %s", collection)
        return {}
    if fields:
        log.debug("ext_pillar.mongo: found document, returning fields '%s'", fields)
    else:
        log.debug("ext_pillar.mongo: found document, returning whole doc")
    if "_id" in result:
        # Converting _id to a string will avoid the most common serialization
        # error cases, but DBRefs and whatnot will still cause problems.
        result["_id"] = str(result["_id"])
    return result
import logging
import os
import pickle
import time
from copy import deepcopy
import salt.utils.files
import salt.utils.hashutils
from salt.pillar import Pillar
HAS_LIBS = False
try:
# pylint: disable=no-name-in-module
from azure.storage.blob import BlobServiceClient
# pylint: enable=no-name-in-module
HAS_LIBS = True
except ImportError:
pass
__virtualname__ = "azureblob"
# Set up logging
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only load when the Azure Storage Blob SDK (>= 12) is importable.
    """
    if HAS_LIBS:
        return __virtualname__
    return (
        False,
        "The following dependency is required to use the Azure Blob ext_pillar: "
        "Microsoft Azure Storage Blob >= 12.0.0 ",
    )
def ext_pillar(
    minion_id,
    pillar,  # pylint: disable=W0613
    container,
    connection_string,
    multiple_env=False,
    environment="base",
    blob_cache_expire=30,
    blob_sync_on_update=True,
):
    """
    Sync an Azure Blob container to a local cache and compile its contents
    into pillar data for the given minion.
    :param container: The name of the target Azure Blob Container.
    :param connection_string: The connection string to use to access the specified Azure Blob Container.
    :param multiple_env: Specifies whether the pillar should interpret top level folders as pillar environments.
        Defaults to false.
    :param environment: Specifies which environment the container represents when in single environment mode. Defaults
        to 'base' and is ignored if multiple_env is set as True.
    :param blob_cache_expire: Specifies expiration time of the Azure Blob metadata cache file. Defaults to 30s.
    :param blob_sync_on_update: Specifies if the cache is synced on update. Defaults to True.
    """
    # normpath is needed to remove appended '/' if root is empty string.
    pillar_dir = os.path.normpath(
        os.path.join(_get_cache_dir(), environment, container)
    )
    # If pillar_roots already points solely at our cache dir, a previous pass
    # set everything up; avoid doing the work (and the recursion) again.
    if __opts__["pillar_roots"].get(environment, []) == [pillar_dir]:
        return {}
    # Load (or refresh) the cached blob metadata for this container.
    metadata = _init(
        connection_string, container, multiple_env, environment, blob_cache_expire
    )
    log.debug("Blob metadata: %s", metadata)
    if blob_sync_on_update:
        # sync the containers to the local cache
        log.info("Syncing local pillar cache from Azure Blob...")
        for saltenv, env_meta in metadata.items():
            for container, files in _find_files(env_meta).items():
                for file_path in files:
                    cached_file_path = _get_cached_file_name(
                        container, saltenv, file_path
                    )
                    log.info("%s - %s : %s", container, saltenv, file_path)
                    # load the file from Azure Blob if not in the cache or too old
                    _get_file_from_blob(
                        connection_string,
                        metadata,
                        saltenv,
                        container,
                        file_path,
                        cached_file_path,
                    )
        log.info("Sync local pillar cache from Azure Blob completed.")
    # Compile the cached files with the regular pillar machinery, pointing
    # pillar_roots at the local cache directory for this environment.
    opts = deepcopy(__opts__)
    opts["pillar_roots"][environment] = (
        [os.path.join(pillar_dir, environment)] if multiple_env else [pillar_dir]
    )
    # Avoid recursively re-adding this same pillar
    opts["ext_pillar"] = [x for x in opts["ext_pillar"] if "azureblob" not in x]
    pil = Pillar(opts, __grains__, minion_id, environment)
    compiled_pillar = pil.compile_pillar(ext=False)
    return compiled_pillar
def _init(connection_string, container, multiple_env, environment, blob_cache_expire):
    """
    .. versionadded:: 3001
    Return the cached blob metadata for *container*, refreshing it from the
    Azure Blob service whenever the local cache file is missing or older
    than *blob_cache_expire* seconds.
    :param connection_string: The connection string to use to access the specified Azure Blob Container.
    :param container: The name of the target Azure Blob Container.
    :param multiple_env: Specifies whether the pillar should interpret top level folders as pillar environments.
    :param environment: Specifies which environment the container represents when in single environment mode.
    :param blob_cache_expire: Specifies expiration time of the Azure Blob metadata cache file.
    """
    cache_file = _get_containers_cache_filename(container)
    exp = time.time() - blob_cache_expire
    # A missing cache file is treated as mtime 0 (the epoch), which always
    # counts as expired and forces a refresh.
    if os.path.isfile(cache_file):
        cache_file_mtime = os.path.getmtime(cache_file)
    else:
        cache_file_mtime = 0
    expired = cache_file_mtime <= exp
    log.debug(
        "Blob storage container cache file %s is %sexpired, mtime_diff=%ss,"
        " expiration=%ss",
        cache_file,
        "" if expired else "not ",
        cache_file_mtime - exp,
        blob_cache_expire,
    )
    if expired:
        pillars = _refresh_containers_cache_file(
            connection_string, container, cache_file, multiple_env, environment
        )
    else:
        pillars = _read_containers_cache_file(cache_file)
    log.debug("Blob container retrieved pillars %s", pillars)
    return pillars
def _get_cache_dir():
    """
    .. versionadded:: 3001
    Return the pillar cache directory, creating it on first use.
    """
    path = os.path.join(__opts__["cachedir"], "pillar_azureblob")
    if not os.path.isdir(path):
        log.debug("Initializing Azure Blob Pillar Cache")
        os.makedirs(path)
    return path
def _get_cached_file_name(container, saltenv, path):
    """
    .. versionadded:: 3001
    Return the local cache path for a container file, creating any missing
    parent directories along the way.
    :param container: The name of the target Azure Blob Container.
    :param saltenv: Specifies which environment the container represents.
    :param path: The path of the file in the container.
    """
    file_path = os.path.join(_get_cache_dir(), saltenv, container, path)
    parent = os.path.dirname(file_path)
    # make sure the container and saltenv directories exist
    if not os.path.exists(parent):
        os.makedirs(parent)
    return file_path
def _get_containers_cache_filename(container):
    """
    .. versionadded:: 3001
    Return the path of the metadata cache file for *container*, making sure
    the cache directory exists first.
    :param container: The name of the target Azure Blob Container.
    """
    cache_dir = _get_cache_dir()
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    return os.path.join(cache_dir, "{}-files.cache".format(container))
def _refresh_containers_cache_file(
    connection_string, container, cache_file, multiple_env=False, environment="base"
):
    """
    .. versionadded:: 3001
    Walk every blob in the container, collect their metadata per salt
    environment, and pickle the result to *cache_file*.
    :param connection_string: The connection string to use to access the specified Azure Blob Container.
    :param container: The name of the target Azure Blob Container.
    :param cache_file: The path of where the file will be cached.
    :param multiple_env: Specifies whether the pillar should interpret top level folders as pillar environments.
    :param environment: Specifies which environment the container represents when in single environment mode. This is
        ignored if multiple_env is set as True.
    :return: The collected metadata dict, or ``False`` when the Azure client
        could not be created.
    """
    try:
        # Create the BlobServiceClient object which will be used to create a container client
        blob_service_client = BlobServiceClient.from_connection_string(
            connection_string
        )
        # Create the ContainerClient object
        container_client = blob_service_client.get_container_client(container)
    except Exception as exc:  # pylint: disable=broad-except
        log.error("Exception: %s", exc)
        return False
    metadata = {}
    def _walk_blobs(saltenv="base", prefix=None):
        # Iterate the generator directly instead of the previous manual
        # while/next/StopIteration loop — same traversal, idiomatic form.
        for blob in container_client.walk_blobs(name_starts_with=prefix):
            log.debug("Raw blob attributes: %s", blob)
            # Directories end with "/".
            if blob.name.endswith("/"):
                # Recurse into the directory
                _walk_blobs(prefix=blob.name)
                continue
            if multiple_env:
                # Top-level folder name doubles as the salt environment.
                saltenv = "base" if (not prefix or prefix == ".") else prefix[:-1]
            if saltenv not in metadata:
                metadata[saltenv] = {}
            if container not in metadata[saltenv]:
                metadata[saltenv][container] = []
            metadata[saltenv][container].append(blob)
    _walk_blobs(saltenv=environment)
    # write the metadata to disk, replacing any stale cache file
    if os.path.isfile(cache_file):
        os.remove(cache_file)
    log.debug("Writing Azure blobs pillar cache file")
    with salt.utils.files.fopen(cache_file, "wb") as fp_:
        pickle.dump(metadata, fp_)
    return metadata
def _read_containers_cache_file(cache_file):
    """
    .. versionadded:: 3001
    Unpickle and return the container metadata stored in *cache_file*.
    :param cache_file: The path for where the file will be cached.
    """
    log.debug("Reading containers cache file")
    with salt.utils.files.fopen(cache_file, "rb") as fp_:
        return pickle.load(fp_)
def _find_files(metadata):
"""
.. versionadded:: 3001
Looks for all the files in the Azure Blob container cache metadata.
:param metadata: The metadata for the container files.
"""
ret = {}
for container, data in metadata.items():
if container not in ret:
ret[container] = []
# grab the paths from the metadata
file_paths = [k["name"] for k in data]
# filter out the dirs
ret[container] += [k for k in file_paths if not k.endswith("/")]
return ret
def _find_file_meta(metadata, container, saltenv, path):
"""
.. versionadded:: 3001
Looks for a file's metadata in the Azure Blob Container cache file.
:param metadata: The metadata for the container files.
:param container: The name of the target Azure Blob Container.
:param saltenv: Specifies which environment the container represents.
:param path: The path of the file in the container.
"""
env_meta = metadata[saltenv] if saltenv in metadata else {}
container_meta = env_meta[container] if container in env_meta else {}
for item_meta in container_meta:
item_meta = dict(item_meta)
if "name" in item_meta and item_meta["name"] == path:
return item_meta
def _get_file_from_blob(
    connection_string, metadata, saltenv, container, path, cached_file_path
):
    """
    .. versionadded:: 3001
    Download a single blob into the local pillar cache, skipping the
    download when the cached copy's MD5 already matches the blob's etag.
    :param connection_string: The connection string to use to access the specified Azure Blob Container.
    :param metadata: The metadata for the container files.
    :param saltenv: Specifies which environment the container represents when in single environment mode. This is
        ignored if multiple_env is set as True.
    :param container: The name of the target Azure Blob Container.
    :param path: The path of the file in the container.
    :param cached_file_path: The path of where the file will be cached.
    """
    # check the local cache...
    if os.path.isfile(cached_file_path):
        file_meta = _find_file_meta(metadata, container, saltenv, path)
        # Strip the etag down to its alphanumeric characters before
        # comparing — assumes the blob etag encodes the content MD5
        # (TODO confirm against the Azure service behavior).
        file_md5 = (
            "".join(list(filter(str.isalnum, file_meta["etag"]))) if file_meta else None
        )
        cached_md5 = salt.utils.hashutils.get_hash(cached_file_path, "md5")
        # hashes match we have a cache hit
        log.debug(
            "Cached file: path=%s, md5=%s, etag=%s",
            cached_file_path,
            cached_md5,
            file_md5,
        )
        if cached_md5 == file_md5:
            return
    try:
        # Create the BlobServiceClient object which will be used to create a container client
        blob_service_client = BlobServiceClient.from_connection_string(
            connection_string
        )
        # Create the ContainerClient object
        container_client = blob_service_client.get_container_client(container)
        # Create the BlobClient object
        blob_client = container_client.get_blob_client(path)
    except Exception as exc:  # pylint: disable=broad-except
        # NOTE: returns False here, while the normal path returns None.
        log.error("Exception: %s", exc)
        return False
    # Overwrite the cached copy with the blob's current contents.
    with salt.utils.files.fopen(cached_file_path, "wb") as outfile:
        outfile.write(blob_client.download_blob().readall())
return | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/pillar/azureblob.py | 0.666497 | 0.178741 | azureblob.py | pypi |
import copy
import logging
import re
import salt.cache
import salt.utils.data
import salt.utils.minions
from salt._compat import ipaddress
log = logging.getLogger(__name__)
def targets(tgt, tgt_type="glob", **kwargs):  # pylint: disable=W0613
    """
    Return the targets from the Salt Masters' minion cache.
    All targets and matchers are supported.
    The resulting roster can be configured using ``roster_order`` and ``roster_default``.
    """
    # Resolve the target expression against the master's minion cache.
    minions = salt.utils.minions.CkMinions(__opts__)
    _res = minions.check_minions(tgt, tgt_type)
    minions = _res["minions"]
    ret = {}
    if not minions:
        return ret
    # log.debug(minions)
    cache = salt.cache.Cache(__opts__)
    # Data sources to consult, in order, for each roster parameter; by
    # default prefer private/global IPv6, then private/public IPv4.
    roster_order = __opts__.get(
        "roster_order",
        {"host": ("ipv6-private", "ipv6-global", "ipv4-private", "ipv4-public")},
    )
    ret = {}
    for minion_id in minions:
        try:
            minion = _load_minion(minion_id, cache)
        except LookupError:
            # Cache returned data for a different minion id; skip it.
            continue
        minion_res = copy.deepcopy(__opts__.get("roster_defaults", {}))
        for param, order in roster_order.items():
            if not isinstance(order, (list, tuple)):
                order = [order]
            # First lookup key that yields a truthy value wins.
            for key in order:
                kres = _minion_lookup(minion_id, key, minion)
                if kres:
                    minion_res[param] = kres
                    break
        # A roster entry is only usable when a host could be resolved.
        if "host" in minion_res:
            ret[minion_id] = minion_res
        else:
            log.warning("Could not determine host information for minion %s", minion_id)
    log.debug("Roster lookup result: %s", ret)
    return ret
def _load_minion(minion_id, cache):
    """
    Gather grains, pillar, sorted IP addresses and mine data for one minion.
    :raises LookupError: when the cache returns data for a different minion
        id than the one requested.
    """
    data_minion, grains, pillar = salt.utils.minions.get_minion_data(
        minion_id, __opts__
    )
    if data_minion != minion_id:
        log.error("Asked for minion %s, got %s", minion_id, data_minion)
        raise LookupError
    if not grains:
        log.warning("No grain data for minion id %s", minion_id)
        grains = {}
    if not pillar:
        log.warning("No pillar data for minion id %s", minion_id)
        pillar = {}
    # Sort the addresses so later CIDR/filter lookups are deterministic.
    addrs = {
        4: sorted(ipaddress.IPv4Address(item) for item in grains.get("ipv4", [])),
        6: sorted(ipaddress.IPv6Address(item) for item in grains.get("ipv6", [])),
    }
    mine = cache.fetch("minions/{}".format(minion_id), "mine")
    return grains, pillar, addrs, mine
def _data_lookup(ref, lookup):
    """
    Traverse *ref* for every key in *lookup* and collect the truthy results.
    """
    keys = [lookup] if isinstance(lookup, str) else lookup
    found = []
    for data_key in keys:
        data = salt.utils.data.traverse_dict_and_list(ref, data_key, None)
        if data:
            found.append(data)
    return found
def _minion_lookup(minion_id, key, minion):
grains, pillar, addrs, mine = minion
if key == "id":
# Just paste in the minion ID
return minion_id
elif isinstance(key, dict):
# Lookup the key in the dict
for data_id, lookup in key.items():
ref = {"pillar": pillar, "grain": grains, "mine": mine}[data_id]
for k in _data_lookup(ref, lookup):
if k:
return k
return None
elif key.startswith("sdb://"):
# It's a Salt SDB url
return salt["sdb.get"](key)
elif re.match(r"^[0-9a-fA-F:./]+$", key):
# It smells like a CIDR block
try:
net = ipaddress.ip_network(key, strict=True)
except ValueError:
log.error("%s is an invalid CIDR network", net)
return None
for addr in addrs[net.version]:
if addr in net:
return str(addr)
else:
# Take the addresses from the grains and filter them
filters = {
"global": lambda addr: addr.is_global
if addr.version == 6
else not addr.is_private,
"public": lambda addr: not addr.is_private,
"private": lambda addr: addr.is_private
and not addr.is_loopback
and not addr.is_link_local,
"local": lambda addr: addr.is_loopback,
}
ip_vers = [4, 6]
if key.startswith("ipv"):
ip_vers = [int(key[3])]
key = key[5:]
for ip_ver in ip_vers:
try:
for addr in addrs[ip_ver]:
if filters[key](addr):
return str(addr)
except KeyError:
raise KeyError(
"Invalid filter {} specified in roster_order".format(key)
) | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/roster/cache.py | 0.620966 | 0.262065 | cache.py | pypi |
import copy
import fnmatch
import logging
log = logging.getLogger(__name__)
# Try to import range from https://github.com/ytoolshed/range
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
log.error("Unable to load range library")
# pylint: enable=import-error
def __virtual__():
    """
    Only load this roster when the seco.range library is importable.
    """
    return HAS_RANGE
def targets(tgt, tgt_type="range", **kwargs):
    """
    Expand *tgt* through the configured range server and build the
    salt-ssh roster data for the resulting hosts.
    """
    r = seco.range.Range(__opts__["range_server"])
    log.debug("Range connection to '%s' established", __opts__["range_server"])
    hosts = []
    try:
        log.debug("Querying range for '%s'", tgt)
        hosts = r.expand(tgt)
    except seco.range.RangeException as err:
        log.error("Range server exception: %s", err)
        return {}
    log.debug("Range responded with: '%s'", hosts)
    # Only raw range entries are supported; "glob" deliberately reuses the
    # same pass-through handler as "range".
    dispatch = {
        "range": target_range,
        "glob": target_range,
    }
    log.debug("Filtering using tgt_type: '%s'", tgt_type)
    filter_func = dispatch.get(tgt_type)
    if filter_func is None:
        raise NotImplementedError
    targeted_hosts = filter_func(tgt, hosts)
    log.debug("Targeting data for salt-ssh: '%s'", targeted_hosts)
    return targeted_hosts
def target_range(tgt, hosts):
    """
    Build a roster entry for every host returned by the range query.
    """
    ret = {}
    for host in hosts:
        entry = copy.deepcopy(__opts__.get("roster_defaults", {}))
        entry["host"] = host
        if __opts__.get("ssh_user"):
            entry["user"] = __opts__["ssh_user"]
        ret[host] = entry
    return ret
def target_glob(tgt, hosts):
    """
    Build roster entries for the hosts whose name matches the glob
    pattern *tgt*.
    :param tgt: A shell-style glob pattern.
    :param hosts: Hostnames returned by the range server.
    """
    ret = {}
    for host in hosts:
        # Fixed: fnmatch(name, pattern) — the host is the name under test
        # and tgt the pattern; the arguments were previously reversed, so
        # patterns like 'web*' never matched.
        if fnmatch.fnmatch(host, tgt):
            ret[host] = copy.deepcopy(__opts__.get("roster_defaults", {}))
            ret[host].update({"host": host})
            if __opts__.get("ssh_user"):
                ret[host].update({"user": __opts__["ssh_user"]})
    return ret
import copy
import logging
import socket
import salt.utils.network
from salt._compat import ipaddress
log = logging.getLogger(__name__)
def targets(tgt, tgt_type="glob", **kwargs):
    """
    Scan the targeted addresses/networks for open SSH ports and return
    the resulting roster data.
    """
    return RosterMatcher(tgt, tgt_type).targets()
class RosterMatcher:
    """
    Resolve scan targets (addresses or CIDR networks) into reachable
    SSH hosts.
    """

    def __init__(self, tgt, tgt_type):
        self.tgt = tgt
        self.tgt_type = tgt_type

    def targets(self):
        """
        Return ip addrs based on netmask, sitting in the "glob" spot because
        it is the default
        """
        ports = __opts__["ssh_scan_ports"]
        if not isinstance(ports, list):
            # Comma-separated list of integers
            ports = [int(port) for port in str(ports).split(",")]
        tgts = self.tgt if self.tgt_type == "list" else [self.tgt]
        # Expand each target into individual addresses; CIDR blocks
        # contribute all of their host addresses.
        addrs = []
        for tgt in tgts:
            try:
                addrs.append(ipaddress.ip_address(tgt))
            except ValueError:
                try:
                    addrs.extend(ipaddress.ip_network(tgt).hosts())
                except ValueError:
                    pass
        ret = {}
        for addr in addrs:
            addr = str(addr)
            ret[addr] = copy.deepcopy(__opts__.get("roster_defaults", {}))
            log.trace("Scanning host: %s", addr)
            for port in ports:
                log.trace("Scanning port: %s", port)
                try:
                    sock = salt.utils.network.get_socket(addr, socket.SOCK_STREAM)
                    sock.settimeout(float(__opts__["ssh_scan_timeout"]))
                    sock.connect((addr, port))
                    sock.shutdown(socket.SHUT_RDWR)
                    sock.close()
                    ret[addr].update({"host": addr, "port": port})
                except OSError:
                    pass
        return ret
import cherrypy
from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
from ws4py.websocket import WebSocket
# pylint: enable=3rd-party-module-not-gated
# Register the ws4py WebSocket tool with CherryPy and subscribe the plugin
# that manages websocket threads alongside the CherryPy engine lifecycle.
cherrypy.tools.websocket = WebSocketTool()
WebSocketPlugin(cherrypy.engine).subscribe()
class SynchronizingWebsocket(WebSocket):
    """
    Per-connection handler for Salt websocket requests.

    Each instance represents one Salt websocket connection. It waits for
    the client to send a ``ready`` message and then signals the sender by
    calling ``send()`` on its end of a multiprocessing pipe, synchronizing
    parallel senders with the completed websocket handshake.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Must be set by the caller to the *parent end* of a
        # multiprocessing pipe; used to signal connection readiness.
        self.pipe = None
        # Token usable for API calls made on behalf of this connection
        # (e.g. kicking off information-probing jobs).
        self.token = None
        # Salt configuration options for this connection.
        self.opts = None

    def received_message(self, message):
        """
        Handle an incoming message, reacting only to the client ready
        handshake: forward it through ``self.pipe`` (synchronizing the
        sender side) and acknowledge it to the client.
        """
        if message.data.decode("utf-8") != "websocket client ready":
            return
        self.pipe.send(message)
        self.send("server received message", False)
import logging
from salt.exceptions import SaltSystemExit
# This must be present or the Salt loader won't load this module.
__proxyenabled__ = ["cisconso"]
try:
from pynso.client import NSOClient
from pynso.datastores import DatastoreType
HAS_PYNSO_LIBS = True
except ImportError:
HAS_PYNSO_LIBS = False
# Variables are scoped to this module so we can have persistent data
# across calls to fns in here.
GRAINS_CACHE = {}
DETAILS = {}
# Set up logging
log = logging.getLogger(__file__)
# Define the module's virtual name
__virtualname__ = "cisconso"
def __virtual__():
    """
    Only load this proxy module when the pynso library is importable.
    """
    return HAS_PYNSO_LIBS
def init(opts):
    """
    Capture the NSO connection settings from the proxy configuration.
    """
    proxy_conf = opts["proxy"]
    DETAILS["host"] = proxy_conf.get("host")
    DETAILS["username"] = proxy_conf.get("username")
    DETAILS["password"] = proxy_conf.get("password")
    DETAILS["use_ssl"] = bool(proxy_conf.get("use_ssl"))
    DETAILS["port"] = int(proxy_conf.get("port"))
def grains():
    """
    Return the cached grains for the proxied device, probing the device
    on the first call.
    """
    if GRAINS_CACHE:
        return GRAINS_CACHE
    return _grains()
def _get_client():
    """
    Build an NSOClient from the stored connection details.
    """
    client = NSOClient(
        host=DETAILS["host"],
        username=DETAILS["username"],
        password=DETAILS["password"],
        port=DETAILS["port"],
        ssl=DETAILS["use_ssl"],
    )
    return client
def ping():
    """
    Return ``True`` when the NSO host answers an info request, ``False``
    otherwise.
    CLI Example:
    .. code-block:: bash
        salt cisco-nso test.ping
    """
    try:
        _get_client().info()
    except SaltSystemExit as err:
        log.warning(err)
        return False
    return True
def shutdown():
    """
    No-op shutdown hook — the NSO connection is stateless HTTP, so there
    is nothing to tear down.
    """
    log.debug("Cisco NSO proxy shutdown() called...")
def get_data(datastore, path):
    """
    Return the configuration subtree at *path* from the given datastore.
    :param datastore: One of the NETCONF IETF datastore types
        (:class:`DatastoreType`), e.g. running or operational.
    :param path: Ordered element names addressing the subtree
        (``list`` of ``str`` or ``tuple``).
    :return: The network configuration at that tree as a ``dict``.
    .. code-block:: bash
        salt cisco-nso cisconso.get_data devices
    """
    return _get_client().get_datastore_data(datastore, path)
def set_data_value(datastore, path, data):
    """
    Write *data* at *path* in the given datastore.
    :param datastore: One of the NETCONF IETF datastore types
        (:class:`DatastoreType`), e.g. running or operational.
    :param path: Ordered element names addressing the value to set
        (``list`` of ``str`` or ``tuple``).
    :param data: The new value at the given path (``dict``).
    :return: ``True`` if successful, otherwise error.
    """
    return _get_client().set_data_value(datastore, path, data)
def get_rollbacks():
    """
    Return the list of configuration rollbacks stored on the NSO server.
    """
    client = _get_client()
    return client.get_rollbacks()
def get_rollback(name):
    """
    Return the contents of a stored configuration rollback snapshot.
    :param name: Typically an ID of the backup (``str``).
    :return: the contents of the rollback snapshot (``str``).
    """
    client = _get_client()
    return client.get_rollback(name)
def apply_rollback(datastore, name):
    """
    Apply a stored system rollback to the given datastore.
    :param datastore: One of the NETCONF IETF datastore types
        (:class:`DatastoreType`), e.g. running or operational.
    :param name: an ID of the rollback to restore (``str``).
    """
    client = _get_client()
    return client.apply_rollback(datastore, name)
def _grains():
    """
    Populate the grains cache from the running datastore of all devices
    managed by NSO and return it.
    """
    client = _get_client()
    running_config = client.get_datastore(DatastoreType.RUNNING)
    GRAINS_CACHE.update(running_config)
    return GRAINS_CACHE
import copy
import logging
# Import Salt modules
from salt.exceptions import SaltException
# -----------------------------------------------------------------------------
# proxy properties
# -----------------------------------------------------------------------------
__proxyenabled__ = ["nxos_api"]
# proxy name
# -----------------------------------------------------------------------------
# globals
# -----------------------------------------------------------------------------
__virtualname__ = "nxos_api"
log = logging.getLogger(__name__)
nxos_device = {}
# -----------------------------------------------------------------------------
# property functions
# -----------------------------------------------------------------------------
def __virtual__():
    """
    Always available — the NX-API proxy has no external dependencies.
    """
    return __virtualname__
# -----------------------------------------------------------------------------
# proxy functions
# -----------------------------------------------------------------------------
def init(opts):
    """
    Open the connection to the Nexus switch over the NX-API.

    The transport is plain HTTP, so there is no persistent connection to
    keep; a cheap ``show clock`` RPC is issued once to prove the endpoint
    is reachable before marking the Proxy Minion as up.
    """
    conn_args = copy.deepcopy(opts.get("proxy", {}))
    conn_args.pop("proxytype", None)
    # Not an SSH-based proxy, so multiprocessing is safe to enable.
    opts["multiprocessing"] = conn_args.pop("multiprocessing", True)
    try:
        # Execute a very simple command to confirm we are able to connect properly
        rpc_reply = __utils__["nxos_api.rpc"]("show clock", **conn_args)
        nxos_device["conn_args"] = conn_args
        nxos_device["initialized"] = True
        nxos_device["up"] = True
    except SaltException:
        log.error("Unable to connect to %s", conn_args["host"], exc_info=True)
        raise
    return True
def ping():
    """
    Report whether the NX-API endpoint was reachable at startup.
    """
    return nxos_device.get("up", False)
def initialized():
    """
    Report whether ``init()`` completed successfully.
    """
    return nxos_device.get("initialized", False)
def shutdown(opts):
    """
    Nothing to tear down for the stateless HTTP transport; just log that
    the Proxy Minion is stopping.
    """
    log.debug("Shutting down the nxos_api Proxy Minion %s", opts["id"])
# -----------------------------------------------------------------------------
# callable functions
# -----------------------------------------------------------------------------
def get_conn_args():
    """
    Return a copy of the Proxy Minion's connection arguments, so callers
    cannot mutate the cached originals.
    """
    return copy.deepcopy(nxos_device["conn_args"])
def rpc(commands, method="cli", **kwargs):
    """
    Execute an RPC request over the NX-API.
    :param commands: One or more commands to execute.
    :param method: The NX-API request method, e.g. ``cli``.
    :param kwargs: Per-call overrides for the connection arguments.
    """
    # Work on a copy so per-call kwargs do not permanently pollute the
    # cached connection arguments: the previous code updated the stored
    # dict in place, leaking overrides into every subsequent call
    # (contrast get_conn_args(), which deliberately deepcopies).
    conn_args = copy.deepcopy(nxos_device["conn_args"])
    conn_args.update(kwargs)
    return __utils__["nxos_api.rpc"](commands, method=method, **conn_args)
import logging
import os
import warnings
import salt.utils.kinds as kinds
from salt.exceptions import SaltClientError, SaltSystemExit, get_error_message
from salt.utils import migrations
from salt.utils.platform import is_junos
from salt.utils.process import HAS_PSUTIL
# Process-wide warning filters, appended so user-configured filters keep
# precedence.
# All salt related deprecation warnings should be shown once each!
warnings.filterwarnings(
    "once",  # Show once
    "",  # No deprecation message match
    DeprecationWarning,  # This filter is for DeprecationWarnings
    r"^(salt|salt\.(.*))$",  # Match module(s) 'salt' and 'salt.<whatever>'
    append=True,
)
# While we are supporting Python2.6, hide nested with-statements warnings
warnings.filterwarnings(
    "ignore",
    "With-statements now directly support multiple context managers",
    DeprecationWarning,
    append=True,
)
# Filter the backports package UserWarning about being re-imported
warnings.filterwarnings(
    "ignore",
    "^Module backports was already imported from (.*), but (.*) is being added to"
    " sys.path$",
    UserWarning,
    append=True,
)
# the try block below bypasses an issue at build time so that modules don't
# cause the build to fail
try:
import salt.utils.parsers
from salt.utils.verify import check_user, verify_env, verify_socket
from salt.utils.zeromq import ip_bracket
except ImportError as exc:
if exc.args[0] != "No module named _msgpack":
raise
log = logging.getLogger(__name__)
class DaemonsMixin:  # pylint: disable=no-init
    """
    Uses the same functions for all daemons
    """
    def verify_hash_type(self):
        """
        Verify and display a nag-message to the log if vulnerable hash-type is used.
        :return:
        """
        if self.config["hash_type"].lower() in ["md5", "sha1"]:
            log.warning(
                "IMPORTANT: Do not use %s hashing algorithm! Please set "
                '"hash_type" to sha256 in Salt %s config!',
                self.config["hash_type"],
                self.__class__.__name__,
            )
    def action_log_info(self, action):
        """
        Log the given *action* together with the daemon class name.
        :param action: verb describing what is happening, e.g. "Starting up".
        :return:
        """
        log.info("%s the Salt %s", action, self.__class__.__name__)
    def start_log_info(self):
        """
        Say daemon starting.
        :return:
        """
        log.info("The Salt %s is starting up", self.__class__.__name__)
    def shutdown_log_info(self):
        """
        Say daemon shutting down.
        :return:
        """
        log.info("The Salt %s is shut down", self.__class__.__name__)
    def environment_failure(self, error):
        """
        Log environment failure for the daemon and exit with the error code.
        :param error: the OSError raised while preparing the environment.
        :return:
        """
        log.exception(
            "Failed to create environment for %s: %s",
            self.__class__.__name__,
            get_error_message(error),
        )
        # NOTE(review): the OSError itself is passed as shutdown()'s exit
        # code argument — confirm that is intended.
        self.shutdown(error)
class Master(
    salt.utils.parsers.MasterOptionParser, DaemonsMixin
):  # pylint: disable=no-init
    """
    Creates a master server
    """
    def _handle_signals(self, signum, sigframe):
        # Forward the signal to the master's child processes before the
        # default parser handling runs.
        if hasattr(self.master, "process_manager"):
            # escalate signal to the process manager processes
            self.master.process_manager._handle_signals(signum, sigframe)
        super()._handle_signals(signum, sigframe)
    def prepare(self):
        """
        Run the preparation sequence required to start a salt master server.
        If sub-classed, don't **ever** forget to run:
        super(YourSubClass, self).prepare()
        """
        super().prepare()
        try:
            if self.config["verify_env"]:
                # Directories that must exist with correct ownership and
                # permissions before the master can start.
                v_dirs = [
                    self.config["pki_dir"],
                    os.path.join(self.config["pki_dir"], "minions"),
                    os.path.join(self.config["pki_dir"], "minions_pre"),
                    os.path.join(self.config["pki_dir"], "minions_denied"),
                    os.path.join(self.config["pki_dir"], "minions_autosign"),
                    os.path.join(self.config["pki_dir"], "minions_rejected"),
                    self.config["cachedir"],
                    os.path.join(self.config["cachedir"], "jobs"),
                    os.path.join(self.config["cachedir"], "proc"),
                    self.config["sock_dir"],
                    self.config["token_dir"],
                    self.config["syndic_dir"],
                    self.config["sqlite_queue_dir"],
                ]
                verify_env(
                    v_dirs,
                    self.config["user"],
                    permissive=self.config["permissive_pki_access"],
                    root_dir=self.config["root_dir"],
                    pki_dir=self.config["pki_dir"],
                )
                # Clear out syndics from cachedir
                for syndic_file in os.listdir(self.config["syndic_dir"]):
                    os.remove(os.path.join(self.config["syndic_dir"], syndic_file))
        except OSError as error:
            self.environment_failure(error)
        self.action_log_info("Setting up")
        # TODO: AIO core is separate from transport
        # Bail out early if the publish/ret ports cannot be bound.
        if not verify_socket(
            self.config["interface"],
            self.config["publish_port"],
            self.config["ret_port"],
        ):
            self.shutdown(4, "The ports are not available to bind")
        self.config["interface"] = ip_bracket(self.config["interface"])
        migrations.migrate_paths(self.config)
        # Late import so logging works correctly
        import salt.master
        self.master = salt.master.Master(self.config)
        # Daemonize before writing the pidfile so the recorded pid is the
        # daemon's, then notify systemd that startup finished.
        self.daemonize_if_required()
        self.set_pidfile()
        salt.utils.process.notify_systemd()
    def start(self):
        """
        Start the actual master.
        If sub-classed, don't **ever** forget to run:
        super(YourSubClass, self).start()
        NOTE: Run any required code before calling `super()`.
        """
        super().start()
        if check_user(self.config["user"]):
            self.action_log_info("Starting up")
            self.verify_hash_type()
            self.master.start()
    def shutdown(self, exitcode=0, exitmsg=None):
        """
        If sub-classed, run any shutdown operations on this method.
        """
        self.shutdown_log_info()
        msg = "The salt master is shutdown. "
        if exitmsg is not None:
            exitmsg = msg + exitmsg
        else:
            exitmsg = msg.strip()
        super().shutdown(exitcode, exitmsg)
class Minion(
salt.utils.parsers.MinionOptionParser, DaemonsMixin
): # pylint: disable=no-init
"""
Create a minion server
"""
    def _handle_signals(self, signum, sigframe):  # pylint: disable=unused-argument
        # Ask the minion to stop before delegating to the parent handler.
        if hasattr(self.minion, "stop"):
            self.minion.stop(signum)
        super()._handle_signals(signum, sigframe)
# pylint: disable=no-member
def prepare(self):
"""
Run the preparation sequence required to start a salt minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).prepare()
"""
super().prepare()
try:
if self.config["verify_env"]:
confd = self.config.get("default_include")
if confd:
# If 'default_include' is specified in config, then use it
if "*" in confd:
# Value is of the form "minion.d/*.conf"
confd = os.path.dirname(confd)
if not os.path.isabs(confd):
# If configured 'default_include' is not an absolute
# path, consider it relative to folder of 'conf_file'
# (/etc/salt by default)
confd = os.path.join(
os.path.dirname(self.config["conf_file"]), confd
)
else:
confd = os.path.join(
os.path.dirname(self.config["conf_file"]), "minion.d"
)
v_dirs = [
self.config["pki_dir"],
self.config["cachedir"],
self.config["sock_dir"],
self.config["extension_modules"],
confd,
]
verify_env(
v_dirs,
self.config["user"],
permissive=self.config["permissive_pki_access"],
root_dir=self.config["root_dir"],
pki_dir=self.config["pki_dir"],
)
except OSError as error:
self.environment_failure(error)
log.info('Setting up the Salt Minion "%s"', self.config["id"])
migrations.migrate_paths(self.config)
# Bail out if we find a process running and it matches out pidfile
if (HAS_PSUTIL and not self.claim_process_responsibility()) or (
not HAS_PSUTIL and self.check_running()
):
self.action_log_info("An instance is already running. Exiting")
self.shutdown(1)
transport = self.config.get("transport").lower()
try:
# Late import so logging works correctly
import salt.minion
# If the minion key has not been accepted, then Salt enters a loop
# waiting for it, if we daemonize later then the minion could halt
# the boot process waiting for a key to be accepted on the master.
# This is the latest safe place to daemonize
self.daemonize_if_required()
self.set_pidfile()
if self.config.get("master_type") == "func":
salt.minion.eval_master_func(self.config)
self.minion = salt.minion.MinionManager(self.config)
except Exception: # pylint: disable=broad-except
log.error(
"An error occured while setting up the minion manager", exc_info=True
)
self.shutdown(1)
def start(self):
"""
Start the actual minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
"""
super().start()
while True:
try:
self._real_start()
except SaltClientError as exc:
# Restart for multi_master failover when daemonized
if self.options.daemon:
continue
break
def _real_start(self):
try:
if check_user(self.config["user"]):
self.action_log_info("Starting up")
self.verify_hash_type()
self.minion.tune_in()
if self.minion.restart:
raise SaltClientError("Minion could not connect to Master")
except (KeyboardInterrupt, SaltSystemExit) as error:
self.action_log_info("Stopping")
if isinstance(error, KeyboardInterrupt):
log.warning("Exiting on Ctrl-c")
self.shutdown()
else:
log.error(error)
self.shutdown(error.code)
def call(self, cleanup_protecteds):
"""
Start the actual minion as a caller minion.
cleanup_protecteds is list of yard host addresses that should not be
cleaned up this is to fix race condition when salt-caller minion starts up
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
"""
try:
self.prepare()
if check_user(self.config["user"]):
self.minion.opts["__role"] = kinds.APPL_KIND_NAMES[
kinds.applKinds.caller
]
self.minion.call_in()
except (KeyboardInterrupt, SaltSystemExit) as exc:
self.action_log_info("Stopping")
if isinstance(exc, KeyboardInterrupt):
log.warning("Exiting on Ctrl-c")
self.shutdown()
else:
log.error(exc)
self.shutdown(exc.code)
def shutdown(self, exitcode=0, exitmsg=None):
"""
If sub-classed, run any shutdown operations on this method.
:param exitcode
:param exitmsg
"""
self.action_log_info("Shutting down")
if hasattr(self, "minion") and hasattr(self.minion, "destroy"):
self.minion.destroy()
super().shutdown(
exitcode,
(
"The Salt {} is shutdown. {}".format(
self.__class__.__name__, (exitmsg or "")
).strip()
),
)
# pylint: enable=no-member
class ProxyMinion(
    salt.utils.parsers.ProxyMinionOptionParser, DaemonsMixin
):  # pylint: disable=no-init
    """
    Create a proxy minion server
    """
    def _handle_signals(self, signum, sigframe):  # pylint: disable=unused-argument
        # escalate signal to the process manager processes
        self.minion.stop(signum)
        super()._handle_signals(signum, sigframe)
    # pylint: disable=no-member
    def prepare(self):
        """
        Run the preparation sequence required to start a salt proxy minion.
        If sub-classed, don't **ever** forget to run:
        super(YourSubClass, self).prepare()
        """
        super().prepare()
        ## allow for native minion
        if not is_junos():
            # Proxies are identified via --proxyid on the command line
            # (native JunOS minions are the exception).
            if not self.values.proxyid:
                self.error("salt-proxy requires --proxyid")
        # Proxies get their ID from the command line. This may need to change in
        # the future.
        # We used to set this here. Now it is set in ProxyMinionOptionParser
        # by passing it via setup_config to config.minion_config
        # self.config['id'] = self.values.proxyid
        try:
            if self.config["verify_env"]:
                confd = self.config.get("default_include")
                if confd:
                    # If 'default_include' is specified in config, then use it
                    if "*" in confd:
                        # Value is of the form "minion.d/*.conf"
                        confd = os.path.dirname(confd)
                    if not os.path.isabs(confd):
                        # If configured 'default_include' is not an absolute
                        # path, consider it relative to folder of 'conf_file'
                        # (/etc/salt by default)
                        confd = os.path.join(
                            os.path.dirname(self.config["conf_file"]), confd
                        )
                else:
                    # No 'default_include' configured: fall back to the
                    # conventional proxy.d directory beside the conf file.
                    confd = os.path.join(
                        os.path.dirname(self.config["conf_file"]), "proxy.d"
                    )
                v_dirs = [
                    self.config["pki_dir"],
                    self.config["cachedir"],
                    self.config["sock_dir"],
                    self.config["extension_modules"],
                    confd,
                ]
                # Create/validate ownership and permissions of runtime dirs.
                verify_env(
                    v_dirs,
                    self.config["user"],
                    permissive=self.config["permissive_pki_access"],
                    root_dir=self.config["root_dir"],
                    pki_dir=self.config["pki_dir"],
                )
        except OSError as error:
            self.environment_failure(error)
        self.action_log_info('Setting up "{}"'.format(self.config["id"]))
        migrations.migrate_paths(self.config)
        # Bail out if we find a process running and it matches our pidfile
        if self.check_running():
            self.action_log_info("An instance is already running. Exiting")
            self.shutdown(1)
        # TODO: AIO core is separate from transport
        # Late import so logging works correctly
        import salt.minion
        # If the minion key has not been accepted, then Salt enters a loop
        # waiting for it, if we daemonize later then the minion could halt
        # the boot process waiting for a key to be accepted on the master.
        # This is the latest safe place to daemonize
        self.daemonize_if_required()
        self.set_pidfile()
        if self.config.get("master_type") == "func":
            salt.minion.eval_master_func(self.config)
        self.minion = salt.minion.ProxyMinionManager(self.config)
    def start(self):
        """
        Start the actual proxy minion.
        If sub-classed, don't **ever** forget to run:
        super(YourSubClass, self).start()
        NOTE: Run any required code before calling `super()`.
        """
        super().start()
        try:
            if check_user(self.config["user"]):
                self.action_log_info("The Proxy Minion is starting up")
                self.verify_hash_type()
                self.minion.tune_in()
                if self.minion.restart:
                    raise SaltClientError("Proxy Minion could not connect to Master")
        except (KeyboardInterrupt, SaltSystemExit) as exc:
            self.action_log_info("Proxy Minion Stopping")
            if isinstance(exc, KeyboardInterrupt):
                log.warning("Exiting on Ctrl-c")
                self.shutdown()
            else:
                log.error(exc)
                self.shutdown(exc.code)
    def shutdown(self, exitcode=0, exitmsg=None):
        """
        If sub-classed, run any shutdown operations on this method.
        :param exitcode
        :param exitmsg
        """
        # Give the loaded proxymodule a chance to clean up device state
        # before the generic daemon shutdown path runs.
        if hasattr(self, "minion") and "proxymodule" in self.minion.opts:
            proxy_fn = self.minion.opts["proxymodule"].loaded_base_name + ".shutdown"
            self.minion.opts["proxymodule"][proxy_fn](self.minion.opts)
        self.action_log_info("Shutting down")
        super().shutdown(
            exitcode,
            (
                "The Salt {} is shutdown. {}".format(
                    self.__class__.__name__, (exitmsg or "")
                ).strip()
            ),
        )
    # pylint: enable=no-member
class Syndic(
    salt.utils.parsers.SyndicOptionParser, DaemonsMixin
):  # pylint: disable=no-init
    """
    Create a syndic server
    """
    def prepare(self):
        """
        Run the preparation sequence required to start a salt syndic minion.
        If sub-classed, don't **ever** forget to run:
        super(YourSubClass, self).prepare()
        """
        super().prepare()
        try:
            if self.config["verify_env"]:
                # Create/validate ownership and permissions of runtime dirs.
                verify_env(
                    [
                        self.config["pki_dir"],
                        self.config["cachedir"],
                        self.config["sock_dir"],
                        self.config["extension_modules"],
                    ],
                    self.config["user"],
                    permissive=self.config["permissive_pki_access"],
                    root_dir=self.config["root_dir"],
                    pki_dir=self.config["pki_dir"],
                )
        except OSError as error:
            self.environment_failure(error)
        self.action_log_info('Setting up "{}"'.format(self.config["id"]))
        # Late import so logging works correctly
        import salt.minion
        self.daemonize_if_required()
        self.syndic = salt.minion.SyndicManager(self.config)
        self.set_pidfile()
    def start(self):
        """
        Start the actual syndic.
        If sub-classed, don't **ever** forget to run:
        super(YourSubClass, self).start()
        NOTE: Run any required code before calling `super()`.
        """
        super().start()
        if check_user(self.config["user"]):
            self.action_log_info("Starting up")
            self.verify_hash_type()
            try:
                # Blocks in the syndic's main loop until interrupted.
                self.syndic.tune_in()
            except KeyboardInterrupt:
                self.action_log_info("Stopping")
                self.shutdown()
    def shutdown(self, exitcode=0, exitmsg=None):
        """
        If sub-classed, run any shutdown operations on this method.
        :param exitcode
        :param exitmsg
        """
        self.action_log_info("Shutting down")
        super().shutdown(
            exitcode,
            (
                "The Salt {} is shutdown. {}".format(
                    self.__class__.__name__, (exitmsg or "")
                ).strip()
            ),
        )
try:
import statistics
HAS_STATS = True
except ImportError:
HAS_STATS = False
def __virtual__():
    """
    Only load this module when the ``statistics`` module imported
    successfully (see the guarded import at the top of the file).
    """
    return HAS_STATS
def calc(name, num, oper, minimum=0, maximum=0, ref=None):
    """
    Perform a calculation on the ``num`` most recent values. Requires a list.

    Valid values for ``oper`` are:

    - add: Add last ``num`` values together
    - mul: Multiply last ``num`` values together
    - mean: Calculate mean of last ``num`` values
    - median: Calculate median of last ``num`` values
    - median_low: Calculate low median of last ``num`` values
    - median_high: Calculate high median of last ``num`` values
    - median_grouped: Calculate grouped median of last ``num`` values
    - mode: Calculate mode of last ``num`` values

    If ``minimum`` (> 0) or ``maximum`` (> 0) is given and the answer falls
    outside the bound, the state result is ``False``.  ``ref`` selects a key
    out of each register item when the items are mappings.

    USAGE:

    .. code-block:: yaml

        foo:
          calc.calc:
            - name: myregentry
            - num: 5
            - oper: mean
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": True}
    if name not in __reg__:
        ret["comment"] = "{} not found in register".format(name)
        ret["result"] = False
        # Bug fix: bail out immediately; the original fell through and
        # raised KeyError when indexing the missing register entry below.
        return ret

    def opadd(vals):
        # Sum the values (kept as an explicit helper to mirror opmul).
        total = 0
        for val in vals:
            total += val
        return total

    def opmul(vals):
        # Bug fix: the product accumulator must start at 1, not 0 --
        # starting at 0 collapsed every product to 0.
        prod = 1
        for val in vals:
            prod *= val
        return prod

    ops = {
        "add": opadd,
        "mul": opmul,
        "mean": statistics.mean,
        "median": statistics.median,
        "median_low": statistics.median_low,
        "median_high": statistics.median_high,
        "median_grouped": statistics.median_grouped,
        "mode": statistics.mode,
    }
    vals = []
    # Walk the register newest-first WITHOUT mutating it.  Bug fix: the
    # original reversed the stored list in place, so every second call
    # operated on the *oldest* values instead of the most recent ones.
    for regitem in reversed(__reg__[name]["val"]):
        if len(vals) >= num:
            break
        vals.append(regitem if ref is None else regitem[ref])
    answer = ops[oper](vals)
    if minimum > 0 and answer < minimum:
        ret["result"] = False
    if 0 < maximum < answer:
        ret["result"] = False
    ret["changes"] = {
        "Number of values": len(vals),
        "Operator": oper,
        "Answer": answer,
    }
    return ret
def add(name, num, minimum=0, maximum=0, ref=None):
    """
    Add together the ``num`` most recent values. Requires a list.

    USAGE:

    .. code-block:: yaml

        foo:
          calc.add:
            - name: myregentry
            - num: 5
    """
    return calc(name, num, "add", minimum, maximum, ref)
def mul(name, num, minimum=0, maximum=0, ref=None):
    """
    Multiply together the ``num`` most recent values. Requires a list.

    USAGE:

    .. code-block:: yaml

        foo:
          calc.mul:
            - name: myregentry
            - num: 5
    """
    return calc(name, num, "mul", minimum, maximum, ref)
def mean(name, num, minimum=0, maximum=0, ref=None):
    """
    Calculate the mean of the ``num`` most recent values. Requires a list.

    USAGE:

    .. code-block:: yaml

        foo:
          calc.mean:
            - name: myregentry
            - num: 5
    """
    return calc(name, num, "mean", minimum, maximum, ref)
def median(name, num, minimum=0, maximum=0, ref=None):
    """
    Calculate the median of the ``num`` most recent values. Requires a list.
    (Docstring fixed: this computes the median, not the mean.)

    USAGE:

    .. code-block:: yaml

        foo:
          calc.median:
            - name: myregentry
            - num: 5
    """
    return calc(name, num, "median", minimum, maximum, ref)
def median_low(name, num, minimum=0, maximum=0, ref=None):
    """
    Calculate the low median of the ``num`` most recent values. Requires a
    list.

    USAGE:

    .. code-block:: yaml

        foo:
          calc.median_low:
            - name: myregentry
            - num: 5
    """
    return calc(name, num, "median_low", minimum, maximum, ref)
def median_high(name, num, minimum=0, maximum=0, ref=None):
    """
    Calculate the high median of the ``num`` most recent values. Requires a
    list.

    USAGE:

    .. code-block:: yaml

        foo:
          calc.median_high:
            - name: myregentry
            - num: 5
    """
    return calc(name, num, "median_high", minimum, maximum, ref)
def median_grouped(name, num, minimum=0, maximum=0, ref=None):
    """
    Calculate the grouped median of the ``num`` most recent values.
    Requires a list.

    USAGE:

    .. code-block:: yaml

        foo:
          calc.median_grouped:
            - name: myregentry
            - num: 5
    """
    return calc(name, num, "median_grouped", minimum, maximum, ref)
def mode(name, num, minimum=0, maximum=0, ref=None):
    """
    Calculate the mode of the ``num`` most recent values. Requires a list.

    USAGE:

    .. code-block:: yaml

        foo:
          calc.mode:
            - name: myregentry
            - num: 5
    """
    return calc(name, num, "mode", minimum, maximum, ref)
import logging
import salt.utils.stringutils
# Module-level logger.  Fix: use the dotted module name (``__name__``) so
# records land in the normal ``salt.thorium.check`` logging hierarchy; the
# previous ``__file__`` produced an awkward path-based logger name that
# escaped hierarchy-based logging configuration.
log = logging.getLogger(__name__)
def gt(name, value):
    """
    Succeed only when the register entry ``name`` holds a value strictly
    greater than ``value``.

    USAGE:

    .. code-block:: yaml

        foo:
          check.gt:
            - value: 42

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping
            - require:
              - check: foo
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    if name not in __reg__:
        ret["comment"] = "Value {} not in register".format(name)
        return ret
    ret["result"] = bool(__reg__[name]["val"] > value)
    return ret
def gte(name, value):
    """
    Succeed only when the register entry ``name`` holds a value greater
    than or equal to ``value``.

    USAGE:

    .. code-block:: yaml

        foo:
          check.gte:
            - value: 42

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping
            - require:
              - check: foo
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    if name not in __reg__:
        ret["comment"] = "Value {} not in register".format(name)
        return ret
    ret["result"] = bool(__reg__[name]["val"] >= value)
    return ret
def lt(name, value):
    """
    Succeed only when the register entry ``name`` holds a value strictly
    less than ``value``.

    USAGE:

    .. code-block:: yaml

        foo:
          check.lt:
            - value: 42

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping
            - require:
              - check: foo
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    if name not in __reg__:
        ret["comment"] = "Value {} not in register".format(name)
        return ret
    ret["result"] = bool(__reg__[name]["val"] < value)
    return ret
def lte(name, value):
    """
    Succeed only when the register entry ``name`` holds a value less than
    or equal to ``value``.

    USAGE:

    .. code-block:: yaml

        foo:
          check.lte:
            - value: 42

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping
            - require:
              - check: foo
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    if name not in __reg__:
        ret["comment"] = "Value {} not in register".format(name)
        return ret
    ret["result"] = bool(__reg__[name]["val"] <= value)
    return ret
def eq(name, value):
    """
    Succeed only when the register entry ``name`` holds a value equal to
    ``value``.

    USAGE:

    .. code-block:: yaml

        foo:
          check.eq:
            - value: 42

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping
            - require:
              - check: foo
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    if name not in __reg__:
        ret["comment"] = "Value {} not in register".format(name)
        return ret
    ret["result"] = bool(__reg__[name]["val"] == value)
    return ret
def ne(name, value):
    """
    Succeed only when the register entry ``name`` holds a value different
    from ``value``.

    USAGE:

    .. code-block:: yaml

        foo:
          check.ne:
            - value: 42

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping
            - require:
              - check: foo
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    if name not in __reg__:
        ret["comment"] = "Value {} not in register".format(name)
        return ret
    ret["result"] = bool(__reg__[name]["val"] != value)
    return ret
def contains(
    name,
    value,
    count_lt=None,
    count_lte=None,
    count_eq=None,
    count_gte=None,
    count_gt=None,
    count_ne=None,
):
    """
    Only succeed if the value in the given register location contains
    the given value.

    When any ``count_*`` threshold is supplied, the number of occurrences
    of ``value`` is compared against every supplied threshold instead of a
    plain membership test.

    USAGE:

    .. code-block:: yaml

        foo:
          check.contains:
            - value: itni

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping
            - require:
              - check: foo
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    if name not in __reg__:
        ret["comment"] = "Value {} not in register".format(name)
        return ret
    try:
        # Bug fix: compare against None explicitly so that a threshold of
        # 0 (e.g. count_eq: 0) is honored; the previous truthiness test
        # silently ignored zero-valued thresholds.
        count_compare = any(
            c is not None
            for c in (count_lt, count_lte, count_eq, count_gte, count_gt, count_ne)
        )
        if count_compare:
            occurrences = __reg__[name]["val"].count(value)
            log.debug("%s appears %s times", value, occurrences)
            ret["result"] = True
            if count_lt is not None:
                ret["result"] &= occurrences < count_lt
            if count_lte is not None:
                ret["result"] &= occurrences <= count_lte
            if count_eq is not None:
                ret["result"] &= occurrences == count_eq
            if count_gte is not None:
                ret["result"] &= occurrences >= count_gte
            if count_gt is not None:
                ret["result"] &= occurrences > count_gt
            if count_ne is not None:
                ret["result"] &= occurrences != count_ne
        else:
            if value in __reg__[name]["val"]:
                ret["result"] = True
    except TypeError:
        # Registered value does not support count()/membership; result
        # stays False (preserved best-effort behavior).
        pass
    return ret
def event(name):
    """
    Check for a specific event match and return result True when at least
    one event tag matches the expression ``name``.

    USAGE:

    .. code-block:: yaml

        salt/foo/*/bar:
          check.event

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping
            - require:
              - check: salt/foo/*/bar
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    for evt in __events__:
        if salt.utils.stringutils.expr_match(evt["tag"], name):
            ret["result"] = True
            break
    return ret
def len_gt(name, value):
    """
    Succeed only when the length of the register entry ``name`` is strictly
    greater than ``value``.

    USAGE:

    .. code-block:: yaml

        foo:
          check.len_gt:
            - value: 42

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping
            - require:
              - check: foo
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    if name not in __reg__:
        ret["comment"] = "Value {} not in register".format(name)
        return ret
    ret["result"] = bool(len(__reg__[name]["val"]) > value)
    return ret
def len_gte(name, value):
    """
    Succeed only when the length of the register entry ``name`` is greater
    than or equal to ``value``.

    USAGE:

    .. code-block:: yaml

        foo:
          check.len_gte:
            - value: 42

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping
            - require:
              - check: foo
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    if name not in __reg__:
        ret["comment"] = "Value {} not in register".format(name)
        return ret
    ret["result"] = bool(len(__reg__[name]["val"]) >= value)
    return ret
def len_lt(name, value):
    """
    Succeed only when the length of the register entry ``name`` is strictly
    less than ``value``.

    USAGE:

    .. code-block:: yaml

        foo:
          check.len_lt:
            - value: 42

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping
            - require:
              - check: foo
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    if name not in __reg__:
        ret["comment"] = "Value {} not in register".format(name)
        return ret
    ret["result"] = bool(len(__reg__[name]["val"]) < value)
    return ret
def len_lte(name, value):
    """
    Succeed only when the length of the register entry ``name`` is less
    than or equal to ``value``.

    USAGE:

    .. code-block:: yaml

        foo:
          check.len_lte:
            - value: 42

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping
            - require:
              - check: foo
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    if name not in __reg__:
        ret["comment"] = "Value {} not in register".format(name)
        return ret
    ret["result"] = bool(len(__reg__[name]["val"]) <= value)
    return ret
def len_eq(name, value):
    """
    Only succeed if the length of the given register location is equal to
    the given value.

    USAGE:

    .. code-block:: yaml

        foo:
          check.len_eq:
            - value: 42

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping
            - require:
              - check: foo
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    if name not in __reg__:
        ret["comment"] = "Value {} not in register".format(name)
        return ret
    # Bug fix: compare the *length* of the stored value; the original
    # compared the value itself against ``value`` (missing len()), unlike
    # every other len_* check in this module.
    if len(__reg__[name]["val"]) == value:
        ret["result"] = True
    return ret
def len_ne(name, value):
    """
    Succeed only when the length of the register entry ``name`` differs
    from ``value``.

    USAGE:

    .. code-block:: yaml

        foo:
          check.len_ne:
            - value: 42

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping
            - require:
              - check: foo
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    if name not in __reg__:
        ret["comment"] = "Value {} not in register".format(name)
        return ret
    ret["result"] = bool(len(__reg__[name]["val"]) != value)
    return ret
import salt.utils.stringutils
# Tell the salt loader to expose these functions under their unsuffixed
# names; the trailing underscore only avoids shadowing Python builtins.
__func_alias__ = {
    "set_": "set",
    "list_": "list",
}
def set_(name, add, match):
    """
    Add a value to the named set in the register, pulled from the ``add``
    field of every event whose tag matches ``match``.

    USAGE:

    .. code-block:: yaml

        foo:
          reg.set:
            - add: bar
            - match: my/custom/event
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": True}
    if name not in __reg__:
        __reg__[name] = {"val": set()}
    for event in __events__:
        if not salt.utils.stringutils.expr_match(event["tag"], match):
            continue
        try:
            # Minion events nest the payload under data/data.
            val = event["data"]["data"].get(add)
        except KeyError:
            val = event["data"].get(add)
        if val is None:
            val = "None"
        ret["changes"][add] = val
        __reg__[name]["val"].add(val)
    return ret
def list_(name, add, match, stamp=False, prune=0):
    """
    Add the specified values to the named list.

    If ``stamp`` is True, then the timestamp from the event will also be added.
    If ``prune`` is set to an integer higher than ``0``, then only the last
    ``prune`` values will be kept in the list.

    USAGE:

    .. code-block:: yaml

        foo:
          reg.list:
            - add: bar
            - match: my/custom/event
            - stamp: True
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": True}
    if not isinstance(add, list):
        add = add.split(",")
    if name not in __reg__:
        __reg__[name] = {"val": []}
    for event in __events__:
        try:
            # Minion events nest the payload under data/data.
            event_data = event["data"]["data"]
        except KeyError:
            event_data = event["data"]
        if salt.utils.stringutils.expr_match(event["tag"], match):
            item = {key: event_data[key] for key in add if key in event_data}
            if stamp is True:
                item["time"] = event["data"]["_stamp"]
            __reg__[name]["val"].append(item)
    if prune > 0:
        # Bug fix: keep the *most recent* entries, as documented ("the
        # last ``prune`` values"); the original sliced [:prune] and kept
        # the oldest entries instead.
        __reg__[name]["val"] = __reg__[name]["val"][-prune:]
    return ret
def mean(name, add, match):
    """
    Accept a numeric value from the matched events and store a running average
    of the values in the given register. If the specified value is not numeric
    it will be skipped.

    USAGE:

    .. code-block:: yaml

        foo:
          reg.mean:
            - add: data_field
            - match: my/custom/event
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": True}
    if name not in __reg__:
        __reg__[name] = {}
        __reg__[name]["val"] = 0
        __reg__[name]["total"] = 0
        __reg__[name]["count"] = 0
    for event in __events__:
        try:
            # Minion events nest the payload under data/data.
            event_data = event["data"]["data"]
        except KeyError:
            event_data = event["data"]
        if salt.utils.stringutils.expr_match(event["tag"], match):
            if add in event_data:
                try:
                    # Bug fix: convert the tracked *field*, not the whole
                    # event dict -- int(event_data) raised TypeError, which
                    # the ValueError handler never caught.
                    comp = int(event_data[add])
                except (TypeError, ValueError):
                    # Non-numeric values are skipped per the docstring.
                    continue
                __reg__[name]["total"] += comp
                __reg__[name]["count"] += 1
                __reg__[name]["val"] = __reg__[name]["total"] / __reg__[name]["count"]
    return ret
def clear(name):
    """
    Clear the contents of the named register namespace (the key itself is
    kept; see ``delete`` to remove it entirely).

    USAGE:

    .. code-block:: yaml

        clearns:
          reg.clear:
            - name: myregister
    """
    try:
        __reg__[name].clear()
    except KeyError:
        # Nothing registered under this name -- nothing to clear.
        pass
    return {"name": name, "changes": {}, "comment": "", "result": True}
def delete(name):
    """
    Delete the named namespace from the register entirely.

    USAGE:

    .. code-block:: yaml

        deletens:
          reg.delete:
            - name: myregister
    """
    __reg__.pop(name, None)
    return {"name": name, "changes": {}, "comment": "", "result": True}
import logging
import pprint
import urllib.parse
import salt.returners
import salt.utils.slack
import salt.utils.yaml
log = logging.getLogger(__name__)
# Name under which the salt loader exposes this returner.
__virtualname__ = "slack"
def _get_options(ret=None):
    """
    Get the slack options from salt.

    Options come either from the flat ``slack.*`` config keys (``attrs``) or
    from a named profile dict referenced via ``slack_profile``
    (``profile_attrs``); ``defaults`` backfills anything unset.
    """
    defaults = {"channel": "#general"}
    attrs = {
        "slack_profile": "profile",
        "channel": "channel",
        "username": "username",
        "as_user": "as_user",
        "api_key": "api_key",
        "changes": "changes",
        "only_show_failed": "only_show_failed",
        "yaml_format": "yaml_format",
    }
    profile_attr = "slack_profile"
    profile_attrs = {
        "from_jid": "from_jid",
        "api_key": "api_key",
        # NOTE(review): mapping 'api_version' onto 'api_key' looks like a
        # copy/paste mistake -- confirm whether it should read
        # "api_version": "api_version" before changing behavior.
        "api_version": "api_key",
    }
    _options = salt.returners.get_returner_options(
        __virtualname__,
        ret,
        attrs,
        profile_attr=profile_attr,
        profile_attrs=profile_attrs,
        __salt__=__salt__,
        __opts__=__opts__,
        defaults=defaults,
    )
    return _options
def __virtual__():
    """
    Expose this returner under its virtual name; it has no extra
    dependencies, so it always loads.
    """
    return __virtualname__
def _post_message(channel, message, username, as_user, api_key=None):
    """
    Send a message to a Slack room.

    :param channel: The room name.
    :param message: The message to send to the Slack room.
    :param username: Specify who the message is from.
    :param as_user: Post using the profile picture configured in Slack.
    :param api_key: The Slack api key, if not specified in the configuration.
    :return: Boolean if message was sent successfully.
    """
    payload = {
        "channel": channel,
        "username": username,
        "as_user": as_user,
        # Triple backticks render as pre-formatted, fixed-width text.
        "text": "```" + message + "```",
    }
    # Slack wants the body on POST to be urlencoded.
    result = salt.utils.slack.query(
        function="message",
        api_key=api_key,
        method="POST",
        header_dict={"Content-Type": "application/x-www-form-urlencoded"},
        data=urllib.parse.urlencode(payload),
    )
    log.debug("Slack message post result: %s", result)
    return bool(result)
def returner(ret):
    """
    Send a Slack message with the job return data.

    Requires ``slack.channel``, ``slack.username``, ``slack.as_user`` and
    ``slack.api_key`` in the salt config.  ``changes`` and
    ``only_show_failed`` filter which states are reported and are mutually
    exclusive; ``yaml_format`` switches the rendering from pprint to YAML.
    """
    _options = _get_options(ret)
    channel = _options.get("channel")
    username = _options.get("username")
    as_user = _options.get("as_user")
    api_key = _options.get("api_key")
    changes = _options.get("changes")
    only_show_failed = _options.get("only_show_failed")
    yaml_format = _options.get("yaml_format")
    if not channel:
        log.error("slack.channel not defined in salt config")
        return
    if not username:
        log.error("slack.username not defined in salt config")
        return
    if not as_user:
        log.error("slack.as_user not defined in salt config")
        return
    if not api_key:
        log.error("slack.api_key not defined in salt config")
        return
    if only_show_failed and changes:
        log.error(
            "cannot define both slack.changes and slack.only_show_failed in salt config"
        )
        return
    returns = ret.get("return")
    if changes is True:
        # Bug fix: use a dict comprehension; the original set comprehension
        # produced a *set of (key, value) tuples*, losing the
        # state-id -> result mapping in the rendered message.
        returns = {
            key: value
            for key, value in returns.items()
            if value["result"] is not True or value["changes"]
        }
    if only_show_failed is True:
        # Same fix as above: keep the mapping shape while filtering.
        returns = {
            key: value
            for key, value in returns.items()
            if value["result"] is not True
        }
    if yaml_format is True:
        returns = salt.utils.yaml.safe_dump(returns)
    else:
        returns = pprint.pformat(returns)
    message = "id: {}\r\nfunction: {}\r\nfunction args: {}\r\njid: {}\r\nreturn: {}\r\n".format(
        ret.get("id"), ret.get("fun"), ret.get("fun_args"), ret.get("jid"), returns
    )
    slack = _post_message(channel, message, username, as_user, api_key)
    return slack
import logging
import salt.returners
import salt.utils.json
import salt.utils.mattermost
log = logging.getLogger(__name__)
# Name under which the salt loader exposes this returner.
__virtualname__ = "mattermost"
def __virtual__():
    """
    Expose this returner under its virtual name; it has no extra
    dependencies, so it always loads.
    """
    return __virtualname__
def _get_options(ret=None):
    """
    Pull the mattermost configuration options out of salt.
    """
    attrs = {
        "channel": "channel",
        "username": "username",
        "hook": "hook",
        "api_url": "api_url",
    }
    options = salt.returners.get_returner_options(
        __virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__
    )
    log.debug("Options: %s", options)
    return options
def returner(ret):
    """
    Send a mattermost message with the job return data.
    """
    opts = _get_options(ret)
    api_url = opts.get("api_url")
    channel = opts.get("channel")
    username = opts.get("username")
    hook = opts.get("hook")
    if not hook:
        log.error("mattermost.hook not defined in salt config")
        return
    message = "id: {}\r\nfunction: {}\r\nfunction args: {}\r\njid: {}\r\nreturn: {}\r\n".format(
        ret.get("id"),
        ret.get("fun"),
        ret.get("fun_args"),
        ret.get("jid"),
        ret.get("return"),
    )
    return post_message(channel, message, username, api_url, hook)
def event_return(events):
    """
    Send the events to a mattermost room.

    :param events: List of events
    :return: Boolean if all messages were sent successfully.
    """
    opts = _get_options()
    api_url = opts.get("api_url")
    channel = opts.get("channel")
    username = opts.get("username")
    hook = opts.get("hook")
    is_ok = True
    for event in events:
        log.debug("Event: %s", event)
        log.debug("Event data: %s", event["data"])
        lines = ["tag: {}\r\n".format(event["tag"])]
        for key, value in event["data"].items():
            lines.append("{}: {}\r\n".format(key, value))
        if not post_message(channel, "".join(lines), username, api_url, hook):
            is_ok = False
    return is_ok
def post_message(channel, message, username, api_url, hook):
    """
    Send a message to a mattermost room via an incoming webhook.

    :param channel: The room name (optional; falls back to the hook default).
    :param message: The message to send to the mattermost room.
    :param username: Specify who the message is from (optional).
    :param hook: The mattermost hook, if not specified in the configuration.
    :return: Boolean if message was sent successfully.
    """
    payload = {}
    if channel:
        payload["channel"] = channel
    if username:
        payload["username"] = username
    # Triple backticks render as pre-formatted, fixed-width text.
    payload["text"] = "```" + message + "```"
    log.debug("Parameters: %s", payload)
    result = salt.utils.mattermost.query(
        api_url=api_url,
        hook=hook,
        data="payload={}".format(salt.utils.json.dumps(payload)),
    )
    log.debug("result %s", result)
    return bool(result)
import logging
import salt.returners
import salt.utils.jid
try:
import librato
HAS_LIBRATO = True
except ImportError:
HAS_LIBRATO = False
# Name under which the salt loader exposes this returner.
__virtualname__ = "librato"
log = logging.getLogger(__name__)
def __virtual__():
    """
    Load only when the ``librato`` client library is importable (see the
    guarded import at the top of the file).
    """
    if HAS_LIBRATO:
        return __virtualname__
    return (
        False,
        "Could not import librato module; librato python client is not installed.",
    )
def _get_options(ret=None):
    """
    Assemble the Librato connection options (email, api_token, api_url)
    from the salt configuration, defaulting the API endpoint.
    """
    attrs = {"email": "email", "api_token": "api_token", "api_url": "api_url"}
    options = salt.returners.get_returner_options(
        __virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__
    )
    options.setdefault("api_url", "metrics-api.librato.com")
    log.debug("Retrieved Librato options: %s", options)
    return options
def _get_librato(ret=None):
    """
    Return a Librato connection object.

    :param ret: Optional Salt return dict, used only to look up returner
        options via ``_get_options``.
    :return: A connection created by ``librato.connect``.
    """
    _options = _get_options(ret)
    # Metric names are sanitized so they are valid for the Librato API
    conn = librato.connect(
        _options.get("email"),
        _options.get("api_token"),
        sanitizer=librato.sanitize_metric_name,
        hostname=_options.get("api_url"),
    )
    log.info("Connected to librato.")
    return conn
def _calculate_runtimes(states):
    """
    Aggregate per-state results into pass/fail counts and total runtime.

    :param states: Mapping of state id -> result dict (as found in a
        highstate return).
    :return: Dict with ``runtime``, ``num_failed_states`` and
        ``num_passed_states`` keys.
    """
    results = {"runtime": 0.00, "num_failed_states": 0, "num_passed_states": 0}
    for resultset in states.values():
        # Only entries shaped like state results carry a duration
        if not (isinstance(resultset, dict) and "duration" in resultset):
            continue
        bucket = "num_passed_states" if resultset["result"] else "num_failed_states"
        results[bucket] += 1
        results["runtime"] += resultset["duration"]
    log.debug("Parsed state metrics: %s", results)
    return results
def returner(ret):
    """
    Parse the return data and return metrics to Librato.

    Only ``state.highstate`` returns are reported; other functions are
    ignored.
    """
    librato_conn = _get_librato(ret)
    q = librato_conn.new_queue()
    if ret["fun"] != "state.highstate":
        return
    log.debug("Found returned Highstate data.")
    # Calculate the runtimes and number of failed states.
    stats = _calculate_runtimes(ret["return"])
    log.debug("Batching Metric retcode with %s", ret["retcode"])
    q.add("saltstack.highstate.retcode", ret["retcode"], tags={"Name": ret["id"]})
    log.debug("Batching Metric num_failed_jobs with %s", stats["num_failed_states"])
    q.add(
        "saltstack.highstate.failed_states",
        stats["num_failed_states"],
        tags={"Name": ret["id"]},
    )
    log.debug("Batching Metric num_passed_states with %s", stats["num_passed_states"])
    q.add(
        "saltstack.highstate.passed_states",
        stats["num_passed_states"],
        tags={"Name": ret["id"]},
    )
    log.debug("Batching Metric runtime with %s", stats["runtime"])
    q.add("saltstack.highstate.runtime", stats["runtime"], tags={"Name": ret["id"]})
    total_states = stats["num_failed_states"] + stats["num_passed_states"]
    log.debug("Batching Metric runtime with %s", total_states)
    q.add(
        "saltstack.highstate.total_states",
        total_states,
        tags={"Name": ret["id"]},
    )
    log.info("Sending metrics to Librato.")
    q.submit()
import logging
import socket
import time
import requests
import salt.utils.json
_max_content_bytes = 100000
http_event_collector_debug = False
log = logging.getLogger(__name__)
__virtualname__ = "splunk"
def __virtual__():
    """
    Return virtual name of the module.
    :return: The virtual name of the module.
    """
    # Always loads; connectivity is only checked when events are sent.
    return __virtualname__
def returner(ret):
    """
    Send a message to Splunk via the HTTP Event Collector.
    Requires the Splunk HTTP Event Collector running on port 8088.
    This is available on Splunk Enterprise version 6.3 or higher.

    :param ret: The Salt job return dict to forward as a Splunk event.
    :return: ``True`` after the event has been handed to the collector.
    """
    # Get Splunk Options
    opts = _get_options()
    log.info(
        "Options: %s",
        salt.utils.json.dumps(opts),
    )
    http_collector = _create_http_event_collector(opts)
    # Wrap the job return in the HEC payload envelope and post it
    payload = _prepare_splunk_payload(ret, opts)
    http_collector.sendEvent(payload)
    return True
def event_return(events):
    """
    Return events to Splunk via the HTTP Event Collector.
    Requires the Splunk HTTP Event Collector running on port 8088.
    This is available on Splunk Enterprise version 6.3 or higher.

    :param events: Iterable of Salt event dicts to forward.
    :return: ``True`` after all events have been handed to the collector.
    """
    # Get Splunk Options
    opts = _get_options()
    log.info(
        "Options: %s",
        salt.utils.json.dumps(opts),
    )
    collector = _create_http_event_collector(opts)
    # Post each event individually through the shared collector connection
    for event in events:
        collector.sendEvent(_prepare_splunk_payload(event, opts))
    return True
def _get_options():
    """
    Read the Splunk HTTP forwarder settings from the minion/master config.

    :return: Dict with ``token``, ``indexer``, ``sourcetype``, ``index`` and
        ``verify_ssl`` keys, or ``None`` when the config lookup fails.
    """
    try:
        token = __salt__["config.get"]("splunk_http_forwarder:token")
        indexer = __salt__["config.get"]("splunk_http_forwarder:indexer")
        sourcetype = __salt__["config.get"]("splunk_http_forwarder:sourcetype")
        index = __salt__["config.get"]("splunk_http_forwarder:index")
        verify_ssl = __salt__["config.get"](
            "splunk_http_forwarder:verify_ssl", default=True
        )
    except Exception:  # pylint: disable=broad-except
        # Deliberately best-effort: missing config is logged, not raised
        log.error("Splunk HTTP Forwarder parameters not present in config.")
        return None
    splunk_opts = {
        "token": token,
        "indexer": indexer,
        "sourcetype": sourcetype,
        "index": index,
        "verify_ssl": verify_ssl,
    }
    return splunk_opts
def _create_http_event_collector(opts):
    """
    Prepare a connection to the Splunk HTTP event collector.

    :param opts: Splunk options as returned by ``_get_options``.
    :return: A configured ``http_event_collector`` instance.
    """
    # Token and indexer are positional; SSL verification is opt-out
    return http_event_collector(
        opts["token"],
        opts["indexer"],
        verify_ssl=opts["verify_ssl"],
    )
def _prepare_splunk_payload(event, opts):
    """
    Prepare a payload for submission to the Splunk HTTP event collector.

    :param event: The event (or job return) to be wrapped in the payload.
    :param opts: Splunk options as returned by ``_get_options``.
    :return: A payload dict ready for ``http_event_collector.sendEvent``.
    """
    # Fix: the original unconditionally re-fetched the options, shadowing the
    # passed ``opts`` argument and performing redundant config lookups for
    # every event. Only fall back to the config when no options were given.
    if opts is None:
        opts = _get_options()
    # init the payload
    payload = {}
    # Set up the event metadata
    payload.update({"index": opts["index"]})
    payload.update({"sourcetype": opts["sourcetype"]})
    # Add the event
    payload.update({"event": event})
    log.info(
        "Payload: %s",
        salt.utils.json.dumps(payload),
    )
    return payload
# Thanks to George Starcher for the http_event_collector class (https://github.com/georgestarcher/)
class http_event_collector:
    """Minimal client for Splunk's HTTP Event Collector (HEC)."""

    def __init__(
        self,
        token,
        http_event_server,
        host="",
        http_event_port="8088",
        http_event_server_ssl=True,
        max_bytes=_max_content_bytes,
        verify_ssl=True,
    ):
        self.token = token
        self.batchEvents = []
        self.maxByteLength = max_bytes
        self.currentByteLength = 0
        self.verify_ssl = verify_ssl
        # Default to the local hostname when no host override is provided
        self.host = host if host else socket.gethostname()
        # Build the collector endpoint URI.
        # Defaults to SSL and port 8088 when not overridden.
        scheme = "https://" if http_event_server_ssl else "http://"
        self.server_uri = "{}{}:{}{}".format(
            scheme, http_event_server, http_event_port, "/services/collector/event"
        )
        if http_event_collector_debug:
            log.debug(self.token)
            log.debug(self.server_uri)

    def sendEvent(self, payload, eventtime=""):
        """Immediately POST a single event payload to the collector."""
        headers = {"Authorization": "Splunk " + self.token}
        # If eventtime in epoch not passed as optional argument use current system time in epoch
        if not eventtime:
            eventtime = str(int(time.time()))
        # Fill in local hostname if not manually populated
        if "host" not in payload:
            payload.update({"host": self.host})
        # Update time value on payload if need to use system time
        data = {"time": eventtime}
        data.update(payload)
        # send event to http event collector
        response = requests.post(
            self.server_uri,
            data=salt.utils.json.dumps(data),
            headers=headers,
            verify=self.verify_ssl,
        )
        # Print debug info if flag set
        if http_event_collector_debug:
            log.debug(response.text)
            log.debug(data)
import json
import logging
import urllib.parse
import salt.returners
import salt.utils.http
import salt.utils.yaml
log = logging.getLogger(__name__)
__virtualname__ = "slack_webhook"
UNCHANGED_KEY = "unchanged"
CHANGED_KEY = "changed"
FAILED_KEY = "failed"
TASKS_KEY = "tasks"
COUNTER_KEY = "counter"
DURATION_KEY = "duration"
TOTAL_KEY = "total"
def _get_options(ret=None):
    """
    Get the slack_webhook options from salt.
    :param ret: Salt return dictionary
    :return: A dictionary with options
    """
    # Title templates may contain {grain} placeholders expanded by _sprinkle
    defaults = {
        "success_title": "{id} | Succeeded",
        "failure_title": "{id} | Failed",
        "author_icon": "",
        "show_tasks": False,
    }
    attrs = {
        "webhook": "webhook",
        "success_title": "success_title",
        "failure_title": "failure_title",
        "author_icon": "author_icon",
        "show_tasks": "show_tasks",
    }
    _options = salt.returners.get_returner_options(
        __virtualname__,
        ret,
        attrs,
        __salt__=__salt__,
        __opts__=__opts__,
        defaults=defaults,
    )
    return _options
def __virtual__():
    """
    Return virtual name of the module.
    :return: The virtual name of the module.
    """
    # Always loads; the webhook URL is validated later in returner().
    return __virtualname__
def _sprinkle(config_str):
    """
    Sprinkle with grains of salt, that is
    convert "test {id} test {host} " types of strings
    :param config_str: The string to be sprinkled
    :return: The string sprinkled
    """
    # Splitting on both braces yields literal text at even positions and
    # grain names at odd positions.
    pieces = [frag for chunk in config_str.split("{") for frag in chunk.split("}")]
    rendered = []
    for idx, piece in enumerate(pieces):
        if idx % 2:
            rendered.append(str(__grains__.get(piece, "")))
        else:
            rendered.append(piece)
    return "".join(rendered)
def _format_task(task):
"""
Return a dictionary with the task ready for slack fileds
:param task: The name of the task
:return: A dictionary ready to be inserted in Slack fields array
"""
return {"value": task, "short": False}
def _generate_payload(author_icon, title, report, **kwargs):
    """
    Prepare the payload for Slack
    :param author_icon: The url for the thumbnail to be displayed
    :param title: The title of the message
    :param report: A dictionary with the report of the Salt function
    :return: The payload ready for Slack
    """
    # For event returns the minion id comes from the report itself;
    # otherwise expand the "{id}" grain placeholder locally.
    event_rtn = kwargs.get("event_rtn", False)
    if event_rtn is True:
        author_name = report["id"]
    else:
        author_name = _sprinkle("{id}")
    title = _sprinkle(title)
    text = "Function: {}\n".format(report.get("function"))
    if len(report.get("arguments", [])) > 0:
        text += "Function Args: {}\n".format(str(list(map(str, report["arguments"]))))
    text += "JID: {}\n".format(report.get("jid"))
    if TOTAL_KEY in report:
        text += "Total: {}\n".format(report[TOTAL_KEY])
    if DURATION_KEY in report:
        text += "Duration: {:.2f} secs".format(float(report[DURATION_KEY]))
    # First attachment summarizes the run; further attachments are appended
    # below depending on whether this was a state run or a plain return.
    attachments = [
        {
            "fallback": title,
            "color": "#272727",
            "author_name": author_name,
            "author_link": _sprinkle("{localhost}"),
            "author_icon": author_icon,
            "title": "Success: {}".format(str(report["success"])),
            "text": text,
        }
    ]
    if UNCHANGED_KEY in report:
        # Unchanged
        attachments.append(
            {
                "color": "good",
                "title": "Unchanged: {}".format(
                    report[UNCHANGED_KEY].get(COUNTER_KEY, 0)
                ),
            }
        )
        # Changed
        changed = {
            "color": "warning",
            "title": "Changed: {}".format(report[CHANGED_KEY].get(COUNTER_KEY, 0)),
        }
        if len(report[CHANGED_KEY].get(TASKS_KEY, [])) > 0:
            changed["fields"] = list(map(_format_task, report[CHANGED_KEY][TASKS_KEY]))
        attachments.append(changed)
        # Failed
        failed = {
            "color": "danger",
            "title": "Failed: {}".format(report[FAILED_KEY].get(COUNTER_KEY, None)),
        }
        if len(report[FAILED_KEY].get(TASKS_KEY, [])) > 0:
            failed["fields"] = list(map(_format_task, report[FAILED_KEY][TASKS_KEY]))
        attachments.append(failed)
    else:
        # Non-state return: a single attachment with the raw return value
        attachments.append(
            {
                "color": "good" if report["success"] else "danger",
                "title": "Return: {}".format(report.get("return", None)),
            }
        )
    payload = {"attachments": attachments}
    return payload
def _process_state(returns):
    """
    Process the received output state
    :param returns: A dictionary with the returns of the recipe
    :return: A dictionary with Unchanged, Changed and Failed tasks
    """
    # Order states by their execution order within the run
    sorted_data = sorted(returns.items(), key=lambda s: s[1].get("__run_num__", 0))
    n_total = 0
    n_failed = 0
    n_changed = 0
    duration = 0.0
    changed_tasks = []
    failed_tasks = []
    # gather stats
    for state, data in sorted_data:
        # state: module, stateid, name, function
        _, stateid, _, _ = state.split("_|-")
        # Fix: the original hard-coded "(unknown)" in the label while the
        # computed ``filename`` keyword was silently ignored by str.format;
        # label each task with its actual sls file instead.
        task = "{filename}.sls | {taskname}".format(
            filename=str(data.get("__sls__")), taskname=stateid
        )
        if not data.get("result", True):
            n_failed += 1
            failed_tasks.append(task)
        if data.get("changes", {}):
            n_changed += 1
            changed_tasks.append(task)
        n_total += 1
        try:
            duration += float(data.get("duration", 0.0))
        except ValueError:
            pass
    n_unchanged = n_total - n_failed - n_changed
    # Durations are reported by Salt in milliseconds; convert to seconds
    return {
        TOTAL_KEY: n_total,
        UNCHANGED_KEY: {COUNTER_KEY: n_unchanged},
        CHANGED_KEY: {COUNTER_KEY: n_changed, TASKS_KEY: changed_tasks},
        FAILED_KEY: {COUNTER_KEY: n_failed, TASKS_KEY: failed_tasks},
        DURATION_KEY: duration / 1000,
    }
def _state_return(ret):
"""
Return True if ret is a Salt state return
:param ret: The Salt return
"""
ret_data = ret.get("return")
if not isinstance(ret_data, dict):
return False
return ret_data and "__id__" in next(iter(ret_data.values()))
def _generate_report(ret, show_tasks):
    """
    Generate a report of the Salt function
    :param ret: The Salt return
    :param show_tasks: Flag to show the name of the changed and failed states
    :return: The report
    """
    # retcode 0 means success; a missing retcode is treated as failure
    report = {
        "id": ret.get("id"),
        "success": True if ret.get("retcode", 1) == 0 else False,
        "function": ret.get("fun"),
        "arguments": ret.get("fun_args", []),
        "jid": ret.get("jid"),
    }
    ret_return = ret.get("return")
    if _state_return(ret):
        # State run: aggregate per-state results into counters/task lists
        ret_return = _process_state(ret_return)
        if not show_tasks:
            del ret_return[CHANGED_KEY][TASKS_KEY]
            del ret_return[FAILED_KEY][TASKS_KEY]
    elif isinstance(ret_return, dict):
        # Generic dict return: render it as YAML for readability
        ret_return = {
            "return": "\n{}".format(salt.utils.yaml.safe_dump(ret_return, indent=2))
        }
    else:
        ret_return = {"return": ret_return}
    report.update(ret_return)
    return report
def _post_message(webhook, author_icon, title, report, **kwargs):
    """
    Send a message to a Slack room through a webhook
    :param webhook: The url of the incoming webhook
    :param author_icon: The thumbnail image to be displayed on the right side of the message
    :param title: The title of the message
    :param report: The report of the function state
    :return: Boolean if message was sent successfully
    """
    event_rtn = kwargs.get("event_rtn", False)
    payload = _generate_payload(author_icon, title, report, event_rtn=event_rtn)
    # Slack's incoming-webhook API expects a form-encoded "payload" field
    data = urllib.parse.urlencode({"payload": json.dumps(payload, ensure_ascii=False)})
    webhook_url = urllib.parse.urljoin("https://hooks.slack.com/services/", webhook)
    query_result = salt.utils.http.query(webhook_url, "POST", data=data)
    # Sometimes the status is not available, so status 200 is assumed when it is not present
    if (
        query_result.get("body", "failed") == "ok"
        and query_result.get("status", 200) == 200
    ):
        return True
    else:
        log.error("Slack incoming webhook message post result: %s", query_result)
        return {"res": False, "message": query_result.get("body", query_result)}
def returner(ret, **kwargs):
    """
    Send a slack message with the data through a webhook
    :param ret: The Salt return
    :return: The result of the post
    """
    options = _get_options(ret)
    webhook = options.get("webhook", None)
    if not webhook or webhook == "":
        log.error("%s.webhook not defined in salt config", __virtualname__)
        return
    report = _generate_report(ret, options.get("show_tasks"))
    # Pick the title template matching the run outcome
    title_key = "success_title" if report.get("success") else "failure_title"
    return _post_message(
        webhook,
        options.get("author_icon"),
        options.get(title_key),
        report,
        event_rtn=kwargs.get("event_rtn", False),
    )
def event_return(events):
    """
    Send event data to returner function
    :param events: The Salt event return
    :return: The result of the post (for the last forwarded event)
    """
    results = None
    for event in events:
        ret = event.get("data", False)
        # Fix: the original mixed ``and``/``or`` without parentheses, so the
        # intended filter never applied (job-poll events whose tag is not
        # "salt/auth" were still forwarded) and a falsy ``ret`` raised
        # TypeError when evaluating ``ret["tag"]``. Skip internal job polls
        # and auth events explicitly.
        if (
            ret
            and "saltutil.find_job" not in ret["fun"]
            and "salt/auth" not in ret["tag"]
        ):
            results = returner(ret, event_rtn=True)
    return results
import logging
import salt.returners
import salt.utils.jid
try:
import appoptics_metrics
HAS_APPOPTICS = True
except ImportError:
HAS_APPOPTICS = False
# Define the module's Virtual Name
__virtualname__ = "appoptics"
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only load this returner when the appoptics-metrics client is importable.
    """
    if HAS_APPOPTICS:
        return __virtualname__
    return (
        False,
        "Could not import appoptics_metrics module; "
        "appoptics-metrics python client is not installed.",
    )
def _get_options(ret=None):
    """
    Get the appoptics options from salt.

    :param ret: Optional Salt return dict used for option lookup.
    :return: Dict with ``api_token``, ``api_url``, ``tags`` and
        ``sls_states`` keys (defaults filled in).
    """
    attrs = {
        "api_token": "api_token",
        "api_url": "api_url",
        "tags": "tags",
        "sls_states": "sls_states",
    }
    _options = salt.returners.get_returner_options(
        __virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__
    )
    # Fill in defaults: public API endpoint, no extra states, and a
    # hostname-alias tag based on the minion id.
    _options["api_url"] = _options.get("api_url", "api.appoptics.com")
    _options["sls_states"] = _options.get("sls_states", [])
    _options["tags"] = _options.get(
        "tags", {"host_hostname_alias": __salt__["grains.get"]("id")}
    )
    log.debug("Retrieved appoptics options: %s", _options)
    return _options
def _get_appoptics(options):
    """
    Return an appoptics connection object.

    :param options: Options dict as returned by ``_get_options``.
    :return: A connection created by ``appoptics_metrics.connect``.
    """
    # Metric names are sanitized so they are valid for the AppOptics API
    conn = appoptics_metrics.connect(
        options.get("api_token"),
        sanitizer=appoptics_metrics.sanitize_metric_name,
        hostname=options.get("api_url"),
    )
    log.info("Connected to appoptics.")
    return conn
def _calculate_runtimes(states):
    """
    Aggregate per-state results into pass/fail counts and total runtime.

    :param states: Mapping of state id -> result dict (as found in a
        state-run return).
    :return: Dict with ``runtime``, ``num_failed_states`` and
        ``num_passed_states`` keys.
    """
    results = {"runtime": 0.00, "num_failed_states": 0, "num_passed_states": 0}
    for resultset in states.values():
        # Only entries shaped like state results carry a duration
        if not (isinstance(resultset, dict) and "duration" in resultset):
            continue
        bucket = "num_passed_states" if resultset["result"] else "num_failed_states"
        results[bucket] += 1
        results["runtime"] += resultset["duration"]
    log.debug("Parsed state metrics: %s", results)
    return results
def _state_metrics(ret, options, tags):
    """
    Batch and submit state-run metrics (retcode, pass/fail counts, runtime)
    to AppOptics.

    :param ret: The Salt return dict for the state run.
    :param options: AppOptics options as returned by ``_get_options``.
    :param tags: Tags attached to every metric in the batch.
    """
    # Calculate the runtimes and number of failed states.
    stats = _calculate_runtimes(ret["return"])
    log.debug("Batching Metric retcode with %s", ret["retcode"])
    appoptics_conn = _get_appoptics(options)
    q = appoptics_conn.new_queue(tags=tags)
    q.add("saltstack.retcode", ret["retcode"])
    log.debug("Batching Metric num_failed_jobs with %s", stats["num_failed_states"])
    q.add("saltstack.failed", stats["num_failed_states"])
    log.debug("Batching Metric num_passed_states with %s", stats["num_passed_states"])
    q.add("saltstack.passed", stats["num_passed_states"])
    # Fix: the original used '"..." . stats["runtime"]' (attribute access on
    # the format string) which raised AttributeError; pass the value as a
    # lazy %-style logging argument instead.
    log.debug("Batching Metric runtime with %s", stats["runtime"])
    q.add("saltstack.runtime", stats["runtime"])
    log.debug(
        "Batching with Metric total states %s",
        stats["num_failed_states"] + stats["num_passed_states"],
    )
    q.add(
        "saltstack.highstate.total_states",
        (stats["num_failed_states"] + stats["num_passed_states"]),
    )
    log.info("Sending metrics to appoptics.")
    q.submit()
def returner(ret):
    """
    Parse the return data and return metrics to AppOptics.
    For each state that's provided in the configuration, return tagged metrics for
    the result of that state if it's present.
    """
    options = _get_options(ret)
    states_to_report = ["state.highstate"]
    if options.get("sls_states"):
        states_to_report.append("state.sls")
    if ret["fun"] not in states_to_report:
        return
    # Copy so the shared options dict is never mutated
    tags = dict(options.get("tags", {}))
    tags["state_type"] = ret["fun"]
    log.info("Tags for this run are %s", str(tags))
    matched_states = set(ret["fun_args"]) & set(options.get("sls_states", []))
    # What can I do if a run has multiple states that match?
    # In the mean time, find one matching state name and use it.
    if matched_states:
        tags["state_name"] = sorted(matched_states)[0]
        log.debug("Found returned data from %s.", tags["state_name"])
    _state_metrics(ret, options, tags)
import salt.pillar
import salt.utils.data
import salt.utils.dictupdate
from salt.defaults import DEFAULT_TARGET_DELIM
try:
# Python 3
from collections.abc import Mapping
except ImportError:
# We still allow Py2 import because this could be executed in a machine with Py2.
from collections import Mapping # pylint: disable=no-name-in-module
def get(key, default="", merge=False, delimiter=DEFAULT_TARGET_DELIM):
    """
    .. versionadded:: 0.14

    Attempt to retrieve the named value from pillar; if it is not present,
    return ``default`` (an empty string unless overridden).

    ``key`` may address nested values using a ``:``-separated path, so for
    pillar data ``{'pkg': {'apache': 'httpd'}}`` the value is reachable via
    ``pkg:apache``.

    merge
        If ``True`` and both the looked-up value and ``default`` are
        mappings, recursively merge the retrieved pillar data into the
        passed default before returning it.

        .. versionadded:: 2015.5.0

    delimiter
        Specify an alternate delimiter to use when traversing a nested dict

        .. versionadded:: 2015.5.0

    CLI Example:

    .. code-block:: bash

        salt '*' pillar.get pkg:apache
    """
    pillar_data = __pillar__.value()
    if merge:
        found = salt.utils.data.traverse_dict_and_list(pillar_data, key, {}, delimiter)
        if isinstance(found, Mapping) and isinstance(default, Mapping):
            return salt.utils.dictupdate.update(default, found)
    return salt.utils.data.traverse_dict_and_list(pillar_data, key, default, delimiter)
def item(*args):
    """
    .. versionadded:: 0.16.2

    Return one or more pillar entries

    CLI Examples:

    .. code-block:: bash

        salt '*' pillar.item foo
        salt '*' pillar.item foo bar baz
    """
    ret = {}
    for pillar_key in args:
        # Missing keys are silently skipped rather than raising
        try:
            value = __pillar__[pillar_key]
        except KeyError:
            continue
        ret[pillar_key] = value
    return ret
def raw(key=None):
    """
    Return the raw pillar data that is available in the module. This will
    show the pillar as it is loaded as the __pillar__ dict.

    CLI Example:

    .. code-block:: bash

        salt '*' pillar.raw

    With the optional key argument, you can select a subtree of the
    pillar raw data.::

        salt '*' pillar.raw key='roles'
    """
    # A missing subtree yields an empty dict rather than an error
    return __pillar__.get(key, {}) if key else __pillar__.value()
def keys(key, delimiter=DEFAULT_TARGET_DELIM):
    """
    .. versionadded:: 2015.8.0

    Attempt to retrieve a list of keys from the named value from the pillar.

    The value can also represent a value in a nested dict using a ":" delimiter
    for the dict, similar to how pillar.get works.

    delimiter
        Specify an alternate delimiter to use when traversing a nested dict

    CLI Example:

    .. code-block:: bash

        salt '*' pillar.keys web:sites
    """
    # The KeyError class itself is used as a "not found" sentinel, since any
    # real pillar value (including None) could be a legitimate result.
    found = salt.utils.data.traverse_dict_and_list(
        __pillar__.value(), key, KeyError, delimiter
    )
    if found is KeyError:
        raise KeyError("Pillar key not found: {}".format(key))
    if not isinstance(found, dict):
        raise ValueError("Pillar value in key {} is not a dict".format(key))
    return found.keys()
# Allow pillar.data to also be used to return pillar data
items = raw
data = items
import logging
import time
import salt.config
import salt.loader
import salt.syspaths
from salt.utils.odict import OrderedDict
log = logging.getLogger(__name__)
def factory(opts, **kwargs):
    """
    Creates and returns the cache class.
    If memory caching is enabled by opts MemCache class will be instantiated.
    If not Cache class will be returned.
    """
    # A non-zero memcache_expire_seconds enables the in-memory layer
    cache_cls = MemCache if opts.get("memcache_expire_seconds", 0) else Cache
    return cache_cls(opts, **kwargs)
class Cache:
    """
    Base caching object providing access to the modular cache subsystem.
    Related configuration options:
    :param cache:
        The name of the cache driver to use. This is the name of the python
        module of the `salt.cache` package. Default is `localfs`.
    Terminology:
    Salt cache subsystem is organized as a tree with nodes and leafs like a
    filesystem. Cache consists of banks. Each bank can contain a number of
    keys. Each key can contain a dict or any other object serializable with
    `salt.payload`. I.e. any data object in the cache can be
    addressed by the path to the bank and the key name:
    bank: 'minions/alpha'
    key: 'data'
    Bank names should be formatted in a way that can be used as a
    directory structure. If slashes are included in the name, then they
    refer to a nested structure.
    Key name is a string identifier of a data container (like a file inside a
    directory) which will hold the data.
    """

    def __init__(self, opts, cachedir=None, **kwargs):
        """
        Store config and resolve the driver name; the driver module itself
        is loaded lazily on first use (see ``modules``).
        """
        self.opts = opts
        if cachedir is None:
            self.cachedir = opts.get("cachedir", salt.syspaths.CACHE_DIR)
        else:
            self.cachedir = cachedir
        self.driver = opts.get("cache", salt.config.DEFAULT_MASTER_OPTS["cache"])
        self._modules = None
        # Extra kwargs are forwarded to every driver call; the cachedir is
        # always included for drivers that need a filesystem location.
        self._kwargs = kwargs
        self._kwargs["cachedir"] = self.cachedir

    def __lazy_init(self):
        # Load the cache driver modules and let the driver normalize the
        # call kwargs through its optional ``init_kwargs`` hook.
        self._modules = salt.loader.cache(self.opts)
        fun = "{}.init_kwargs".format(self.driver)
        if fun in self.modules:
            self._kwargs = self.modules[fun](self._kwargs)
        else:
            self._kwargs = {}

    @property
    def modules(self):
        # Lazily-initialized loader dict of cache driver functions
        if self._modules is None:
            self.__lazy_init()
        return self._modules

    def cache(self, bank, key, fun, loop_fun=None, **kwargs):
        """
        Check cache for the data. If it is there, check to see if it needs to
        be refreshed.
        If the data is not there, or it needs to be refreshed, then call the
        callback function (``fun``) with any given ``**kwargs``.
        In some cases, the callback function returns a list of objects which
        need to be processed by a second function. If that is the case, then
        the second function is passed in as ``loop_fun``. Each item in the
        return list from the first function will be the only argument for the
        second function.
        """
        expire_seconds = kwargs.get("expire", 86400)  # 1 day
        updated = self.updated(bank, key)
        update_cache = False
        if updated is None:
            update_cache = True
        else:
            if int(time.time()) - updated > expire_seconds:
                update_cache = True
        data = self.fetch(bank, key)
        # Refresh when the entry is missing, empty, or stale
        if not data or update_cache is True:
            if loop_fun is not None:
                data = []
                items = fun(**kwargs)
                for item in items:
                    data.append(loop_fun(item))
            else:
                data = fun(**kwargs)
            self.store(bank, key, data)
        return data

    def store(self, bank, key, data):
        """
        Store data using the specified module
        :param bank:
            The name of the location inside the cache which will hold the key
            and its associated data.
        :param key:
            The name of the key (or file inside a directory) which will hold
            the data. File extensions should not be provided, as they will be
            added by the driver itself.
        :param data:
            The data which will be stored in the cache. This data should be
            in a format which can be serialized by msgpack.
        :raises SaltCacheError:
            Raises an exception if cache driver detected an error accessing data
            in the cache backend (auth, permissions, etc).
        """
        fun = "{}.store".format(self.driver)
        return self.modules[fun](bank, key, data, **self._kwargs)

    def fetch(self, bank, key):
        """
        Fetch data using the specified module
        :param bank:
            The name of the location inside the cache which will hold the key
            and its associated data.
        :param key:
            The name of the key (or file inside a directory) which will hold
            the data. File extensions should not be provided, as they will be
            added by the driver itself.
        :return:
            Return a python object fetched from the cache or an empty dict if
            the given path or key not found.
        :raises SaltCacheError:
            Raises an exception if cache driver detected an error accessing data
            in the cache backend (auth, permissions, etc).
        """
        fun = "{}.fetch".format(self.driver)
        return self.modules[fun](bank, key, **self._kwargs)

    def updated(self, bank, key):
        """
        Get the last updated epoch for the specified key
        :param bank:
            The name of the location inside the cache which will hold the key
            and its associated data.
        :param key:
            The name of the key (or file inside a directory) which will hold
            the data. File extensions should not be provided, as they will be
            added by the driver itself.
        :return:
            Return an int epoch time in seconds or None if the object wasn't
            found in cache.
        :raises SaltCacheError:
            Raises an exception if cache driver detected an error accessing data
            in the cache backend (auth, permissions, etc).
        """
        fun = "{}.updated".format(self.driver)
        return self.modules[fun](bank, key, **self._kwargs)

    def flush(self, bank, key=None):
        """
        Remove the key from the cache bank with all the key content. If no key is specified remove
        the entire bank with all keys and sub-banks inside.
        :param bank:
            The name of the location inside the cache which will hold the key
            and its associated data.
        :param key:
            The name of the key (or file inside a directory) which will hold
            the data. File extensions should not be provided, as they will be
            added by the driver itself.
        :raises SaltCacheError:
            Raises an exception if cache driver detected an error accessing data
            in the cache backend (auth, permissions, etc).
        """
        fun = "{}.flush".format(self.driver)
        return self.modules[fun](bank, key=key, **self._kwargs)

    def list(self, bank):
        """
        Lists entries stored in the specified bank.
        :param bank:
            The name of the location inside the cache which will hold the key
            and its associated data.
        :return:
            An iterable object containing all bank entries. Returns an empty
            iterator if the bank doesn't exists.
        :raises SaltCacheError:
            Raises an exception if cache driver detected an error accessing data
            in the cache backend (auth, permissions, etc).
        """
        fun = "{}.list".format(self.driver)
        return self.modules[fun](bank, **self._kwargs)

    def contains(self, bank, key=None):
        """
        Checks if the specified bank contains the specified key.
        :param bank:
            The name of the location inside the cache which will hold the key
            and its associated data.
        :param key:
            The name of the key (or file inside a directory) which will hold
            the data. File extensions should not be provided, as they will be
            added by the driver itself.
        :return:
            Returns True if the specified key exists in the given bank and False
            if not.
            If key is None checks for the bank existense.
        :raises SaltCacheError:
            Raises an exception if cache driver detected an error accessing data
            in the cache backend (auth, permissions, etc).
        """
        fun = "{}.contains".format(self.driver)
        return self.modules[fun](bank, key, **self._kwargs)
class MemCache(Cache):
    """
    Short-lived in-memory cache store keeping values on time and/or size (count)
    basis.
    """

    # Class-level store shared by all MemCache instances:
    # {<storage_id>: odict({(bank, key): [atime, data], ...}), ...}
    data = {}

    def __init__(self, opts, **kwargs):
        super().__init__(opts, **kwargs)
        self.expire = opts.get("memcache_expire_seconds", 10)
        self.max = opts.get("memcache_max_items", 1024)
        self.cleanup = opts.get("memcache_full_cleanup", False)
        self.debug = opts.get("memcache_debug", False)
        if self.debug:
            self.call = 0
            self.hit = 0
        self._storage = None

    @classmethod
    def __cleanup(cls, expire):
        # Entries are kept in access order, so we can stop scanning a
        # storage at the first entry that is still fresh.
        now = time.time()
        for storage in cls.data.values():
            for key, data in list(storage.items()):
                if data[0] + expire < now:
                    del storage[key]
                else:
                    break

    def _get_storage_id(self):
        fun = "{}.storage_id".format(self.driver)
        if fun in self.modules:
            # Fix: the original referenced the non-existent attribute
            # ``self.kwargs`` (Cache stores the call kwargs as
            # ``self._kwargs``), raising AttributeError for any driver
            # that provides a ``storage_id`` function.
            return self.modules[fun](self._kwargs)
        else:
            return self.driver

    @property
    def storage(self):
        # Resolve (and memoize) the shared per-driver storage dict
        if self._storage is None:
            storage_id = self._get_storage_id()
            if storage_id not in MemCache.data:
                MemCache.data[storage_id] = OrderedDict()
            self._storage = MemCache.data[storage_id]
        return self._storage

    def fetch(self, bank, key):
        if self.debug:
            self.call += 1
        now = time.time()
        record = self.storage.pop((bank, key), None)
        # Have a cached value for the key
        if record is not None and record[0] + self.expire >= now:
            if self.debug:
                self.hit += 1
                log.debug(
                    "MemCache stats (call/hit/rate): %s/%s/%s",
                    self.call,
                    self.hit,
                    float(self.hit) / self.call,
                )
            # update atime and return
            record[0] = now
            self.storage[(bank, key)] = record
            return record[1]
        # Have no value for the key or value is expired
        data = super().fetch(bank, key)
        if len(self.storage) >= self.max:
            if self.cleanup:
                MemCache.__cleanup(self.expire)
            if len(self.storage) >= self.max:
                # Evict the least recently used entry
                self.storage.popitem(last=False)
        self.storage[(bank, key)] = [now, data]
        return data

    def store(self, bank, key, data):
        self.storage.pop((bank, key), None)
        super().store(bank, key, data)
        if len(self.storage) >= self.max:
            if self.cleanup:
                MemCache.__cleanup(self.expire)
            if len(self.storage) >= self.max:
                # Evict the least recently used entry
                self.storage.popitem(last=False)
        self.storage[(bank, key)] = [time.time(), data]

    def flush(self, bank, key=None):
        if key is None:
            # Drop every in-memory entry belonging to the bank
            for bank_, key_ in tuple(self.storage):
                if bank == bank_:
                    self.storage.pop((bank_, key_))
        else:
            self.storage.pop((bank, key), None)
        super().flush(bank, key)
import os
import salt.utils.path
# Functions that are mapped into an equivalent one in
# transactional_update module
DELEGATION_MAP = {
    "state.single": "transactional_update.single",
    "state.sls": "transactional_update.sls",
    "state.apply": "transactional_update.apply",
    "state.highstate": "transactional_update.highstate",
}
# By default, all modules and functions are executed outside the
# transaction. The next two sets will enumerate the exceptions that
# will be routed to transactional_update.call()
DEFAULT_DELEGATED_MODULES = [
    "ansible",
    "cabal",
    "chef",
    "cmd",
    "composer",
    "cp",
    "cpan",
    "cyg",
    "file",
    "freeze",
    "nix",
    "npm",
    "pip",
    "pkg",
    "puppet",
    "pyenv",
    "rbenv",
    "scp",
]
# No individual functions are delegated by default; users extend this via
# the delegated_functions / add_delegated_functions config options.
DEFAULT_DELEGATED_FUNCTIONS = []
def __virtual__():
    """Only load on systems that ship the transactional-update tool."""
    if not salt.utils.path.which("transactional-update"):
        return (False, "transactional_update executor requires a transactional system")
    return True
def execute(opts, data, func, args, kwargs):
    """Delegate into transactional_update module

    The ``transactional_update`` module support the execution of
    functions inside a transaction, as support apply a state (via
    ``apply``, ``sls``, ``single`` or ``highstate``).

    This execution module can be used to route some Salt modules and
    functions to be executed inside the transaction snapshot.

    Add this executor in the minion configuration file:

    .. code-block:: yaml

        module_executors:
          - transactional_update
          - direct_call

    Or use the command line parameter:

    .. code-block:: bash

        salt-call --module-executors='[transactional_update, direct_call]' test.version

    You can also schedule a reboot if needed:

    .. code-block:: bash

        salt-call --module-executors='[transactional_update]' state.sls stuff activate_transaction=True

    There are some configuration parameters supported:

    .. code-block:: yaml

        # Replace the list of default modules that all the functions
        # are delegated to `transactional_update.call()`
        delegated_modules: [cmd, pkg]

        # Replace the list of default functions that are delegated to
        # `transactional_update.call()`
        delegated_functions: [pip.install]

        # Expand the default list of modules
        add_delegated_modules: [ansible]

        # Expand the default list of functions
        add_delegated_functions: [file.copy]
    """
    # Set in the environment by transactional-update when we are already
    # running inside a transaction; in that case calls must not be re-routed.
    inside_transaction = os.environ.get("TRANSACTIONAL_UPDATE")

    fun = data["fun"]
    # Only the module prefix matters here; use maxsplit=1 so function names
    # that themselves contain a dot do not raise ValueError on unpacking.
    module, _ = fun.split(".", 1)

    delegated_modules = set(opts.get("delegated_modules", DEFAULT_DELEGATED_MODULES))
    delegated_functions = set(
        opts.get("delegated_functions", DEFAULT_DELEGATED_FUNCTIONS)
    )
    # Per-call executor_opts take precedence over minion-level options.
    if "executor_opts" in data:
        delegated_modules |= set(data["executor_opts"].get("add_delegated_modules", []))
        delegated_functions |= set(
            data["executor_opts"].get("add_delegated_functions", [])
        )
    else:
        delegated_modules |= set(opts.get("add_delegated_modules", []))
        delegated_functions |= set(opts.get("add_delegated_functions", []))

    if fun in DELEGATION_MAP and not inside_transaction:
        # State entry points are swapped for their transactional twins.
        result = __executors__["direct_call.execute"](
            opts, data, __salt__[DELEGATION_MAP[fun]], args, kwargs
        )
    elif (
        module in delegated_modules or fun in delegated_functions
    ) and not inside_transaction:
        # Whole-module or single-function delegation into the transaction.
        result = __salt__["transactional_update.call"](fun, *args, **kwargs)
    else:
        result = __executors__["direct_call.execute"](opts, data, func, args, kwargs)
return result | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/executors/transactional_update.py | 0.681197 | 0.42316 | transactional_update.py | pypi |
import logging
import time
import salt.utils.stringutils
log = logging.getLogger(__name__)
# Default number of seconds to spread executions across (overridable via
# the ``splaytime`` option).
_DEFAULT_SPLAYTIME = 300
# Number of hash buckets that minion ids are mapped into.
_HASH_SIZE = 8192
# This minion's bucket; populated once by __init__().
_HASH_VAL = None
def __init__(opts):
    """
    Compute this minion's hash bucket once at executor load time.

    opts: the minion options dict (unused; required by the loader interface).
    """
    global _HASH_VAL
    _HASH_VAL = _get_hash()
def _get_hash():
    """
    Jenkins One-At-A-Time Hash Function

    Hashes this minion's id into one of ``_HASH_SIZE`` buckets.
    More Info: http://en.wikipedia.org/wiki/Jenkins_hash_function#one-at-a-time
    """
    # Using bitmask to emulate rollover behavior of C unsigned 32 bit int
    bitmask = 0xFFFFFFFF
    h = 0

    # Per-byte mixing steps of the one-at-a-time hash.
    for i in bytearray(salt.utils.stringutils.to_bytes(__grains__["id"])):
        h = (h + i) & bitmask
        h = (h + (h << 10)) & bitmask
        h = (h ^ (h >> 6)) & bitmask

    # Final avalanche applied once after all bytes are consumed.
    h = (h + (h << 3)) & bitmask
    h = (h ^ (h >> 11)) & bitmask
    h = (h + (h << 15)) & bitmask

    # Fold into the bucket range [0, _HASH_SIZE).
    return (h & (_HASH_SIZE - 1)) & bitmask
def _calc_splay(splaytime):
    """
    Scale this minion's hash bucket into a delay in ``[0, splaytime)`` seconds.
    """
    return int(splaytime * _HASH_VAL / float(_HASH_SIZE))
def execute(opts, data, func, args, kwargs):
    """
    Splay a salt function call execution time across minions over
    a number of seconds (default: 300)

    .. note::
        You *probably* want to use --async here and look up the job results later.
        If you're dead set on getting the output from the CLI command, then make
        sure to set the timeout (with the -t flag) to something greater than the
        splaytime (max splaytime + time to execute job).
        Otherwise, it's very likely that the cli will time out before the job returns.

    CLI Example:

    .. code-block:: bash

        # With default splaytime
        salt --async --module-executors='[splay, direct_call]' '*' pkg.install cowsay version=3.03-8.el6

    .. code-block:: bash

        # With specified splaytime (5 minutes) and timeout with 10 second buffer
        salt -t 310 --module-executors='[splay, direct_call]' --executor-opts='{splaytime: 300}' '*' pkg.version cowsay
    """
    # Per-call override from --executor-opts wins over the minion config.
    if "executor_opts" in data and "splaytime" in data["executor_opts"]:
        splaytime = data["executor_opts"]["splaytime"]
    else:
        splaytime = opts.get("splaytime", _DEFAULT_SPLAYTIME)
    if splaytime <= 0:
        raise ValueError("splaytime must be a positive integer")
    fun_name = data.get("fun")
    # The delay is deterministic per minion (derived from the minion id
    # hash), spreading start times across [0, splaytime).
    my_delay = _calc_splay(splaytime)
    log.debug("Splay is sleeping %s secs on %s", my_delay, fun_name)

    time.sleep(my_delay)
    # Returning None afterwards lets the next executor in the chain run.
return None | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/executors/splay.py | 0.555435 | 0.175114 | splay.py | pypi |
import copy
import datetime
import logging
import yaml
from salt.serializers import DeserializationError, SerializationError
from salt.utils.aggregation import Map, Sequence, aggregate
from salt.utils.odict import OrderedDict
from yaml.constructor import ConstructorError
from yaml.nodes import MappingNode
from yaml.scanner import ScannerError
__all__ = ["deserialize", "serialize", "available"]

log = logging.getLogger(__name__)

# This serializer only needs PyYAML, which is a hard Salt dependency.
available = True

# prefer C bindings over python when available
BaseLoader = getattr(yaml, "CSafeLoader", yaml.SafeLoader)
# CSafeDumper causes repr errors in python3, so use the pure Python one
try:
    # Depending on how PyYAML was built, yaml.SafeDumper may actually be
    # yaml.dumper.SafeDumper.
    BaseDumper = yaml.dumper.SafeDumper
except AttributeError:
    # Here just in case, but yaml.dumper.SafeDumper should always exist
    BaseDumper = yaml.SafeDumper

# Friendlier messages substituted for known cryptic scanner errors.
ERROR_MAP = {
    "found character '\\t' that cannot start any token": "Illegal tab character"
}
def deserialize(stream_or_string, **options):
    """
    Deserialize any string or stream-like object into a Python data structure.

    :param stream_or_string: stream or string to deserialize.
    :param options: options given to lower yaml module.
    :raises DeserializationError: wrapping any error raised by the yaml parser.
    """
    options.setdefault("Loader", Loader)
    try:
        return yaml.load(stream_or_string, **options)
    except ScannerError as error:
        log.exception("Error encountered while deserializing")
        # Translate known cryptic scanner messages into friendlier text.
        err_type = ERROR_MAP.get(error.problem, "Unknown yaml render error")
        line_num = error.problem_mark.line + 1
        raise DeserializationError(err_type, line_num, error.problem_mark.buffer)
    except ConstructorError as error:
        log.exception("Error encountered while deserializing")
        raise DeserializationError(error)
    except Exception as error:  # pylint: disable=broad-except
        log.exception("Error encountered while deserializing")
        raise DeserializationError(error)
def serialize(obj, **options):
    """
    Serialize Python data to YAML.

    :param obj: the data structure to serialize
    :param options: options given to lower yaml module.
    :raises SerializationError: wrapping any error raised by yaml.dump.
    """
    options.setdefault("Dumper", Dumper)
    options.setdefault("default_flow_style", None)
    try:
        dumped = yaml.dump(obj, **options)
    except Exception as error:  # pylint: disable=broad-except
        log.exception("Error encountered while serializing")
        raise SerializationError(error)
    # Trim the trailing document-end marker / newline yaml.dump appends.
    if dumped.endswith("\n...\n"):
        return dumped[:-5]
    if dumped.endswith("\n"):
        return dumped[:-1]
    return dumped
class Loader(BaseLoader):  # pylint: disable=W0232
    """
    Create a custom YAML loader that uses the custom constructor. This allows
    for the YAML loading defaults to be manipulated based on needs within salt
    to make things like sls file more intuitive.
    """

    # Tags assigned to plain scalars/sequences/mappings when resolving the
    # payload of an ``!aggregate`` or ``!reset`` node.
    DEFAULT_SCALAR_TAG = "tag:yaml.org,2002:str"
    DEFAULT_SEQUENCE_TAG = "tag:yaml.org,2002:seq"
    DEFAULT_MAPPING_TAG = "tag:yaml.org,2002:omap"

    def compose_document(self):
        # Tag the document root so construction runs through the
        # aggregation-aware constructors below.
        node = BaseLoader.compose_document(self)
        node.tag = "!aggregate"
        return node

    def construct_yaml_omap(self, node):
        """
        Build the SLSMap, merging duplicate keys recursively unless the
        key carries the ``!reset`` tag.
        """
        sls_map = SLSMap()
        if not isinstance(node, MappingNode):
            raise ConstructorError(
                None,
                None,
                "expected a mapping node, but found {}".format(node.id),
                node.start_mark,
            )

        self.flatten_mapping(node)

        for key_node, value_node in node.value:

            # !reset instruction applies on document only.
            # It tells to reset previous decoded value for this present key.
            reset = key_node.tag == "!reset"

            # Even though !aggregate applies only to values and not keys,
            # be strict: move the tag onto the value and re-resolve the key.
            if key_node.tag == "!aggregate":
                log.warning("!aggregate applies on values only, not on keys")
                value_node.tag = key_node.tag
                key_node.tag = self.resolve_sls_tag(key_node)[0]

            key = self.construct_object(key_node, deep=False)
            try:
                hash(key)
            except TypeError:
                raise ConstructorError(
                    "While constructing a mapping {} found unacceptable key {}".format(
                        node.start_mark, key_node.start_mark
                    )
                )
            value = self.construct_object(value_node, deep=False)
            if key in sls_map and not reset:
                # Duplicate key: fold the new value into the existing one.
                value = merge_recursive(sls_map[key], value)
            sls_map[key] = value
        return sls_map

    def construct_sls_str(self, node):
        """
        Build the SLSString.
        """
        # Ensure obj is str, not py2 unicode or py3 bytes
        obj = self.construct_scalar(node)
        return SLSString(obj)

    def construct_sls_int(self, node):
        """
        Verify integers and pass them in correctly if they are declared
        as octal
        """
        if node.value == "0":
            pass
        elif node.value.startswith("0") and not node.value.startswith(("0b", "0x")):
            node.value = node.value.lstrip("0")
            # If value was all zeros, node.value would have been reduced to
            # an empty string. Change it to '0'.
            if node.value == "":
                node.value = "0"
        return int(node.value)

    def construct_sls_aggregate(self, node):
        """Wrap the node's value in the matching Aggregated* container."""
        try:
            tag, deep = self.resolve_sls_tag(node)
        except Exception:  # pylint: disable=broad-except
            # NOTE(review): message says 'reset' -- looks copy-pasted from
            # construct_sls_reset; kept as-is since it is a runtime string.
            raise ConstructorError("unable to build reset")
        node = copy.copy(node)
        node.tag = tag
        obj = self.construct_object(node, deep)
        if obj is None:
            return AggregatedSequence()
        elif tag == self.DEFAULT_MAPPING_TAG:
            return AggregatedMap(obj)
        elif tag == self.DEFAULT_SEQUENCE_TAG:
            return AggregatedSequence(obj)
        return AggregatedSequence([obj])

    def construct_sls_reset(self, node):
        """Re-resolve and construct the node, discarding aggregation."""
        try:
            tag, deep = self.resolve_sls_tag(node)
        except Exception:  # pylint: disable=broad-except
            raise ConstructorError("unable to build reset")

        node = copy.copy(node)
        node.tag = tag

        return self.construct_object(node, deep)

    def resolve_sls_tag(self, node):
        """Return ``(tag, deep)`` for constructing *node* without custom tags."""
        if isinstance(node, yaml.nodes.ScalarNode):
            # search implicit tag
            tag = self.resolve(yaml.nodes.ScalarNode, node.value, [True, True])
            deep = False
        elif isinstance(node, yaml.nodes.SequenceNode):
            tag = self.DEFAULT_SEQUENCE_TAG
            deep = True
        elif isinstance(node, yaml.nodes.MappingNode):
            tag = self.DEFAULT_MAPPING_TAG
            deep = True
        else:
            raise ConstructorError("unable to resolve tag")
        return tag, deep
# Salt-specific tags.
Loader.add_constructor("!aggregate", Loader.construct_sls_aggregate)  # custom type
Loader.add_constructor("!reset", Loader.construct_sls_reset)  # custom type
# Standard tags rerouted to the SLS-aware constructors above.
Loader.add_constructor(
    "tag:yaml.org,2002:omap", Loader.construct_yaml_omap
)  # our overwrite
Loader.add_constructor(
    "tag:yaml.org,2002:str", Loader.construct_sls_str
)  # our overwrite
Loader.add_constructor(
    "tag:yaml.org,2002:int", Loader.construct_sls_int
)  # our overwrite
# NOTE(review): the remaining standard tags are registered with
# add_multi_constructor even though they are exact tags. PyYAML checks
# exact-tag constructors (inherited from SafeLoader) first, so these
# registrations appear to be inert -- confirm intent before relying on them.
Loader.add_multi_constructor("tag:yaml.org,2002:null", Loader.construct_yaml_null)
Loader.add_multi_constructor("tag:yaml.org,2002:bool", Loader.construct_yaml_bool)
Loader.add_multi_constructor("tag:yaml.org,2002:float", Loader.construct_yaml_float)
Loader.add_multi_constructor("tag:yaml.org,2002:binary", Loader.construct_yaml_binary)
Loader.add_multi_constructor(
    "tag:yaml.org,2002:timestamp", Loader.construct_yaml_timestamp
)
Loader.add_multi_constructor("tag:yaml.org,2002:pairs", Loader.construct_yaml_pairs)
Loader.add_multi_constructor("tag:yaml.org,2002:set", Loader.construct_yaml_set)
Loader.add_multi_constructor("tag:yaml.org,2002:seq", Loader.construct_yaml_seq)
Loader.add_multi_constructor("tag:yaml.org,2002:map", Loader.construct_yaml_map)
class SLSMap(OrderedDict):
    """
    Ensures that dict str() and repr() are YAML friendly.

    .. code-block:: python

        >>> mapping = OrderedDict([('a', 'b'), ('c', None)])
        >>> print mapping
        OrderedDict([('a', 'b'), ('c', None)])
        >>> sls_map = SLSMap(mapping)
        >>> print sls_map.__str__()
        {a: b, c: null}

    """

    def __str__(self):
        # Render as a single-line YAML flow mapping.
        return serialize(self, default_flow_style=True)

    def __repr__(self, _repr_running=None):
        # NOTE(review): _repr_running is accepted but ignored; presumably it
        # mirrors OrderedDict's recursive-repr signature -- confirm.
        return serialize(self, default_flow_style=True)
class SLSString(str):
    """
    Ensures that str str() and repr() are YAML friendly.

    .. code-block:: python

        >>> scalar = str('foo')
        >>> print 'foo'
        foo
        >>> sls_scalar = SLSString(scalar)
        >>> print sls_scalar
        "foo"

    """

    def __str__(self):
        # Force double-quoted style when rendering the scalar.
        return serialize(self, default_style='"')

    def __repr__(self):
        return serialize(self, default_style='"')
class AggregatedMap(SLSMap, Map):
    """Mapping flavor produced for values tagged ``!aggregate``."""

    pass
class AggregatedSequence(Sequence):
    """Sequence flavor produced for values tagged ``!aggregate``."""

    pass
class Dumper(BaseDumper):  # pylint: disable=W0232
    """
    sls dumper.
    """

    def represent_odict(self, data):
        # Represent any dict-like object as a plain YAML mapping, keeping
        # the items in their existing order.
        return self.represent_mapping("tag:yaml.org,2002:map", list(data.items()))
# Representers for the standard Python types, including subclasses
# (add_multi_representer matches by isinstance).
Dumper.add_multi_representer(type(None), Dumper.represent_none)
Dumper.add_multi_representer(bytes, Dumper.represent_binary)
Dumper.add_multi_representer(str, Dumper.represent_str)
Dumper.add_multi_representer(bool, Dumper.represent_bool)
Dumper.add_multi_representer(int, Dumper.represent_int)
Dumper.add_multi_representer(float, Dumper.represent_float)
Dumper.add_multi_representer(list, Dumper.represent_list)
Dumper.add_multi_representer(tuple, Dumper.represent_list)
Dumper.add_multi_representer(
    dict, Dumper.represent_odict
)  # make every dict like obj to be represented as a map
Dumper.add_multi_representer(set, Dumper.represent_set)
Dumper.add_multi_representer(datetime.date, Dumper.represent_date)
Dumper.add_multi_representer(datetime.datetime, Dumper.represent_datetime)
# Fallback for any type without a registered representer.
Dumper.add_multi_representer(None, Dumper.represent_undefined)
def merge_recursive(obj_a, obj_b, level=False):
    """
    Merge obj_b into obj_a.

    The merge rules (maps merged key-by-key, sequences aggregated) are
    delegated to :func:`salt.utils.aggregation.aggregate`.
    """
    return aggregate(
        obj_a, obj_b, level, map_class=AggregatedMap, sequence_class=AggregatedSequence
) | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/serializers/yamlex.py | 0.687945 | 0.186613 | yamlex.py | pypi |
import datetime
import logging
import yaml
from salt.serializers import DeserializationError, SerializationError
from salt.utils.odict import OrderedDict
from yaml.constructor import ConstructorError
from yaml.scanner import ScannerError
__all__ = ["deserialize", "serialize", "available"]

log = logging.getLogger(__name__)

# This serializer only needs PyYAML, which is a hard Salt dependency.
available = True

# prefer C bindings over python when available
BaseLoader = getattr(yaml, "CSafeLoader", yaml.SafeLoader)
BaseDumper = getattr(yaml, "CSafeDumper", yaml.SafeDumper)

# Friendlier messages substituted for known cryptic scanner errors.
ERROR_MAP = {
    "found character '\\t' that cannot start any token": "Illegal tab character"
}
def deserialize(stream_or_string, **options):
    """
    Deserialize any string or stream-like object into a Python data structure.

    :param stream_or_string: stream or string to deserialize.
    :param options: options given to lower yaml module.
    :raises DeserializationError: wrapping any error raised by the yaml parser.
    """
    options.setdefault("Loader", Loader)
    try:
        return yaml.load(stream_or_string, **options)
    except ScannerError as error:
        log.exception("Error encountered while deserializing")
        # Swap known cryptic scanner messages for friendlier text.
        friendly = ERROR_MAP.get(error.problem, "Unknown yaml render error")
        raise DeserializationError(
            friendly, error.problem_mark.line + 1, error.problem_mark.buffer
        )
    except ConstructorError as error:
        log.exception("Error encountered while deserializing")
        raise DeserializationError(error)
    except Exception as error:  # pylint: disable=broad-except
        log.exception("Error encountered while deserializing")
        raise DeserializationError(error)
def serialize(obj, **options):
    """
    Serialize Python data to YAML.

    :param obj: the data structure to serialize
    :param options: options given to lower yaml module.
    :raises SerializationError: wrapping any error raised by yaml.dump.
    """
    options.setdefault("Dumper", Dumper)
    options.setdefault("default_flow_style", None)
    try:
        dumped = yaml.dump(obj, **options)
    except Exception as error:  # pylint: disable=broad-except
        log.exception("Error encountered while serializing")
        raise SerializationError(error)
    # Trim the trailing document-end marker / newline yaml.dump appends.
    if dumped.endswith("\n...\n"):
        return dumped[:-5]
    if dumped.endswith("\n"):
        return dumped[:-1]
    return dumped
class EncryptedString(str):
    """
    ``str`` subclass marking values carried by the ``!encrypted`` YAML tag.
    """

    yaml_tag = "!encrypted"

    @staticmethod
    def yaml_constructor(loader, tag, node):
        """Build an EncryptedString from a tagged scalar node."""
        return EncryptedString(loader.construct_scalar(node))

    @staticmethod
    def yaml_dumper(dumper, data):
        """Represent *data* as a scalar carrying the ``!encrypted`` tag."""
        return dumper.represent_scalar(EncryptedString.yaml_tag, data.__str__())
class Loader(BaseLoader):  # pylint: disable=W0232
    """Subclass so the registrations below do not pollute the legacy Loader."""

Loader.add_multi_constructor(EncryptedString.yaml_tag, EncryptedString.yaml_constructor)
# NOTE(review): the standard tags below are registered with
# add_multi_constructor even though they are exact tags. PyYAML checks
# exact-tag constructors (inherited from SafeLoader) first, so these
# registrations appear to be inert -- confirm intent before relying on them.
Loader.add_multi_constructor("tag:yaml.org,2002:null", Loader.construct_yaml_null)
Loader.add_multi_constructor("tag:yaml.org,2002:bool", Loader.construct_yaml_bool)
Loader.add_multi_constructor("tag:yaml.org,2002:int", Loader.construct_yaml_int)
Loader.add_multi_constructor("tag:yaml.org,2002:float", Loader.construct_yaml_float)
Loader.add_multi_constructor("tag:yaml.org,2002:binary", Loader.construct_yaml_binary)
Loader.add_multi_constructor(
    "tag:yaml.org,2002:timestamp", Loader.construct_yaml_timestamp
)
Loader.add_multi_constructor("tag:yaml.org,2002:omap", Loader.construct_yaml_omap)
Loader.add_multi_constructor("tag:yaml.org,2002:pairs", Loader.construct_yaml_pairs)
Loader.add_multi_constructor("tag:yaml.org,2002:set", Loader.construct_yaml_set)
Loader.add_multi_constructor("tag:yaml.org,2002:str", Loader.construct_yaml_str)
Loader.add_multi_constructor("tag:yaml.org,2002:seq", Loader.construct_yaml_seq)
Loader.add_multi_constructor("tag:yaml.org,2002:map", Loader.construct_yaml_map)
class Dumper(BaseDumper):  # pylint: disable=W0232
    """Subclass so the registrations below do not pollute the legacy Dumper."""

# add_multi_representer matches by isinstance, so subclasses are covered.
Dumper.add_multi_representer(EncryptedString, EncryptedString.yaml_dumper)
Dumper.add_multi_representer(type(None), Dumper.represent_none)
Dumper.add_multi_representer(str, Dumper.represent_str)
Dumper.add_multi_representer(bool, Dumper.represent_bool)
Dumper.add_multi_representer(int, Dumper.represent_int)
Dumper.add_multi_representer(float, Dumper.represent_float)
Dumper.add_multi_representer(list, Dumper.represent_list)
Dumper.add_multi_representer(tuple, Dumper.represent_list)
Dumper.add_multi_representer(dict, Dumper.represent_dict)
Dumper.add_multi_representer(set, Dumper.represent_set)
Dumper.add_multi_representer(datetime.date, Dumper.represent_date)
Dumper.add_multi_representer(datetime.datetime, Dumper.represent_datetime)
# Fallback for any type without a registered representer.
Dumper.add_multi_representer(None, Dumper.represent_undefined)
Dumper.add_multi_representer(OrderedDict, Dumper.represent_dict) | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/serializers/yaml.py | 0.672547 | 0.153486 | yaml.py | pypi |
import configparser
import io
from salt.serializers import DeserializationError, SerializationError
__all__ = ["deserialize", "serialize", "available"]

# Only relies on the stdlib configparser module, so always available.
available = True
def deserialize(stream_or_string, **options):
    """
    Deserialize any string or stream like object into a Python data structure.

    :param stream_or_string: stream, ``str`` or ``bytes`` to deserialize.
    :param options: options given to lower configparser module.
    :return: dict of ``{section: {option: value}}``; all values are strings.
    :raises DeserializationError: when the input is not valid INI data.
    """
    cp = configparser.ConfigParser(**options)

    try:
        if isinstance(stream_or_string, bytes):
            # ConfigParser only reads text; decode bytes up front instead
            # of crashing inside io.StringIO (the previous behavior).
            stream_or_string = stream_or_string.decode("utf-8")
        if not isinstance(stream_or_string, str):
            # Assume an already-open text stream.
            cp.read_file(stream_or_string)
        else:
            cp.read_file(io.StringIO(stream_or_string))
        data = {}
        for section_name in cp.sections():
            section = {}
            for k, v in cp.items(section_name):
                section[k] = v
            data[section_name] = section
        return data
    except Exception as error:  # pylint: disable=broad-except
        raise DeserializationError(error)
def serialize(obj, **options):
    """
    Serialize Python data to a configparser formatted string or file.

    :param obj: the data structure to serialize; must be a dict of dicts.
    :param options: options given to lower configparser module. The special
        ``fp`` option, when present, is a file-like object to write into.
    """
    try:
        if not isinstance(obj, dict):
            raise TypeError(
                "configparser can only serialize dictionaries, not {}".format(type(obj))
            )
        # Pull ``fp`` out before the remaining options reach ConfigParser.
        fp = options.pop("fp", None)
        cp = configparser.ConfigParser(**options)
        _read_dict(cp, obj)

        if fp:
            return cp.write(fp)
        buf = io.StringIO()
        cp.write(buf)
        return buf.getvalue()
    except Exception as error:  # pylint: disable=broad-except
        raise SerializationError(error)
def _is_defaultsect(section_name):
return section_name == configparser.DEFAULTSECT
def _read_dict(cp, dictionary):
    """
    Cribbed from python3's ConfigParser.read_dict function.

    Populates ``cp`` from a dict of ``{section: {option: value}}``,
    coercing section names, option keys and values to strings.
    """
    for section, keys in dictionary.items():
        section = str(section)
        if not _is_defaultsect(section):
            # DEFAULT is implicit in configparser and must not be added.
            cp.add_section(section)

        for key, value in keys.items():
            key = cp.optionxform(str(key))
            if value is not None:
                value = str(value)
cp.set(section, key, value) | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/serializers/configparser.py | 0.563378 | 0.169819 | configparser.py | pypi |
r"""
The Linode Cloud Module
=======================
The Linode cloud module is used to interact with the Linode Cloud.
You can target a specific version of the Linode API with the ``api_version`` parameter. The default is ``v3``.
Provider
--------
The following provider parameters are supported:
- **apikey**: (required) The key to use to authenticate with the Linode API.
- **password**: (required) The default password to set on new VMs. Must be 8 characters with at least one lowercase, uppercase, and numeric.
- **api_version**: (optional) The version of the Linode API to interact with. Defaults to ``v3``.
- **poll_interval**: (optional) The rate of time in milliseconds to poll the Linode API for changes. Defaults to ``500``.
- **ratelimit_sleep**: (optional) The time in seconds to wait before retrying after a ratelimit has been enforced. Defaults to ``0``.
.. note::
APIv3 usage is deprecated and will be removed in a future release in favor of APIv4. To move to APIv4 now,
set the ``api_version`` parameter in your provider configuration to ``v4``. See the full migration guide
here https://docs.saltproject.io/en/latest/topics/cloud/linode.html#migrating-to-apiv4.
Set up the provider configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
driver: linode
api_version: v4
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
For use with APIv3 (deprecated):
.. code-block:: yaml
my-linode-provider-v3:
driver: linode
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
Profile
-------
The following profile parameters are supported:
- **size**: (required) The size of the VM. This should be a Linode instance type ID (i.e. ``g6-standard-2``). For APIv3, this would be a plan ID (i.e. ``Linode 2GB``). Run ``salt-cloud -f avail_sizes my-linode-provider`` for options.
- **location**: (required) The location of the VM. This should be a Linode region (e.g. ``us-east``). For APIv3, this would be a datacenter location (i.e. ``Newark, NJ, USA``). Run ``salt-cloud -f avail_locations my-linode-provider`` for options.
- **image**: (required) The image to deploy the boot disk from. This should be an image ID (e.g. ``linode/ubuntu16.04``); official images start with ``linode/``. For APIv3, this would be an image label (i.e. Ubuntu 16.04). Run ``salt-cloud -f avail_images my-linode-provider`` for more options.
- **password**: (\*required) The default password for the VM. Must be provided at the profile or provider level.
- **assign_private_ip**: (optional) Whether or not to assign a private IP address to the VM. Defaults to ``False``.
- **ssh_interface**: (optional) The interface with which to connect over SSH. Valid options are ``private_ips`` or ``public_ips``. Defaults to ``public_ips``.
- **ssh_pubkey**: (optional) The public key to authorize for SSH with the VM.
- **swap**: (optional) The amount of disk space to allocate for the swap partition. Defaults to ``256``.
- **clonefrom**: (optional) The name of the Linode to clone from.
- **disk_size**: (deprecated, optional) The amount of disk space to allocate for the OS disk. This has no effect with APIv4; the size of the boot disk will be the remainder of disk space after the swap partition is allocated.
Set up a profile configuration in ``/etc/salt/cloud.profiles.d/``:
.. code-block:: yaml
my-linode-profile:
# a minimal configuration
provider: my-linode-provider
size: g6-standard-1
image: linode/alpine3.12
location: us-east
my-linode-profile-advanced:
# an advanced configuration
provider: my-linode-provider
size: g6-standard-3
image: linode/alpine3.10
location: eu-west
password: bogus123X
assign_private_ip: true
ssh_interface: private_ips
ssh_pubkey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQAB...
swap_size: 512
my-linode-profile-v3:
# a legacy configuration
provider: my-linode-provider-v3
size: Nanode 1GB
image: Alpine 3.12
location: Fremont, CA, USA
Migrating to APIv4
------------------
In order to target APIv4, ensure your provider configuration has ``api_version`` set to ``v4``.
You will also need to generate a new token for your account. See https://www.linode.com/docs/platform/api/getting-started-with-the-linode-api/#create-an-api-token
There are a few changes to note:
- There has been a general move from label references to ID references. The profile configuration parameters ``location``, ``size``, and ``image`` have moved from being label based references to IDs. See the profile section for more information. In addition to these inputs being changed, ``avail_sizes``, ``avail_locations``, and ``avail_images`` now output options sorted by ID instead of label.
- The ``disk_size`` profile configuration parameter has been deprecated and will not be taken into account when creating new VMs while targeting APIv4.
:maintainer: Charles Kenney <ckenney@linode.com>
:maintainer: Phillip Campbell <pcampbell@linode.com>
:depends: requests
"""
import abc
import datetime
import json
import logging
import pprint
import re
import time
from pathlib import Path
import salt.config as config
from salt._compat import ipaddress
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit,
)
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
HAS_WARNED_FOR_API_V3 = False
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))

# Human-readable status fields for APIv3 (documentation: https://www.linode.com/api/linode/linode.list)
# NOTE(review): the "beeing_created" key is misspelled but kept as-is in
# case other code looks it up by this exact name.
LINODE_STATUS = {
    "boot_failed": {"code": -2, "descr": "Boot Failed (not in use)"},
    "beeing_created": {"code": -1, "descr": "Being Created"},
    "brand_new": {"code": 0, "descr": "Brand New"},
    "running": {"code": 1, "descr": "Running"},
    "poweroff": {"code": 2, "descr": "Powered Off"},
    "shutdown": {"code": 3, "descr": "Shutting Down (not in use)"},
    "save_to_disk": {"code": 4, "descr": "Saved to Disk (not in use)"},
}

__virtualname__ = "linode"
# Only load in this module if the Linode configurations are in place
def __virtual__():
    """
    Check for Linode configs.
    """
    # Require both provider configuration and the requests dependency.
    if get_configured_provider() is False or _get_dependencies() is False:
        return False
    return __virtualname__
def _get_active_provider_name():
    """
    Return the active provider name, tolerating both the newer callable and
    the older plain-value forms of ``__active_provider_name__``.
    """
    try:
        return __active_provider_name__.value()
    except AttributeError:
        return __active_provider_name__

def get_configured_provider():
    """
    Return the first configured instance.
    """
    return config.is_provider_configured(
        __opts__,
        _get_active_provider_name() or __virtualname__,
        ("apikey", "password"),
    )

def _get_dependencies():
    """
    Warn if dependencies aren't met.
    """
    deps = {"requests": HAS_REQUESTS}
    return config.check_driver_dependencies(__virtualname__, deps)
def _get_api_version():
    """
    Return the configured Linode API version (defaults to ``v3`` when the
    provider does not set ``api_version``).
    """
    return config.get_cloud_config_value(
        "api_version",
        get_configured_provider(),
        __opts__,
        search_global=False,
        default="v3",
    )

def _is_api_v3():
    """
    Return whether the configured Linode API version is ``v3``.
    """
    return _get_api_version() == "v3"

def _get_cloud_interface():
    # Pick the driver implementation matching the configured API version.
    if _is_api_v3():
        return LinodeAPIv3()
    return LinodeAPIv4()
def _get_api_key():
    """
    Return the configured Linode API key.
    """
    # Prefer ``api_key`` but fall back to the legacy ``apikey`` option.
    val = config.get_cloud_config_value(
        "api_key",
        get_configured_provider(),
        __opts__,
        search_global=False,
        default=config.get_cloud_config_value(
            "apikey", get_configured_provider(), __opts__, search_global=False
        ),
    )
    return val

def _get_ratelimit_sleep():
    """
    Return the configured time in seconds to wait before retrying after a
    ratelimit has been enforced (default ``0``).
    """
    return config.get_cloud_config_value(
        "ratelimit_sleep",
        get_configured_provider(),
        __opts__,
        search_global=False,
        default=0,
    )

def _get_poll_interval():
    """
    Return the configured interval in milliseconds to poll the Linode API
    for changes at (default ``500``).
    """
    return config.get_cloud_config_value(
        "poll_interval",
        get_configured_provider(),
        __opts__,
        search_global=False,
        default=500,
    )
def _get_password(vm_):
    r"""
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    """
    # Prefer ``password`` but fall back to the legacy ``passwd`` option.
    return config.get_cloud_config_value(
        "password",
        vm_,
        __opts__,
        default=config.get_cloud_config_value(
            "passwd", vm_, __opts__, search_global=False
        ),
        search_global=False,
    )

def _get_root_disk_size(vm_):
    """
    Return the specified size of the data partition, or None when unset.
    """
    return config.get_cloud_config_value(
        "disk_size", vm_, __opts__, search_global=False
    )

def _get_private_ip(vm_):
    """
    Return True if a private ip address is requested
    """
    return config.get_cloud_config_value(
        "assign_private_ip", vm_, __opts__, default=False
    )

def _get_ssh_key_files(vm_):
    """
    Return the configured file paths of the SSH keys (default: empty list).
    """
    return config.get_cloud_config_value(
        "ssh_key_files", vm_, __opts__, search_global=False, default=[]
    )

def _get_ssh_key(vm_):
    r"""
    Return the SSH pubkey.

    vm\_
        The configuration to obtain the public key from.
    """
    return config.get_cloud_config_value(
        "ssh_pubkey", vm_, __opts__, search_global=False
    )

def _get_swap_size(vm_):
    r"""
    Returns the amount of swap space to be used in MB.

    vm\_
        The VM profile to obtain the swap size from.
    """
    return config.get_cloud_config_value("swap", vm_, __opts__, default=256)
def _get_ssh_keys(vm_):
    """
    Return all SSH keys from ``ssh_pubkey`` and ``ssh_key_files``.

    :raises SaltCloudSystemExit: when an entry of ``ssh_key_files`` is not
        a readable regular file.
    """
    ssh_keys = set()

    raw_pub_key = _get_ssh_key(vm_)
    if raw_pub_key is not None:
        ssh_keys.add(raw_pub_key)

    key_files = _get_ssh_key_files(vm_)
    for file in map(lambda f: Path(f).resolve(), key_files):
        # Path.is_file() is False both for missing paths and for
        # directories, so this also protects the read_text() below.
        # (The previous ``exists() or is_file()`` check reduced to a bare
        # existence test and let directories crash read_text().)
        if not file.is_file():
            raise SaltCloudSystemExit("Invalid SSH key file: {}".format(str(file)))
        ssh_keys.add(file.read_text())

    return list(ssh_keys)
def _get_ssh_interface(vm_):
    """
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    """
    return config.get_cloud_config_value(
        "ssh_interface", vm_, __opts__, default="public_ips", search_global=False
    )
def _validate_name(name):
"""
Checks if the provided name fits Linode's labeling parameters.
.. versionadded:: 2015.5.6
name
The VM name to validate
"""
name = str(name)
name_length = len(name)
regex = re.compile(r"^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$")
if name_length < 3 or name_length > 48:
ret = False
elif not re.match(regex, name):
ret = False
else:
ret = True
if ret is False:
log.warning(
"A Linode label may only contain ASCII letters or numbers, dashes, and "
"underscores, must begin and end with letters or numbers, and be at least "
"three characters in length."
)
return ret
def _warn_for_api_v3():
    """Log the APIv3 deprecation warning, at most once per process."""
    global HAS_WARNED_FOR_API_V3
    if not HAS_WARNED_FOR_API_V3:
        log.warning(
            "Linode APIv3 has been deprecated and support will be removed "
            "in future releases. Please plan to upgrade to APIv4. For more "
            "information, see"
            " https://docs.saltproject.io/en/latest/topics/cloud/linode.html#migrating-to-apiv4."
        )
        HAS_WARNED_FOR_API_V3 = True
class LinodeAPI:
    """
    Common interface implemented by the APIv3 and APIv4 drivers.

    NOTE(review): this is not an ``abc.ABC`` subclass, so the
    ``@abc.abstractmethod`` markers below are not enforced at
    instantiation time.
    """

    @abc.abstractmethod
    def avail_images(self):
        """avail_images implementation"""

    @abc.abstractmethod
    def avail_locations(self):
        """avail_locations implementation"""

    @abc.abstractmethod
    def avail_sizes(self):
        """avail_sizes implementation"""

    @abc.abstractmethod
    def boot(self, name=None, kwargs=None):
        """boot implementation"""

    @abc.abstractmethod
    def clone(self, kwargs=None):
        """clone implementation"""

    @abc.abstractmethod
    def create_config(self, kwargs=None):
        """create_config implementation"""

    @abc.abstractmethod
    def create(self, vm_):
        """create implementation"""

    @abc.abstractmethod
    def destroy(self, name):
        """destroy implementation"""

    @abc.abstractmethod
    def get_config_id(self, kwargs=None):
        """get_config_id implementation"""

    @abc.abstractmethod
    def list_nodes(self):
        """list_nodes implementation"""

    @abc.abstractmethod
    def list_nodes_full(self):
        """list_nodes_full implementation"""

    @abc.abstractmethod
    def list_nodes_min(self):
        """list_nodes_min implementation"""

    @abc.abstractmethod
    def reboot(self, name):
        """reboot implementation"""

    @abc.abstractmethod
    def show_instance(self, name):
        """show_instance implementation"""

    @abc.abstractmethod
    def show_pricing(self, kwargs=None):
        """show_pricing implementation"""

    @abc.abstractmethod
    def start(self, name):
        """start implementation"""

    @abc.abstractmethod
    def stop(self, name):
        """stop implementation"""

    @abc.abstractmethod
    def _get_linode_by_name(self, name):
        """_get_linode_by_name implementation"""

    @abc.abstractmethod
    def _get_linode_by_id(self, linode_id):
        """_get_linode_by_id implementation"""

    def get_plan_id(self, kwargs=None):
        """get_plan_id implementation"""
        # Overridden where supported; rejected for other API versions.
        raise SaltCloudSystemExit(
            "The get_plan_id is not supported by this api_version."
        )

    def get_linode(self, kwargs=None):
        """
        Fetch a linode by ``linode_id`` (preferred) or ``name`` from kwargs.
        """
        name = kwargs.get("name", None)
        linode_id = kwargs.get("linode_id", None)

        if linode_id is not None:
            return self._get_linode_by_id(linode_id)
        elif name is not None:
            return self._get_linode_by_name(name)

        raise SaltCloudSystemExit(
            "The get_linode function requires either a 'name' or a 'linode_id'."
        )

    def list_nodes_select(self, call):
        """
        Filter list_nodes_full() down to the fields named by the
        ``query.selection`` option.
        """
        return __utils__["cloud.list_nodes_select"](
            self.list_nodes_full(),
            __opts__["query.selection"],
            call,
        )
class LinodeAPIv4(LinodeAPI):
    """
    Linode driver backed by APIv4, spoken over HTTPS via ``requests``.
    """
    def _query(self, path=None, method="GET", data=None, headers=None):
        """
        Make a call to the Linode API.

        Returns the decoded JSON body, except for DELETE requests where the
        raw ``requests`` response is returned. Retries indefinitely on HTTP
        429 (rate limit); other HTTP errors raise SaltCloudSystemExit.
        """
        api_version = _get_api_version()
        api_key = _get_api_key()
        ratelimit_sleep = _get_ratelimit_sleep()
        if headers is None:
            headers = {}
        headers["Authorization"] = "Bearer {}".format(api_key)
        headers["Content-Type"] = "application/json"
        headers["User-Agent"] = "salt-cloud-linode"
        url = "https://api.linode.com/{}{}".format(api_version, path)
        # DELETE responses carry no JSON body worth decoding.
        decode = method != "DELETE"
        result = None
        log.debug("Linode API request: %s %s", method, url)
        if data is not None:
            log.trace("Linode API request body: %s", data)
        # NOTE(review): 'attempt' is never incremented or read.
        attempt = 0
        while True:
            try:
                result = requests.request(method, url, json=data, headers=headers)
                log.debug("Linode API response status code: %d", result.status_code)
                log.trace("Linode API response body: %s", result.text)
                result.raise_for_status()
                break
            except requests.exceptions.HTTPError as exc:
                err_response = exc.response
                err_data = self._get_response_json(err_response)
                status_code = err_response.status_code
                if status_code == 429:
                    # Rate limited: sleep and retry the same request.
                    log.debug(
                        "received rate limit; retrying in %d seconds", ratelimit_sleep
                    )
                    time.sleep(ratelimit_sleep)
                    continue
                if err_data is not None:
                    # Build an error from the response JSON
                    if "error" in err_data:
                        raise SaltCloudSystemExit(
                            "Linode API reported error: {}".format(err_data["error"])
                        )
                    elif "errors" in err_data:
                        # NOTE(review): 'api_errors' is assigned but unused.
                        api_errors = err_data["errors"]
                        # Build Salt exception
                        errors = []
                        for error in err_data["errors"]:
                            if "field" in error:
                                errors.append(
                                    "field '{}': {}".format(
                                        error.get("field"), error.get("reason")
                                    )
                                )
                            else:
                                errors.append(error.get("reason"))
                        raise SaltCloudSystemExit(
                            "Linode API reported error(s): {}".format(", ".join(errors))
                        )
                # If the response is not valid JSON or the error was not included, propagate the
                # human readable status representation.
                raise SaltCloudSystemExit(
                    "Linode API error occurred: {}".format(err_response.reason)
                )
        if decode:
            return self._get_response_json(result)
        return result
    def avail_images(self):
        """Return available images keyed by image ID."""
        response = self._query(path="/images")
        ret = {}
        for image in response["data"]:
            ret[image["id"]] = image
        return ret
    def avail_locations(self):
        """Return available regions keyed by region ID."""
        response = self._query(path="/regions")
        ret = {}
        for region in response["data"]:
            ret[region["id"]] = region
        return ret
    def avail_sizes(self):
        """Return available instance types keyed by type ID."""
        response = self._query(path="/linode/types")
        ret = {}
        for instance_type in response["data"]:
            ret[instance_type["id"]] = instance_type
        return ret
    def boot(self, name=None, kwargs=None):
        """
        Boot an instance (looked up by 'linode_id' or name) and block until it
        reports the 'running' status. Returns True on success.
        """
        instance = self.get_linode(
            kwargs={"linode_id": kwargs.get("linode_id", None), "name": name}
        )
        config_id = kwargs.get("config_id", None)
        check_running = kwargs.get("check_running", True)
        linode_id = instance.get("id", None)
        name = instance.get("label", None)
        if check_running:
            if instance["status"] == "running":
                raise SaltCloudSystemExit(
                    "Cannot boot Linode {0} ({1}). "
                    "Linode {0} is already running.".format(name, linode_id)
                )
        # NOTE(review): the response is not inspected; success is determined
        # by polling the instance status below.
        response = self._query(
            "/linode/instances/{}/boot".format(linode_id),
            method="POST",
            data={"config_id": config_id},
        )
        self._wait_for_linode_status(linode_id, "running")
        return True
    def clone(self, kwargs=None):
        """
        Clone an existing Linode ('linode_id') into a new one at 'location'
        with instance type 'size'.
        """
        linode_id = kwargs.get("linode_id", None)
        location = kwargs.get("location", None)
        size = kwargs.get("size", None)
        if "datacenter_id" in kwargs:
            log.warning(
                "The 'datacenter_id' argument has been deprecated and will be "
                "removed in future releases. Please use 'location' instead."
            )
        if "plan_id" in kwargs:
            log.warning(
                "The 'plan_id' argument has been deprecated and will be "
                "removed in future releases. Please use 'size' instead."
            )
        for item in [linode_id, location, size]:
            if item is None:
                raise SaltCloudSystemExit(
                    "The clone function requires a 'linode_id', 'location',"
                    "and 'size' to be provided."
                )
        return self._query(
            "/linode/instances/{}/clone".format(linode_id),
            method="POST",
            data={"region": location, "type": size},
        )
    def create_config(self, kwargs=None):
        """
        Create a boot configuration profile for an instance. Device mapping:
        sda = root disk, sdb = optional data disk, sdc = swap disk.
        """
        name = kwargs.get("name", None)
        linode_id = kwargs.get("linode_id", None)
        root_disk_id = kwargs.get("root_disk_id", None)
        swap_disk_id = kwargs.get("swap_disk_id", None)
        data_disk_id = kwargs.get("data_disk_id", None)
        if not name and not linode_id:
            raise SaltCloudSystemExit(
                "The create_config function requires either a 'name' or 'linode_id'"
            )
        required_params = [name, linode_id, root_disk_id, swap_disk_id]
        for item in required_params:
            if item is None:
                raise SaltCloudSystemExit(
                    "The create_config functions requires a 'name', 'linode_id', "
                    "'root_disk_id', and 'swap_disk_id'."
                )
        devices = {
            "sda": {"disk_id": int(root_disk_id)},
            "sdb": {"disk_id": int(data_disk_id)} if data_disk_id is not None else None,
            "sdc": {"disk_id": int(swap_disk_id)},
        }
        return self._query(
            "/linode/instances/{}/configs".format(linode_id),
            method="POST",
            data={"label": name, "devices": devices},
        )
    def create(self, vm_):
        """
        Create a single Linode from the profile in ``vm_`` (either a fresh
        instance or a clone of vm_['clonefrom']), wait for it to boot,
        bootstrap it via salt-cloud, and return the bootstrap result merged
        with instance details.
        """
        name = vm_["name"]
        if not _validate_name(name):
            return False
        __utils__["cloud.fire_event"](
            "event",
            "starting create",
            "salt/cloud/{}/creating".format(name),
            args=__utils__["cloud.filter_event"](
                "creating", vm_, ["name", "profile", "provider", "driver"]
            ),
            sock_dir=__opts__["sock_dir"],
            transport=__opts__["transport"],
        )
        log.info("Creating Cloud VM %s", name)
        result = None
        pub_ssh_keys = _get_ssh_keys(vm_)
        ssh_interface = _get_ssh_interface(vm_)
        use_private_ip = ssh_interface == "private_ips"
        assign_private_ip = _get_private_ip(vm_) or use_private_ip
        password = _get_password(vm_)
        swap_size = _get_swap_size(vm_)
        clonefrom_name = vm_.get("clonefrom", None)
        instance_type = vm_.get("size", None)
        image = vm_.get("image", None)
        should_clone = True if clonefrom_name else False
        if should_clone:
            # clone into new linode
            clone_linode = self.get_linode(kwargs={"name": clonefrom_name})
            # NOTE(review): this calls the module-level clone() wrapper with a
            # positional dict; the keys match self.clone(kwargs=...) exactly,
            # which looks like the intended call — confirm the wrapper accepts
            # a positional kwargs dict without its 'call' argument.
            result = clone(
                {
                    "linode_id": clone_linode["id"],
                    "location": clone_linode["region"],
                    "size": clone_linode["type"],
                }
            )
            # create private IP if needed
            if assign_private_ip:
                self._query(
                    "/networking/ips",
                    method="POST",
                    data={"type": "ipv4", "public": False, "linode_id": result["id"]},
                )
        else:
            # create new linode
            result = self._query(
                "/linode/instances",
                method="POST",
                data={
                    "label": name,
                    "type": instance_type,
                    "region": vm_.get("location", None),
                    "private_ip": assign_private_ip,
                    "booted": True,
                    "root_pass": password,
                    "authorized_keys": pub_ssh_keys,
                    "image": image,
                    "swap_size": swap_size,
                },
            )
        linode_id = result.get("id", None)
        # wait for linode to be created
        self._wait_for_event("linode_create", "linode", linode_id, "finished")
        log.debug("linode '%s' has been created", name)
        # Clones start powered off; fresh instances were created with
        # 'booted': True above.
        if should_clone:
            self.boot(kwargs={"linode_id": linode_id})
        # wait for linode to finish booting
        self._wait_for_linode_status(linode_id, "running")
        public_ips, private_ips = self._get_ips(linode_id)
        data = {}
        data["id"] = linode_id
        data["name"] = result["label"]
        data["size"] = result["type"]
        data["state"] = result["status"]
        data["ipv4"] = result["ipv4"]
        data["ipv6"] = result["ipv6"]
        data["public_ips"] = public_ips
        data["private_ips"] = private_ips
        if use_private_ip:
            vm_["ssh_host"] = private_ips[0]
        else:
            vm_["ssh_host"] = public_ips[0]
        # Send event that the instance has booted.
        __utils__["cloud.fire_event"](
            "event",
            "waiting for ssh",
            "salt/cloud/{}/waiting_for_ssh".format(name),
            sock_dir=__opts__["sock_dir"],
            args={"ip_address": vm_["ssh_host"]},
            transport=__opts__["transport"],
        )
        ret = __utils__["cloud.bootstrap"](vm_, __opts__)
        ret.update(data)
        log.info("Created Cloud VM '%s'", name)
        log.debug("'%s' VM creation details:\n%s", name, pprint.pformat(data))
        __utils__["cloud.fire_event"](
            "event",
            "created instance",
            "salt/cloud/{}/created".format(name),
            args=__utils__["cloud.filter_event"](
                "created", vm_, ["name", "profile", "provider", "driver"]
            ),
            sock_dir=__opts__["sock_dir"],
            transport=__opts__["transport"],
        )
        return ret
    def destroy(self, name):
        """
        Delete the named instance via the API and clean up its minion cache
        entry.

        NOTE(review): the 'destroyed instance' event is fired *before* the
        DELETE request is issued (the v3 driver fires 'destroying' first and
        'destroyed' after) — confirm this ordering is intentional.
        """
        __utils__["cloud.fire_event"](
            "event",
            "destroyed instance",
            "salt/cloud/{}/destroyed".format(name),
            args={"name": name},
            sock_dir=__opts__["sock_dir"],
            transport=__opts__["transport"],
        )
        if __opts__.get("update_cachedir", False) is True:
            __utils__["cloud.delete_minion_cachedir"](
                name, _get_active_provider_name().split(":")[0], __opts__
            )
        instance = self._get_linode_by_name(name)
        linode_id = instance.get("id", None)
        self._query("/linode/instances/{}".format(linode_id), method="DELETE")
    def get_config_id(self, kwargs=None):
        """
        Return {'config_id': ...} for the first configuration profile of the
        instance identified by 'name' or 'linode_id'.
        """
        name = kwargs.get("name", None)
        linode_id = kwargs.get("linode_id", None)
        if name is None and linode_id is None:
            raise SaltCloudSystemExit(
                "The get_config_id function requires either a 'name' or a 'linode_id' "
                "to be provided."
            )
        if linode_id is None:
            linode_id = self.get_linode(kwargs=kwargs).get("id", None)
        response = self._query("/linode/instances/{}/configs".format(linode_id))
        configs = response.get("data", [])
        # NOTE(review): assumes the instance has at least one config profile;
        # an empty list would raise IndexError here.
        return {"config_id": configs[0]["id"]}
    def list_nodes_min(self):
        """Return {label: {id, state}} for every instance on the account."""
        result = self._query("/linode/instances")
        instances = result.get("data", [])
        ret = {}
        for instance in instances:
            name = instance["label"]
            ret[name] = {"id": instance["id"], "state": instance["status"]}
        return ret
    def list_nodes_full(self):
        return self._list_linodes(full=True)
    def list_nodes(self):
        return self._list_linodes()
    def reboot(self, name):
        """Reboot the named instance and wait until it is 'running' again."""
        instance = self._get_linode_by_name(name)
        linode_id = instance.get("id", None)
        self._query("/linode/instances/{}/reboot".format(linode_id), method="POST")
        return self._wait_for_linode_status(linode_id, "running")
    def show_instance(self, name):
        """Return a summary dict (id/image/name/size/state/IPs) for one node."""
        instance = self._get_linode_by_name(name)
        linode_id = instance.get("id", None)
        public_ips, private_ips = self._get_ips(linode_id)
        return {
            "id": instance["id"],
            "image": instance["image"],
            "name": instance["label"],
            "size": instance["type"],
            "state": instance["status"],
            "public_ips": public_ips,
            "private_ips": private_ips,
        }
    def show_pricing(self, kwargs=None):
        """
        Return hourly/daily/weekly/monthly/yearly pricing for the instance
        type configured in the named profile.
        """
        profile = __opts__["profiles"].get(kwargs["profile"], {})
        if not profile:
            raise SaltCloudNotFound("The requested profile was not found.")
        # Make sure the profile belongs to Linode
        provider = profile.get("provider", "0:0")
        comps = provider.split(":")
        if len(comps) < 2 or comps[1] != "linode":
            raise SaltCloudException("The requested profile does not belong to Linode.")
        instance_type = self._get_linode_type(profile["size"])
        pricing = instance_type.get("price", {})
        per_hour = pricing["hourly"]
        per_day = per_hour * 24
        per_week = per_day * 7
        per_month = pricing["monthly"]
        per_year = per_month * 12
        return {
            profile["profile"]: {
                "per_hour": per_hour,
                "per_day": per_day,
                "per_week": per_week,
                "per_month": per_month,
                "per_year": per_year,
            }
        }
    def start(self, name):
        """Boot the named instance; no-op (with message) if already running."""
        instance = self._get_linode_by_name(name)
        linode_id = instance.get("id", None)
        if instance["status"] == "running":
            return {
                "success": True,
                "action": "start",
                "state": "Running",
                "msg": "Machine already running",
            }
        self._query("/linode/instances/{}/boot".format(linode_id), method="POST")
        self._wait_for_linode_status(linode_id, "running")
        return {
            "success": True,
            "state": "Running",
            "action": "start",
        }
    def stop(self, name):
        """Shut down the named instance; no-op (with message) if offline."""
        instance = self._get_linode_by_name(name)
        linode_id = instance.get("id", None)
        if instance["status"] == "offline":
            return {
                "success": True,
                "action": "stop",
                "state": "Stopped",
                "msg": "Machine already stopped",
            }
        self._query("/linode/instances/{}/shutdown".format(linode_id), method="POST")
        self._wait_for_linode_status(linode_id, "offline")
        return {"success": True, "state": "Stopped", "action": "stop"}
    def _get_linode_by_id(self, linode_id):
        return self._query("/linode/instances/{}".format(linode_id))
    def _get_linode_by_name(self, name):
        # Linear scan over all instances; labels are matched exactly.
        result = self._query("/linode/instances")
        instances = result.get("data", [])
        for instance in instances:
            if instance["label"] == name:
                return instance
        raise SaltCloudNotFound(
            "The specified name, {}, could not be found.".format(name)
        )
    def _list_linodes(self, full=False):
        """Return {label: node_dict}; include the raw API payload when full."""
        result = self._query("/linode/instances")
        instances = result.get("data", [])
        ret = {}
        for instance in instances:
            node = {}
            node["id"] = instance["id"]
            node["image"] = instance["image"]
            node["name"] = instance["label"]
            node["size"] = instance["type"]
            node["state"] = instance["status"]
            public_ips, private_ips = self._get_ips(node["id"])
            node["public_ips"] = public_ips
            node["private_ips"] = private_ips
            if full:
                node["extra"] = instance
            ret[instance["label"]] = node
        return ret
    def _get_linode_type(self, linode_type):
        return self._query("/linode/types/{}".format(linode_type))
    def _get_ips(self, linode_id):
        """Split the instance's IPv4 addresses into (public, private) lists."""
        instance = self._get_linode_by_id(linode_id)
        public = []
        private = []
        for addr in instance.get("ipv4", []):
            if ipaddress.ip_address(addr).is_private:
                private.append(addr)
            else:
                public.append(addr)
        return (public, private)
    def _poll(
        self,
        description,
        getter,
        condition,
        timeout=None,
        poll_interval=None,
    ):
        """
        Return true in handler to signal complete.

        Calls ``getter`` every ``poll_interval`` milliseconds until
        ``condition(result)`` is true, raising SaltCloudException after
        ``timeout`` seconds (default 120).
        """
        if poll_interval is None:
            poll_interval = _get_poll_interval()
        if timeout is None:
            timeout = 120
        # poll_interval is in milliseconds; convert timeout to a retry count.
        times = (timeout * 1000) / poll_interval
        curr = 0
        while True:
            curr += 1
            result = getter()
            if condition(result):
                return True
            elif curr <= times:
                time.sleep(poll_interval / 1000)
                log.info("retrying: polling for %s...", description)
            else:
                raise SaltCloudException(
                    "timed out: polling for {}".format(description)
                )
    def _wait_for_entity_status(
        self, getter, status, entity_name="item", identifier="some", timeout=None
    ):
        return self._poll(
            "{} (id={}) status to be '{}'".format(entity_name, identifier, status),
            getter,
            lambda item: item.get("status") == status,
            timeout=timeout,
        )
    def _wait_for_linode_status(self, linode_id, status, timeout=None):
        return self._wait_for_entity_status(
            lambda: self._get_linode_by_id(linode_id),
            status,
            entity_name="linode",
            identifier=linode_id,
            timeout=timeout,
        )
    def _check_event_status(self, event, desired_status):
        # Fail hard when the event itself reports failure; otherwise report
        # whether it has reached the desired status yet.
        status = event.get("status")
        action = event.get("action")
        entity = event.get("entity")
        if status == "failed":
            raise SaltCloudSystemExit(
                "event {} for {} (id={}) failed".format(
                    action, entity["type"], entity["id"]
                )
            )
        return status == desired_status
    def _wait_for_event(self, action, entity, entity_id, status, timeout=None):
        """
        Page through unseen account events looking for the one matching
        (action, entity, entity_id); once found, poll that single event until
        it reaches ``status``. Returns False if no matching event is seen.
        """
        event_filter = {
            "+order_by": "created",
            "+order": "desc",
            "seen": False,
            "action": action,
            "entity.id": entity_id,
            "entity.type": entity,
        }
        last_event = None
        condition = lambda event: self._check_event_status(event, status)
        while True:
            if last_event is not None:
                event_filter["+gt"] = last_event
            filter_json = json.dumps(event_filter, separators=(",", ":"))
            result = self._query("/account/events", headers={"X-Filter": filter_json})
            events = result.get("data", [])
            if len(events) == 0:
                break
            for event in events:
                event_id = event.get("id")
                event_entity = event.get("entity", None)
                last_event = event_id
                if not event_entity:
                    continue
                if not (
                    event_entity["type"] == entity
                    and event_entity["id"] == entity_id
                    and event.get("action") == action
                ):
                    continue
                if condition(event):
                    return True
                # First matching but unfinished event: poll it directly.
                return self._poll(
                    "event {} to be '{}'".format(event_id, status),
                    lambda: self._query("/account/events/{}".format(event_id)),
                    condition,
                    timeout=timeout,
                )
        return False
    def _get_response_json(self, response):
        # Best-effort JSON decode; returns None for empty/invalid bodies.
        # (The local name shadows the json module only within this function.)
        json = None
        try:
            json = response.json()
        except ValueError:
            pass
        return json
class LinodeAPIv3(LinodeAPI):
    def __init__(self):
        # The v3 API is deprecated; emit the one-time deprecation warning.
        _warn_for_api_v3()
    def _query(
        self,
        action=None,
        command=None,
        args=None,
        method="GET",
        header_dict=None,
        data=None,
        url="https://api.linode.com/",
    ):
        """
        Make a web call to the Linode API.

        APIv3 multiplexes everything through one endpoint: the call is
        selected with the 'api_action' query parameter ("<action>.<command>").
        Returns the decoded response dict; raises on API-reported errors.
        """
        global LASTCALL
        ratelimit_sleep = _get_ratelimit_sleep()
        apikey = _get_api_key()
        if not isinstance(args, dict):
            args = {}
        if "api_key" not in args.keys():
            args["api_key"] = apikey
        if action and "api_action" not in args.keys():
            args["api_action"] = "{}.{}".format(action, command)
        if header_dict is None:
            header_dict = {}
        if method != "POST":
            header_dict["Accept"] = "application/json"
        decode = True
        if method == "DELETE":
            decode = False
        # Crude rate limiting: if the previous call completed within this same
        # second (LASTCALL is a module-level timestamp), sleep before issuing
        # the next request.
        now = int(time.mktime(datetime.datetime.now().timetuple()))
        if LASTCALL >= now:
            time.sleep(ratelimit_sleep)
        result = __utils__["http.query"](
            url,
            method,
            params=args,
            data=data,
            header_dict=header_dict,
            decode=decode,
            decode_type="json",
            text=True,
            status=True,
            # Keep credentials out of the debug logs.
            hide_fields=["api_key", "rootPass"],
            opts=__opts__,
        )
        if "ERRORARRAY" in result["dict"]:
            if result["dict"]["ERRORARRAY"]:
                error_list = []
                for error in result["dict"]["ERRORARRAY"]:
                    msg = error["ERRORMESSAGE"]
                    if msg == "Authentication failed":
                        raise SaltCloudSystemExit(
                            "Linode API Key is expired or invalid"
                        )
                    else:
                        error_list.append(msg)
                raise SaltCloudException(
                    "Linode API reported error(s): {}".format(", ".join(error_list))
                )
        LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
        log.debug("Linode Response Status Code: %s", result["status"])
        return result["dict"]
def avail_images(self):
response = self._query("avail", "distributions")
ret = {}
for item in response["DATA"]:
name = item["LABEL"]
ret[name] = item
return ret
def avail_locations(self):
response = self._query("avail", "datacenters")
ret = {}
for item in response["DATA"]:
name = item["LOCATION"]
ret[name] = item
return ret
def avail_sizes(self):
response = self._query("avail", "LinodePlans")
ret = {}
for item in response["DATA"]:
name = item["LABEL"]
ret[name] = item
return ret
def boot(self, name=None, kwargs=None):
linode_id = kwargs.get("linode_id", None)
config_id = kwargs.get("config_id", None)
check_running = kwargs.get("check_running", True)
if config_id is None:
raise SaltCloudSystemExit("The boot function requires a 'config_id'.")
if linode_id is None:
linode_id = self._get_linode_id_from_name(name)
linode_item = name
else:
linode_item = linode_id
# Check if Linode is running first
if check_running:
status = get_linode(kwargs={"linode_id": linode_id})["STATUS"]
if status == "1":
raise SaltCloudSystemExit(
"Cannot boot Linode {0}. "
+ "Linode {} is already running.".format(linode_item)
)
# Boot the VM and get the JobID from Linode
response = self._query(
"linode", "boot", args={"LinodeID": linode_id, "ConfigID": config_id}
)["DATA"]
boot_job_id = response["JobID"]
if not self._wait_for_job(linode_id, boot_job_id):
log.error("Boot failed for Linode %s.", linode_item)
return False
return True
def clone(self, kwargs=None):
linode_id = kwargs.get("linode_id", None)
datacenter_id = kwargs.get("datacenter_id", kwargs.get("location"))
plan_id = kwargs.get("plan_id", kwargs.get("size"))
required_params = [linode_id, datacenter_id, plan_id]
for item in required_params:
if item is None:
raise SaltCloudSystemExit(
"The clone function requires a 'linode_id', 'datacenter_id', "
"and 'plan_id' to be provided."
)
clone_args = {
"LinodeID": linode_id,
"DatacenterID": datacenter_id,
"PlanID": plan_id,
}
return self._query("linode", "clone", args=clone_args)
    def create(self, vm_):
        """
        Create a single Linode from the profile in ``vm_`` via APIv3 — either
        a fresh deployment or a clone of vm_['clonefrom'] — then create disks
        and a config, boot it, bootstrap it with salt-cloud, and return the
        bootstrap result merged with instance details.

        NOTE(review): this method calls the module-level wrappers clone(),
        get_linode(), get_config_id() and create_config() rather than the
        corresponding self.* methods — confirm those wrappers dispatch back to
        this driver instance.
        """
        name = vm_["name"]
        if not _validate_name(name):
            return False
        __utils__["cloud.fire_event"](
            "event",
            "starting create",
            "salt/cloud/{}/creating".format(name),
            args=__utils__["cloud.filter_event"](
                "creating", vm_, ["name", "profile", "provider", "driver"]
            ),
            sock_dir=__opts__["sock_dir"],
            transport=__opts__["transport"],
        )
        log.info("Creating Cloud VM %s", name)
        data = {}
        kwargs = {"name": name}
        plan_id = None
        size = vm_.get("size")
        if size:
            kwargs["size"] = size
            plan_id = self.get_plan_id(kwargs={"label": size})
        datacenter_id = None
        location = vm_.get("location")
        if location:
            try:
                datacenter_id = self._get_datacenter_id(location)
            except KeyError:
                # Linode's default datacenter is Dallas, but we still have to set one to
                # use the create function from Linode's API. Dallas's datacenter id is 2.
                datacenter_id = 2
        clonefrom_name = vm_.get("clonefrom")
        cloning = True if clonefrom_name else False
        if cloning:
            linode_id = self._get_linode_id_from_name(clonefrom_name)
            clone_source = get_linode(kwargs={"linode_id": linode_id})
            kwargs = {
                "clonefrom": clonefrom_name,
                "image": "Clone of {}".format(clonefrom_name),
            }
            # Fall back to the clone source's plan/datacenter when the profile
            # does not override them.
            if size is None:
                size = clone_source["TOTALRAM"]
                kwargs["size"] = size
                plan_id = clone_source["PLANID"]
            if location is None:
                datacenter_id = clone_source["DATACENTERID"]
            # Create new Linode from cloned Linode
            try:
                result = clone(
                    kwargs={
                        "linode_id": linode_id,
                        "datacenter_id": datacenter_id,
                        "plan_id": plan_id,
                    }
                )
            except Exception as err:  # pylint: disable=broad-except
                log.error(
                    "Error cloning '%s' on Linode.\n\n"
                    "The following exception was thrown by Linode when trying to "
                    "clone the specified machine:\n%s",
                    clonefrom_name,
                    err,
                    exc_info_on_loglevel=logging.DEBUG,
                )
                return False
        else:
            kwargs["image"] = vm_["image"]
            # Create Linode
            try:
                result = self._query(
                    "linode",
                    "create",
                    args={"PLANID": plan_id, "DATACENTERID": datacenter_id},
                )
            except Exception as err:  # pylint: disable=broad-except
                log.error(
                    "Error creating %s on Linode\n\n"
                    "The following exception was thrown by Linode when trying to "
                    "run the initial deployment:\n%s",
                    name,
                    err,
                    exc_info_on_loglevel=logging.DEBUG,
                )
                return False
        if "ERRORARRAY" in result:
            for error_data in result["ERRORARRAY"]:
                log.error(
                    "Error creating %s on Linode\n\n"
                    "The Linode API returned the following: %s\n",
                    name,
                    error_data["ERRORMESSAGE"],
                )
                return False
        __utils__["cloud.fire_event"](
            "event",
            "requesting instance",
            "salt/cloud/{}/requesting".format(name),
            args=__utils__["cloud.filter_event"](
                "requesting", vm_, ["name", "profile", "provider", "driver"]
            ),
            sock_dir=__opts__["sock_dir"],
            transport=__opts__["transport"],
        )
        node_id = self._clean_data(result)["LinodeID"]
        data["id"] = node_id
        if not self._wait_for_status(
            node_id, status=(self._get_status_id_by_name("brand_new"))
        ):
            log.error(
                "Error creating %s on LINODE\n\nwhile waiting for initial ready status",
                name,
                exc_info_on_loglevel=logging.DEBUG,
            )
        # Update the Linode's Label to reflect the given VM name
        self._update_linode(node_id, update_args={"Label": name})
        log.debug("Set name for %s - was linode%s.", name, node_id)
        # Add private IP address if requested
        private_ip_assignment = _get_private_ip(vm_)
        if private_ip_assignment:
            self._create_private_ip(node_id)
        # Define which ssh_interface to use
        ssh_interface = _get_ssh_interface(vm_)
        # If ssh_interface is set to use private_ips, but assign_private_ip
        # wasn't set to True, let's help out and create a private ip.
        if ssh_interface == "private_ips" and private_ip_assignment is False:
            self._create_private_ip(node_id)
            private_ip_assignment = True
        if cloning:
            config_id = get_config_id(kwargs={"linode_id": node_id})["config_id"]
        else:
            # Create disks and get ids
            log.debug("Creating disks for %s", name)
            root_disk_id = self._create_disk_from_distro(vm_, node_id)["DiskID"]
            swap_disk_id = self._create_swap_disk(vm_, node_id)["DiskID"]
            # Create a ConfigID using disk ids
            config_id = create_config(
                kwargs={
                    "name": name,
                    "linode_id": node_id,
                    "root_disk_id": root_disk_id,
                    "swap_disk_id": swap_disk_id,
                }
            )["ConfigID"]
        # Boot the Linode
        self.boot(
            kwargs={
                "linode_id": node_id,
                "config_id": config_id,
                "check_running": False,
            }
        )
        node_data = get_linode(kwargs={"linode_id": node_id})
        ips = self._get_ips(node_id)
        state = int(node_data["STATUS"])
        data["image"] = kwargs["image"]
        data["name"] = name
        data["size"] = size
        data["state"] = self._get_status_descr_by_id(state)
        data["private_ips"] = ips["private_ips"]
        data["public_ips"] = ips["public_ips"]
        # Pass the correct IP address to the bootstrap ssh_host key
        if ssh_interface == "private_ips":
            vm_["ssh_host"] = data["private_ips"][0]
        else:
            vm_["ssh_host"] = data["public_ips"][0]
        # If a password wasn't supplied in the profile or provider config, set it now.
        vm_["password"] = _get_password(vm_)
        # Make public_ips and private_ips available to the bootstrap script.
        vm_["public_ips"] = ips["public_ips"]
        vm_["private_ips"] = ips["private_ips"]
        # Send event that the instance has booted.
        __utils__["cloud.fire_event"](
            "event",
            "waiting for ssh",
            "salt/cloud/{}/waiting_for_ssh".format(name),
            sock_dir=__opts__["sock_dir"],
            args={"ip_address": vm_["ssh_host"]},
            transport=__opts__["transport"],
        )
        # Bootstrap!
        ret = __utils__["cloud.bootstrap"](vm_, __opts__)
        ret.update(data)
        log.info("Created Cloud VM '%s'", name)
        log.debug("'%s' VM creation details:\n%s", name, pprint.pformat(data))
        __utils__["cloud.fire_event"](
            "event",
            "created instance",
            "salt/cloud/{}/created".format(name),
            args=__utils__["cloud.filter_event"](
                "created", vm_, ["name", "profile", "provider", "driver"]
            ),
            sock_dir=__opts__["sock_dir"],
            transport=__opts__["transport"],
        )
        return ret
def create_config(self, kwargs=None):
name = kwargs.get("name", None)
linode_id = kwargs.get("linode_id", None)
root_disk_id = kwargs.get("root_disk_id", None)
swap_disk_id = kwargs.get("swap_disk_id", None)
data_disk_id = kwargs.get("data_disk_id", None)
kernel_id = kwargs.get("kernel_id", None)
if kernel_id is None:
# 138 appears to always be the latest 64-bit kernel for Linux
kernel_id = 138
required_params = [name, linode_id, root_disk_id, swap_disk_id]
for item in required_params:
if item is None:
raise SaltCloudSystemExit(
"The create_config functions requires a 'name', 'linode_id', "
"'root_disk_id', and 'swap_disk_id'."
)
if kernel_id is None:
# 138 appears to always be the latest 64-bit kernel for Linux
kernel_id = 138
if not linode_id:
instance = self._get_linode_by_name(name)
linode_id = instance.get("id", None)
disklist = "{},{}".format(root_disk_id, swap_disk_id)
if data_disk_id is not None:
disklist = "{},{},{}".format(root_disk_id, swap_disk_id, data_disk_id)
config_args = {
"LinodeID": int(linode_id),
"KernelID": int(kernel_id),
"Label": name,
"DiskList": disklist,
}
result = self._query("linode", "config.create", args=config_args)
return result.get("DATA", None)
def _create_disk_from_distro(self, vm_, linode_id):
kwargs = {}
swap_size = _get_swap_size(vm_)
pub_key = _get_ssh_key(vm_)
root_password = _get_password(vm_)
if pub_key:
kwargs.update({"rootSSHKey": pub_key})
if root_password:
kwargs.update({"rootPass": root_password})
else:
raise SaltCloudConfigError("The Linode driver requires a password.")
kwargs.update(
{
"LinodeID": linode_id,
"DistributionID": self._get_distribution_id(vm_),
"Label": vm_["name"],
"Size": self._get_disk_size(vm_, swap_size, linode_id),
}
)
result = self._query("linode", "disk.createfromdistribution", args=kwargs)
return self._clean_data(result)
def _create_swap_disk(self, vm_, linode_id, swap_size=None):
r"""
Creates the disk for the specified Linode.
vm\_
The VM profile to create the swap disk for.
linode_id
The ID of the Linode to create the swap disk for.
swap_size
The size of the disk, in MB.
"""
kwargs = {}
if not swap_size:
swap_size = _get_swap_size(vm_)
kwargs.update(
{
"LinodeID": linode_id,
"Label": vm_["name"],
"Type": "swap",
"Size": swap_size,
}
)
result = self._query("linode", "disk.create", args=kwargs)
return self._clean_data(result)
def _create_data_disk(self, vm_=None, linode_id=None, data_size=None):
kwargs = {}
kwargs.update(
{
"LinodeID": linode_id,
"Label": vm_["name"] + "_data",
"Type": "ext4",
"Size": data_size,
}
)
result = self._query("linode", "disk.create", args=kwargs)
return self._clean_data(result)
def _create_private_ip(self, linode_id):
r"""
Creates a private IP for the specified Linode.
linode_id
The ID of the Linode to create the IP address for.
"""
kwargs = {"LinodeID": linode_id}
result = self._query("linode", "ip.addprivate", args=kwargs)
return self._clean_data(result)
    def destroy(self, name):
        """
        Destroy the named Linode via APIv3, firing the salt-cloud
        'destroying'/'destroyed' events around the delete and cleaning up the
        minion cache entry.

        name
            The label of the Linode to destroy.

        Returns the raw API response of the 'linode.delete' call.
        """
        __utils__["cloud.fire_event"](
            "event",
            "destroying instance",
            "salt/cloud/{}/destroying".format(name),
            args={"name": name},
            sock_dir=__opts__["sock_dir"],
            transport=__opts__["transport"],
        )
        linode_id = self._get_linode_id_from_name(name)
        # skipChecks is passed so the API proceeds with the delete —
        # presumably bypassing attached-disk safety checks; confirm against
        # the APIv3 linode.delete documentation.
        response = self._query(
            "linode", "delete", args={"LinodeID": linode_id, "skipChecks": True}
        )
        __utils__["cloud.fire_event"](
            "event",
            "destroyed instance",
            "salt/cloud/{}/destroyed".format(name),
            args={"name": name},
            sock_dir=__opts__["sock_dir"],
            transport=__opts__["transport"],
        )
        if __opts__.get("update_cachedir", False) is True:
            __utils__["cloud.delete_minion_cachedir"](
                name, _get_active_provider_name().split(":")[0], __opts__
            )
        return response
def _decode_linode_plan_label(self, label):
"""
Attempts to decode a user-supplied Linode plan label
into the format in Linode API output
label
The label, or name, of the plan to decode.
Example:
`Linode 2048` will decode to `Linode 2GB`
"""
sizes = self.avail_sizes()
if label not in sizes:
if "GB" in label:
raise SaltCloudException(
"Invalid Linode plan ({}) specified - call avail_sizes() for all"
" available options".format(label)
)
else:
plan = label.split()
if len(plan) != 2:
raise SaltCloudException(
"Invalid Linode plan ({}) specified - call avail_sizes() for"
" all available options".format(label)
)
plan_type = plan[0]
try:
plan_size = int(plan[1])
except TypeError:
plan_size = 0
log.debug(
"Failed to decode Linode plan label in Cloud Profile: %s", label
)
if plan_type == "Linode" and plan_size == 1024:
plan_type = "Nanode"
plan_size = plan_size / 1024
new_label = "{} {}GB".format(plan_type, plan_size)
if new_label not in sizes:
raise SaltCloudException(
"Invalid Linode plan ({}) specified - call avail_sizes() for"
" all available options".format(new_label)
)
log.warning(
"An outdated Linode plan label was detected in your Cloud "
"Profile (%s). Please update the profile to use the new "
"label format (%s) for the requested Linode plan size.",
label,
new_label,
)
label = new_label
return sizes[label]["PLANID"]
def get_config_id(self, kwargs=None):
name = kwargs.get("name", None)
linode_id = kwargs.get("linode_id", None)
if name is None and linode_id is None:
raise SaltCloudSystemExit(
"The get_config_id function requires either a 'name' or a 'linode_id' "
"to be provided."
)
if linode_id is None:
linode_id = self._get_linode_id_from_name(name)
response = self._query("linode", "config.list", args={"LinodeID": linode_id})[
"DATA"
]
config_id = {"config_id": response[0]["ConfigID"]}
return config_id
def _get_datacenter_id(self, location):
"""
Returns the Linode Datacenter ID.
location
The location, or name, of the datacenter to get the ID from.
"""
return avail_locations()[location]["DATACENTERID"]
def _get_disk_size(self, vm_, swap, linode_id):
r"""
Returns the size of of the root disk in MB.
vm\_
The VM to get the disk size for.
"""
disk_size = get_linode(kwargs={"linode_id": linode_id})["TOTALHD"]
return config.get_cloud_config_value(
"disk_size", vm_, __opts__, default=disk_size - swap
)
def _get_distribution_id(self, vm_):
r"""
Returns the distribution ID for a VM
vm\_
The VM to get the distribution ID for
"""
distributions = self._query("avail", "distributions")["DATA"]
vm_image_name = config.get_cloud_config_value("image", vm_, __opts__)
distro_id = ""
for distro in distributions:
if vm_image_name == distro["LABEL"]:
distro_id = distro["DISTRIBUTIONID"]
return distro_id
if not distro_id:
raise SaltCloudNotFound(
"The DistributionID for the '{}' profile could not be found.\nThe '{}'"
" instance could not be provisioned. The following distributions are"
" available:\n{}".format(
vm_image_name,
vm_["name"],
pprint.pprint(
sorted(
distro["LABEL"].encode(__salt_system_encoding__)
for distro in distributions
)
),
)
)
def get_plan_id(self, kwargs=None):
label = kwargs.get("label", None)
if label is None:
raise SaltCloudException("The get_plan_id function requires a 'label'.")
return self._decode_linode_plan_label(label)
def _get_ips(self, linode_id=None):
"""
Returns public and private IP addresses.
linode_id
Limits the IP addresses returned to the specified Linode ID.
"""
if linode_id:
ips = self._query("linode", "ip.list", args={"LinodeID": linode_id})
else:
ips = self._query("linode", "ip.list")
ips = ips["DATA"]
ret = {}
for item in ips:
node_id = str(item["LINODEID"])
if item["ISPUBLIC"] == 1:
key = "public_ips"
else:
key = "private_ips"
if ret.get(node_id) is None:
ret.update({node_id: {"public_ips": [], "private_ips": []}})
ret[node_id][key].append(item["IPADDRESS"])
# If linode_id was specified, only return the ips, and not the
# dictionary based on the linode ID as a key.
if linode_id:
_all_ips = {"public_ips": [], "private_ips": []}
matching_id = ret.get(str(linode_id))
if matching_id:
_all_ips["private_ips"] = matching_id["private_ips"]
_all_ips["public_ips"] = matching_id["public_ips"]
ret = _all_ips
return ret
    def _wait_for_job(self, linode_id, job_id, timeout=300, quiet=True):
        """
        Wait for a Job to return.

        Polls linode job.list every 5 seconds until the job reports
        HOST_SUCCESS == 1 or the timeout window elapses.

        linode_id
            The ID of the Linode to wait on. Required.

        job_id
            The ID of the job to wait for.

        timeout
            The amount of time (seconds) to wait for a status to update.

        quiet
            Log status updates to debug logs when True. Otherwise, logs to info.

        Returns True when the job completed, False on timeout.
        """
        interval = 5  # seconds between polls
        iterations = int(timeout / interval)
        for i in range(0, iterations):
            jobs_result = self._query(
                "linode", "job.list", args={"LinodeID": linode_id}
            )["DATA"]
            # NOTE(review): only the first list entry is inspected — assumes
            # the API returns the newest job first; confirm against the API.
            if (
                jobs_result[0]["JOBID"] == job_id
                and jobs_result[0]["HOST_SUCCESS"] == 1
            ):
                return True
            time.sleep(interval)
            log.log(
                logging.INFO if not quiet else logging.DEBUG,
                "Still waiting on Job %s for Linode %s.",
                job_id,
                linode_id,
            )
        # Timed out without observing job completion.
        return False
    def _wait_for_status(self, linode_id, status=None, timeout=300, quiet=True):
        """
        Wait for a certain status from Linode.

        Polls get_linode every 5 seconds until the Linode reports the wanted
        status code or the timeout window elapses.

        linode_id
            The ID of the Linode to wait on. Required.

        status
            The status (numeric code) to look for. Defaults to the code for
            "brand_new".

        timeout
            The amount of time (seconds) to wait for a status to update.

        quiet
            Log status updates to debug logs when True. Otherwise, logs to info.

        Returns True when the status was reached, False on timeout.
        """
        if status is None:
            status = self._get_status_id_by_name("brand_new")
        # Human-readable name of the status we are waiting for, used in logs.
        status_desc_waiting = self._get_status_descr_by_id(status)
        interval = 5  # seconds between polls
        iterations = int(timeout / interval)
        for i in range(0, iterations):
            result = get_linode(kwargs={"linode_id": linode_id})
            if result["STATUS"] == status:
                return True
            status_desc_result = self._get_status_descr_by_id(result["STATUS"])
            time.sleep(interval)
            log.log(
                logging.INFO if not quiet else logging.DEBUG,
                "Status for Linode %s is '%s', waiting for '%s'.",
                linode_id,
                status_desc_result,
                status_desc_waiting,
            )
        # Timed out without observing the desired status.
        return False
def _list_linodes(self, full=False):
nodes = self._query("linode", "list")["DATA"]
ips = self._get_ips()
ret = {}
for node in nodes:
this_node = {}
linode_id = str(node["LINODEID"])
this_node["id"] = linode_id
this_node["image"] = node["DISTRIBUTIONVENDOR"]
this_node["name"] = node["LABEL"]
this_node["size"] = node["TOTALRAM"]
state = int(node["STATUS"])
this_node["state"] = self._get_status_descr_by_id(state)
for key, val in ips.items():
if key == linode_id:
this_node["private_ips"] = val["private_ips"]
this_node["public_ips"] = val["public_ips"]
if full:
this_node["extra"] = node
ret[node["LABEL"]] = this_node
return ret
    def list_nodes(self):
        """
        Return a brief listing of the Linodes on the account.
        """
        return self._list_linodes()
    def list_nodes_full(self):
        """
        Return a full listing of the Linodes on the account, including the
        raw API record for each node.
        """
        return self._list_linodes(full=True)
def list_nodes_min(self):
ret = {}
nodes = self._query("linode", "list")["DATA"]
for node in nodes:
name = node["LABEL"]
ret[name] = {
"id": str(node["LINODEID"]),
"state": self._get_status_descr_by_id(int(node["STATUS"])),
}
return ret
def show_instance(self, name):
node_id = self._get_linode_id_from_name(name)
node_data = get_linode(kwargs={"linode_id": node_id})
ips = self._get_ips(node_id)
state = int(node_data["STATUS"])
return {
"id": node_data["LINODEID"],
"image": node_data["DISTRIBUTIONVENDOR"],
"name": node_data["LABEL"],
"size": node_data["TOTALRAM"],
"state": self._get_status_descr_by_id(state),
"private_ips": ips["private_ips"],
"public_ips": ips["public_ips"],
}
def show_pricing(self, kwargs=None):
profile = __opts__["profiles"].get(kwargs["profile"], {})
if not profile:
raise SaltCloudNotFound("The requested profile was not found.")
# Make sure the profile belongs to Linode
provider = profile.get("provider", "0:0")
comps = provider.split(":")
if len(comps) < 2 or comps[1] != "linode":
raise SaltCloudException("The requested profile does not belong to Linode.")
plan_id = self.get_plan_id(kwargs={"label": profile["size"]})
response = self._query("avail", "linodeplans", args={"PlanID": plan_id})[
"DATA"
][0]
ret = {}
ret["per_hour"] = response["HOURLY"]
ret["per_day"] = ret["per_hour"] * 24
ret["per_week"] = ret["per_day"] * 7
ret["per_month"] = response["PRICE"]
ret["per_year"] = ret["per_month"] * 12
return {profile["profile"]: ret}
def _update_linode(self, linode_id, update_args=None):
update_args.update({"LinodeID": linode_id})
result = self._query("linode", "update", args=update_args)
return self._clean_data(result)
def _get_linode_id_from_name(self, name):
node = self._get_linode_by_name(name)
return node.get("LINODEID", None)
def _get_linode_by_name(self, name):
nodes = self._query("linode", "list")["DATA"]
for node in nodes:
if name == node["LABEL"]:
return node
raise SaltCloudNotFound(
"The specified name, {}, could not be found.".format(name)
)
def _get_linode_by_id(self, linode_id):
result = self._query("linode", "list", args={"LinodeID": linode_id})
return result["DATA"][0]
    def start(self, name):
        """
        Boot the named Linode and wait for the boot job to finish.

        Returns a result dict with ``success``, ``action``, and (on success)
        ``state`` keys.
        """
        node_id = self._get_linode_id_from_name(name)
        node = get_linode(kwargs={"linode_id": node_id})
        # STATUS == 1: already running (matches the "Running" state returned).
        if node["STATUS"] == 1:
            return {
                "success": True,
                "action": "start",
                "state": "Running",
                "msg": "Machine already running",
            }
        response = self._query("linode", "boot", args={"LinodeID": node_id})["DATA"]
        if self._wait_for_job(node_id, response["JobID"]):
            return {"state": "Running", "action": "start", "success": True}
        else:
            return {"action": "start", "success": False}
def stop(self, name):
node_id = self._get_linode_id_from_name(name)
node = get_linode(kwargs={"linode_id": node_id})
if node["STATUS"] == 2:
return {
"success": True,
"state": "Stopped",
"msg": "Machine already stopped",
}
response = self._query("linode", "shutdown", args={"LinodeID": node_id})["DATA"]
if self._wait_for_job(node_id, response["JobID"]):
return {"state": "Stopped", "action": "stop", "success": True}
return {"action": "stop", "success": False}
    def reboot(self, name):
        """
        Reboot the named Linode and wait for the reboot job to complete.

        Returns the cleaned API response on success, or False when the job
        does not finish within the polling window.
        """
        node_id = self._get_linode_id_from_name(name)
        response = self._query("linode", "reboot", args={"LinodeID": node_id})
        data = self._clean_data(response)
        reboot_jid = data["JobID"]
        if not self._wait_for_job(node_id, reboot_jid):
            log.error("Reboot failed for %s.", name)
            return False
        return data
def _clean_data(self, api_response):
"""
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
"""
data = {}
data.update(api_response["DATA"])
if not data:
response_data = api_response["DATA"]
data.update(response_data)
return data
    def _get_status_descr_by_id(self, status_id):
        """
        Return linode status description by numeric status ID.

        status_id
            linode VM status ID
        """
        for status_name, status_data in LINODE_STATUS.items():
            if status_data["code"] == int(status_id):
                return status_data["descr"]
        # NOTE(review): LINODE_STATUS appears to be keyed by status *name*,
        # so this numeric-keyed fallback presumably yields None — confirm.
        return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(self, status_name):
"""
Return linode status description by internalstatus name
status_name
internal linode VM status name
"""
return LINODE_STATUS.get(status_name, {}).get("code", None)
def avail_images(call=None):
    """
    Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    """
    if call == "action":
        raise SaltCloudException(
            "The avail_images function must be called with -f or --function."
        )
    cloud = _get_cloud_interface()
    return cloud.avail_images()
def avail_locations(call=None):
    """
    Return available Linode datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    """
    if call == "action":
        raise SaltCloudException(
            "The avail_locations function must be called with -f or --function."
        )
    cloud = _get_cloud_interface()
    return cloud.avail_locations()
def avail_sizes(call=None):
    """
    Return available Linode sizes.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    """
    if call == "action":
        # Fix: the message previously named avail_locations (copy-paste bug).
        raise SaltCloudException(
            "The avail_sizes function must be called with -f or --function."
        )
    return _get_cloud_interface().avail_sizes()
def boot(name=None, kwargs=None, call=None):
    """
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    """
    if name is None and call == "action":
        raise SaltCloudSystemExit("The boot action requires a 'name'.")
    if kwargs is None:
        # Fix: kwargs defaults to None, so the .get() calls below would
        # crash when the CLI supplied no keyword arguments.
        kwargs = {}
    linode_id = kwargs.get("linode_id", None)
    config_id = kwargs.get("config_id", None)
    if call == "function":
        name = kwargs.get("name", None)
        if name is None and linode_id is None:
            raise SaltCloudSystemExit(
                "The boot function requires either a 'name' or a 'linode_id'."
            )
    return _get_cloud_interface().boot(name=name, kwargs=kwargs)
def clone(kwargs=None, call=None):
    """
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    location
        The location of the new Linode. Required.

    size
        The size of the new Linode (must be greater than or equal to the clone source). Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required for APIv3 usage.
        Deprecated. Use ``location`` instead.

    plan_id
        The ID of the plan (size) of the Linode. Required for APIv3 usage.
        Deprecated. Use ``size`` instead.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    """
    if call == "action":
        raise SaltCloudSystemExit(
            "The clone function must be called with -f or --function."
        )
    cloud = _get_cloud_interface()
    return cloud.clone(kwargs=kwargs)
def create(vm_):
    """
    Create a single Linode VM.
    """
    try:
        # Check for required profile parameters before sending any API calls.
        if (
            vm_["profile"]
            and config.is_profile_configured(
                __opts__,
                _get_active_provider_name() or "linode",
                vm_["profile"],
                vm_=vm_,
            )
            is False
        ):
            return False
    except AttributeError:
        # vm_ may not expose the attributes probed above (e.g. driver called
        # outside the usual profile flow); fall through and attempt creation.
        pass
    return _get_cloud_interface().create(vm_)
def create_config(kwargs=None, call=None):
    """
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    """
    if call == "action":
        raise SaltCloudSystemExit(
            "The create_config function must be called with -f or --function."
        )
    cloud = _get_cloud_interface()
    return cloud.create_config(kwargs=kwargs)
def destroy(name, call=None):
    """
    Destroys a Linode by name.

    name
        The name of VM to be be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    """
    if call == "function":
        raise SaltCloudException(
            "The destroy action must be called with -d, --destroy, -a or --action."
        )
    cloud = _get_cloud_interface()
    return cloud.destroy(name)
def get_config_id(kwargs=None, call=None):
    """
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    """
    if call == "action":
        raise SaltCloudException(
            "The get_config_id function must be called with -f or --function."
        )
    cloud = _get_cloud_interface()
    return cloud.get_config_id(kwargs=kwargs)
def get_linode(kwargs=None, call=None):
    """
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    """
    if call == "action":
        raise SaltCloudSystemExit(
            "The get_linode function must be called with -f or --function."
        )
    cloud = _get_cloud_interface()
    return cloud.get_linode(kwargs=kwargs)
def get_plan_id(kwargs=None, call=None):
    """
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Nanode 1GB"
        salt-cloud -f get_plan_id linode label="Linode 2GB"
    """
    if call == "action":
        # Fix: the message previously named show_instance (copy-paste bug).
        raise SaltCloudException(
            "The get_plan_id function must be called with -f or --function."
        )
    return _get_cloud_interface().get_plan_id(kwargs=kwargs)
def list_nodes(call=None):
    """
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    """
    if call == "action":
        raise SaltCloudException(
            "The list_nodes function must be called with -f or --function."
        )
    cloud = _get_cloud_interface()
    return cloud.list_nodes()
def list_nodes_full(call=None):
    """
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    """
    if call == "action":
        raise SaltCloudException(
            "The list_nodes_full function must be called with -f or --function."
        )
    cloud = _get_cloud_interface()
    return cloud.list_nodes_full()
def list_nodes_min(call=None):
    """
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    """
    if call == "action":
        raise SaltCloudSystemExit(
            "The list_nodes_min function must be called with -f or --function."
        )
    cloud = _get_cloud_interface()
    return cloud.list_nodes_min()
def list_nodes_select(call=None):
    """
    Return a list of the VMs that are on the provider, with select fields.
    """
    # Unlike the other list_* wrappers, the call-type validation is delegated
    # to the cloud interface implementation, which receives ``call`` directly.
    return _get_cloud_interface().list_nodes_select(call)
def reboot(name, call=None):
    """
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    """
    if call != "action":
        # Fix: the message previously named show_instance (copy-paste bug).
        raise SaltCloudException(
            "The reboot action must be called with -a or --action."
        )
    return _get_cloud_interface().reboot(name)
def show_instance(name, call=None):
    """
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    """
    if call != "action":
        raise SaltCloudException(
            "The show_instance action must be called with -a or --action."
        )
    cloud = _get_cloud_interface()
    return cloud.show_instance(name)
def show_pricing(kwargs=None, call=None):
    """
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    """
    if call != "function":
        # Fix: the message previously named show_instance (copy-paste bug).
        raise SaltCloudException(
            "The show_pricing function must be called with -f or --function."
        )
    return _get_cloud_interface().show_pricing(kwargs=kwargs)
def start(name, call=None):
    """
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    """
    if call != "action":
        raise SaltCloudException("The start action must be called with -a or --action.")
    return _get_cloud_interface().start(name)
def stop(name, call=None):
    """
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    """
    if call != "action":
        raise SaltCloudException("The stop action must be called with -a or --action.")
    cloud = _get_cloud_interface()
    return cloud.stop(name)
import logging
import os
import pprint
import time
import salt.config as config
import salt.utils.cloud
import salt.utils.files
import salt.utils.stringutils
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout,
SaltCloudNotFound,
SaltCloudSystemExit,
)
from salt.utils.versions import LooseVersion
try:
# pylint: disable=no-name-in-module
import profitbricks
from profitbricks.client import (
ProfitBricksService,
Server,
NIC,
Volume,
FirewallRule,
IPBlock,
Datacenter,
LoadBalancer,
LAN,
PBNotFoundError,
PBError,
)
# pylint: enable=no-name-in-module
HAS_PROFITBRICKS = True
except ImportError:
HAS_PROFITBRICKS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = "profitbricks"
# Only load in this module if the ProfitBricks configurations are in place
def __virtual__():
    """
    Load this driver only when a ProfitBricks provider is configured and the
    SDK dependency is available.
    """
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def _get_active_provider_name():
    """
    Return the active provider name.

    Newer Salt wraps ``__active_provider_name__`` in an object exposing
    ``.value()``; older versions set a plain string, in which case the
    AttributeError path returns the dunder as-is.
    """
    try:
        return __active_provider_name__.value()
    except AttributeError:
        return __active_provider_name__
def get_configured_provider():
    """
    Return the first configured ProfitBricks provider instance.
    """
    provider_name = _get_active_provider_name() or __virtualname__
    return config.is_provider_configured(
        __opts__,
        provider_name,
        ("username", "password", "datacenter_id"),
    )
def version_compatible(version):
    """
    Return True when the installed profitbricks SDK's API_VERSION is at
    least ``version`` (compared as loose version strings).
    """
    return LooseVersion(profitbricks.API_VERSION) >= LooseVersion(version)
def get_dependencies():
    """
    Warn if dependencies are not met.

    Delegates to Salt's driver dependency checker; HAS_PROFITBRICKS reflects
    whether the profitbricks SDK imported successfully at module load.
    """
    return config.check_driver_dependencies(
        __virtualname__, {"profitbricks": HAS_PROFITBRICKS}
    )
def get_conn():
    """
    Return a ProfitBricksService connection built from the configured
    provider credentials.
    """
    provider = get_configured_provider()
    username = config.get_cloud_config_value(
        "username", provider, __opts__, search_global=False
    )
    password = config.get_cloud_config_value(
        "password", provider, __opts__, search_global=False
    )
    return ProfitBricksService(username=username, password=password)
def avail_locations(call=None):
    """
    Return a dict of all available VM locations on the cloud provider with
    relevant data, keyed by region and then location.
    """
    if call == "action":
        # Fix: the message previously named avail_images (copy-paste bug).
        raise SaltCloudSystemExit(
            "The avail_locations function must be called with "
            "-f or --function, or with the --list-locations option"
        )
    ret = {}
    conn = get_conn()
    for item in conn.list_locations()["items"]:
        # IDs look like "<region>/<location>", e.g. "us/las".
        reg, loc = item["id"].split("/")
        location = {"id": item["id"]}
        if reg not in ret:
            ret[reg] = {}
        ret[reg][loc] = location
    return ret
def avail_images(call=None):
    """
    Return a list of the images that are on the provider, keyed by name.
    """
    if call == "action":
        raise SaltCloudSystemExit(
            "The avail_images function must be called with "
            "-f or --function, or with the --list-images option"
        )
    conn = get_conn()
    ret = {}
    for item in conn.list_images()["items"]:
        image = dict(item["properties"])
        image["id"] = item["id"]
        ret[image["name"]] = image
    return ret
def list_images(call=None, kwargs=None):
    """
    List all the images with alias by location

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_images my-profitbricks-config location=us/las
    """
    if call != "function":
        raise SaltCloudSystemExit(
            "The list_images function must be called with -f or --function."
        )
    if not version_compatible("4.0"):
        raise SaltCloudNotFound(
            "The 'image_alias' feature requires the profitbricks SDK v4.0.0 or greater."
        )
    if kwargs is None:
        # Fix: kwargs defaults to None, so kwargs.get() below would crash
        # when no CLI keyword arguments were supplied.
        kwargs = {}
    ret = {}
    conn = get_conn()
    if kwargs.get("location") is not None:
        item = conn.get_location(kwargs.get("location"), 3)
        ret[item["id"]] = {"image_alias": item["properties"]["imageAliases"]}
        return ret
    for item in conn.list_locations(3)["items"]:
        ret[item["id"]] = {"image_alias": item["properties"]["imageAliases"]}
    return ret
def avail_sizes(call=None):
    """
    Return a dict of all available VM sizes on the cloud provider with
    relevant data. Latest version can be found at:
    """
    if call == "action":
        raise SaltCloudSystemExit(
            "The avail_sizes function must be called with "
            "-f or --function, or with the --list-sizes option"
        )
    # (name, id, ram MB, cores) — every plan ships with a 50 GB disk.
    plans = [
        ("Micro Instance", "1", 1024, 1),
        ("Small Instance", "2", 2048, 1),
        ("Medium Instance", "3", 4096, 2),
        ("Large Instance", "4", 7168, 4),
        ("Extra Large Instance", "5", 14336, 8),
        ("Memory Intensive Instance Medium", "6", 28672, 4),
        ("Memory Intensive Instance Large", "7", 57344, 8),
    ]
    return {
        name: {"id": plan_id, "ram": ram, "disk": 50, "cores": cores}
        for name, plan_id, ram, cores in plans
    }
def get_size(vm_):
    """
    Return the VM's size object, matched by plan id or plan name; default to
    "Small Instance" when no size is configured.
    """
    vm_size = config.get_cloud_config_value("size", vm_, __opts__)
    sizes = avail_sizes()
    if not vm_size:
        return sizes["Small Instance"]
    wanted = str(vm_size)
    for name, size in sizes.items():
        if wanted in (str(size["id"]), str(name)):
            return size
    raise SaltCloudNotFound(
        "The specified size, '{}', could not be found.".format(vm_size)
    )
def get_datacenter_id():
    """
    Return datacenter ID from provider configuration.

    Validates the configured ID against the API; re-raises the underlying
    ``PBNotFoundError`` when the datacenter does not exist.
    """
    datacenter_id = config.get_cloud_config_value(
        "datacenter_id", get_configured_provider(), __opts__, search_global=False
    )
    conn = get_conn()
    try:
        # Probe the API so a bad configuration fails loudly here rather than
        # deeper inside a later provisioning call.
        conn.get_datacenter(datacenter_id=datacenter_id)
    except PBNotFoundError:
        log.error("Failed to get datacenter: %s", datacenter_id)
        raise
    return datacenter_id
def list_loadbalancers(call=None):
    """
    Return a list of the loadbalancers that are on the provider, keyed by
    loadbalancer name.
    """
    if call == "action":
        # Fix: the message previously named avail_images (copy-paste bug).
        raise SaltCloudSystemExit(
            "The list_loadbalancers function must be called with "
            "-f or --function, or with the --list-loadbalancers option"
        )
    ret = {}
    conn = get_conn()
    datacenter = get_datacenter(conn)
    for item in conn.list_loadbalancers(datacenter["id"])["items"]:
        lb = {"id": item["id"]}
        lb.update(item["properties"])
        ret[lb["name"]] = lb
    return ret
def create_loadbalancer(call=None, kwargs=None):
    """
    Creates a loadbalancer within the datacenter from the provider config.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f create_loadbalancer profitbricks name=mylb
    """
    if call != "function":
        # Fix: the message previously named create_address (copy-paste bug).
        raise SaltCloudSystemExit(
            "The create_loadbalancer function must be called with -f or --function."
        )
    if kwargs is None:
        kwargs = {}
    conn = get_conn()
    datacenter_id = get_datacenter_id()
    loadbalancer = LoadBalancer(
        name=kwargs.get("name"), ip=kwargs.get("ip"), dhcp=kwargs.get("dhcp")
    )
    response = conn.create_loadbalancer(datacenter_id, loadbalancer)
    _wait_for_completion(conn, response, 60, "loadbalancer")
    return response
def get_datacenter(conn):
    """
    Return the datacenter record matching the provider-configured ID.

    Raises SaltCloudNotFound when no datacenter carries that ID.
    """
    datacenter_id = get_datacenter_id()
    for candidate in conn.list_datacenters()["items"]:
        if candidate["id"] == datacenter_id:
            return candidate
    raise SaltCloudNotFound(
        "The specified datacenter '{}' could not be found.".format(datacenter_id)
    )
def create_datacenter(call=None, kwargs=None):
    """
    Creates a virtual datacenter based on supplied parameters.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f create_datacenter profitbricks name=mydatacenter
        location=us/las description="my description"
    """
    if call != "function":
        # Fix: the message previously named create_address (copy-paste bug).
        raise SaltCloudSystemExit(
            "The create_datacenter function must be called with -f or --function."
        )
    if kwargs is None:
        kwargs = {}
    if kwargs.get("name") is None:
        raise SaltCloudExecutionFailure('The "name" parameter is required')
    if kwargs.get("location") is None:
        raise SaltCloudExecutionFailure('The "location" parameter is required')
    conn = get_conn()
    datacenter = Datacenter(
        name=kwargs["name"],
        location=kwargs["location"],
        description=kwargs.get("description"),
    )
    response = conn.create_datacenter(datacenter)
    _wait_for_completion(conn, response, 60, "create_datacenter")
    return response
def get_disk_type(vm_):
    """
    Return the type of disk to use. Either 'HDD' (default) or 'SSD'.
    """
    return config.get_cloud_config_value(
        "disk_type", vm_, __opts__, default="HDD", search_global=False
    )
def get_wait_timeout(vm_):
    """
    Return the wait_for_timeout for resource provisioning, in seconds.
    Defaults to 15 minutes.
    """
    return config.get_cloud_config_value(
        "wait_for_timeout", vm_, __opts__, default=15 * 60, search_global=False
    )
def get_image(vm_):
    """
    Return the image object to use, matched by image id or name.

    Raises SaltCloudNotFound when the configured image matches nothing.
    """
    # NOTE(review): the ascii encode with the custom "salt-cloud-force-ascii"
    # error handler yields bytes on Python 3, while the API values compared
    # below are presumably str — confirm this comparison still matches.
    vm_image = config.get_cloud_config_value("image", vm_, __opts__).encode(
        "ascii", "salt-cloud-force-ascii"
    )
    images = avail_images()
    for key in images:
        if vm_image and vm_image in (images[key]["id"], images[key]["name"]):
            return images[key]
    raise SaltCloudNotFound(
        "The specified image, '{}', could not be found.".format(vm_image)
    )
def list_datacenters(conn=None, call=None):
    """
    List all the data centers

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_datacenters my-profitbricks-config
    """
    if call != "function":
        raise SaltCloudSystemExit(
            "The list_datacenters function must be called with -f or --function."
        )
    if not conn:
        conn = get_conn()
    datacenters = []
    for item in conn.list_datacenters()["items"]:
        datacenter = dict(item["properties"])
        datacenter["id"] = item["id"]
        datacenters.append({item["properties"]["name"]: datacenter})
    return {"Datacenters": datacenters}
def list_nodes(conn=None, call=None):
    """
    Return a list of VMs that are on the provider, keyed by server name.
    """
    if call == "action":
        raise SaltCloudSystemExit(
            "The list_nodes function must be called with -f or --function."
        )
    if not conn:
        conn = get_conn()
    datacenter_id = get_datacenter_id()
    try:
        nodes = conn.list_servers(datacenter_id=datacenter_id)
    except PBNotFoundError:
        log.error("Failed to get nodes list from datacenter: %s", datacenter_id)
        raise
    ret = {}
    for item in nodes["items"]:
        node = dict(item["properties"])
        node["id"] = item["id"]
        # Normalize the API's vmState field to the Salt-conventional "state".
        node["state"] = node.pop("vmState")
        ret[node["name"]] = node
    return ret
def list_nodes_full(conn=None, call=None):
    """
    Return a list of the VMs that are on the provider, with all fields,
    including public/private IP addresses gathered from each server's NICs.
    """
    if call == "action":
        raise SaltCloudSystemExit(
            "The list_nodes_full function must be called with -f or --function."
        )
    if not conn:
        conn = get_conn()  # pylint: disable=E0602
    ret = {}
    datacenter_id = get_datacenter_id()
    nodes = conn.list_servers(datacenter_id=datacenter_id, depth=3)
    for item in nodes["items"]:
        node = {"id": item["id"]}
        node.update(item["properties"])
        node["state"] = node.pop("vmState")
        node["public_ips"] = []
        node["private_ips"] = []
        # Fix: the old code compared the NIC *list* itself to 0, which raises
        # TypeError on Python 3; compare its length instead.
        if len(item["entities"]["nics"]["items"]) > 0:
            for nic in item["entities"]["nics"]["items"]:
                # Fix: the old `if ips: pass` guard did nothing and the
                # subsequent [0] index crashed on NICs with no assigned IPs;
                # skip those NICs instead.
                if not nic["properties"]["ips"]:
                    continue
                ip_address = nic["properties"]["ips"][0]
                if salt.utils.cloud.is_public_ip(ip_address):
                    node["public_ips"].append(ip_address)
                else:
                    node["private_ips"].append(ip_address)
        ret[node["name"]] = node
    __utils__["cloud.cache_node_list"](
        ret, _get_active_provider_name().split(":")[0], __opts__
    )
    return ret
def reserve_ipblock(call=None, kwargs=None):
    """
    Reserve an IP Block and return the reserved addresses as ``{"ips": [...]}``.
    """
    if call == "action":
        raise SaltCloudSystemExit(
            "The reserve_ipblock function must be called with -f or --function."
        )
    conn = get_conn()
    if kwargs is None:
        kwargs = {}
    location = kwargs.get("location")
    if location is None:
        raise SaltCloudExecutionFailure('The "location" parameter is required')
    size = kwargs.get("size") if kwargs.get("size") is not None else 1
    block = conn.reserve_ipblock(IPBlock(size=size, location=location))
    return {"ips": [item for item in block["properties"]["ips"]]}
def show_instance(name, call=None):
    """
    Show the details from the provider concerning an instance, refreshing
    the node cache for it as a side effect.
    """
    if call != "action":
        raise SaltCloudSystemExit(
            "The show_instance action must be called with -a or --action."
        )
    node = list_nodes_full()[name]
    __utils__["cloud.cache_node"](node, _get_active_provider_name(), __opts__)
    return node
def get_node(conn, name):
    """
    Return a node dict for the named VM, or None when no server matches.
    """
    datacenter_id = get_datacenter_id()
    for item in conn.list_servers(datacenter_id)["items"]:
        if item["properties"]["name"] != name:
            continue
        node = {"id": item["id"]}
        node.update(item["properties"])
        return node
def ssh_interface(vm_):
    """
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    """
    return config.get_cloud_config_value(
        "ssh_interface", vm_, __opts__, default="public_ips", search_global=False
    )
def _get_nics(vm_):
    """
    Create network interfaces on appropriate LANs as defined in cloud profile.

    Builds up to two NICs: a "public" NIC when ``public_lan`` is set (with
    optional firewall rules and static IPs) and a "private" NIC when
    ``private_lan`` is set (with optional firewall rules, static IPs, or NAT).
    """
    nics = []
    if "public_lan" in vm_:
        firewall_rules = []
        # Set LAN to public if it already exists, otherwise create a new
        # public LAN.
        if "public_firewall_rules" in vm_:
            firewall_rules = _get_firewall_rules(vm_["public_firewall_rules"])
        nic = NIC(
            lan=set_public_lan(int(vm_["public_lan"])),
            name="public",
            firewall_rules=firewall_rules,
        )
        if "public_ips" in vm_:
            nic.ips = _get_ip_addresses(vm_["public_ips"])
        nics.append(nic)
    if "private_lan" in vm_:
        firewall_rules = []
        if "private_firewall_rules" in vm_:
            firewall_rules = _get_firewall_rules(vm_["private_firewall_rules"])
        nic = NIC(
            lan=int(vm_["private_lan"]), name="private", firewall_rules=firewall_rules
        )
        if "private_ips" in vm_:
            nic.ips = _get_ip_addresses(vm_["private_ips"])
        # NAT is only applied when no static private IPs were requested.
        if "nat" in vm_ and "private_ips" not in vm_:
            nic.nat = vm_["nat"]
        nics.append(nic)
    return nics
def set_public_lan(lan_id):
    """
    Enables public Internet access for the specified public_lan. If no public
    LAN is available, then a new public LAN is created.

    :param lan_id: id of the LAN that should be public
    :return: the id of the (possibly newly created) public LAN
    """
    conn = get_conn()
    datacenter_id = get_datacenter_id()
    try:
        lan = conn.get_lan(datacenter_id=datacenter_id, lan_id=lan_id)
        if not lan["properties"]["public"]:
            # LAN exists but is private: flip it to public in place.
            conn.update_lan(datacenter_id=datacenter_id, lan_id=lan_id, public=True)
        return lan["id"]
    except Exception:  # pylint: disable=broad-except
        # NOTE(review): any failure in the lookup/update above (not only
        # "LAN not found") falls through to creating a brand new public LAN.
        lan = conn.create_lan(datacenter_id, LAN(public=True, name="Public LAN"))
        return lan["id"]
def get_public_keys(vm_):
    """
    Read the profile's ``ssh_public_key`` file and return its lines as a
    list of SSH public keys.

    Returns ``None`` when no key file is configured; raises
    SaltCloudConfigError when the configured file does not exist.
    """
    key_filename = config.get_cloud_config_value(
        "ssh_public_key", vm_, __opts__, search_global=False, default=None
    )
    if key_filename is None:
        return None
    key_filename = os.path.expanduser(key_filename)
    if not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            "The defined ssh_public_key '{}' does not exist".format(key_filename)
        )
    with salt.utils.files.fopen(key_filename) as rfh:
        return [salt.utils.stringutils.to_unicode(line) for line in rfh.readlines()]
def get_key_filename(vm_):
    """
    Resolve the profile's ``ssh_private_key`` setting to an absolute path.

    Returns ``None`` when no private key is configured; raises
    SaltCloudConfigError when the configured file is missing.
    """
    key_filename = config.get_cloud_config_value(
        "ssh_private_key", vm_, __opts__, search_global=False, default=None
    )
    if key_filename is None:
        return None
    key_filename = os.path.expanduser(key_filename)
    if not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            "The defined ssh_private_key '{}' does not exist".format(key_filename)
        )
    return key_filename
def signal_event(vm_, event, description):
    """
    Fire a salt-cloud event on the master event bus for *vm_*.

    :param vm_: VM config dict; name/profile/provider/driver are included
        in the event payload
    :param event: event name used to filter the payload fields
    :param description: human-readable event description
    """
    args = __utils__["cloud.filter_event"](
        event, vm_, ["name", "profile", "provider", "driver"]
    )
    __utils__["cloud.fire_event"](
        "event",
        description,
        # NOTE(review): the tag always ends in '/creating' even when this is
        # fired for other phases (e.g. 'requesting') -- confirm whether any
        # event consumers rely on this before changing it.
        "salt/cloud/{}/creating".format(vm_["name"]),
        args=args,
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
def create(vm_):
    """
    Create a single VM from a data dict.

    :param vm_: cloud profile/config dict for the new VM
    :return: bootstrap result dict on success, ``False`` on failure
    :raises SaltCloudNotFound: when 'image_alias' is used with an SDK < 4.0
    :raises SaltCloudSystemExit: when no usable IP address is found or the
        wait-for-IP step times out/fails
    """
    try:
        # Check for required profile parameters before sending any API calls.
        if (
            vm_["profile"]
            and config.is_profile_configured(
                __opts__,
                (_get_active_provider_name() or "profitbricks"),
                vm_["profile"],
            )
            is False
        ):
            return False
    except AttributeError:
        pass
    if "image_alias" in vm_ and not version_compatible("4.0"):
        raise SaltCloudNotFound(
            "The 'image_alias' parameter requires the profitbricks "
            "SDK v4.0.0 or greater."
        )
    # NOTE(review): this only logs and falls through; a missing image is
    # presumably caught later by get_image() -- confirm a hard return/raise
    # here would not change expected behavior.
    if "image" not in vm_ and "image_alias" not in vm_:
        log.error("The image or image_alias parameter is required.")
    signal_event(vm_, "creating", "starting create")
    data = None
    datacenter_id = get_datacenter_id()
    conn = get_conn()
    # Assemble list of network interfaces from the cloud profile config.
    nics = _get_nics(vm_)
    # Assemble list of volumes from the cloud profile config.
    volumes = [_get_system_volume(vm_)]
    if "volumes" in vm_:
        volumes.extend(_get_data_volumes(vm_))
    # Assemble the composite server object.
    server = _get_server(vm_, volumes, nics)
    signal_event(vm_, "requesting", "requesting instance")
    try:
        data = conn.create_server(datacenter_id=datacenter_id, server=server)
        log.info(
            "Create server request ID: %s",
            data["requestId"],
            exc_info_on_loglevel=logging.DEBUG,
        )
        # Block until the provider reports the create request as DONE.
        _wait_for_completion(conn, data, get_wait_timeout(vm_), "create_server")
    except PBError as exc:
        log.error(
            "Error creating %s on ProfitBricks\n\n"
            "The following exception was thrown by the profitbricks library "
            "when trying to run the initial deployment: \n%s",
            vm_["name"],
            exc,
            exc_info_on_loglevel=logging.DEBUG,
        )
        return False
    except Exception as exc:  # pylint: disable=W0703
        log.error(
            "Error creating %s \n\nError: \n%s",
            vm_["name"],
            exc,
            exc_info_on_loglevel=logging.DEBUG,
        )
        return False
    vm_["server_id"] = data["id"]

    def __query_node_data(vm_, data):
        """
        Query node data until node becomes available.

        Returns False on error, None while still booting, or the node data
        dict once the server is RUNNING (also records vm_['ssh_host']).
        """
        running = False
        try:
            data = show_instance(vm_["name"], "action")
            if not data:
                return False
            log.debug(
                "Loaded node data for %s:\nname: %s\nstate: %s",
                vm_["name"],
                pprint.pformat(data["name"]),
                data["state"],
            )
        except Exception as err:  # pylint: disable=broad-except
            log.error(
                "Failed to get nodes list: %s",
                err,
                # Show the traceback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG,
            )
            # Trigger a failure in the wait for IP function
            return False
        running = data["state"] == "RUNNING"
        if not running:
            # Still not running, trigger another iteration
            return
        # Pick the SSH host according to the configured ssh_interface.
        if ssh_interface(vm_) == "private_lan" and data["private_ips"]:
            vm_["ssh_host"] = data["private_ips"][0]
        if ssh_interface(vm_) != "private_lan" and data["public_ips"]:
            vm_["ssh_host"] = data["public_ips"][0]
        return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value(
                "wait_for_ip_timeout", vm_, __opts__, default=10 * 60
            ),
            interval=config.get_cloud_config_value(
                "wait_for_ip_interval", vm_, __opts__, default=10
            ),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_["name"])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc.message))
    log.debug("VM is now running")
    log.info("Created Cloud VM %s", vm_)
    log.debug("%s VM creation details:\n%s", vm_, pprint.pformat(data))
    signal_event(vm_, "created", "created instance")
    if "ssh_host" in vm_:
        vm_["key_filename"] = get_key_filename(vm_)
        ret = __utils__["cloud.bootstrap"](vm_, __opts__)
        ret.update(data)
        return ret
    else:
        raise SaltCloudSystemExit("A valid IP address was not found.")
def destroy(name, call=None):
    """
    destroy a machine by name

    :param name: name given to the machine
    :param call: call value in this case is 'action'
    :return: True on success

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    """
    if call == "function":
        raise SaltCloudSystemExit(
            "The destroy action must be called with -d, --destroy, -a or --action."
        )
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    datacenter_id = get_datacenter_id()
    conn = get_conn()
    node = get_node(conn, name)
    attached_volumes = None
    # 'delete_volumes' comes from the provider config, not the profile.
    delete_volumes = config.get_cloud_config_value(
        "delete_volumes", get_configured_provider(), __opts__, search_global=False
    )
    # Get volumes before the server is deleted
    attached_volumes = conn.get_attached_volumes(
        datacenter_id=datacenter_id, server_id=node["id"]
    )
    conn.delete_server(datacenter_id=datacenter_id, server_id=node["id"])
    # The server is deleted and now is safe to delete the volumes
    if delete_volumes:
        for vol in attached_volumes["items"]:
            log.debug("Deleting volume %s", vol["id"])
            conn.delete_volume(datacenter_id=datacenter_id, volume_id=vol["id"])
            log.debug("Deleted volume %s", vol["id"])
    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    # Drop the node from the local salt-cloud cache when caching is enabled.
    if __opts__.get("update_cachedir", False) is True:
        __utils__["cloud.delete_minion_cachedir"](
            name, _get_active_provider_name().split(":")[0], __opts__
        )
    return True
def reboot(name, call=None):
    """
    Reboot a machine by name.

    :param name: name given to the machine
    :param call: call value in this case is 'action'
    :return: True if successful

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    """
    conn = get_conn()
    server = get_node(conn, name)
    conn.reboot_server(datacenter_id=get_datacenter_id(), server_id=server["id"])
    return True
def stop(name, call=None):
    """
    Stop a machine by name.

    :param name: name given to the machine
    :param call: call value in this case is 'action'
    :return: True if successful

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    """
    conn = get_conn()
    server = get_node(conn, name)
    conn.stop_server(datacenter_id=get_datacenter_id(), server_id=server["id"])
    return True
def start(name, call=None):
    """
    Start a machine by name.

    :param name: name given to the machine
    :param call: call value in this case is 'action'
    :return: True if successful

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    """
    conn = get_conn()
    server = get_node(conn, name)
    conn.start_server(datacenter_id=get_datacenter_id(), server_id=server["id"])
    return True
def _override_size(vm_):
    """
    Return the profile size with any 'cores'/'ram' overrides from the
    cloud profile applied on top.
    """
    size = get_size(vm_)
    for component in ("cores", "ram"):
        if component in vm_:
            size[component] = vm_[component]
    return size
def _get_server(vm_, volumes, nics):
    """
    Construct server instance from cloud profile config.

    :param vm_: cloud profile config dict
    :param volumes: list of Volume objects to create with the server
    :param nics: list of NIC objects to attach
    :return: a Server object ready to be submitted to the API
    """
    # Apply component overrides to the size from the cloud profile config
    vm_size = _override_size(vm_)
    # Set the server availability zone from the cloud profile config
    availability_zone = config.get_cloud_config_value(
        "availability_zone", vm_, __opts__, default=None, search_global=False
    )
    # Assign CPU family from the cloud profile config
    cpu_family = config.get_cloud_config_value(
        "cpu_family", vm_, __opts__, default=None, search_global=False
    )
    # Construct server object
    return Server(
        name=vm_["name"],
        ram=vm_size["ram"],
        availability_zone=availability_zone,
        cores=vm_size["cores"],
        cpu_family=cpu_family,
        create_volumes=volumes,
        nics=nics,
    )
def _get_system_volume(vm_):
    """
    Construct the VM system (boot) volume from the cloud profile config.

    The size comes from the profile's size definition unless overridden by
    'disk_size'; the image is selected via 'image_alias' (preferred when
    present) or 'image'.
    """
    # Override system volume size if 'disk_size' is defined in cloud profile
    disk_size = get_size(vm_)["disk"]
    if "disk_size" in vm_:
        disk_size = vm_["disk_size"]
    # Construct the system volume
    volume = Volume(
        name="{} Storage".format(vm_["name"]),
        size=disk_size,
        disk_type=get_disk_type(vm_),
    )
    if "image_password" in vm_:
        image_password = vm_["image_password"]
        volume.image_password = image_password
    # Retrieve list of SSH public keys
    ssh_keys = get_public_keys(vm_)
    # NOTE(review): get_public_keys returns None when no key is configured,
    # so ssh_keys may be None here -- confirm the SDK accepts that.
    volume.ssh_keys = ssh_keys
    if "image_alias" in vm_.keys():
        volume.image_alias = vm_["image_alias"]
    else:
        volume.image = get_image(vm_)["id"]
    # Set volume availability zone if defined in the cloud profile
    if "disk_availability_zone" in vm_:
        volume.availability_zone = vm_["disk_availability_zone"]
    return volume
def _get_data_volumes(vm_):
    """
    Construct a list of optional data volumes from the cloud profile.

    Each entry under vm_['volumes'] must define 'disk_size'; 'disk_type'
    defaults to 'HDD' and 'disk_availability_zone' is optional.

    :param vm_: cloud profile config dict containing a 'volumes' mapping
    :return: list of Volume objects
    :raises SaltCloudConfigError: when a volume is missing 'disk_size'
    """
    ret = []
    for name, parms in vm_["volumes"].items():
        # Verify the required 'disk_size' property is present in the cloud
        # profile config.
        if "disk_size" not in parms:
            raise SaltCloudConfigError(
                "The volume '{}' is missing 'disk_size'".format(name)
            )
        # Use 'HDD' if no 'disk_type' property is present in cloud profile.
        # (Written back into the profile dict, as the original code did.)
        if "disk_type" not in parms:
            parms["disk_type"] = "HDD"
        # Construct volume object and assign to a list.
        volume = Volume(
            name=name,
            size=parms["disk_size"],
            disk_type=parms["disk_type"],
            licence_type="OTHER",
        )
        # Set volume availability zone if defined in the cloud profile
        if "disk_availability_zone" in parms:
            volume.availability_zone = parms["disk_availability_zone"]
        ret.append(volume)
    return ret
def _get_ip_addresses(ip_addresses):
"""
Construct a list of ip address
"""
ret = []
for item in ip_addresses:
ret.append(item)
return ret
def _get_firewall_rules(firewall_rules):
    """
    Construct a list of optional firewall rules from the cloud profile.

    Each rule must define 'protocol'; every other field is optional and
    defaults to None.

    :param firewall_rules: mapping of rule name -> rule parameter dict
    :return: list of FirewallRule objects
    :raises SaltCloudConfigError: when a rule is missing 'protocol'
    """
    ret = []
    for name, parms in firewall_rules.items():
        # Verify the required 'protocol' property is present in the cloud
        # profile config.
        if "protocol" not in parms:
            raise SaltCloudConfigError(
                "The firewall rule '{}' is missing 'protocol'".format(name)
            )
        ret.append(
            FirewallRule(
                name=name,
                protocol=parms.get("protocol", None),
                source_mac=parms.get("source_mac", None),
                source_ip=parms.get("source_ip", None),
                target_ip=parms.get("target_ip", None),
                port_range_start=parms.get("port_range_start", None),
                port_range_end=parms.get("port_range_end", None),
                icmp_type=parms.get("icmp_type", None),
                icmp_code=parms.get("icmp_code", None),
            )
        )
    return ret
def _wait_for_completion(conn, promise, wait_timeout, msg):
"""
Poll request status until resource is provisioned.
"""
if not promise:
return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
time.sleep(5)
operation_result = conn.get_request(
request_id=promise["requestId"], status=True
)
if operation_result["metadata"]["status"] == "DONE":
return
elif operation_result["metadata"]["status"] == "FAILED":
raise Exception(
"Request: {}, requestId: {} failed to complete:\n{}".format(
msg,
str(promise["requestId"]),
operation_result["metadata"]["message"],
)
)
raise Exception(
'Timed out waiting for asynchronous operation {} "{}" to complete.'.format(
msg, str(promise["requestId"])
)
) | /salt-ssh-9000.tar.gz/salt-ssh-9000/salt/cloud/clouds/profitbricks.py | 0.523908 | 0.152979 | profitbricks.py | pypi |
import logging
import os
import pprint
import time
import salt.config as config
import salt.utils.cloud
import salt.utils.json
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout,
SaltCloudNotFound,
SaltCloudSystemExit,
)
# Module-level logger for this cloud driver.
log = logging.getLogger(__name__)
# Name under which salt-cloud loads this driver.
__virtualname__ = "scaleway"
# Only load in this module if the Scaleway configurations are in place
def __virtual__():
    """
    Check for Scaleway configurations.
    """
    # Report the driver name only when a provider is actually configured.
    return False if get_configured_provider() is False else __virtualname__
def _get_active_provider_name():
    """
    Return the active provider name, unwrapping the loader proxy
    (``.value()``) when one is in use.
    """
    provider = __active_provider_name__
    try:
        return provider.value()
    except AttributeError:
        return provider
def get_configured_provider():
    """Return the first configured instance."""
    # A Scaleway provider is only considered configured when it defines the
    # required API 'token'.
    return config.is_provider_configured(
        __opts__, _get_active_provider_name() or __virtualname__, ("token",)
    )
def avail_images(call=None):
    """Return the images available on the provider, keyed by image id.

    Every image attribute is stringified, matching the other list-style
    outputs of this driver.
    """
    if call == "action":
        raise SaltCloudSystemExit(
            "The avail_images function must be called with "
            "-f or --function, or with the --list-images option"
        )
    items = query(method="images", root="marketplace_root")
    return {
        image["id"]: {field: str(image[field]) for field in image}
        for image in items["images"]
    }
def list_nodes(call=None):
    """Return a list of the BareMetal servers that are on the provider.

    Returns a dict keyed by server name with a summary per node: id,
    image_id, public/private IPs, size (of volume '0') and state.
    """
    if call == "action":
        raise SaltCloudSystemExit(
            "The list_nodes function must be called with -f or --function."
        )
    items = query(method="servers")
    ret = {}
    for node in items["servers"]:
        # IPs and image may be absent on freshly created/terminated nodes.
        public_ips = []
        private_ips = []
        image_id = ""
        if node.get("public_ip"):
            public_ips = [node["public_ip"]["address"]]
        if node.get("private_ip"):
            private_ips = [node["private_ip"]]
        if node.get("image"):
            image_id = node["image"]["id"]
        ret[node["name"]] = {
            "id": node["id"],
            "image_id": image_id,
            "public_ips": public_ips,
            "private_ips": private_ips,
            "size": node["volumes"]["0"]["size"],
            "state": node["state"],
        }
    return ret
def list_nodes_full(call=None):
    """Return every attribute of every BareMetal server on the provider,
    keyed by server name."""
    if call == "action":
        raise SaltCloudSystemExit(
            "list_nodes_full must be called with -f or --function"
        )
    items = query(method="servers")
    # Copy each server's attributes verbatim into the result.
    return {
        node["name"]: {field: node[field] for field in node}
        for node in items["servers"]
    }
def list_nodes_select(call=None):
    """Return a list of the BareMetal servers that are on the provider, with
    select fields.

    The fields are chosen by the master's 'query.selection' option.
    """
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full("function"),
        __opts__["query.selection"],
        call,
    )
def get_image(server_):
    """Resolve the profile's 'image' setting (name or id) to an image id.

    Raises SaltCloudNotFound when no marketplace image matches.
    """
    requested = str(
        config.get_cloud_config_value("image", server_, __opts__, search_global=False)
    )
    for image in avail_images().values():
        if requested in (image["name"], image["id"]):
            return image["id"]
    raise SaltCloudNotFound(
        "The specified image, '{}', could not be found.".format(requested)
    )
def create_node(args):
    """Create a node and immediately power it on.

    :param args: request payload for the server-creation API call
    :return: the API response describing the new server
    """
    node = query(method="servers", args=args, http_method="POST")
    # Newly created servers start powered off; issue an explicit poweron.
    # (The action response itself is not needed, so it is not bound to a
    # local -- the original code assigned it to an unused variable.)
    query(
        method="servers",
        server_id=node["server"]["id"],
        command="action",
        args={"action": "poweron"},
        http_method="POST",
    )
    return node
def create(server_):
    """
    Create a single BareMetal server from a data dict.

    :param server_: cloud profile/config dict for the new server
    :return: bootstrap result dict on success, ``False`` on failure
    :raises SaltCloudConfigError: when the configured ssh_key_file is missing
    :raises SaltCloudSystemExit: when waiting for the public IP fails
    """
    try:
        # Check for required profile parameters before sending any API calls.
        if (
            server_["profile"]
            and config.is_profile_configured(
                __opts__,
                _get_active_provider_name() or "scaleway",
                server_["profile"],
                vm_=server_,
            )
            is False
        ):
            return False
    except AttributeError:
        pass
    __utils__["cloud.fire_event"](
        "event",
        "starting create",
        "salt/cloud/{}/creating".format(server_["name"]),
        args=__utils__["cloud.filter_event"](
            "creating", server_, ["name", "profile", "provider", "driver"]
        ),
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    log.info("Creating a BareMetal server %s", server_["name"])
    access_key = config.get_cloud_config_value(
        "access_key", get_configured_provider(), __opts__, search_global=False
    )
    commercial_type = config.get_cloud_config_value(
        "commercial_type", server_, __opts__, default="C1"
    )
    key_filename = config.get_cloud_config_value(
        "ssh_key_file", server_, __opts__, search_global=False, default=None
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            "The defined key_filename '{}' does not exist".format(key_filename)
        )
    ssh_password = config.get_cloud_config_value("ssh_password", server_, __opts__)
    kwargs = {
        "name": server_["name"],
        "organization": access_key,
        "image": get_image(server_),
        "commercial_type": commercial_type,
    }
    __utils__["cloud.fire_event"](
        "event",
        "requesting instance",
        "salt/cloud/{}/requesting".format(server_["name"]),
        args={
            "kwargs": __utils__["cloud.filter_event"](
                "requesting", kwargs, list(kwargs)
            ),
        },
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    try:
        # NOTE(review): this result is overwritten by the bootstrap result
        # below; only the side effect (server creation) matters here.
        ret = create_node(kwargs)
    except Exception as exc:  # pylint: disable=broad-except
        log.error(
            "Error creating %s on Scaleway\n\n"
            "The following exception was thrown when trying to "
            "run the initial deployment: %s",
            server_["name"],
            exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG,
        )
        return False

    def __query_node_data(server_name):
        """Called to check if the server has a public IP address."""
        data = show_instance(server_name, "action")
        if data and data.get("public_ip"):
            return data
        return False

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(server_["name"],),
            timeout=config.get_cloud_config_value(
                "wait_for_ip_timeout", server_, __opts__, default=10 * 60
            ),
            interval=config.get_cloud_config_value(
                "wait_for_ip_interval", server_, __opts__, default=10
            ),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(server_["name"])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))
    # Hand off connection details to the generic bootstrap helper.
    server_["ssh_host"] = data["public_ip"]["address"]
    server_["ssh_password"] = ssh_password
    server_["key_filename"] = key_filename
    ret = __utils__["cloud.bootstrap"](server_, __opts__)
    ret.update(data)
    log.info("Created BareMetal server '%s'", server_["name"])
    log.debug(
        "'%s' BareMetal server creation details:\n%s",
        server_["name"],
        pprint.pformat(data),
    )
    __utils__["cloud.fire_event"](
        "event",
        "created instance",
        "salt/cloud/{}/created".format(server_["name"]),
        args=__utils__["cloud.filter_event"](
            "created", server_, ["name", "profile", "provider", "driver"]
        ),
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    return ret
def query(
    method="servers",
    server_id=None,
    command=None,
    args=None,
    http_method="GET",
    root="api_root",
):
    """Make a call to the Scaleway API.

    :param method: top-level API collection (e.g. 'servers', 'images')
    :param server_id: optional server id appended to the path
    :param command: optional trailing command (e.g. 'action')
    :param args: JSON-serializable request payload (non-dicts become {})
    :param http_method: HTTP verb to use
    :param root: config key selecting the endpoint: 'api_root' (control
        plane) or anything else (marketplace)
    :return: decoded JSON body, or True for a 204 (success without data)
    :raises SaltCloudSystemExit: when the API answers with an error status
    """
    if root == "api_root":
        default_url = "https://cp-par1.scaleway.com"
    else:
        default_url = "https://api-marketplace.scaleway.com"
    vm_ = get_configured_provider()
    base_path = str(
        config.get_cloud_config_value(
            root,
            vm_,
            __opts__,
            search_global=False,
            default=default_url,
        )
    )
    path = "{}/{}/".format(base_path, method)
    if server_id:
        path += "{}/".format(server_id)
    if command:
        path += command
    if not isinstance(args, dict):
        args = {}
    token = config.get_cloud_config_value("token", vm_, __opts__, search_global=False)
    data = salt.utils.json.dumps(args)
    # status=True makes http.query include the 'status' key in its return
    # dict, which the checks below rely on.
    request = __utils__["http.query"](
        path,
        method=http_method,
        data=data,
        status=True,
        headers={
            "X-Auth-Token": token,
            "User-Agent": "salt-cloud",
            "Content-Type": "application/json",
        },
    )
    # http.query returns a dict, not a requests.Response: the previous
    # ``request.status_code`` / ``request.text`` attribute access raised
    # AttributeError on every call. Use the dict keys instead, consistent
    # with the 204 check below.
    if request["status"] > 299:
        raise SaltCloudSystemExit(
            "An error occurred while querying Scaleway. HTTP Code: {} "
            "Error: '{}'".format(
                request["status"], request.get("error", request.get("body"))
            )
        )
    # success without data
    if request["status"] == 204:
        return True
    return salt.utils.json.loads(request["body"])
def script(server_):
    """Return the script deployment object.

    Renders the deploy script configured for this profile with a minion
    config generated for *server_*.
    """
    return salt.utils.cloud.os_script(
        config.get_cloud_config_value("script", server_, __opts__),
        server_,
        __opts__,
        salt.utils.cloud.salt_config_to_yaml(
            salt.utils.cloud.minion_config(__opts__, server_)
        ),
    )
def show_instance(name, call=None):
    """Show the details from a Scaleway BareMetal server.

    Must be invoked as an action (``-a``/``--action``); also refreshes the
    salt-cloud node cache entry for the instance.
    """
    if call != "action":
        raise SaltCloudSystemExit(
            "The show_instance action must be called with -a or --action."
        )
    node = _get_node(name)
    __utils__["cloud.cache_node"](node, _get_active_provider_name(), __opts__)
    return node
def _get_node(name):
    """
    Fetch the full node data for *name*, retrying up to ten times (with a
    half-second pause after each miss) while a freshly created server is
    not yet listed. Returns {} when the node never appears.
    """
    attempts = 10
    while attempts:
        attempts -= 1
        nodes = list_nodes_full()
        if name in nodes:
            return nodes[name]
        log.debug(
            "Failed to get the data for node '%s'. Remaining attempts: %s",
            name,
            attempts,
        )
        # Just a little delay between attempts...
        time.sleep(0.5)
    return {}
def destroy(name, call=None):
    """Destroy a node. Will check termination protection and warn if enabled.

    :param name: name of the machine to destroy
    :param call: how the function was invoked; 'function' is rejected
    :return: the API response for the terminate action

    CLI Example:

    .. code-block:: bash

        salt-cloud --destroy mymachine

    (Also removes a stray extraction artifact that had been fused onto the
    final ``return`` line, which made the module unparsable.)
    """
    if call == "function":
        raise SaltCloudSystemExit(
            "The destroy action must be called with -d, --destroy, -a or --action."
        )
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    data = show_instance(name, call="action")
    node = query(
        method="servers",
        server_id=data["id"],
        command="action",
        args={"action": "terminate"},
        http_method="POST",
    )
    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    # Drop the node from the local salt-cloud cache when caching is enabled.
    if __opts__.get("update_cachedir", False) is True:
        __utils__["cloud.delete_minion_cachedir"](
            name, _get_active_provider_name().split(":")[0], __opts__
        )
    return node
import hashlib
import logging
import os

import salt.crypt
import salt.key
import salt.utils.crypt
import salt.utils.files
import salt.utils.platform
import salt.utils.stringutils
from salt.utils.sanitizers import clean
# Expose ``list_`` as ``key.list`` and ``key_str`` as ``key.print`` on the
# wheel interface (both target names are Python builtins/keywords).
__func_alias__ = {
    "list_": "list",
    "key_str": "print",
}
log = logging.getLogger(__name__)
def list_(match):
    """
    List all the keys under a named status. Returns a dictionary.

    match
        Which keys to list: ``pre``/``un``/``unaccepted`` for pending keys,
        ``acc``/``accepted`` for accepted keys, ``rej``/``rejected`` for
        rejected keys, or ``all`` for everything.

    .. code-block:: python

        >>> wheel.cmd('key.list', ['accepted'])
        {'minions': ['minion1', 'minion2', 'minion3']}
    """
    with salt.key.get_key(__opts__) as key_api:
        return key_api.list_status(match)
def list_all():
    """
    List every key known to the master, grouped by salt-key category
    (``minions``, ``minions_pre``, ``minions_rejected``, ``minions_denied``,
    ``local``, ...). Returns a dictionary of lists.

    .. code-block:: python

        >>> wheel.cmd('key.list_all')
        {'local': ['master.pem', 'master.pub'], 'minions_rejected': [],
        'minions_denied': [], 'minions_pre': [],
        'minions': ['minion1', 'minion2', 'minion3']}
    """
    with salt.key.get_key(__opts__) as key_api:
        return key_api.all_keys()
def name_match(match):
    """
    List all the keys whose names match the given glob, regardless of
    status.
    """
    with salt.key.get_key(__opts__) as key_api:
        return key_api.name_match(match)
def accept(match, include_rejected=False, include_denied=False):
    """
    Accept pending keys matching a glob. Returns a dictionary of the keys
    that were accepted.

    match
        Glob pattern selecting the keys to accept.

    include_rejected
        Also consider currently rejected keys. Defaults to ``False``.

    include_denied
        Also consider currently denied keys. Defaults to ``False``.

    .. code-block:: python

        >>> wheel.cmd('key.accept', ['minion1'])
        {'minions': ['minion1']}
    """
    with salt.key.get_key(__opts__) as key_api:
        return key_api.accept(
            match=match,
            include_rejected=include_rejected,
            include_denied=include_denied,
        )
def accept_dict(match, include_rejected=False, include_denied=False):
    """
    Accept keys listed in a dict of ``{status_dir: [names, ...]}``. Returns
    a dictionary of the keys that were accepted.

    match
        Dictionary of keys to accept, keyed by status directory.

    include_rejected
        Also consider currently rejected keys. Defaults to ``False``.

        .. versionadded:: 2016.3.4

    include_denied
        Also consider currently denied keys. Defaults to ``False``.

        .. versionadded:: 2016.3.4

    Example, moving keys from ``minions_pre`` (pending) to ``minions``
    (accepted):

    .. code-block:: python

        >>> wheel.cmd('key.accept_dict',
        {
            'minions_pre': [
                'jerry',
                'stuart',
                'bob',
            ],
        })
        {'minions': ['jerry', 'stuart', 'bob']}
    """
    with salt.key.get_key(__opts__) as key_api:
        return key_api.accept(
            match_dict=match,
            include_rejected=include_rejected,
            include_denied=include_denied,
        )
def delete(match):
    """
    Delete keys matching a glob. Returns a dictionary describing what was
    deleted.

    match
        Glob pattern selecting the keys to delete.

    .. code-block:: python

        >>> wheel.cmd_async({'fun': 'key.delete', 'match': 'minion1'})
        {'jid': '20160826201244808521', 'tag': 'salt/wheel/20160826201244808521'}
    """
    with salt.key.get_key(__opts__) as key_api:
        return key_api.delete_key(match)
def delete_dict(match):
    """
    Delete keys listed in a dict of ``{status_dir: [names, ...]}``. Returns
    a dictionary describing what was deleted.

    match
        Dictionary of keys to delete, keyed by status directory.

    .. code-block:: python

        >>> wheel.cmd_async({'fun': 'key.delete_dict',
        'match': {
              'minions': [
                  'jerry',
                  'stuart',
                  'bob',
              ],
        }})
        {'jid': '20160826201244808521', 'tag': 'salt/wheel/20160826201244808521'}
    """
    with salt.key.get_key(__opts__) as key_api:
        return key_api.delete_key(match_dict=match)
def reject(match, include_accepted=False, include_denied=False):
    """
    Reject pending keys matching a glob. Returns a dictionary of the keys
    that were rejected.

    match
        Glob pattern selecting the keys to reject.

    include_accepted
        Also consider currently accepted keys. Defaults to ``False``.

    include_denied
        Also consider currently denied keys. Defaults to ``False``.

    .. code-block:: python

        >>> wheel.cmd_async({'fun': 'key.reject', 'match': 'minion1'})
        {'jid': '20160826201244808521', 'tag': 'salt/wheel/20160826201244808521'}
    """
    with salt.key.get_key(__opts__) as key_api:
        return key_api.reject(
            match=match,
            include_accepted=include_accepted,
            include_denied=include_denied,
        )
def reject_dict(match, include_accepted=False, include_denied=False):
    """
    Reject keys listed in a dict of ``{status_dir: [names, ...]}``. Returns
    a dictionary of the keys that were rejected.

    match
        Dictionary of keys to reject, keyed by status directory.

    include_accepted
        Also consider currently accepted keys. Defaults to ``False``.

        .. versionadded:: 2016.3.4

    include_denied
        Also consider currently denied keys. Defaults to ``False``.

        .. versionadded:: 2016.3.4

    .. code-block:: python

        >>> wheel.cmd_async({'fun': 'key.reject_dict',
        'match': {
              'minions': [
                  'jerry',
                  'stuart',
                  'bob',
              ],
        }})
        {'jid': '20160826201244808521', 'tag': 'salt/wheel/20160826201244808521'}
    """
    with salt.key.get_key(__opts__) as key_api:
        return key_api.reject(
            match_dict=match,
            include_accepted=include_accepted,
            include_denied=include_denied,
        )
def key_str(match):
    r"""
    Return the PEM text of the matching keys. Returns a dictionary.

    match
        The key(s) to return information about.

    .. code-block:: python

        >>> wheel.cmd('key.key_str', ['minion1'])
        {'minions': {'minion1': '-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0B
        ...
        TWugEQpPt\niQIDAQAB\n-----END PUBLIC KEY-----'}}
    """
    with salt.key.get_key(__opts__) as key_api:
        return key_api.key_str(match)
def master_key_str():
    r"""
    Return the master's public key as a dictionary of the form
    ``{'local': {'master.pub': '<PEM text>'}}``.
    """
    keyname = "master.pub"
    pubkey_path = os.path.join(__opts__["pki_dir"], keyname)
    with salt.utils.files.fopen(pubkey_path, "r") as pub_fh:
        pem = salt.utils.stringutils.to_unicode(pub_fh.read())
    return {"local": {keyname: pem}}
def finger(match, hash_type=None):
    """
    Return the fingerprints of the matching keys. Returns a dictionary.

    match
        The key for which to retrieve the fingerprint.

    hash_type
        Hash algorithm for the fingerprint; defaults to the master's
        configured ``hash_type``.

    .. code-block:: python

        >>> wheel.cmd('key.finger', ['minion1'])
        {'minions': {'minion1': '5d:f6:79:43:5e:d4:42:3f:57:b8:45:a8:7e:a4:6e:ca'}}
    """
    if hash_type is None:
        hash_type = __opts__["hash_type"]
    with salt.key.get_key(__opts__) as key_api:
        return key_api.finger(match, hash_type)
def finger_master(hash_type=None):
    """
    Return the fingerprint of the master's public key as
    ``{'local': {'master.pub': <fingerprint>}}``.

    hash_type
        Hash algorithm for the fingerprint; defaults to the master's
        configured ``hash_type``.
    """
    if hash_type is None:
        hash_type = __opts__["hash_type"]
    keyname = "master.pub"
    pub_path = os.path.join(__opts__["pki_dir"], keyname)
    fingerprint = salt.utils.crypt.pem_finger(pub_path, sum_type=hash_type)
    return {"local": {keyname: fingerprint}}
def gen(id_=None, keysize=2048):
    r"""
    Generate a key pair. No keys are stored on the master. A key pair is
    returned as a dict containing pub and priv keys. Returns a dictionary
    containing the ``pub`` and ``priv`` keys with their generated values.

    id\_
        Set a name to generate a key pair for use with salt. If not specified,
        a random name will be specified.

    keysize
        The size of the key pair to generate. The size must be ``2048``, which
        is the default, or greater. If set to a value less than ``2048``, the
        key size will be rounded up to ``2048``.
        (Presumably enforced inside salt.crypt.gen_keys -- TODO confirm.)

    .. code-block:: python

        >>> wheel.cmd('key.gen')
        {'pub': '-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBC
        ...
        BBPfamX9gGPQTpN9e8HwcZjXQnmg8OrcUl10WHw09SDWLOlnW+ueTWugEQpPt\niQIDAQAB\n
        -----END PUBLIC KEY-----',
        'priv': '-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA42Kf+w9XeZWgguzv
        ...
        QH3/W74X1+WTBlx4R2KGLYBiH+bCCFEQ/Zvcu4Xp4bIOPtRKozEQ==\n
        -----END RSA PRIVATE KEY-----'}
    """
    if id_ is None:
        # No name requested: use a random, unguessable identifier.
        id_ = hashlib.sha512(os.urandom(32)).hexdigest()
    else:
        id_ = clean.filename(id_)
    ret = {"priv": "", "pub": ""}
    # gen_keys writes <id_>.pem (and .pub) into pki_dir and returns the
    # private key path; derive the public key path from it.
    priv = salt.crypt.gen_keys(__opts__["pki_dir"], id_, keysize)
    pub = "{}.pub".format(priv[: priv.rindex(".")])
    with salt.utils.files.fopen(priv) as fp_:
        ret["priv"] = salt.utils.stringutils.to_unicode(fp_.read())
    with salt.utils.files.fopen(pub) as fp_:
        ret["pub"] = salt.utils.stringutils.to_unicode(fp_.read())
    # The priv key is written read-only, which makes `os.remove` fail on
    # Windows; 128 == 0o200 (owner write) makes it deletable again.
    if salt.utils.platform.is_windows():
        os.chmod(priv, 128)
    # The key material only lives in the returned dict; remove both files.
    os.remove(priv)
    os.remove(pub)
    return ret
def gen_accept(id_, keysize=2048, force=False):
    r"""
    Generate a key pair then accept the public key. This function returns the
    key pair in a dict, only the public key is preserved on the master. Returns
    a dictionary.

    id\_
        The name of the minion for which to generate a key pair.

    keysize
        The size of the key pair to generate. The size must be ``2048``, which
        is the default, or greater. If set to a value less than ``2048``, the
        key size will be rounded up to ``2048``.

    force
        If a public key has already been accepted for the given minion on the
        master, then the gen_accept function will return an empty dictionary
        and not create a new key. This is the default behavior. If ``force``
        is set to ``True``, then the minion's previously accepted key will be
        overwritten.

    .. code-block:: python

        >>> wheel.cmd('key.gen_accept', ['foo'])
        {'pub': '-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBC
        ...
        -----END PUBLIC KEY-----',
        'priv': '-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA42Kf+w9XeZWgguzv
        ...
        -----END RSA PRIVATE KEY-----'}

    We can now see that the ``foo`` minion's key has been accepted by the
    master:

    .. code-block:: python

        >>> wheel.cmd('key.list', ['accepted'])
        {'minions': ['foo', 'minion1', 'minion2', 'minion3']}
    """
    id_ = clean.id(id_)
    acc_path = os.path.join(__opts__["pki_dir"], "minions", id_)
    # Honor the documented contract: when a key is already accepted and
    # ``force`` is False, return early WITHOUT generating a new key pair.
    # (Previously gen() ran first, needlessly creating and deleting key
    # files under pki_dir even when the result was discarded.)
    if os.path.isfile(acc_path) and not force:
        return {}
    ret = gen(id_, keysize)
    # Persist only the public key as the minion's accepted key.
    with salt.utils.files.fopen(acc_path, "w+") as fp_:
        fp_.write(salt.utils.stringutils.to_str(ret["pub"]))
    return ret
def gen_keys(keydir=None, keyname=None, keysize=None, user=None):
    """
    Generate minion RSA public keypair

    keydir
        Directory in which to write the key pair; defaults are taken from the
        master configuration when None.
    keyname
        Base name for the generated key files.
    keysize
        RSA key size in bits.
    user
        Owner of the generated files.
    """
    # Delegates entirely to salt.key; the context manager ensures the key
    # backend is cleaned up afterwards.
    with salt.key.get_key(__opts__) as skey:
        return skey.gen_keys(keydir, keyname, keysize, user)
def gen_signature(priv, pub, signature_path, auto_create=False, keysize=None):
    """
    Generate master public-key-signature

    priv
        Path to the private key used for signing.
    pub
        Path to the public key to be signed.
    signature_path
        Path where the generated signature file is written.
    auto_create
        When True, create the signing key pair if it does not exist yet.
    keysize
        Key size used when ``auto_create`` generates a new pair.
    """
    # NOTE: the original final line carried fused dataset-dump metadata
    # (a "| path | ... | pypi |" row) which made the module unparseable;
    # the code itself is unchanged.
    with salt.key.get_key(__opts__) as skey:
        return skey.gen_keys_signature(priv, pub, signature_path, auto_create, keysize)
from salt.utils.dictdiffer import recursive_diff
def list_diff(list_a, list_b, key):
    """
    Convenience wrapper returning a :class:`ListDictDiffer` that diffs
    ``list_a`` (old) against ``list_b`` (new), matching items on ``key``.
    """
    return ListDictDiffer(list_a, list_b, key)
class ListDictDiffer:
    """
    Calculates the differences between two lists of dictionaries.

    It matches the items based on a given key and uses the recursive_diff to
    diff the two values.
    """

    def __init__(self, current_list, next_list, key):
        """
        current_list
            The "old" list of dictionaries.
        next_list
            The "new" list of dictionaries.
        key
            Dictionary key used to pair up items between the two lists.
            Every item in both lists must contain this key.
        """
        self._intersect = []
        self._removed = []
        self._added = []
        self._new = next_list
        self._current = current_list
        self._key = key
        # Pair up items present in both lists; anything in current_list
        # without a match in next_list is considered removed.
        for current_item in current_list:
            if key not in current_item:
                raise ValueError(
                    "The supplied key '{}' does not "
                    "exist in item, the available keys are: {}"
                    "".format(key, current_item.keys())
                )
            for next_item in next_list:
                if key not in next_item:
                    raise ValueError(
                        "The supplied key '{}' does not "
                        "exist in item, the available keys are: "
                        "{}".format(key, next_item.keys())
                    )
                if next_item[key] == current_item[key]:
                    item = {key: next_item[key], "old": current_item, "new": next_item}
                    self._intersect.append(item)
                    break
            else:
                self._removed.append(current_item)
        # Anything in next_list without a match in current_list is new.
        for next_item in next_list:
            for current_item in current_list:
                if next_item[key] == current_item[key]:
                    break
            else:
                self._added.append(next_item)

    def _get_recursive_difference(self, type):
        """Returns the recursive diff between dict values"""
        # NOTE(review): ``type`` shadows the builtin; kept as-is for
        # compatibility with existing callers.
        if type == "intersect":
            return [
                recursive_diff(item["old"], item["new"]) for item in self._intersect
            ]
        elif type == "added":
            return [recursive_diff({}, item) for item in self._added]
        elif type == "removed":
            return [
                recursive_diff(item, {}, ignore_missing_keys=False)
                for item in self._removed
            ]
        elif type == "all":
            recursive_list = []
            recursive_list.extend(
                [recursive_diff(item["old"], item["new"]) for item in self._intersect]
            )
            recursive_list.extend([recursive_diff({}, item) for item in self._added])
            recursive_list.extend(
                [
                    recursive_diff(item, {}, ignore_missing_keys=False)
                    for item in self._removed
                ]
            )
            return recursive_list
        else:
            raise ValueError(
                "The given type for recursive list matching is not supported."
            )

    @property
    def removed(self):
        """Returns the objects which are removed from the list"""
        return self._removed

    @property
    def added(self):
        """Returns the objects which are added to the list"""
        return self._added

    @property
    def intersect(self):
        """Returns the intersect objects"""
        return self._intersect

    def remove_diff(self, diff_key=None, diff_list="intersect"):
        """Deletes an attribute from all of the intersect objects"""
        if diff_list == "intersect":
            for item in self._intersect:
                item["old"].pop(diff_key, None)
                item["new"].pop(diff_key, None)
        if diff_list == "removed":
            for item in self._removed:
                item.pop(diff_key, None)

    @property
    def diffs(self):
        """
        Returns a list of dictionaries with key value pairs.
        The values are the differences between the items identified by the key.
        """
        differences = []
        for item in self._get_recursive_difference(type="all"):
            if item.diffs:
                if item.past_dict:
                    differences.append({item.past_dict[self._key]: item.diffs})
                elif item.current_dict:
                    differences.append({item.current_dict[self._key]: item.diffs})
        return differences

    @property
    def changes_str(self):
        """Returns a string describing the changes"""
        changes = ""
        for item in self._get_recursive_difference(type="intersect"):
            if item.diffs:
                changes = "".join(
                    [
                        changes,
                        # Tabulate comment deeper, show the key attribute and the value
                        # Next line should be tabulated even deeper,
                        # every change should be tabulated 1 deeper
                        "\tidentified by {} {}:\n\t{}\n".format(
                            self._key,
                            item.past_dict[self._key],
                            item.changes_str.replace("\n", "\n\t"),
                        ),
                    ]
                )
        for item in self._get_recursive_difference(type="removed"):
            if item.past_dict:
                changes = "".join(
                    [
                        changes,
                        # Tabulate comment deeper, show the key attribute and the value
                        "\tidentified by {} {}:\n\twill be removed\n".format(
                            self._key, item.past_dict[self._key]
                        ),
                    ]
                )
        for item in self._get_recursive_difference(type="added"):
            if item.current_dict:
                changes = "".join(
                    [
                        changes,
                        # Tabulate comment deeper, show the key attribute and the value
                        "\tidentified by {} {}:\n\twill be added\n".format(
                            self._key, item.current_dict[self._key]
                        ),
                    ]
                )
        return changes

    # NOTE(review): as a property this method can never actually receive
    # ``tab_string`` from a caller; the default is always used. Kept as-is
    # because callers access it attribute-style.
    @property
    def changes_str2(self, tab_string=" "):
        """
        Returns a string in a more compact format describing the changes.

        The output better aligns with the one in recursive_diff.
        """
        changes = []
        for item in self._get_recursive_difference(type="intersect"):
            if item.diffs:
                changes.append(
                    "{tab}{0}={1} (updated):\n{tab}{tab}{2}".format(
                        self._key,
                        item.past_dict[self._key],
                        item.changes_str.replace("\n", "\n{0}{0}".format(tab_string)),
                        tab=tab_string,
                    )
                )
        for item in self._get_recursive_difference(type="removed"):
            if item.past_dict:
                changes.append(
                    "{tab}{0}={1} (removed)".format(
                        self._key, item.past_dict[self._key], tab=tab_string
                    )
                )
        for item in self._get_recursive_difference(type="added"):
            if item.current_dict:
                changes.append(
                    "{tab}{0}={1} (added): {2}".format(
                        self._key,
                        item.current_dict[self._key],
                        dict(item.current_dict),
                        tab=tab_string,
                    )
                )
        return "\n".join(changes)

    @property
    def new_values(self):
        """Returns the new values from the diff"""

        def get_new_values_and_key(item):
            values = item.new_values
            if item.past_dict:
                values.update({self._key: item.past_dict[self._key]})
            else:
                # This is a new item as it has no past_dict
                values.update({self._key: item.current_dict[self._key]})
            return values

        return [
            get_new_values_and_key(el)
            for el in self._get_recursive_difference("all")
            if el.diffs and el.current_dict
        ]

    @property
    def old_values(self):
        """Returns the old values from the diff"""

        def get_old_values_and_key(item):
            values = item.old_values
            values.update({self._key: item.past_dict[self._key]})
            return values

        return [
            get_old_values_and_key(el)
            for el in self._get_recursive_difference("all")
            if el.diffs and el.past_dict
        ]

    def changed(self, selection="all"):
        """
        Returns the list of changed values.
        The key is added to each item.

        selection
            Specifies the desired changes.
            Supported values are
            ``all`` - all changed items are included in the output
            ``intersect`` - changed items present in both lists are included
        """
        changed = []
        if selection == "all":
            for recursive_item in self._get_recursive_difference(type="all"):
                # We want the unset values as well
                recursive_item.ignore_unset_values = False
                key_val = (
                    str(recursive_item.past_dict[self._key])
                    if self._key in recursive_item.past_dict
                    else str(recursive_item.current_dict[self._key])
                )
                for change in recursive_item.changed():
                    if change != self._key:
                        changed.append(".".join([self._key, key_val, change]))
            return changed
        elif selection == "intersect":
            # We want the unset values as well
            for recursive_item in self._get_recursive_difference(type="intersect"):
                recursive_item.ignore_unset_values = False
                key_val = (
                    str(recursive_item.past_dict[self._key])
                    if self._key in recursive_item.past_dict
                    else str(recursive_item.current_dict[self._key])
                )
                for change in recursive_item.changed():
                    if change != self._key:
                        changed.append(".".join([self._key, key_val, change]))
            return changed

    @property
    def current_list(self):
        # The original "old" list as supplied to the constructor.
        return self._current

    @property
    def new_list(self):
        # The original "new" list as supplied to the constructor.
        # NOTE: the original final line carried fused dataset-dump metadata;
        # the code itself is unchanged.
        return self._new
import copy
import logging
from salt.utils.odict import OrderedDict
# Public API of this module.
__all__ = ["aggregate", "Aggregate", "Map", "Scalar", "Sequence"]
log = logging.getLogger(__name__)
class Aggregate:
    """
    Aggregation base. Values wrapped in an Aggregate subclass are eligible
    for deep merging in :func:`aggregate`.
    """
class Map(OrderedDict, Aggregate):
    """
    Map aggregation: an ordered dict marked for deep merging.
    """
class Sequence(list, Aggregate):
    """
    Sequence aggregation: a list marked for deep merging.
    """
def Scalar(obj):
    """
    Shortcut for Sequence creation: wraps a single value in a one-element
    :class:`Sequence`.

    >>> Scalar('foo') == Sequence(['foo'])
    True
    """
    return Sequence([obj])
def levelise(level):
    """
    Describe which levels are allowed to do deep merging.

    Returns a ``(deep, sublevel)`` pair where ``deep`` says whether the
    current level merges deeply and ``sublevel`` is the spec passed down
    to the next level.

    level can be:

    True
        all levels are True

    False
        all levels are False

    an int
        only the first levels are True, the others are False

    a sequence
        it describes which levels are True, it can be:

        * a list of bool and int values
        * a string of 0 and 1 characters
    """
    if not level:
        # Covers False, 0, empty sequences: no deep merging anywhere.
        return False, False
    if level is True:
        return True, True
    if isinstance(level, int):
        # Positive int: this level is deep, countdown is passed along.
        return True, level - 1
    # Otherwise treat ``level`` as a sequence: consume its head, pass the tail.
    try:
        head, tail = int(level[0]), level[1:]
    except Exception as error:  # pylint: disable=broad-except
        log.warning(error)
        raise
    return bool(head), tail
def mark(obj, map_class=Map, sequence_class=Sequence):
    """
    Convert obj into an Aggregate instance so :func:`aggregate` will
    deep-merge it.
    """
    if isinstance(obj, Aggregate):
        # Already marked; return untouched.
        return obj
    if isinstance(obj, dict):
        return map_class(obj)
    # Plain iterables are wrapped directly; any other value becomes a
    # one-element sequence.
    wrapped = obj if isinstance(obj, (list, tuple, set)) else [obj]
    return sequence_class(wrapped)
def aggregate(obj_a, obj_b, level=False, map_class=Map, sequence_class=Sequence):
    """
    Merge obj_b into obj_a.

    ``level`` controls which nesting levels are deep-merged (see
    :func:`levelise`). Dicts are merged key-wise, sequences are merged by
    appending values not already present; otherwise ``obj_b`` wins.

    >>> aggregate('first', 'second', True) == ['first', 'second']
    True
    """
    # NOTE: the original final line carried fused dataset-dump metadata
    # (a "| path | ... | pypi |" row); the code itself is unchanged.
    deep, subdeep = levelise(level)
    if deep:
        obj_a = mark(obj_a, map_class=map_class, sequence_class=sequence_class)
        obj_b = mark(obj_b, map_class=map_class, sequence_class=sequence_class)
    if isinstance(obj_a, dict) and isinstance(obj_b, dict):
        if isinstance(obj_a, Aggregate) and isinstance(obj_b, Aggregate):
            # deep merging is more or less a.update(obj_b)
            response = copy.copy(obj_a)
        else:
            # introspection on obj_b keys only
            response = copy.copy(obj_b)
        for key, value in obj_b.items():
            if key in obj_a:
                # Common keys are merged recursively with the sub-level spec.
                value = aggregate(obj_a[key], value, subdeep, map_class, sequence_class)
            response[key] = value
        return response
    if isinstance(obj_a, Sequence) and isinstance(obj_b, Sequence):
        # Keep obj_a's order, append only values not already present.
        response = obj_a.__class__(obj_a[:])
        for value in obj_b:
            if value not in obj_a:
                response.append(value)
        return response
    response = copy.copy(obj_b)
    if isinstance(obj_a, Aggregate) or isinstance(obj_b, Aggregate):
        log.info("only one value marked as aggregate. keep `obj_b` value")
        return response
    log.debug("no value marked as aggregate. keep `obj_b` value")
    return response
r"""
A salt util for modifying the audit policies on the machine. This util is used
by the ``win_auditpol`` and ``win_lgpo`` modules.
Though this utility does not set group policy for auditing, it displays how all
auditing configuration is applied on the machine, either set directly or via
local or domain group policy.
.. versionadded:: 2018.3.4
.. versionadded:: 2019.2.1
This util allows you to view and modify the audit settings as they are applied
on the machine. The audit settings are broken down into nine categories:
- Account Logon
- Account Management
- Detailed Tracking
- DS Access
- Logon/Logoff
- Object Access
- Policy Change
- Privilege Use
- System
The ``get_settings`` function will return the subcategories for all nine of
the above categories in one dictionary along with their auditing status.
To modify a setting you only need to specify the subcategory name and the value
you wish to set. Valid settings are:
- No Auditing
- Success
- Failure
- Success and Failure
Usage:
.. code-block:: python
import salt.utils.win_lgpo_auditpol
# Get current state of all audit settings
salt.utils.win_lgpo_auditpol.get_settings()
# Get the current state of all audit settings in the "Account Logon"
# category
salt.utils.win_lgpo_auditpol.get_settings(category="Account Logon")
# Get current state of the "Credential Validation" setting
salt.utils.win_lgpo_auditpol.get_setting(name='Credential Validation')
# Set the state of the "Credential Validation" setting to Success and
# Failure
salt.utils.win_lgpo_auditpol.set_setting(name='Credential Validation',
value='Success and Failure')
# Set the state of the "Credential Validation" setting to No Auditing
salt.utils.win_lgpo_auditpol.set_setting(name='Credential Validation',
value='No Auditing')
"""
import logging
import re
import tempfile
import salt.modules.cmdmod
import salt.utils.files
import salt.utils.platform
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
# Name this util is exposed under by the loader.
__virtualname__ = "auditpol"

# The nine top-level audit policy categories recognized by auditpol.exe.
categories = [
    "Account Logon",
    "Account Management",
    "Detailed Tracking",
    "DS Access",
    "Logon/Logoff",
    "Object Access",
    "Policy Change",
    "Privilege Use",
    "System",
]

# Maps the human-readable audit state to the auditpol.exe flag pair used
# by ``set_setting``.
settings = {
    "No Auditing": "/success:disable /failure:disable",
    "Success": "/success:enable /failure:disable",
    "Failure": "/success:disable /failure:enable",
    "Success and Failure": "/success:enable /failure:enable",
}
# Although utils are often directly imported, it is also possible to use the
# loader.
def __virtual__():
    """
    Only load if on a Windows system
    """
    if salt.utils.platform.is_windows():
        return __virtualname__
    return False, "This utility only available on Windows"
def _auditpol_cmd(cmd):
    """
    Helper function for running the auditpol command

    Args:
        cmd (str): the auditpol command to run

    Returns:
        list: A list containing each line of the return (splitlines)

    Raises:
        CommandExecutionError: If the command encounters an error
    """
    result = salt.modules.cmdmod.run_all(
        cmd="auditpol {}".format(cmd), python_shell=True
    )
    if result["retcode"] != 0:
        # NOTE(review): ``stdout`` is a string here, so this join interleaves
        # a newline between characters — preserved as-is from the original.
        raise CommandExecutionError(
            "Error executing auditpol command: {}\n".format(cmd)
            + "\n".join(result["stdout"])
        )
    return result["stdout"].splitlines()
def get_settings(category="All"):
    """
    Get the current configuration for all audit settings specified in the
    category

    Args:
        category (str):
            One of the nine categories to return. Can also be ``All`` to return
            the settings for all categories. Valid options are:

            - Account Logon
            - Account Management
            - Detailed Tracking
            - DS Access
            - Logon/Logoff
            - Object Access
            - Policy Change
            - Privilege Use
            - System
            - All

            Default value is ``All``

    Returns:
        dict: A dictionary containing all subcategories for the specified
            category along with their current configuration

    Raises:
        KeyError: On invalid category
        CommandExecutionError: If an error is encountered retrieving the settings

    Usage:

    .. code-block:: python

        import salt.utils.win_lgpo_auditpol

        # Get current state of all audit settings
        salt.utils.win_lgpo_auditpol.get_settings()

        # Get the current state of all audit settings in the "Account Logon"
        # category
        salt.utils.win_lgpo_auditpol.get_settings(category="Account Logon")
    """
    # Parameter validation
    if category.lower() in ["all", "*"]:
        category = "*"
    elif category.lower() not in [x.lower() for x in categories]:
        raise KeyError('Invalid category: "{}"'.format(category))
    cmd = '/get /category:"{}"'.format(category)
    results = _auditpol_cmd(cmd)
    ret = {}
    # Skip the first three lines of header output from auditpol
    for line in results[3:]:
        if " " in line.strip():
            # Each remaining line is "<subcategory>  <state>" separated by
            # runs of whitespace; split on 2+ spaces and pair the fields up.
            ret.update(dict(list(zip(*[iter(re.split(r"\s{2,}", line.strip()))] * 2))))
    return ret
def get_setting(name):
    """
    Get the current configuration for the named audit setting

    Args:
        name (str): The name of the setting to retrieve

    Returns:
        str: The current configuration for the named setting

    Raises:
        KeyError: On invalid setting name
        CommandExecutionError: If an error is encountered retrieving the settings

    Usage:

    .. code-block:: python

        import salt.utils.win_lgpo_auditpol

        # Get current state of the "Credential Validation" setting
        salt.utils.win_lgpo_auditpol.get_setting(name='Credential Validation')
    """
    current_settings = get_settings(category="All")
    wanted = name.lower()
    # Setting names are matched case-insensitively.
    for setting, state in current_settings.items():
        if setting.lower() == wanted:
            return state
    raise KeyError("Invalid name: {}".format(name))
def _get_valid_names():
    """
    Return the lowercased list of valid audit subcategory names, cached in
    ``__context__`` so auditpol is only queried once per process.
    """
    if "auditpol.valid_names" not in __context__:
        settings = get_settings(category="All")
        __context__["auditpol.valid_names"] = [k.lower() for k in settings]
    return __context__["auditpol.valid_names"]
def set_setting(name, value):
    """
    Set the configuration for the named audit setting

    Args:

        name (str):
            The name of the setting to configure

        value (str):
            The configuration for the named value. Valid options are:

            - No Auditing
            - Success
            - Failure
            - Success and Failure

    Returns:
        bool: True if successful

    Raises:
        KeyError: On invalid ``name`` or ``value``
        CommandExecutionError: If an error is encountered modifying the setting

    Usage:

    .. code-block:: python

        import salt.utils.win_lgpo_auditpol

        # Set the state of the "Credential Validation" setting to Success and
        # Failure
        salt.utils.win_lgpo_auditpol.set_setting(name='Credential Validation',
                                                 value='Success and Failure')

        # Set the state of the "Credential Validation" setting to No Auditing
        salt.utils.win_lgpo_auditpol.set_setting(name='Credential Validation',
                                                 value='No Auditing')
    """
    # Input validation (case-insensitive on both name and value)
    if name.lower() not in _get_valid_names():
        raise KeyError("Invalid name: {}".format(name))
    wanted = value.lower()
    for label, flags in settings.items():
        if wanted == label.lower():
            cmd = '/set /subcategory:"{}" {}'.format(name, flags)
            break
    else:
        raise KeyError("Invalid setting value: {}".format(value))
    _auditpol_cmd(cmd)
    return True
def get_auditpol_dump():
    """
    Gets the contents of an auditpol /backup. Used by the LGPO module to get
    fieldnames and GUIDs for Advanced Audit policies.

    Returns:
        list: A list of lines from the backup file

    Usage:

    .. code-block:: python

        import salt.utils.win_lgpo_auditpol

        dump = salt.utils.win_lgpo_auditpol.get_auditpol_dump()
    """
    import os

    # Just get a temporary file name.
    # NamedTemporaryFile deletes the file it creates by default on Windows,
    # so on context exit only the unique path remains; auditpol then
    # re-creates the file at that path.
    with tempfile.NamedTemporaryFile(suffix=".csv") as tmp_file:
        csv_file = tmp_file.name
    cmd = "/backup /file:{}".format(csv_file)
    _auditpol_cmd(cmd)
    try:
        with salt.utils.files.fopen(csv_file) as fp:
            return fp.readlines()
    finally:
        # Remove the backup file written by auditpol; the original
        # implementation leaked it in the temp directory on every call.
        os.remove(csv_file)
import copy
from collections.abc import Mapping
def diff(current_dict, past_dict):
    """
    Convenience wrapper returning a :class:`DictDiffer` for the two dicts.
    """
    return DictDiffer(current_dict, past_dict)
class DictDiffer:
    """
    Calculate the difference between two dictionaries as:
        (1) items added
        (2) items removed
        (3) keys same in both but changed values
        (4) keys same in both and unchanged values
    """

    def __init__(self, current_dict, past_dict):
        self.current_dict = current_dict
        self.past_dict = past_dict
        self.set_current = set(current_dict)
        self.set_past = set(past_dict)
        # Keys present in both dictionaries.
        self.intersect = self.set_current & self.set_past

    def added(self):
        """Keys present only in the current dict."""
        return self.set_current - self.intersect

    def removed(self):
        """Keys present only in the past dict."""
        return self.set_past - self.intersect

    def changed(self):
        """Common keys whose values differ."""
        return {
            key
            for key in self.intersect
            if self.current_dict[key] != self.past_dict[key]
        }

    def unchanged(self):
        """Common keys whose values are equal."""
        return {
            key
            for key in self.intersect
            if self.current_dict[key] == self.past_dict[key]
        }
def deep_diff(old, new, ignore=None):
    """
    Return a recursive diff of ``old`` vs ``new`` as
    ``{"old": ..., "new": ...}``, keeping only the keys that differ.

    ``ignore`` is a list of keys to drop from the comparison (applied at
    every nesting level). Inputs are deep-copied, so the originals are
    never mutated; the copies are pruned in place while the diff is built.
    """
    ignore = ignore or []
    res = {}
    old = copy.deepcopy(old)
    new = copy.deepcopy(new)
    # Worklist of (old-subdict, new-subdict, reentrant) triples. A parent is
    # re-pushed with reentrant=True after its children so that keys whose
    # nested dicts became equal (fully pruned) are removed on the second pass.
    stack = [(old, new, False)]
    while stack:
        tmps = []
        tmp_old, tmp_new, reentrant = stack.pop()
        for key in set(list(tmp_old) + list(tmp_new)):
            if key in tmp_old and key in tmp_new and tmp_old[key] == tmp_new[key]:
                # Equal on both sides: prune from the diff.
                del tmp_old[key]
                del tmp_new[key]
                continue
            if not reentrant:
                # Ignored keys are only dropped on the first pass.
                if key in tmp_old and key in ignore:
                    del tmp_old[key]
                if key in tmp_new and key in ignore:
                    del tmp_new[key]
                # Nested mappings on both sides are diffed recursively.
                if isinstance(tmp_old.get(key), Mapping) and isinstance(
                    tmp_new.get(key), Mapping
                ):
                    tmps.append((tmp_old[key], tmp_new[key], False))
        if tmps:
            # Parent goes back first (reentrant) so children are processed
            # before the parent's second equality pass.
            stack.extend([(tmp_old, tmp_new, True)] + tmps)
    if old:
        res["old"] = old
    if new:
        res["new"] = new
    return res
def recursive_diff(past_dict, current_dict, ignore_missing_keys=True):
    """
    Returns a RecursiveDictDiffer object that computes the recursive diffs
    between two dictionaries

    past_dict
            Past dictionary

    current_dict
        Current dictionary

    ignore_missing_keys
        Flag specifying whether to ignore keys that no longer exist in the
        current_dict, but exist in the past_dict. If true, the diff will
        not contain the missing keys.
        Default is True.
    """
    return RecursiveDictDiffer(past_dict, current_dict, ignore_missing_keys)
class RecursiveDictDiffer(DictDiffer):
    """
    Calculates a recursive diff between the current_dict and the past_dict
    creating a diff in the format

        {'new': new_value, 'old': old_value}

    It recursively searches differences in common keys whose values are
    dictionaries creating a diff dict in the format

        {'common_key' : {'new': new_value, 'old': old_value}

    The class overrides all DictDiffer methods, returning lists of keys and
    subkeys using the . notation (i.e 'common_key1.common_key2.changed_key')

    The class provides access to:
        (1) the added, removed, changes keys and subkeys (using the . notation)
            ``added``, ``removed``, ``changed`` methods
        (2) the diffs in the format above (diff property)
            ``diffs`` property
        (3) a dict with the new changed values only (new_values property)
            ``new_values`` property
        (4) a dict with the old changed values only (old_values property)
            ``old_values`` property
        (5) a string representation of the changes in the format:
            ``changes_str`` property

    Note:
        The <_null_> value is a reserved value

    .. code-block:: text

        common_key1:
          common_key2:
            changed_key1 from '<old_str>' to '<new_str>'
            changed_key2 from '[<old_elem1>, ..]' to '[<new_elem1>, ..]'
        common_key3:
          changed_key3 from <old_int> to <new_int>
    """

    # Sentinel marking "key absent on this side" in the diff structure.
    NONE_VALUE = "<_null_>"

    def __init__(self, past_dict, current_dict, ignore_missing_keys):
        """
        past_dict
            Past dictionary.

        current_dict
            Current dictionary.

        ignore_missing_keys
            Flag specifying whether to ignore keys that no longer exist in the
            current_dict, but exist in the past_dict. If true, the diff will
            not contain the missing keys.
        """
        super().__init__(current_dict, past_dict)
        self._diffs = self._get_diffs(
            self.current_dict, self.past_dict, ignore_missing_keys
        )
        # Ignores unset values when assessing the changes
        self.ignore_unset_values = True

    @classmethod
    def _get_diffs(cls, dict1, dict2, ignore_missing_keys):
        """
        Returns a dict with the differences between dict1 and dict2

        Notes:
            Keys that only exist in dict2 are not included in the diff if
            ignore_missing_keys is True, otherwise they are
            Simple compares are done on lists
        """
        ret_dict = {}
        for p in dict1.keys():
            if p not in dict2:
                ret_dict.update({p: {"new": dict1[p], "old": cls.NONE_VALUE}})
            elif dict1[p] != dict2[p]:
                if isinstance(dict1[p], dict) and isinstance(dict2[p], dict):
                    sub_diff_dict = cls._get_diffs(
                        dict1[p], dict2[p], ignore_missing_keys
                    )
                    if sub_diff_dict:
                        ret_dict.update({p: sub_diff_dict})
                else:
                    ret_dict.update({p: {"new": dict1[p], "old": dict2[p]}})
        if not ignore_missing_keys:
            for p in dict2.keys():
                if p not in dict1.keys():
                    ret_dict.update({p: {"new": cls.NONE_VALUE, "old": dict2[p]}})
        return ret_dict

    @classmethod
    def _get_values(cls, diff_dict, type="new"):
        """
        Returns a dictionaries with the 'new' values in a diff dict.

        type
            Which values to return, 'new' or 'old'
        """
        ret_dict = {}
        for p in diff_dict.keys():
            if type in diff_dict[p].keys():
                ret_dict.update({p: diff_dict[p][type]})
            else:
                ret_dict.update({p: cls._get_values(diff_dict[p], type=type)})
        return ret_dict

    @classmethod
    def _get_changes(cls, diff_dict):
        """
        Returns a list of string message with the differences in a diff dict.

        Each inner difference is tabulated two space deeper
        """
        changes_strings = []
        for p in sorted(diff_dict.keys()):
            if sorted(diff_dict[p].keys()) == ["new", "old"]:
                # Some string formatting
                old_value = diff_dict[p]["old"]
                if diff_dict[p]["old"] == cls.NONE_VALUE:
                    old_value = "nothing"
                elif isinstance(diff_dict[p]["old"], str):
                    old_value = "'{}'".format(diff_dict[p]["old"])
                elif isinstance(diff_dict[p]["old"], list):
                    old_value = "'{}'".format(", ".join(diff_dict[p]["old"]))
                new_value = diff_dict[p]["new"]
                if diff_dict[p]["new"] == cls.NONE_VALUE:
                    new_value = "nothing"
                elif isinstance(diff_dict[p]["new"], str):
                    new_value = "'{}'".format(diff_dict[p]["new"])
                elif isinstance(diff_dict[p]["new"], list):
                    new_value = "'{}'".format(", ".join(diff_dict[p]["new"]))
                changes_strings.append(
                    "{} from {} to {}".format(p, old_value, new_value)
                )
            else:
                # Nested diff: recurse and indent the sub-changes.
                sub_changes = cls._get_changes(diff_dict[p])
                if sub_changes:
                    changes_strings.append("{}:".format(p))
                    changes_strings.extend(["  {}".format(c) for c in sub_changes])
        return changes_strings

    def added(self):
        """
        Returns all keys that have been added.

        If the keys are in child dictionaries they will be represented with
        . notation
        """

        def _added(diffs, prefix):
            keys = []
            for key in diffs.keys():
                if isinstance(diffs[key], dict) and "old" not in diffs[key]:
                    keys.extend(_added(diffs[key], prefix="{}{}.".format(prefix, key)))
                elif diffs[key]["old"] == self.NONE_VALUE:
                    if isinstance(diffs[key]["new"], dict):
                        keys.extend(
                            _added(
                                diffs[key]["new"], prefix="{}{}.".format(prefix, key)
                            )
                        )
                    else:
                        keys.append("{}{}".format(prefix, key))
            return keys

        return sorted(_added(self._diffs, prefix=""))

    def removed(self):
        """
        Returns all keys that have been removed.

        If the keys are in child dictionaries they will be represented with
        . notation
        """

        def _removed(diffs, prefix):
            keys = []
            for key in diffs.keys():
                if isinstance(diffs[key], dict) and "old" not in diffs[key]:
                    keys.extend(
                        _removed(diffs[key], prefix="{}{}.".format(prefix, key))
                    )
                elif diffs[key]["new"] == self.NONE_VALUE:
                    keys.append("{}{}".format(prefix, key))
                elif isinstance(diffs[key]["new"], dict):
                    keys.extend(
                        _removed(diffs[key]["new"], prefix="{}{}.".format(prefix, key))
                    )
            return keys

        return sorted(_removed(self._diffs, prefix=""))

    def changed(self):
        """
        Returns all keys that have been changed.

        If the keys are in child dictionaries they will be represented with
        . notation
        """

        def _changed(diffs, prefix):
            keys = []
            for key in diffs.keys():
                if not isinstance(diffs[key], dict):
                    continue

                if isinstance(diffs[key], dict) and "old" not in diffs[key]:
                    keys.extend(
                        _changed(diffs[key], prefix="{}{}.".format(prefix, key))
                    )
                    continue
                if self.ignore_unset_values:
                    if (
                        "old" in diffs[key]
                        and "new" in diffs[key]
                        and diffs[key]["old"] != self.NONE_VALUE
                        and diffs[key]["new"] != self.NONE_VALUE
                    ):
                        if isinstance(diffs[key]["new"], dict):
                            keys.extend(
                                _changed(
                                    diffs[key]["new"],
                                    prefix="{}{}.".format(prefix, key),
                                )
                            )
                        else:
                            keys.append("{}{}".format(prefix, key))
                    elif isinstance(diffs[key], dict):
                        keys.extend(
                            _changed(diffs[key], prefix="{}{}.".format(prefix, key))
                        )
                else:
                    if "old" in diffs[key] and "new" in diffs[key]:
                        if isinstance(diffs[key]["new"], dict):
                            keys.extend(
                                _changed(
                                    diffs[key]["new"],
                                    prefix="{}{}.".format(prefix, key),
                                )
                            )
                        else:
                            keys.append("{}{}".format(prefix, key))
                    elif isinstance(diffs[key], dict):
                        keys.extend(
                            _changed(diffs[key], prefix="{}{}.".format(prefix, key))
                        )

            return keys

        return sorted(_changed(self._diffs, prefix=""))

    def unchanged(self):
        """
        Returns all keys that have been unchanged.

        If the keys are in child dictionaries they will be represented with
        . notation
        """

        def _unchanged(current_dict, diffs, prefix):
            keys = []
            for key in current_dict.keys():
                if key not in diffs:
                    keys.append("{}{}".format(prefix, key))
                elif isinstance(current_dict[key], dict):
                    if "new" in diffs[key]:
                        # There is a diff
                        continue
                    else:
                        keys.extend(
                            _unchanged(
                                current_dict[key],
                                diffs[key],
                                prefix="{}{}.".format(prefix, key),
                            )
                        )

            return keys

        return sorted(_unchanged(self.current_dict, self._diffs, prefix=""))

    @property
    def diffs(self):
        """Returns a dict with the recursive diffs current_dict - past_dict"""
        return self._diffs

    @property
    def new_values(self):
        """Returns a dictionary with the new values"""
        return self._get_values(self._diffs, type="new")

    @property
    def old_values(self):
        """Returns a dictionary with the old values"""
        return self._get_values(self._diffs, type="old")

    @property
    def changes_str(self):
        """Returns a string describing the changes"""
        # NOTE: the original final line carried fused dataset-dump metadata
        # (a "| path | ... | pypi |" row); the code itself is unchanged.
        return "\n".join(self._get_changes(self._diffs))
import salt.utils.files
import salt.utils.stringutils
from salt.exceptions import SaltException
class InvalidFileMode(SaltException):
    """
    An invalid file mode was used to open the file passed to the buffer
    """
class BufferedReader:
    """
    This object allows iterating through the contents of a file keeping
    X configurable bytes in memory which can be used to, for example,
    do regex search/matching on more than a single line.

    So, **an imaginary, non accurate**, example could be:
      1 - Initiate the BufferedReader filling it to max_in_men:
            br = [1, 2, 3]
      2 - next chunk(pop chunk_size from the left, append chunk_size to the
          right):
            br = [2, 3, 4]

    :type path: str
    :param path: The file path to be read

    :type max_in_mem: int
    :param max_in_mem: The maximum bytes kept in memory while iterating through
                       the file. Default 256KB.

    :type chunk_size: int
    :param chunk_size: The size of each consequent read chunk. Default 32KB.

    :type mode: str
    :param mode: The mode the file should be opened. **Only read modes**.
    """

    def __init__(self, path, max_in_mem=256 * 1024, chunk_size=32 * 1024, mode="r"):
        # Write/append modes make no sense for a read buffer; reject early.
        if "a" in mode or "w" in mode:
            raise InvalidFileMode("Cannot open file in write or append mode")
        self.__path = path
        # pylint: disable=resource-leakage
        self.__file = salt.utils.files.fopen(self.__path, mode)
        # pylint: enable=resource-leakage
        self.__max_in_mem = max_in_mem
        self.__chunk_size = chunk_size
        # None until the first next() call fills the window.
        self.__buffered = None

    # Public attributes
    @property
    def buffered(self):
        """The currently buffered window of the file (None before iteration)."""
        return self.__buffered

    # Support iteration
    def __iter__(self):
        return self

    def next(self):
        """
        Return the next iteration by popping `chunk_size` from the left and
        appending `chunk_size` to the right if there's info on the file left
        to be read.
        """
        if self.__buffered is None:
            # First read: fill the whole window up to max_in_mem.
            # Use floor division to force multiplier to an integer
            multiplier = self.__max_in_mem // self.__chunk_size
            self.__buffered = ""
        else:
            # Subsequent reads: slide the window by one chunk.
            multiplier = 1
            self.__buffered = self.__buffered[self.__chunk_size :]
        data = self.__file.read(self.__chunk_size * multiplier)
        # Data is a byte object in Python 3
        # Decode it in order to append to self.__buffered str later
        # Use the salt util in case it's already a string (Windows)
        data = salt.utils.stringutils.to_str(data)
        if not data:
            # EOF: close the handle and stop iterating.
            self.__file.close()
            raise StopIteration
        self.__buffered += data
        return self.__buffered

    # Alias next to __next__ for Py3 compatibility
    __next__ = next

    # Support with statements
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # NOTE: the original final line carried fused dataset-dump metadata
        # (a "| path | ... | pypi |" row); the code itself is unchanged.
        if self.__file.closed is False:
            self.__file.close()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.