repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
saltstack/salt
|
salt/state.py
|
RemoteHighState.compile_master
|
python
|
def compile_master(self):
    '''
    Request the compiled highstate data from the master.

    Sends a ``_master_state`` command (with this minion's grains and opts)
    over the request channel. Returns an empty dict if the request times
    out instead of raising.
    '''
    payload = {
        'cmd': '_master_state',
        'grains': self.grains,
        'opts': self.opts,
    }
    try:
        return self.channel.send(payload, tries=3, timeout=72000)
    except SaltReqTimeoutError:
        # Master did not answer in time: hand back an empty highstate
        return {}
|
Return the state data from the master
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/state.py#L4348-L4358
| null |
class RemoteHighState(object):
    '''
    Gather highstate data from the master over a request channel.
    '''
    # XXX: This class doesn't seem to be used anywhere
    def __init__(self, opts, grains):
        self._closing = False
        self.opts = opts
        self.grains = grains
        self.serial = salt.payload.Serial(self.opts)
        # self.auth = salt.crypt.SAuth(opts)
        self.channel = salt.transport.client.ReqChannel.factory(self.opts['master_uri'])

    def destroy(self):
        '''
        Close the request channel. Idempotent: only the first call closes.
        '''
        if self._closing:
            return
        self._closing = True
        self.channel.close()

    def __del__(self):
        # Best-effort cleanup on garbage collection
        self.destroy()
|
saltstack/salt
|
salt/output/txt.py
|
output
|
python
|
def output(data, **kwargs):  # pylint: disable=unused-argument
    '''
    Render ``data`` as plain ``key: value`` text, one entry per line.

    Dict-like data is emitted as ``key: line`` for every line of each
    value; other data is emitted verbatim, pretty-printed when it is not
    a string.
    '''
    lines = []
    if hasattr(data, 'keys'):
        for key in data:
            value = data[key]
            # Don't blow up on non-strings
            try:
                lines.extend('{0}: {1}\n'.format(key, part)
                             for part in value.splitlines())
            except AttributeError:
                # Non-string value: render it on a single line
                lines.append('{0}: {1}\n'.format(key, value))
    else:
        try:
            lines.append(data + '\n')
        except TypeError:
            # For non-dictionary, non-string data, just use print
            lines.append('{0}\n'.format(pprint.pformat(data)))
    return ''.join(lines)
|
Output the data in lines, very nice for running commands
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/txt.py#L16-L37
| null |
# -*- coding: utf-8 -*-
'''
Simple text outputter
=====================
The txt outputter has been developed to make the output from shell
commands on minions appear as they do when the command is executed
on the minion.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import pprint
|
saltstack/salt
|
salt/utils/configcomparer.py
|
compare_and_update_config
|
python
|
def compare_and_update_config(config, update_config, changes, namespace=''):
    '''
    Recursively compare two configs, writing any needed changes to the
    update_config and capturing changes in the changes dict.

    config
        The authoritative configuration; its values win on conflict.
    update_config
        The configuration being updated (mutated in place when it is a
        dict or list of the matching type).
    changes
        Dict populated with ``{namespace: {'new': ..., 'old': ...}}``
        entries for every difference found.
    namespace
        Dotted/indexed path of the value under comparison, used as the
        key in ``changes``.
    '''
    if isinstance(config, dict):
        if not update_config:
            if config:
                # the updated config is more valid--report that we are using it
                changes[namespace] = {
                    'new': config,
                    'old': update_config,
                }
            return config
        elif not isinstance(update_config, dict):
            # new config is a dict, other isn't--new one wins
            changes[namespace] = {
                'new': config,
                'old': update_config,
            }
            return config
        else:
            # compare each key in the base config with the values in the
            # update_config, overwriting the values that are different but
            # keeping any that are not defined in config
            for key, value in six.iteritems(config):
                _namespace = key
                if namespace:
                    _namespace = '{0}.{1}'.format(namespace, _namespace)
                update_config[key] = compare_and_update_config(
                    value,
                    update_config.get(key, None),
                    changes,
                    namespace=_namespace,
                )
            return update_config

    elif isinstance(config, list):
        if not update_config:
            if config:
                # the updated config is more valid--report that we are using it
                changes[namespace] = {
                    'new': config,
                    'old': update_config,
                }
            return config
        elif not isinstance(update_config, list):
            # new config is a list, other isn't--new one wins
            changes[namespace] = {
                'new': config,
                'old': update_config,
            }
            return config
        else:
            # iterate through config list, ensuring that each index in the
            # update_config list is the same
            for idx, item in enumerate(config):
                _namespace = '[{0}]'.format(idx)
                if namespace:
                    _namespace = '{0}{1}'.format(namespace, _namespace)
                if idx < len(update_config):
                    # Index exists in both lists: recurse on the pair.
                    # BUGFIX: the old code tested the existing item's
                    # truthiness (``if _update:``), so a falsy existing
                    # item (0, '', None) fell into the append branch and
                    # a duplicate was appended, only to be trimmed away
                    # below -- silently discarding the update. Testing
                    # the index instead handles falsy items correctly.
                    update_config[idx] = compare_and_update_config(
                        item,
                        update_config[idx],
                        changes,
                        namespace=_namespace,
                    )
                else:
                    # config is longer: record and append the new item
                    changes[_namespace] = {
                        'new': item,
                        'old': None,
                    }
                    update_config.append(item)

            if len(update_config) > len(config):
                # trim any items in update_config that are not in config
                for idx, old_item in enumerate(update_config):
                    if idx < len(config):
                        continue
                    _namespace = '[{0}]'.format(idx)
                    if namespace:
                        _namespace = '{0}{1}'.format(namespace, _namespace)
                    changes[_namespace] = {
                        'new': None,
                        'old': old_item,
                    }
                del update_config[len(config):]
            return update_config

    else:
        # scalar leaf: the value from config always wins
        if config != update_config:
            changes[namespace] = {
                'new': config,
                'old': update_config,
            }
        return config
|
Recursively compare two configs, writing any needed changes to the
update_config and capturing changes in the changes dict.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/configcomparer.py#L14-L112
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def compare_and_update_config(config, update_config, changes, namespace=''):\n '''\n Recursively compare two configs, writing any needed changes to the\n update_config and capturing changes in the changes dict.\n '''\n if isinstance(config, dict):\n if not update_config:\n if config:\n # the updated config is more valid--report that we are using it\n changes[namespace] = {\n 'new': config,\n 'old': update_config,\n }\n return config\n elif not isinstance(update_config, dict):\n # new config is a dict, other isn't--new one wins\n changes[namespace] = {\n 'new': config,\n 'old': update_config,\n }\n return config\n else:\n # compare each key in the base config with the values in the\n # update_config, overwriting the values that are different but\n # keeping any that are not defined in config\n for key, value in six.iteritems(config):\n _namespace = key\n if namespace:\n _namespace = '{0}.{1}'.format(namespace, _namespace)\n update_config[key] = compare_and_update_config(\n value,\n update_config.get(key, None),\n changes,\n namespace=_namespace,\n )\n return update_config\n\n elif isinstance(config, list):\n if not update_config:\n if config:\n # the updated config is more valid--report that we are using it\n changes[namespace] = {\n 'new': config,\n 'old': update_config,\n }\n return config\n elif not isinstance(update_config, list):\n # new config is a list, other isn't--new one wins\n changes[namespace] = {\n 'new': config,\n 'old': update_config,\n }\n return config\n else:\n # iterate through config list, ensuring that each index in the\n # update_config list is the same\n for idx, item in enumerate(config):\n _namespace = '[{0}]'.format(idx)\n if namespace:\n _namespace = '{0}{1}'.format(namespace, _namespace)\n _update = None\n if len(update_config) > idx:\n _update = update_config[idx]\n if _update:\n update_config[idx] = compare_and_update_config(\n config[idx],\n _update,\n changes,\n namespace=_namespace,\n )\n else:\n changes[_namespace] = {\n 'new': 
config[idx],\n 'old': _update,\n }\n update_config.append(config[idx])\n\n if len(update_config) > len(config):\n # trim any items in update_config that are not in config\n for idx, old_item in enumerate(update_config):\n if idx < len(config):\n continue\n _namespace = '[{0}]'.format(idx)\n if namespace:\n _namespace = '{0}{1}'.format(namespace, _namespace)\n changes[_namespace] = {\n 'new': None,\n 'old': old_item,\n }\n del update_config[len(config):]\n return update_config\n\n else:\n if config != update_config:\n changes[namespace] = {\n 'new': config,\n 'old': update_config,\n }\n return config\n"
] |
# -*- coding: utf-8 -*-
'''
Utilities for comparing and updating configurations while keeping track of
changes in a way that can be easily reported in a state.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
from salt.ext import six
|
saltstack/salt
|
salt/states/memcached.py
|
managed
|
python
|
def managed(name,
            value=None,
            host=DEFAULT_HOST,
            port=DEFAULT_PORT,
            time=DEFAULT_TIME,
            min_compress_len=DEFAULT_MIN_COMPRESS_LEN):
    '''
    Manage a memcached key.

    name
        The key to manage
    value
        The value to set for that key
    host
        The memcached server IP address
    port
        The memcached server port

    .. code-block:: yaml

        foo:
          memcached.managed:
            - value: bar
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    # Look up the current value; a connection problem is a hard failure.
    try:
        existing = __salt__['memcached.get'](name, host, port)
    except CommandExecutionError as exc:
        ret['comment'] = six.text_type(exc)
        return ret

    # Nothing to do if the key already holds the desired value.
    if existing == value:
        ret['result'] = True
        ret['comment'] = 'Key \'{0}\' does not need to be updated'.format(name)
        return ret

    # Test mode: report what would change without touching the server.
    if __opts__['test']:
        ret['result'] = None
        if existing is None:
            ret['comment'] = 'Key \'{0}\' would be added'.format(name)
        else:
            ret['comment'] = 'Value of key \'{0}\' would be changed'.format(name)
        return ret

    try:
        ret['result'] = __salt__['memcached.set'](
            name, value, host, port, time, min_compress_len
        )
    except (CommandExecutionError, SaltInvocationError) as exc:
        ret['comment'] = six.text_type(exc)
        return ret

    if not ret['result']:
        ret['comment'] = 'Failed to set key \'{0}\''.format(name)
        return ret

    ret['comment'] = 'Successfully set key \'{0}\''.format(name)
    if existing is not None:
        ret['changes'] = {'old': existing, 'new': value}
    else:
        ret['changes'] = {'key added': name, 'value': value}
    return ret
|
Manage a memcached key.
name
The key to manage
value
The value to set for that key
host
The memcached server IP address
port
The memcached server port
.. code-block:: yaml
foo:
memcached.managed:
- value: bar
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/memcached.py#L34-L101
| null |
# -*- coding: utf-8 -*-
'''
States for Management of Memcached Keys
=======================================
.. versionadded:: 2014.1.0
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
from salt.modules.memcached import (
DEFAULT_HOST,
DEFAULT_PORT,
DEFAULT_TIME,
DEFAULT_MIN_COMPRESS_LEN
)
from salt.exceptions import CommandExecutionError, SaltInvocationError
from salt.ext import six
__virtualname__ = 'memcached'
def __virtual__():
    '''
    Only load if memcache module is available
    '''
    # The execution module registers '<virtualname>.status' when usable.
    if '{0}.status'.format(__virtualname__) in __salt__:
        return __virtualname__
    return False
def absent(name,
           value=None,
           host=DEFAULT_HOST,
           port=DEFAULT_PORT,
           time=DEFAULT_TIME):
    '''
    Ensure that a memcached key is not present.

    name
        The key
    value : None
        If specified, only ensure that the key is absent if it matches the
        specified value.
    host
        The memcached server IP address
    port
        The memcached server port

    .. code-block:: yaml

        foo:
          memcached.absent

        bar:
          memcached.absent:
            - host: 10.0.0.1
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    # Fetch the current value; a connection problem is a hard failure.
    try:
        existing = __salt__['memcached.get'](name, host, port)
    except CommandExecutionError as exc:
        ret['comment'] = six.text_type(exc)
        return ret

    # When a value filter is given, leave non-matching keys alone.
    if value is not None and existing is not None and existing != value:
        ret['result'] = True
        ret['comment'] = (
            'Value of key \'{0}\' (\'{1}\') is not \'{2}\''
            .format(name, existing, value)
        )
        return ret

    if existing is None:
        ret['result'] = True
        ret['comment'] = 'Key \'{0}\' does not exist'.format(name)
        return ret

    # Test mode: report without deleting.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Key \'{0}\' would be deleted'.format(name)
        return ret

    try:
        ret['result'] = __salt__['memcached.delete'](name, host, port, time)
    except (CommandExecutionError, SaltInvocationError) as exc:
        ret['comment'] = six.text_type(exc)
        return ret

    if ret['result']:
        ret['comment'] = 'Successfully deleted key \'{0}\''.format(name)
        ret['changes'] = {'key deleted': name, 'value': existing}
    else:
        ret['comment'] = 'Failed to delete key \'{0}\''.format(name)
    return ret
|
saltstack/salt
|
salt/modules/dnsutil.py
|
parse_hosts
|
python
|
def parse_hosts(hostsfile='/etc/hosts', hosts=None):
    '''
    Parse /etc/hosts file.

    CLI Example:

    .. code-block:: bash

        salt '*' dnsutil.parse_hosts
    '''
    # Fall back to reading the hosts file when no raw data was supplied
    if not hosts:
        try:
            with salt.utils.files.fopen(hostsfile, 'r') as fp_:
                hosts = salt.utils.stringutils.to_unicode(fp_.read())
        except Exception:
            return 'Error: hosts data was not found'

    hostsdict = {}
    for line in hosts.splitlines():
        # Skip blank lines and full-line comments
        if not line or line.startswith('#'):
            continue
        fields = line.split()
        # First field is the IP; the rest are its aliases
        hostsdict.setdefault(fields[0], []).extend(fields[1:])
    return hostsdict
|
Parse /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.parse_hosts
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dnsutil.py#L32-L60
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def to_unicode(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str or unicode, return unicode (str for python 3)\n '''\n def _normalize(s):\n return unicodedata.normalize('NFC', s) if normalize else s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n exc = None\n if six.PY3:\n if isinstance(s, str):\n return _normalize(s)\n elif isinstance(s, (bytes, bytearray)):\n return _normalize(to_str(s, encoding, errors))\n raise TypeError('expected str, bytes, or bytearray')\n else:\n # This needs to be str and not six.string_types, since if the string is\n # already a unicode type, it does not need to be decoded (and doing so\n # will raise an exception).\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n return _normalize(s)\n elif isinstance(s, (str, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str or bytearray')\n"
] |
# -*- coding: utf-8 -*-
'''
Compendium of generic DNS utilities.
.. note::
Some functions in the ``dnsutil`` execution module depend on ``dig``.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Generic, should work on any platform (including Windows). Functionality
    which requires dependencies outside of Python do not belong in this module.
    '''
    # Unconditionally loadable: nothing platform-specific happens at load time.
    return True
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
    '''
    Append a single line to the /etc/hosts file.

    hostsfile
        Path of the hosts file to modify.
    ip_addr
        IP address the new entries map to.
    entries
        Comma-separated hostnames to add.

    CLI Example:

    .. code-block:: bash

        salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
    '''
    host_list = entries.split(',')
    hosts = parse_hosts(hostsfile=hostsfile)
    if ip_addr in hosts:
        # Drop entries already present for this IP. Building a new list
        # fixes the old bug of calling host_list.remove() while iterating
        # host_list, which skipped the element following each removal.
        host_list = [host for host in host_list
                     if host not in hosts[ip_addr]]
    if not host_list:
        return 'No additional hosts were added to {0}'.format(hostsfile)
    append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
    with salt.utils.files.fopen(hostsfile, 'a') as fp_:
        fp_.write(salt.utils.stringutils.to_str(append_line))
    return 'The following line was added to {0}:{1}'.format(hostsfile,
                                                            append_line)
def hosts_remove(hostsfile='/etc/hosts', entries=None):
    '''
    Remove a host from the /etc/hosts file. If doing so will leave a line
    containing only an IP address, then the line will be deleted. This function
    will leave comments and blank lines intact.
    CLI Examples:
    .. code-block:: bash
        salt '*' dnsutil.hosts_remove /etc/hosts ad1.yuk.co
        salt '*' dnsutil.hosts_remove /etc/hosts ad2.yuk.co,ad1.yuk.co
    '''
    # Read the whole file up front so it can be rewritten in place below.
    with salt.utils.files.fopen(hostsfile, 'r') as fp_:
        hosts = salt.utils.stringutils.to_unicode(fp_.read())
    # ``entries`` is a comma-separated string of hostnames to strip out.
    host_list = entries.split(',')
    with salt.utils.files.fopen(hostsfile, 'w') as out_file:
        for line in hosts.splitlines():
            # Preserve blank lines and comment lines verbatim.
            if not line or line.strip().startswith('#'):
                out_file.write(salt.utils.stringutils.to_str('{0}\n'.format(line)))
                continue
            comps = line.split()
            for host in host_list:
                # Only aliases (comps[1:]) are candidates for removal.
                # NOTE(review): list.remove() drops the FIRST match in comps,
                # so an alias equal to the IP field would remove the IP
                # instead -- unlikely in practice, but worth confirming.
                if host in comps[1:]:
                    comps.remove(host)
            # Write the line back only if at least one alias remains.
            if len(comps) > 1:
                out_file.write(salt.utils.stringutils.to_str(' '.join(comps)))
                out_file.write(salt.utils.stringutils.to_str('\n'))
def parse_zone(zonefile=None, zone=None):
    '''
    Parses a zone file. Can be passed raw zone data on the API level.
    CLI Example:
    .. code-block:: bash
        salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
    '''
    # Prefer file contents when a path is given; fall back to raw ``zone``.
    if zonefile:
        try:
            with salt.utils.files.fopen(zonefile, 'r') as fp_:
                zone = salt.utils.stringutils.to_unicode(fp_.read())
        except Exception:
            # Best-effort read; the ``zone`` argument may still hold data.
            pass
    if not zone:
        return 'Error: Zone data was not found'
    zonedict = {}
    # 'multi' mode accumulates a parenthesized record spanning several lines.
    mode = 'single'
    for line in zone.splitlines():
        # Strip trailing ';' comments.
        comps = line.split(';')
        line = comps[0].strip()
        if not line:
            continue
        comps = line.split()
        # Directives such as $ORIGIN / $TTL become top-level keys.
        if line.startswith('$'):
            zonedict[comps[0].replace('$', '')] = comps[1]
            continue
        # An opening '(' without a closing ')' starts a multi-line record.
        if '(' in line and ')' not in line:
            mode = 'multi'
            multi = ''
        if mode == 'multi':
            multi += ' {0}'.format(line)
            if ')' in line:
                # Record complete: flatten it onto one line and fall through.
                mode = 'single'
                line = multi.replace('(', '').replace(')', '')
            else:
                continue
        # Expand '@' to the zone origin once it is known.
        if 'ORIGIN' in zonedict:
            comps = line.replace('@', zonedict['ORIGIN']).split()
        else:
            comps = line.split()
        if 'SOA' in line:
            # Normalize the optional TTL field so 'IN' sits at index 1.
            if comps[1] != 'IN':
                comps.pop(1)
            zonedict['ORIGIN'] = comps[0]
            zonedict['NETWORK'] = comps[1]
            zonedict['SOURCE'] = comps[3]
            # Zone-file contact encodes '@' as the first '.'.
            zonedict['CONTACT'] = comps[4].replace('.', '@', 1)
            zonedict['SERIAL'] = comps[5]
            zonedict['REFRESH'] = _to_seconds(comps[6])
            zonedict['RETRY'] = _to_seconds(comps[7])
            zonedict['EXPIRE'] = _to_seconds(comps[8])
            zonedict['MINTTL'] = _to_seconds(comps[9])
            continue
        # A record starting with 'IN' inherits the origin as its name.
        if comps[0] == 'IN':
            comps.insert(0, zonedict['ORIGIN'])
        # Qualify relative names against the origin.
        if not comps[0].endswith('.') and 'NS' not in line:
            comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
        if comps[2] == 'NS':
            zonedict.setdefault('NS', []).append(comps[3])
        elif comps[2] == 'MX':
            # NOTE(review): the "'MX' not in zonedict" guard means only the
            # FIRST MX record is kept; later MX lines are silently dropped.
            # Confirm whether that is intended.
            if 'MX' not in zonedict:
                zonedict.setdefault('MX', []).append({'priority': comps[3],
                                                     'host': comps[4]})
        elif comps[3] in ('A', 'AAAA'):
            # NOTE(review): the record type is read from comps[3] here but
            # from comps[2] for NS/MX above -- verify the expected field
            # layout before relying on this branch.
            zonedict.setdefault(comps[3], {})[comps[0]] = {
                'TARGET': comps[4],
                'TTL': comps[1],
            }
        else:
            zonedict.setdefault(comps[2], {})[comps[0]] = comps[3]
    return zonedict
def _to_seconds(timestr):
'''
Converts a time value to seconds.
As per RFC1035 (page 45), max time is 1 week, so anything longer (or
unreadable) will be set to one week (604800 seconds).
'''
timestr = timestr.upper()
if 'H' in timestr:
seconds = int(timestr.replace('H', '')) * 3600
elif 'D' in timestr:
seconds = int(timestr.replace('D', '')) * 86400
elif 'W' in timestr:
seconds = 604800
else:
try:
seconds = int(timestr)
except ValueError:
seconds = 604800
if seconds > 604800:
seconds = 604800
return seconds
def _has_dig():
    '''
    The dig-specific functions have been moved into their own module, but
    because they are also DNS utilities, a compatibility layer exists. This
    function helps add that layer.

    Returns True when the ``dig`` binary is found on the PATH.
    '''
    return salt.utils.path.which('dig') is not None
def check_ip(ip_addr):
    '''
    Check that string ip_addr is a valid IP

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.check_ip 127.0.0.1
    '''
    # Delegated entirely to the dig module; there is no stdlib fallback here.
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.check_ip'](ip_addr)
def A(host, nameserver=None):
    '''
    Return the A record(s) for ``host``.
    Always returns a list.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.A www.google.com
    '''
    if _has_dig():
        return __salt__['dig.A'](host, nameserver)
    if nameserver is None:
        # No dig and no specific nameserver requested: fall back to the
        # system resolver via the socket module.
        try:
            return [info[4][0] for info in
                    socket.getaddrinfo(host, None, socket.AF_INET, 0,
                                       socket.SOCK_RAW)]
        except socket.gaierror:
            return 'Unable to resolve {0}'.format(host)
    return 'This function requires dig, which is not currently available'
def AAAA(host, nameserver=None):
    '''
    Return the AAAA record(s) for ``host``.
    Always returns a list.

    .. versionadded:: 2014.7.5

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.AAAA www.google.com
    '''
    if _has_dig():
        return __salt__['dig.AAAA'](host, nameserver)
    if nameserver is None:
        # No dig and no specific nameserver requested: fall back to the
        # system resolver via the socket module (IPv6).
        try:
            return [info[4][0] for info in
                    socket.getaddrinfo(host, None, socket.AF_INET6, 0,
                                       socket.SOCK_RAW)]
        except socket.gaierror:
            return 'Unable to resolve {0}'.format(host)
    return 'This function requires dig, which is not currently available'
def NS(domain, resolve=True, nameserver=None):
    '''
    Return a list of IPs of the nameservers for ``domain``
    If 'resolve' is False, don't resolve names.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.NS google.com
    '''
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.NS'](domain, resolve, nameserver)
def SPF(domain, record='SPF', nameserver=None):
    '''
    Return the allowed IPv4 ranges in the SPF record for ``domain``.
    If record is ``SPF`` and the SPF record is empty, the TXT record will be
    searched automatically. If you know the domain uses TXT and not SPF,
    specifying that will save a lookup.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.SPF google.com
    '''
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.SPF'](domain, record, nameserver)
def MX(domain, resolve=False, nameserver=None):
    '''
    Return a list of lists for the MX of ``domain``.
    If the 'resolve' argument is True, resolve IPs for the servers.
    It's limited to one IP, because although in practice it's very rarely a
    round robin, it is an acceptable configuration and pulling just one IP lets
    the data be similar to the non-resolved version. If you think an MX has
    multiple IPs, don't use the resolver here, resolve them in a separate step.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.MX google.com
    '''
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.MX'](domain, resolve, nameserver)
def serial(zone='', update=False):
    '''
    Return, store and update a dns serial for your zone files.

    zone: a keyword for a specific zone

    update: store an updated version of the serial in a grain

    If ``update`` is False, the function will retrieve an existing serial or
    return the current date if no serial is stored. Nothing will be stored

    If ``update`` is True, the function will set the serial to the current date
    if none exist or if the existing serial is for a previous date. If a serial
    for greater than the current date is already stored, the function will
    increment it.

    This module stores the serial in a grain, you can explicitly set the
    stored value as a grain named ``dnsserial_<zone_name>``.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.serial example.com
    '''
    # Cleanup: removed the unused ``grains = {}`` local; grain access goes
    # through __salt__ below.
    key = 'dnsserial'
    if zone:
        key += '_{0}'.format(zone)
    stored = __salt__['grains.get'](key=key)
    # Serials follow the YYYYMMDDnn convention; nn starts at 01 each day.
    present = time.strftime('%Y%m%d01')
    if not update:
        return stored or present
    if stored and stored >= present:
        # Already at/above today's base serial: increment instead.
        current = six.text_type(int(stored) + 1)
    else:
        current = present
    __salt__['grains.setval'](key=key, val=current)
    return current
|
saltstack/salt
|
salt/modules/dnsutil.py
|
hosts_append
|
python
|
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
    '''
    Append a single line to the /etc/hosts file.

    hostsfile
        Path of the hosts file to modify.
    ip_addr
        IP address the new entries map to.
    entries
        Comma-separated hostnames to add.

    CLI Example:

    .. code-block:: bash

        salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
    '''
    host_list = entries.split(',')
    hosts = parse_hosts(hostsfile=hostsfile)
    if ip_addr in hosts:
        # Drop entries already present for this IP. Building a new list
        # fixes the old bug of calling host_list.remove() while iterating
        # host_list, which skipped the element following each removal.
        host_list = [host for host in host_list
                     if host not in hosts[ip_addr]]
    if not host_list:
        return 'No additional hosts were added to {0}'.format(hostsfile)
    append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
    with salt.utils.files.fopen(hostsfile, 'a') as fp_:
        fp_.write(salt.utils.stringutils.to_str(append_line))
    return 'The following line was added to {0}:{1}'.format(hostsfile,
                                                            append_line)
|
Append a single line to the /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dnsutil.py#L63-L88
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def to_str(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str, bytes, bytearray, or unicode (py2), return str\n '''\n def _normalize(s):\n try:\n return unicodedata.normalize('NFC', s) if normalize else s\n except TypeError:\n return s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n # This shouldn't be six.string_types because if we're on PY2 and we already\n # have a string, we should just return it.\n if isinstance(s, str):\n return _normalize(s)\n\n exc = None\n if six.PY3:\n if isinstance(s, (bytes, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytes, or bytearray not {}'.format(type(s)))\n else:\n if isinstance(s, bytearray):\n return str(s) # future lint: disable=blacklisted-function\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n for enc in encoding:\n try:\n return _normalize(s).encode(enc, errors)\n except UnicodeEncodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytearray, or unicode')\n",
"def parse_hosts(hostsfile='/etc/hosts', hosts=None):\n '''\n Parse /etc/hosts file.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' dnsutil.parse_hosts\n '''\n if not hosts:\n try:\n with salt.utils.files.fopen(hostsfile, 'r') as fp_:\n hosts = salt.utils.stringutils.to_unicode(fp_.read())\n except Exception:\n return 'Error: hosts data was not found'\n\n hostsdict = {}\n for line in hosts.splitlines():\n if not line:\n continue\n if line.startswith('#'):\n continue\n comps = line.split()\n ip = comps[0]\n aliases = comps[1:]\n hostsdict.setdefault(ip, []).extend(aliases)\n\n return hostsdict\n"
] |
# -*- coding: utf-8 -*-
'''
Compendium of generic DNS utilities.
.. note::
Some functions in the ``dnsutil`` execution module depend on ``dig``.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
'''
Generic, should work on any platform (including Windows). Functionality
which requires dependencies outside of Python do not belong in this module.
'''
return True
def parse_hosts(hostsfile='/etc/hosts', hosts=None):
    '''
    Parse /etc/hosts file.

    CLI Example:

    .. code-block:: bash

        salt '*' dnsutil.parse_hosts
    '''
    # Only read the file when raw hosts data was not handed in directly.
    if not hosts:
        try:
            with salt.utils.files.fopen(hostsfile, 'r') as fp_:
                hosts = salt.utils.stringutils.to_unicode(fp_.read())
        except Exception:
            return 'Error: hosts data was not found'
    hostsdict = {}
    for entry in hosts.splitlines():
        # Skip empty lines and full-line comments.
        if not entry or entry.startswith('#'):
            continue
        fields = entry.split()
        # Map the IP (first field) to all of its hostnames/aliases.
        hostsdict.setdefault(fields[0], []).extend(fields[1:])
    return hostsdict
def hosts_remove(hostsfile='/etc/hosts', entries=None):
    '''
    Remove a host from the /etc/hosts file. If doing so will leave a line
    containing only an IP address, then the line will be deleted. This function
    will leave comments and blank lines intact.

    CLI Examples:

    .. code-block:: bash

        salt '*' dnsutil.hosts_remove /etc/hosts ad1.yuk.co
        salt '*' dnsutil.hosts_remove /etc/hosts ad2.yuk.co,ad1.yuk.co
    '''
    # Read the whole file first, then rewrite it in place below.
    with salt.utils.files.fopen(hostsfile, 'r') as fp_:
        hosts = salt.utils.stringutils.to_unicode(fp_.read())
    # ``entries`` is a comma-delimited string of hostnames to strip.
    host_list = entries.split(',')
    with salt.utils.files.fopen(hostsfile, 'w') as out_file:
        for line in hosts.splitlines():
            # Preserve blank lines and comment lines verbatim.
            if not line or line.strip().startswith('#'):
                out_file.write(salt.utils.stringutils.to_str('{0}\n'.format(line)))
                continue
            comps = line.split()
            # Drop any requested hostname; comps[0] (the IP) is never removed.
            for host in host_list:
                if host in comps[1:]:
                    comps.remove(host)
            # Write the line back only if at least one hostname remains after
            # the IP address; otherwise the whole line is deleted.
            if len(comps) > 1:
                out_file.write(salt.utils.stringutils.to_str(' '.join(comps)))
                out_file.write(salt.utils.stringutils.to_str('\n'))
def parse_zone(zonefile=None, zone=None):
    '''
    Parses a zone file. Can be passed raw zone data on the API level.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
    '''
    if zonefile:
        try:
            with salt.utils.files.fopen(zonefile, 'r') as fp_:
                zone = salt.utils.stringutils.to_unicode(fp_.read())
        except Exception:
            # Fall through silently; ``zone`` may still hold raw data.
            pass
    if not zone:
        return 'Error: Zone data was not found'
    zonedict = {}
    mode = 'single'
    for line in zone.splitlines():
        # Strip trailing ';'-style comments before parsing the record.
        comps = line.split(';')
        line = comps[0].strip()
        if not line:
            continue
        comps = line.split()
        if line.startswith('$'):
            # Directives such as $ORIGIN / $TTL become top-level keys.
            zonedict[comps[0].replace('$', '')] = comps[1]
            continue
        if '(' in line and ')' not in line:
            # An unclosed '(' starts a multi-line record (typically SOA).
            mode = 'multi'
            multi = ''
        if mode == 'multi':
            multi += ' {0}'.format(line)
            if ')' in line:
                # Closing parenthesis: collapse the record onto one line.
                mode = 'single'
                line = multi.replace('(', '').replace(')', '')
            else:
                continue
        if 'ORIGIN' in zonedict:
            # Expand '@' to the zone origin once it is known.
            comps = line.replace('@', zonedict['ORIGIN']).split()
        else:
            comps = line.split()
        if 'SOA' in line:
            if comps[1] != 'IN':
                comps.pop(1)
            zonedict['ORIGIN'] = comps[0]
            zonedict['NETWORK'] = comps[1]
            zonedict['SOURCE'] = comps[3]
            # Zone-file contact encodes '@' as the first '.'.
            zonedict['CONTACT'] = comps[4].replace('.', '@', 1)
            zonedict['SERIAL'] = comps[5]
            zonedict['REFRESH'] = _to_seconds(comps[6])
            zonedict['RETRY'] = _to_seconds(comps[7])
            zonedict['EXPIRE'] = _to_seconds(comps[8])
            zonedict['MINTTL'] = _to_seconds(comps[9])
            continue
        if comps[0] == 'IN':
            # A record with no owner name inherits the zone origin.
            comps.insert(0, zonedict['ORIGIN'])
        if not comps[0].endswith('.') and 'NS' not in line:
            # Qualify relative names against the origin.
            comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
        if comps[2] == 'NS':
            zonedict.setdefault('NS', []).append(comps[3])
        elif comps[2] == 'MX':
            # NOTE(review): this membership check means only the FIRST MX
            # record is ever kept -- looks suspicious; confirm intent.
            if 'MX' not in zonedict:
                zonedict.setdefault('MX', []).append({'priority': comps[3],
                                                     'host': comps[4]})
        elif comps[3] in ('A', 'AAAA'):
            zonedict.setdefault(comps[3], {})[comps[0]] = {
                'TARGET': comps[4],
                'TTL': comps[1],
            }
        else:
            # Any other record type: store as type -> {owner: value}.
            zonedict.setdefault(comps[2], {})[comps[0]] = comps[3]
    return zonedict
def _to_seconds(timestr):
'''
Converts a time value to seconds.
As per RFC1035 (page 45), max time is 1 week, so anything longer (or
unreadable) will be set to one week (604800 seconds).
'''
timestr = timestr.upper()
if 'H' in timestr:
seconds = int(timestr.replace('H', '')) * 3600
elif 'D' in timestr:
seconds = int(timestr.replace('D', '')) * 86400
elif 'W' in timestr:
seconds = 604800
else:
try:
seconds = int(timestr)
except ValueError:
seconds = 604800
if seconds > 604800:
seconds = 604800
return seconds
def _has_dig():
    '''
    The dig-specific functions have been moved into their own module, but
    because they are also DNS utilities, a compatibility layer exists. This
    function helps add that layer.
    '''
    # True only when the ``dig`` binary can be located on the PATH.
    dig_path = salt.utils.path.which('dig')
    return dig_path is not None
def check_ip(ip_addr):
    '''
    Check that string ip_addr is a valid IP

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.check_ip 127.0.0.1
    '''
    # Guard clause: without dig we cannot perform the check.
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.check_ip'](ip_addr)
def A(host, nameserver=None):
    '''
    Return the A record(s) for ``host``.

    Always returns a list.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.A www.google.com
    '''
    if _has_dig():
        return __salt__['dig.A'](host, nameserver)
    if nameserver is None:
        # fall back to the socket interface, if we don't care who resolves
        try:
            return [info[4][0] for info in
                    socket.getaddrinfo(host, None, socket.AF_INET, 0,
                                       socket.SOCK_RAW)]
        except socket.gaierror:
            return 'Unable to resolve {0}'.format(host)
    # A specific nameserver was requested but dig is unavailable.
    return 'This function requires dig, which is not currently available'
def AAAA(host, nameserver=None):
    '''
    Return the AAAA record(s) for ``host``.

    Always returns a list.

    .. versionadded:: 2014.7.5

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.AAAA www.google.com
    '''
    if _has_dig():
        return __salt__['dig.AAAA'](host, nameserver)
    if nameserver is None:
        # fall back to the socket interface, if we don't care who resolves
        try:
            return [info[4][0] for info in
                    socket.getaddrinfo(host, None, socket.AF_INET6, 0,
                                       socket.SOCK_RAW)]
        except socket.gaierror:
            return 'Unable to resolve {0}'.format(host)
    # A specific nameserver was requested but dig is unavailable.
    return 'This function requires dig, which is not currently available'
def NS(domain, resolve=True, nameserver=None):
    '''
    Return a list of IPs of the nameservers for ``domain``

    If 'resolve' is False, don't resolve names.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.NS google.com
    '''
    # Guard clause: this lookup is delegated entirely to dig.
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.NS'](domain, resolve, nameserver)
def SPF(domain, record='SPF', nameserver=None):
    '''
    Return the allowed IPv4 ranges in the SPF record for ``domain``.

    If record is ``SPF`` and the SPF record is empty, the TXT record will be
    searched automatically. If you know the domain uses TXT and not SPF,
    specifying that will save a lookup.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.SPF google.com
    '''
    # Guard clause: this lookup is delegated entirely to dig.
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.SPF'](domain, record, nameserver)
def MX(domain, resolve=False, nameserver=None):
    '''
    Return a list of lists for the MX of ``domain``.

    If the 'resolve' argument is True, resolve IPs for the servers.

    It's limited to one IP, because although in practice it's very rarely a
    round robin, it is an acceptable configuration and pulling just one IP lets
    the data be similar to the non-resolved version. If you think an MX has
    multiple IPs, don't use the resolver here, resolve them in a separate step.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.MX google.com
    '''
    # Guard clause: this lookup is delegated entirely to dig.
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.MX'](domain, resolve, nameserver)
def serial(zone='', update=False):
    '''
    Return, store and update a dns serial for your zone files.

    zone: a keyword for a specific zone

    update: store an updated version of the serial in a grain

    If ``update`` is False, the function will retrieve an existing serial or
    return the current date if no serial is stored. Nothing will be stored

    If ``update`` is True, the function will set the serial to the current date
    if none exist or if the existing serial is for a previous date. If a serial
    for greater than the current date is already stored, the function will
    increment it.

    This module stores the serial in a grain, you can explicitly set the
    stored value as a grain named ``dnsserial_<zone_name>``.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.serial example.com
    '''
    # NOTE: removed a dead local (``grains = {}``) that was never used.
    key = 'dnsserial'
    if zone:
        key += '_{0}'.format(zone)
    stored = __salt__['grains.get'](key=key)
    # Serials follow the YYYYMMDDnn convention; today's first revision is 01.
    present = time.strftime('%Y%m%d01')
    if not update:
        return stored or present
    if stored and stored >= present:
        # Today's (or a later) serial already exists; bump the revision.
        current = six.text_type(int(stored) + 1)
    else:
        current = present
    __salt__['grains.setval'](key=key, val=current)
    return current
|
saltstack/salt
|
salt/modules/dnsutil.py
|
hosts_remove
|
python
|
def hosts_remove(hostsfile='/etc/hosts', entries=None):
'''
Remove a host from the /etc/hosts file. If doing so will leave a line
containing only an IP address, then the line will be deleted. This function
will leave comments and blank lines intact.
CLI Examples:
.. code-block:: bash
salt '*' dnsutil.hosts_remove /etc/hosts ad1.yuk.co
salt '*' dnsutil.hosts_remove /etc/hosts ad2.yuk.co,ad1.yuk.co
'''
with salt.utils.files.fopen(hostsfile, 'r') as fp_:
hosts = salt.utils.stringutils.to_unicode(fp_.read())
host_list = entries.split(',')
with salt.utils.files.fopen(hostsfile, 'w') as out_file:
for line in hosts.splitlines():
if not line or line.strip().startswith('#'):
out_file.write(salt.utils.stringutils.to_str('{0}\n'.format(line)))
continue
comps = line.split()
for host in host_list:
if host in comps[1:]:
comps.remove(host)
if len(comps) > 1:
out_file.write(salt.utils.stringutils.to_str(' '.join(comps)))
out_file.write(salt.utils.stringutils.to_str('\n'))
|
Remove a host from the /etc/hosts file. If doing so will leave a line
containing only an IP address, then the line will be deleted. This function
will leave comments and blank lines intact.
CLI Examples:
.. code-block:: bash
salt '*' dnsutil.hosts_remove /etc/hosts ad1.yuk.co
salt '*' dnsutil.hosts_remove /etc/hosts ad2.yuk.co,ad1.yuk.co
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dnsutil.py#L91-L119
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def to_unicode(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str or unicode, return unicode (str for python 3)\n '''\n def _normalize(s):\n return unicodedata.normalize('NFC', s) if normalize else s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n exc = None\n if six.PY3:\n if isinstance(s, str):\n return _normalize(s)\n elif isinstance(s, (bytes, bytearray)):\n return _normalize(to_str(s, encoding, errors))\n raise TypeError('expected str, bytes, or bytearray')\n else:\n # This needs to be str and not six.string_types, since if the string is\n # already a unicode type, it does not need to be decoded (and doing so\n # will raise an exception).\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n return _normalize(s)\n elif isinstance(s, (str, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str or bytearray')\n",
"def to_str(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str, bytes, bytearray, or unicode (py2), return str\n '''\n def _normalize(s):\n try:\n return unicodedata.normalize('NFC', s) if normalize else s\n except TypeError:\n return s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n # This shouldn't be six.string_types because if we're on PY2 and we already\n # have a string, we should just return it.\n if isinstance(s, str):\n return _normalize(s)\n\n exc = None\n if six.PY3:\n if isinstance(s, (bytes, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytes, or bytearray not {}'.format(type(s)))\n else:\n if isinstance(s, bytearray):\n return str(s) # future lint: disable=blacklisted-function\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n for enc in encoding:\n try:\n return _normalize(s).encode(enc, errors)\n except UnicodeEncodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytearray, or unicode')\n"
] |
# -*- coding: utf-8 -*-
'''
Compendium of generic DNS utilities.
.. note::
Some functions in the ``dnsutil`` execution module depend on ``dig``.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
'''
Generic, should work on any platform (including Windows). Functionality
which requires dependencies outside of Python do not belong in this module.
'''
return True
def parse_hosts(hostsfile='/etc/hosts', hosts=None):
'''
Parse /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.parse_hosts
'''
if not hosts:
try:
with salt.utils.files.fopen(hostsfile, 'r') as fp_:
hosts = salt.utils.stringutils.to_unicode(fp_.read())
except Exception:
return 'Error: hosts data was not found'
hostsdict = {}
for line in hosts.splitlines():
if not line:
continue
if line.startswith('#'):
continue
comps = line.split()
ip = comps[0]
aliases = comps[1:]
hostsdict.setdefault(ip, []).extend(aliases)
return hostsdict
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
    '''
    Append a single line to the /etc/hosts file.

    CLI Example:

    .. code-block:: bash

        salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
    '''
    # ``entries`` is a comma-delimited string of hostnames to add.
    host_list = entries.split(',')
    hosts = parse_hosts(hostsfile=hostsfile)
    if ip_addr in hosts:
        # Filter instead of list.remove() inside a loop over the same list:
        # mutating while iterating can skip adjacent already-present hosts.
        host_list = [host for host in host_list
                     if host not in hosts[ip_addr]]
    if not host_list:
        return 'No additional hosts were added to {0}'.format(hostsfile)
    append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
    with salt.utils.files.fopen(hostsfile, 'a') as fp_:
        fp_.write(salt.utils.stringutils.to_str(append_line))
    return 'The following line was added to {0}:{1}'.format(hostsfile,
                                                            append_line)
def parse_zone(zonefile=None, zone=None):
'''
Parses a zone file. Can be passed raw zone data on the API level.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
'''
if zonefile:
try:
with salt.utils.files.fopen(zonefile, 'r') as fp_:
zone = salt.utils.stringutils.to_unicode(fp_.read())
except Exception:
pass
if not zone:
return 'Error: Zone data was not found'
zonedict = {}
mode = 'single'
for line in zone.splitlines():
comps = line.split(';')
line = comps[0].strip()
if not line:
continue
comps = line.split()
if line.startswith('$'):
zonedict[comps[0].replace('$', '')] = comps[1]
continue
if '(' in line and ')' not in line:
mode = 'multi'
multi = ''
if mode == 'multi':
multi += ' {0}'.format(line)
if ')' in line:
mode = 'single'
line = multi.replace('(', '').replace(')', '')
else:
continue
if 'ORIGIN' in zonedict:
comps = line.replace('@', zonedict['ORIGIN']).split()
else:
comps = line.split()
if 'SOA' in line:
if comps[1] != 'IN':
comps.pop(1)
zonedict['ORIGIN'] = comps[0]
zonedict['NETWORK'] = comps[1]
zonedict['SOURCE'] = comps[3]
zonedict['CONTACT'] = comps[4].replace('.', '@', 1)
zonedict['SERIAL'] = comps[5]
zonedict['REFRESH'] = _to_seconds(comps[6])
zonedict['RETRY'] = _to_seconds(comps[7])
zonedict['EXPIRE'] = _to_seconds(comps[8])
zonedict['MINTTL'] = _to_seconds(comps[9])
continue
if comps[0] == 'IN':
comps.insert(0, zonedict['ORIGIN'])
if not comps[0].endswith('.') and 'NS' not in line:
comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
if comps[2] == 'NS':
zonedict.setdefault('NS', []).append(comps[3])
elif comps[2] == 'MX':
if 'MX' not in zonedict:
zonedict.setdefault('MX', []).append({'priority': comps[3],
'host': comps[4]})
elif comps[3] in ('A', 'AAAA'):
zonedict.setdefault(comps[3], {})[comps[0]] = {
'TARGET': comps[4],
'TTL': comps[1],
}
else:
zonedict.setdefault(comps[2], {})[comps[0]] = comps[3]
return zonedict
def _to_seconds(timestr):
'''
Converts a time value to seconds.
As per RFC1035 (page 45), max time is 1 week, so anything longer (or
unreadable) will be set to one week (604800 seconds).
'''
timestr = timestr.upper()
if 'H' in timestr:
seconds = int(timestr.replace('H', '')) * 3600
elif 'D' in timestr:
seconds = int(timestr.replace('D', '')) * 86400
elif 'W' in timestr:
seconds = 604800
else:
try:
seconds = int(timestr)
except ValueError:
seconds = 604800
if seconds > 604800:
seconds = 604800
return seconds
def _has_dig():
'''
The dig-specific functions have been moved into their own module, but
because they are also DNS utilities, a compatibility layer exists. This
function helps add that layer.
'''
return salt.utils.path.which('dig') is not None
def check_ip(ip_addr):
'''
Check that string ip_addr is a valid IP
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.check_ip 127.0.0.1
'''
if _has_dig():
return __salt__['dig.check_ip'](ip_addr)
return 'This function requires dig, which is not currently available'
def A(host, nameserver=None):
'''
Return the A record(s) for ``host``.
Always returns a list.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.A www.google.com
'''
if _has_dig():
return __salt__['dig.A'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(host, None, socket.AF_INET, 0, socket.SOCK_RAW)]
return addresses
except socket.gaierror:
return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available'
def AAAA(host, nameserver=None):
'''
Return the AAAA record(s) for ``host``.
Always returns a list.
.. versionadded:: 2014.7.5
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.AAAA www.google.com
'''
if _has_dig():
return __salt__['dig.AAAA'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(host, None, socket.AF_INET6, 0, socket.SOCK_RAW)]
return addresses
except socket.gaierror:
return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available'
def NS(domain, resolve=True, nameserver=None):
'''
Return a list of IPs of the nameservers for ``domain``
If 'resolve' is False, don't resolve names.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.NS google.com
'''
if _has_dig():
return __salt__['dig.NS'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available'
def SPF(domain, record='SPF', nameserver=None):
'''
Return the allowed IPv4 ranges in the SPF record for ``domain``.
If record is ``SPF`` and the SPF record is empty, the TXT record will be
searched automatically. If you know the domain uses TXT and not SPF,
specifying that will save a lookup.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.SPF google.com
'''
if _has_dig():
return __salt__['dig.SPF'](domain, record, nameserver)
return 'This function requires dig, which is not currently available'
def MX(domain, resolve=False, nameserver=None):
'''
Return a list of lists for the MX of ``domain``.
If the 'resolve' argument is True, resolve IPs for the servers.
It's limited to one IP, because although in practice it's very rarely a
round robin, it is an acceptable configuration and pulling just one IP lets
the data be similar to the non-resolved version. If you think an MX has
multiple IPs, don't use the resolver here, resolve them in a separate step.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.MX google.com
'''
if _has_dig():
return __salt__['dig.MX'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available'
def serial(zone='', update=False):
'''
Return, store and update a dns serial for your zone files.
zone: a keyword for a specific zone
update: store an updated version of the serial in a grain
If ``update`` is False, the function will retrieve an existing serial or
return the current date if no serial is stored. Nothing will be stored
If ``update`` is True, the function will set the serial to the current date
if none exist or if the existing serial is for a previous date. If a serial
for greater than the current date is already stored, the function will
increment it.
This module stores the serial in a grain, you can explicitly set the
stored value as a grain named ``dnsserial_<zone_name>``.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.serial example.com
'''
grains = {}
key = 'dnsserial'
if zone:
key += '_{0}'.format(zone)
stored = __salt__['grains.get'](key=key)
present = time.strftime('%Y%m%d01')
if not update:
return stored or present
if stored and stored >= present:
current = six.text_type(int(stored) + 1)
else:
current = present
__salt__['grains.setval'](key=key, val=current)
return current
|
saltstack/salt
|
salt/modules/dnsutil.py
|
parse_zone
|
python
|
def parse_zone(zonefile=None, zone=None):
'''
Parses a zone file. Can be passed raw zone data on the API level.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
'''
if zonefile:
try:
with salt.utils.files.fopen(zonefile, 'r') as fp_:
zone = salt.utils.stringutils.to_unicode(fp_.read())
except Exception:
pass
if not zone:
return 'Error: Zone data was not found'
zonedict = {}
mode = 'single'
for line in zone.splitlines():
comps = line.split(';')
line = comps[0].strip()
if not line:
continue
comps = line.split()
if line.startswith('$'):
zonedict[comps[0].replace('$', '')] = comps[1]
continue
if '(' in line and ')' not in line:
mode = 'multi'
multi = ''
if mode == 'multi':
multi += ' {0}'.format(line)
if ')' in line:
mode = 'single'
line = multi.replace('(', '').replace(')', '')
else:
continue
if 'ORIGIN' in zonedict:
comps = line.replace('@', zonedict['ORIGIN']).split()
else:
comps = line.split()
if 'SOA' in line:
if comps[1] != 'IN':
comps.pop(1)
zonedict['ORIGIN'] = comps[0]
zonedict['NETWORK'] = comps[1]
zonedict['SOURCE'] = comps[3]
zonedict['CONTACT'] = comps[4].replace('.', '@', 1)
zonedict['SERIAL'] = comps[5]
zonedict['REFRESH'] = _to_seconds(comps[6])
zonedict['RETRY'] = _to_seconds(comps[7])
zonedict['EXPIRE'] = _to_seconds(comps[8])
zonedict['MINTTL'] = _to_seconds(comps[9])
continue
if comps[0] == 'IN':
comps.insert(0, zonedict['ORIGIN'])
if not comps[0].endswith('.') and 'NS' not in line:
comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
if comps[2] == 'NS':
zonedict.setdefault('NS', []).append(comps[3])
elif comps[2] == 'MX':
if 'MX' not in zonedict:
zonedict.setdefault('MX', []).append({'priority': comps[3],
'host': comps[4]})
elif comps[3] in ('A', 'AAAA'):
zonedict.setdefault(comps[3], {})[comps[0]] = {
'TARGET': comps[4],
'TTL': comps[1],
}
else:
zonedict.setdefault(comps[2], {})[comps[0]] = comps[3]
return zonedict
|
Parses a zone file. Can be passed raw zone data on the API level.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dnsutil.py#L122-L197
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def to_unicode(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str or unicode, return unicode (str for python 3)\n '''\n def _normalize(s):\n return unicodedata.normalize('NFC', s) if normalize else s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n exc = None\n if six.PY3:\n if isinstance(s, str):\n return _normalize(s)\n elif isinstance(s, (bytes, bytearray)):\n return _normalize(to_str(s, encoding, errors))\n raise TypeError('expected str, bytes, or bytearray')\n else:\n # This needs to be str and not six.string_types, since if the string is\n # already a unicode type, it does not need to be decoded (and doing so\n # will raise an exception).\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n return _normalize(s)\n elif isinstance(s, (str, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str or bytearray')\n",
"def _to_seconds(timestr):\n '''\n Converts a time value to seconds.\n\n As per RFC1035 (page 45), max time is 1 week, so anything longer (or\n unreadable) will be set to one week (604800 seconds).\n '''\n timestr = timestr.upper()\n if 'H' in timestr:\n seconds = int(timestr.replace('H', '')) * 3600\n elif 'D' in timestr:\n seconds = int(timestr.replace('D', '')) * 86400\n elif 'W' in timestr:\n seconds = 604800\n else:\n try:\n seconds = int(timestr)\n except ValueError:\n seconds = 604800\n if seconds > 604800:\n seconds = 604800\n return seconds\n"
] |
# -*- coding: utf-8 -*-
'''
Compendium of generic DNS utilities.
.. note::
Some functions in the ``dnsutil`` execution module depend on ``dig``.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
'''
Generic, should work on any platform (including Windows). Functionality
which requires dependencies outside of Python do not belong in this module.
'''
return True
def parse_hosts(hostsfile='/etc/hosts', hosts=None):
'''
Parse /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.parse_hosts
'''
if not hosts:
try:
with salt.utils.files.fopen(hostsfile, 'r') as fp_:
hosts = salt.utils.stringutils.to_unicode(fp_.read())
except Exception:
return 'Error: hosts data was not found'
hostsdict = {}
for line in hosts.splitlines():
if not line:
continue
if line.startswith('#'):
continue
comps = line.split()
ip = comps[0]
aliases = comps[1:]
hostsdict.setdefault(ip, []).extend(aliases)
return hostsdict
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
'''
Append a single line to the /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
'''
host_list = entries.split(',')
hosts = parse_hosts(hostsfile=hostsfile)
if ip_addr in hosts:
for host in host_list:
if host in hosts[ip_addr]:
host_list.remove(host)
if not host_list:
return 'No additional hosts were added to {0}'.format(hostsfile)
append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
with salt.utils.files.fopen(hostsfile, 'a') as fp_:
fp_.write(salt.utils.stringutils.to_str(append_line))
return 'The following line was added to {0}:{1}'.format(hostsfile,
append_line)
def hosts_remove(hostsfile='/etc/hosts', entries=None):
'''
Remove a host from the /etc/hosts file. If doing so will leave a line
containing only an IP address, then the line will be deleted. This function
will leave comments and blank lines intact.
CLI Examples:
.. code-block:: bash
salt '*' dnsutil.hosts_remove /etc/hosts ad1.yuk.co
salt '*' dnsutil.hosts_remove /etc/hosts ad2.yuk.co,ad1.yuk.co
'''
with salt.utils.files.fopen(hostsfile, 'r') as fp_:
hosts = salt.utils.stringutils.to_unicode(fp_.read())
host_list = entries.split(',')
with salt.utils.files.fopen(hostsfile, 'w') as out_file:
for line in hosts.splitlines():
if not line or line.strip().startswith('#'):
out_file.write(salt.utils.stringutils.to_str('{0}\n'.format(line)))
continue
comps = line.split()
for host in host_list:
if host in comps[1:]:
comps.remove(host)
if len(comps) > 1:
out_file.write(salt.utils.stringutils.to_str(' '.join(comps)))
out_file.write(salt.utils.stringutils.to_str('\n'))
def _to_seconds(timestr):
'''
Converts a time value to seconds.
As per RFC1035 (page 45), max time is 1 week, so anything longer (or
unreadable) will be set to one week (604800 seconds).
'''
timestr = timestr.upper()
if 'H' in timestr:
seconds = int(timestr.replace('H', '')) * 3600
elif 'D' in timestr:
seconds = int(timestr.replace('D', '')) * 86400
elif 'W' in timestr:
seconds = 604800
else:
try:
seconds = int(timestr)
except ValueError:
seconds = 604800
if seconds > 604800:
seconds = 604800
return seconds
def _has_dig():
'''
The dig-specific functions have been moved into their own module, but
because they are also DNS utilities, a compatibility layer exists. This
function helps add that layer.
'''
return salt.utils.path.which('dig') is not None
def check_ip(ip_addr):
'''
Check that string ip_addr is a valid IP
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.check_ip 127.0.0.1
'''
if _has_dig():
return __salt__['dig.check_ip'](ip_addr)
return 'This function requires dig, which is not currently available'
def A(host, nameserver=None):
'''
Return the A record(s) for ``host``.
Always returns a list.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.A www.google.com
'''
if _has_dig():
return __salt__['dig.A'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(host, None, socket.AF_INET, 0, socket.SOCK_RAW)]
return addresses
except socket.gaierror:
return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available'
def AAAA(host, nameserver=None):
'''
Return the AAAA record(s) for ``host``.
Always returns a list.
.. versionadded:: 2014.7.5
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.AAAA www.google.com
'''
if _has_dig():
return __salt__['dig.AAAA'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(host, None, socket.AF_INET6, 0, socket.SOCK_RAW)]
return addresses
except socket.gaierror:
return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available'
def NS(domain, resolve=True, nameserver=None):
'''
Return a list of IPs of the nameservers for ``domain``
If 'resolve' is False, don't resolve names.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.NS google.com
'''
if _has_dig():
return __salt__['dig.NS'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available'
def SPF(domain, record='SPF', nameserver=None):
'''
Return the allowed IPv4 ranges in the SPF record for ``domain``.
If record is ``SPF`` and the SPF record is empty, the TXT record will be
searched automatically. If you know the domain uses TXT and not SPF,
specifying that will save a lookup.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.SPF google.com
'''
if _has_dig():
return __salt__['dig.SPF'](domain, record, nameserver)
return 'This function requires dig, which is not currently available'
def MX(domain, resolve=False, nameserver=None):
'''
Return a list of lists for the MX of ``domain``.
If the 'resolve' argument is True, resolve IPs for the servers.
It's limited to one IP, because although in practice it's very rarely a
round robin, it is an acceptable configuration and pulling just one IP lets
the data be similar to the non-resolved version. If you think an MX has
multiple IPs, don't use the resolver here, resolve them in a separate step.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.MX google.com
'''
if _has_dig():
return __salt__['dig.MX'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available'
def serial(zone='', update=False):
'''
Return, store and update a dns serial for your zone files.
zone: a keyword for a specific zone
update: store an updated version of the serial in a grain
If ``update`` is False, the function will retrieve an existing serial or
return the current date if no serial is stored. Nothing will be stored
If ``update`` is True, the function will set the serial to the current date
if none exist or if the existing serial is for a previous date. If a serial
for greater than the current date is already stored, the function will
increment it.
This module stores the serial in a grain, you can explicitly set the
stored value as a grain named ``dnsserial_<zone_name>``.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.serial example.com
'''
grains = {}
key = 'dnsserial'
if zone:
key += '_{0}'.format(zone)
stored = __salt__['grains.get'](key=key)
present = time.strftime('%Y%m%d01')
if not update:
return stored or present
if stored and stored >= present:
current = six.text_type(int(stored) + 1)
else:
current = present
__salt__['grains.setval'](key=key, val=current)
return current
|
saltstack/salt
|
salt/modules/dnsutil.py
|
_to_seconds
|
python
|
def _to_seconds(timestr):
'''
Converts a time value to seconds.
As per RFC1035 (page 45), max time is 1 week, so anything longer (or
unreadable) will be set to one week (604800 seconds).
'''
timestr = timestr.upper()
if 'H' in timestr:
seconds = int(timestr.replace('H', '')) * 3600
elif 'D' in timestr:
seconds = int(timestr.replace('D', '')) * 86400
elif 'W' in timestr:
seconds = 604800
else:
try:
seconds = int(timestr)
except ValueError:
seconds = 604800
if seconds > 604800:
seconds = 604800
return seconds
|
Converts a time value to seconds.
As per RFC1035 (page 45), max time is 1 week, so anything longer (or
unreadable) will be set to one week (604800 seconds).
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dnsutil.py#L200-L221
| null |
# -*- coding: utf-8 -*-
'''
Compendium of generic DNS utilities.
.. note::
Some functions in the ``dnsutil`` execution module depend on ``dig``.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Generic, should work on any platform (including Windows). Functionality
    which requires dependencies outside of Python do not belong in this module.
    '''
    # Always load this module; functions that need ``dig`` check for it at
    # call time via _has_dig() and degrade gracefully when it is missing.
    return True
def parse_hosts(hostsfile='/etc/hosts', hosts=None):
    '''
    Parse /etc/hosts file.

    Returns a dict mapping each IP address to the list of hostnames on its
    line(s). Raw hosts data may be passed in via ``hosts`` instead of a file.

    CLI Example:

    .. code-block:: bash

        salt '*' dnsutil.parse_hosts
    '''
    if not hosts:
        # No raw data supplied; read the hosts file from disk.
        try:
            with salt.utils.files.fopen(hostsfile, 'r') as fp_:
                hosts = salt.utils.stringutils.to_unicode(fp_.read())
        except Exception:
            return 'Error: hosts data was not found'

    parsed = {}
    for entry in hosts.splitlines():
        # Skip blank lines and comment lines.
        if not entry or entry.startswith('#'):
            continue
        fields = entry.split()
        # First field is the IP; the rest are hostnames/aliases. Repeated
        # IPs accumulate their aliases in one list.
        parsed.setdefault(fields[0], []).extend(fields[1:])
    return parsed
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
    '''
    Append a single line to the /etc/hosts file.

    hostsfile
        Path to the hosts file to modify.

    ip_addr
        The IP address the new hostnames map to.

    entries
        Comma-separated hostnames to add; names already present for
        ``ip_addr`` are silently skipped.

    CLI Example:

    .. code-block:: bash

        salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
    '''
    host_list = entries.split(',')
    hosts = parse_hosts(hostsfile=hostsfile)
    if ip_addr in hosts:
        # Build a filtered list instead of calling list.remove() while
        # iterating the same list -- the original in-place removal skipped
        # the element following each removed duplicate.
        host_list = [host for host in host_list
                     if host not in hosts[ip_addr]]

    if not host_list:
        return 'No additional hosts were added to {0}'.format(hostsfile)

    append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
    with salt.utils.files.fopen(hostsfile, 'a') as fp_:
        fp_.write(salt.utils.stringutils.to_str(append_line))

    return 'The following line was added to {0}:{1}'.format(hostsfile,
                                                            append_line)
def hosts_remove(hostsfile='/etc/hosts', entries=None):
    '''
    Remove a host from the /etc/hosts file. If doing so will leave a line
    containing only an IP address, then the line will be deleted. This function
    will leave comments and blank lines intact.

    hostsfile
        Path to the hosts file to rewrite in place.

    entries
        Comma-separated hostnames to remove.

    CLI Examples:

    .. code-block:: bash

        salt '*' dnsutil.hosts_remove /etc/hosts ad1.yuk.co
        salt '*' dnsutil.hosts_remove /etc/hosts ad2.yuk.co,ad1.yuk.co
    '''
    # Read the whole file first, then rewrite it from scratch.
    with salt.utils.files.fopen(hostsfile, 'r') as fp_:
        hosts = salt.utils.stringutils.to_unicode(fp_.read())

    host_list = entries.split(',')
    with salt.utils.files.fopen(hostsfile, 'w') as out_file:
        for line in hosts.splitlines():
            # Preserve blank lines and comment lines verbatim.
            if not line or line.strip().startswith('#'):
                out_file.write(salt.utils.stringutils.to_str('{0}\n'.format(line)))
                continue
            comps = line.split()
            # Drop each requested hostname; comps[0] (the IP) is never
            # removed, only the alias fields after it.
            for host in host_list:
                if host in comps[1:]:
                    comps.remove(host)
            # Write the line back only if at least one hostname remains
            # after the IP; an IP-only line is dropped entirely.
            if len(comps) > 1:
                out_file.write(salt.utils.stringutils.to_str(' '.join(comps)))
                out_file.write(salt.utils.stringutils.to_str('\n'))
def parse_zone(zonefile=None, zone=None):
    '''
    Parses a zone file. Can be passed raw zone data on the API level.

    Returns a dict of zone directives ($ORIGIN, $TTL, ...), SOA fields
    (ORIGIN, SERIAL, REFRESH, ...), and per-record-type mappings.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
    '''
    if zonefile:
        try:
            with salt.utils.files.fopen(zonefile, 'r') as fp_:
                zone = salt.utils.stringutils.to_unicode(fp_.read())
        except Exception:
            # Fall through; the raw ``zone`` argument (if any) is used instead.
            pass

    if not zone:
        return 'Error: Zone data was not found'

    zonedict = {}
    mode = 'single'
    for line in zone.splitlines():
        # Strip trailing ``;`` comments before parsing the record.
        comps = line.split(';')
        line = comps[0].strip()
        if not line:
            continue
        comps = line.split()
        if line.startswith('$'):
            # Zone directives such as $ORIGIN / $TTL.
            zonedict[comps[0].replace('$', '')] = comps[1]
            continue
        if '(' in line and ')' not in line:
            # Start of a multi-line record (typically the SOA): accumulate
            # continuation lines until the closing paren appears.
            mode = 'multi'
            multi = ''
        if mode == 'multi':
            multi += ' {0}'.format(line)
            if ')' in line:
                # Closing paren seen; flatten the record onto one line.
                mode = 'single'
                line = multi.replace('(', '').replace(')', '')
            else:
                continue
        if 'ORIGIN' in zonedict:
            # Expand the ``@`` shorthand to the zone origin.
            comps = line.replace('@', zonedict['ORIGIN']).split()
        else:
            comps = line.split()
        if 'SOA' in line:
            # Normalize: drop an optional TTL field so comps[1] is 'IN'.
            if comps[1] != 'IN':
                comps.pop(1)
            zonedict['ORIGIN'] = comps[0]
            zonedict['NETWORK'] = comps[1]
            zonedict['SOURCE'] = comps[3]
            # The SOA contact encodes ``@`` as the first ``.``.
            zonedict['CONTACT'] = comps[4].replace('.', '@', 1)
            zonedict['SERIAL'] = comps[5]
            zonedict['REFRESH'] = _to_seconds(comps[6])
            zonedict['RETRY'] = _to_seconds(comps[7])
            zonedict['EXPIRE'] = _to_seconds(comps[8])
            zonedict['MINTTL'] = _to_seconds(comps[9])
            continue
        if comps[0] == 'IN':
            # A record with no owner name inherits the zone origin.
            comps.insert(0, zonedict['ORIGIN'])
        if not comps[0].endswith('.') and 'NS' not in line:
            # Qualify relative owner names against the origin.
            comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
        if comps[2] == 'NS':
            zonedict.setdefault('NS', []).append(comps[3])
        elif comps[2] == 'MX':
            # NOTE(review): only the first MX is kept -- the ``not in``
            # guard prevents appending later MX records; confirm intended.
            if 'MX' not in zonedict:
                zonedict.setdefault('MX', []).append({'priority': comps[3],
                                                     'host': comps[4]})
        elif comps[3] in ('A', 'AAAA'):
            zonedict.setdefault(comps[3], {})[comps[0]] = {
                'TARGET': comps[4],
                'TTL': comps[1],
            }
        else:
            zonedict.setdefault(comps[2], {})[comps[0]] = comps[3]
    return zonedict
def _has_dig():
    '''
    The dig-specific functions have been moved into their own module, but
    because they are also DNS utilities, a compatibility layer exists. This
    function helps add that layer.
    '''
    # ``which`` returns the binary's path, or None when dig is absent.
    dig_path = salt.utils.path.which('dig')
    return dig_path is not None
def check_ip(ip_addr):
    '''
    Check that string ip_addr is a valid IP

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.check_ip 127.0.0.1
    '''
    # Guard clause: this helper is only available when dig is installed.
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.check_ip'](ip_addr)
def A(host, nameserver=None):
    '''
    Return the A record(s) for ``host``.

    Always returns a list.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.A www.google.com
    '''
    if _has_dig():
        return __salt__['dig.A'](host, nameserver)
    if nameserver is None:
        # No resolver preference given, so the local socket interface is an
        # acceptable fallback when dig is unavailable.
        try:
            info = socket.getaddrinfo(host, None, socket.AF_INET, 0,
                                      socket.SOCK_RAW)
            return [entry[4][0] for entry in info]
        except socket.gaierror:
            return 'Unable to resolve {0}'.format(host)
    return 'This function requires dig, which is not currently available'
def AAAA(host, nameserver=None):
    '''
    Return the AAAA record(s) for ``host``.

    Always returns a list.

    .. versionadded:: 2014.7.5

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.AAAA www.google.com
    '''
    if _has_dig():
        return __salt__['dig.AAAA'](host, nameserver)
    if nameserver is None:
        # No resolver preference given, so the local socket interface is an
        # acceptable fallback when dig is unavailable.
        try:
            info = socket.getaddrinfo(host, None, socket.AF_INET6, 0,
                                      socket.SOCK_RAW)
            return [entry[4][0] for entry in info]
        except socket.gaierror:
            return 'Unable to resolve {0}'.format(host)
    return 'This function requires dig, which is not currently available'
def NS(domain, resolve=True, nameserver=None):
    '''
    Return a list of IPs of the nameservers for ``domain``

    If 'resolve' is False, don't resolve names.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.NS google.com
    '''
    # Guard clause: NS lookups are delegated entirely to the dig module.
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.NS'](domain, resolve, nameserver)
def SPF(domain, record='SPF', nameserver=None):
    '''
    Return the allowed IPv4 ranges in the SPF record for ``domain``.

    If record is ``SPF`` and the SPF record is empty, the TXT record will be
    searched automatically. If you know the domain uses TXT and not SPF,
    specifying that will save a lookup.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.SPF google.com
    '''
    # Guard clause: SPF lookups are delegated entirely to the dig module.
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.SPF'](domain, record, nameserver)
def MX(domain, resolve=False, nameserver=None):
    '''
    Return a list of lists for the MX of ``domain``.

    If the 'resolve' argument is True, resolve IPs for the servers.

    It's limited to one IP, because although in practice it's very rarely a
    round robin, it is an acceptable configuration and pulling just one IP lets
    the data be similar to the non-resolved version. If you think an MX has
    multiple IPs, don't use the resolver here, resolve them in a separate step.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.MX google.com
    '''
    # Guard clause: MX lookups are delegated entirely to the dig module.
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.MX'](domain, resolve, nameserver)
def serial(zone='', update=False):
    '''
    Return, store and update a dns serial for your zone files.

    zone: a keyword for a specific zone

    update: store an updated version of the serial in a grain

    If ``update`` is False, the function will retrieve an existing serial or
    return the current date if no serial is stored. Nothing will be stored

    If ``update`` is True, the function will set the serial to the current date
    if none exist or if the existing serial is for a previous date. If a serial
    for greater than the current date is already stored, the function will
    increment it.

    This module stores the serial in a grain, you can explicitly set the
    stored value as a grain named ``dnsserial_<zone_name>``.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.serial example.com
    '''
    # (Removed an unused ``grains = {}`` local that was dead code.)
    key = 'dnsserial'
    if zone:
        key += '_{0}'.format(zone)
    stored = __salt__['grains.get'](key=key)
    # Serials are date-based: YYYYMMDDnn, where today's first serial ends in 01.
    present = time.strftime('%Y%m%d01')
    if not update:
        # Read-only mode: report the stored serial, or today's default.
        return stored or present
    if stored and stored >= present:
        # The stored serial is already at or past today's base value;
        # increment it so the zone change is still picked up.
        current = six.text_type(int(stored) + 1)
    else:
        current = present
    __salt__['grains.setval'](key=key, val=current)
    return current
|
saltstack/salt
|
salt/modules/dnsutil.py
|
A
|
python
|
def A(host, nameserver=None):
'''
Return the A record(s) for ``host``.
Always returns a list.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.A www.google.com
'''
if _has_dig():
return __salt__['dig.A'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(host, None, socket.AF_INET, 0, socket.SOCK_RAW)]
return addresses
except socket.gaierror:
return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available'
|
Return the A record(s) for ``host``.
Always returns a list.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.A www.google.com
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dnsutil.py#L249-L271
|
[
"def _has_dig():\n '''\n The dig-specific functions have been moved into their own module, but\n because they are also DNS utilities, a compatibility layer exists. This\n function helps add that layer.\n '''\n return salt.utils.path.which('dig') is not None\n"
] |
# -*- coding: utf-8 -*-
'''
Compendium of generic DNS utilities.
.. note::
Some functions in the ``dnsutil`` execution module depend on ``dig``.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
'''
Generic, should work on any platform (including Windows). Functionality
which requires dependencies outside of Python do not belong in this module.
'''
return True
def parse_hosts(hostsfile='/etc/hosts', hosts=None):
'''
Parse /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.parse_hosts
'''
if not hosts:
try:
with salt.utils.files.fopen(hostsfile, 'r') as fp_:
hosts = salt.utils.stringutils.to_unicode(fp_.read())
except Exception:
return 'Error: hosts data was not found'
hostsdict = {}
for line in hosts.splitlines():
if not line:
continue
if line.startswith('#'):
continue
comps = line.split()
ip = comps[0]
aliases = comps[1:]
hostsdict.setdefault(ip, []).extend(aliases)
return hostsdict
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
'''
Append a single line to the /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
'''
host_list = entries.split(',')
hosts = parse_hosts(hostsfile=hostsfile)
if ip_addr in hosts:
for host in host_list:
if host in hosts[ip_addr]:
host_list.remove(host)
if not host_list:
return 'No additional hosts were added to {0}'.format(hostsfile)
append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
with salt.utils.files.fopen(hostsfile, 'a') as fp_:
fp_.write(salt.utils.stringutils.to_str(append_line))
return 'The following line was added to {0}:{1}'.format(hostsfile,
append_line)
def hosts_remove(hostsfile='/etc/hosts', entries=None):
'''
Remove a host from the /etc/hosts file. If doing so will leave a line
containing only an IP address, then the line will be deleted. This function
will leave comments and blank lines intact.
CLI Examples:
.. code-block:: bash
salt '*' dnsutil.hosts_remove /etc/hosts ad1.yuk.co
salt '*' dnsutil.hosts_remove /etc/hosts ad2.yuk.co,ad1.yuk.co
'''
with salt.utils.files.fopen(hostsfile, 'r') as fp_:
hosts = salt.utils.stringutils.to_unicode(fp_.read())
host_list = entries.split(',')
with salt.utils.files.fopen(hostsfile, 'w') as out_file:
for line in hosts.splitlines():
if not line or line.strip().startswith('#'):
out_file.write(salt.utils.stringutils.to_str('{0}\n'.format(line)))
continue
comps = line.split()
for host in host_list:
if host in comps[1:]:
comps.remove(host)
if len(comps) > 1:
out_file.write(salt.utils.stringutils.to_str(' '.join(comps)))
out_file.write(salt.utils.stringutils.to_str('\n'))
def parse_zone(zonefile=None, zone=None):
'''
Parses a zone file. Can be passed raw zone data on the API level.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
'''
if zonefile:
try:
with salt.utils.files.fopen(zonefile, 'r') as fp_:
zone = salt.utils.stringutils.to_unicode(fp_.read())
except Exception:
pass
if not zone:
return 'Error: Zone data was not found'
zonedict = {}
mode = 'single'
for line in zone.splitlines():
comps = line.split(';')
line = comps[0].strip()
if not line:
continue
comps = line.split()
if line.startswith('$'):
zonedict[comps[0].replace('$', '')] = comps[1]
continue
if '(' in line and ')' not in line:
mode = 'multi'
multi = ''
if mode == 'multi':
multi += ' {0}'.format(line)
if ')' in line:
mode = 'single'
line = multi.replace('(', '').replace(')', '')
else:
continue
if 'ORIGIN' in zonedict:
comps = line.replace('@', zonedict['ORIGIN']).split()
else:
comps = line.split()
if 'SOA' in line:
if comps[1] != 'IN':
comps.pop(1)
zonedict['ORIGIN'] = comps[0]
zonedict['NETWORK'] = comps[1]
zonedict['SOURCE'] = comps[3]
zonedict['CONTACT'] = comps[4].replace('.', '@', 1)
zonedict['SERIAL'] = comps[5]
zonedict['REFRESH'] = _to_seconds(comps[6])
zonedict['RETRY'] = _to_seconds(comps[7])
zonedict['EXPIRE'] = _to_seconds(comps[8])
zonedict['MINTTL'] = _to_seconds(comps[9])
continue
if comps[0] == 'IN':
comps.insert(0, zonedict['ORIGIN'])
if not comps[0].endswith('.') and 'NS' not in line:
comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
if comps[2] == 'NS':
zonedict.setdefault('NS', []).append(comps[3])
elif comps[2] == 'MX':
if 'MX' not in zonedict:
zonedict.setdefault('MX', []).append({'priority': comps[3],
'host': comps[4]})
elif comps[3] in ('A', 'AAAA'):
zonedict.setdefault(comps[3], {})[comps[0]] = {
'TARGET': comps[4],
'TTL': comps[1],
}
else:
zonedict.setdefault(comps[2], {})[comps[0]] = comps[3]
return zonedict
def _to_seconds(timestr):
'''
Converts a time value to seconds.
As per RFC1035 (page 45), max time is 1 week, so anything longer (or
unreadable) will be set to one week (604800 seconds).
'''
timestr = timestr.upper()
if 'H' in timestr:
seconds = int(timestr.replace('H', '')) * 3600
elif 'D' in timestr:
seconds = int(timestr.replace('D', '')) * 86400
elif 'W' in timestr:
seconds = 604800
else:
try:
seconds = int(timestr)
except ValueError:
seconds = 604800
if seconds > 604800:
seconds = 604800
return seconds
def _has_dig():
'''
The dig-specific functions have been moved into their own module, but
because they are also DNS utilities, a compatibility layer exists. This
function helps add that layer.
'''
return salt.utils.path.which('dig') is not None
def check_ip(ip_addr):
'''
Check that string ip_addr is a valid IP
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.check_ip 127.0.0.1
'''
if _has_dig():
return __salt__['dig.check_ip'](ip_addr)
return 'This function requires dig, which is not currently available'
def AAAA(host, nameserver=None):
'''
Return the AAAA record(s) for ``host``.
Always returns a list.
.. versionadded:: 2014.7.5
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.AAAA www.google.com
'''
if _has_dig():
return __salt__['dig.AAAA'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(host, None, socket.AF_INET6, 0, socket.SOCK_RAW)]
return addresses
except socket.gaierror:
return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available'
def NS(domain, resolve=True, nameserver=None):
'''
Return a list of IPs of the nameservers for ``domain``
If 'resolve' is False, don't resolve names.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.NS google.com
'''
if _has_dig():
return __salt__['dig.NS'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available'
def SPF(domain, record='SPF', nameserver=None):
'''
Return the allowed IPv4 ranges in the SPF record for ``domain``.
If record is ``SPF`` and the SPF record is empty, the TXT record will be
searched automatically. If you know the domain uses TXT and not SPF,
specifying that will save a lookup.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.SPF google.com
'''
if _has_dig():
return __salt__['dig.SPF'](domain, record, nameserver)
return 'This function requires dig, which is not currently available'
def MX(domain, resolve=False, nameserver=None):
'''
Return a list of lists for the MX of ``domain``.
If the 'resolve' argument is True, resolve IPs for the servers.
It's limited to one IP, because although in practice it's very rarely a
round robin, it is an acceptable configuration and pulling just one IP lets
the data be similar to the non-resolved version. If you think an MX has
multiple IPs, don't use the resolver here, resolve them in a separate step.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.MX google.com
'''
if _has_dig():
return __salt__['dig.MX'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available'
def serial(zone='', update=False):
'''
Return, store and update a dns serial for your zone files.
zone: a keyword for a specific zone
update: store an updated version of the serial in a grain
If ``update`` is False, the function will retrieve an existing serial or
return the current date if no serial is stored. Nothing will be stored
If ``update`` is True, the function will set the serial to the current date
if none exist or if the existing serial is for a previous date. If a serial
for greater than the current date is already stored, the function will
increment it.
This module stores the serial in a grain, you can explicitly set the
stored value as a grain named ``dnsserial_<zone_name>``.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.serial example.com
'''
grains = {}
key = 'dnsserial'
if zone:
key += '_{0}'.format(zone)
stored = __salt__['grains.get'](key=key)
present = time.strftime('%Y%m%d01')
if not update:
return stored or present
if stored and stored >= present:
current = six.text_type(int(stored) + 1)
else:
current = present
__salt__['grains.setval'](key=key, val=current)
return current
|
saltstack/salt
|
salt/modules/dnsutil.py
|
AAAA
|
python
|
def AAAA(host, nameserver=None):
'''
Return the AAAA record(s) for ``host``.
Always returns a list.
.. versionadded:: 2014.7.5
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.AAAA www.google.com
'''
if _has_dig():
return __salt__['dig.AAAA'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(host, None, socket.AF_INET6, 0, socket.SOCK_RAW)]
return addresses
except socket.gaierror:
return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available'
|
Return the AAAA record(s) for ``host``.
Always returns a list.
.. versionadded:: 2014.7.5
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.AAAA www.google.com
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dnsutil.py#L274-L298
|
[
"def _has_dig():\n '''\n The dig-specific functions have been moved into their own module, but\n because they are also DNS utilities, a compatibility layer exists. This\n function helps add that layer.\n '''\n return salt.utils.path.which('dig') is not None\n"
] |
# -*- coding: utf-8 -*-
'''
Compendium of generic DNS utilities.
.. note::
Some functions in the ``dnsutil`` execution module depend on ``dig``.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
'''
Generic, should work on any platform (including Windows). Functionality
which requires dependencies outside of Python do not belong in this module.
'''
return True
def parse_hosts(hostsfile='/etc/hosts', hosts=None):
'''
Parse /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.parse_hosts
'''
if not hosts:
try:
with salt.utils.files.fopen(hostsfile, 'r') as fp_:
hosts = salt.utils.stringutils.to_unicode(fp_.read())
except Exception:
return 'Error: hosts data was not found'
hostsdict = {}
for line in hosts.splitlines():
if not line:
continue
if line.startswith('#'):
continue
comps = line.split()
ip = comps[0]
aliases = comps[1:]
hostsdict.setdefault(ip, []).extend(aliases)
return hostsdict
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
'''
Append a single line to the /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
'''
host_list = entries.split(',')
hosts = parse_hosts(hostsfile=hostsfile)
if ip_addr in hosts:
for host in host_list:
if host in hosts[ip_addr]:
host_list.remove(host)
if not host_list:
return 'No additional hosts were added to {0}'.format(hostsfile)
append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
with salt.utils.files.fopen(hostsfile, 'a') as fp_:
fp_.write(salt.utils.stringutils.to_str(append_line))
return 'The following line was added to {0}:{1}'.format(hostsfile,
append_line)
def hosts_remove(hostsfile='/etc/hosts', entries=None):
'''
Remove a host from the /etc/hosts file. If doing so will leave a line
containing only an IP address, then the line will be deleted. This function
will leave comments and blank lines intact.
CLI Examples:
.. code-block:: bash
salt '*' dnsutil.hosts_remove /etc/hosts ad1.yuk.co
salt '*' dnsutil.hosts_remove /etc/hosts ad2.yuk.co,ad1.yuk.co
'''
with salt.utils.files.fopen(hostsfile, 'r') as fp_:
hosts = salt.utils.stringutils.to_unicode(fp_.read())
host_list = entries.split(',')
with salt.utils.files.fopen(hostsfile, 'w') as out_file:
for line in hosts.splitlines():
if not line or line.strip().startswith('#'):
out_file.write(salt.utils.stringutils.to_str('{0}\n'.format(line)))
continue
comps = line.split()
for host in host_list:
if host in comps[1:]:
comps.remove(host)
if len(comps) > 1:
out_file.write(salt.utils.stringutils.to_str(' '.join(comps)))
out_file.write(salt.utils.stringutils.to_str('\n'))
def parse_zone(zonefile=None, zone=None):
'''
Parses a zone file. Can be passed raw zone data on the API level.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
'''
if zonefile:
try:
with salt.utils.files.fopen(zonefile, 'r') as fp_:
zone = salt.utils.stringutils.to_unicode(fp_.read())
except Exception:
pass
if not zone:
return 'Error: Zone data was not found'
zonedict = {}
mode = 'single'
for line in zone.splitlines():
comps = line.split(';')
line = comps[0].strip()
if not line:
continue
comps = line.split()
if line.startswith('$'):
zonedict[comps[0].replace('$', '')] = comps[1]
continue
if '(' in line and ')' not in line:
mode = 'multi'
multi = ''
if mode == 'multi':
multi += ' {0}'.format(line)
if ')' in line:
mode = 'single'
line = multi.replace('(', '').replace(')', '')
else:
continue
if 'ORIGIN' in zonedict:
comps = line.replace('@', zonedict['ORIGIN']).split()
else:
comps = line.split()
if 'SOA' in line:
if comps[1] != 'IN':
comps.pop(1)
zonedict['ORIGIN'] = comps[0]
zonedict['NETWORK'] = comps[1]
zonedict['SOURCE'] = comps[3]
zonedict['CONTACT'] = comps[4].replace('.', '@', 1)
zonedict['SERIAL'] = comps[5]
zonedict['REFRESH'] = _to_seconds(comps[6])
zonedict['RETRY'] = _to_seconds(comps[7])
zonedict['EXPIRE'] = _to_seconds(comps[8])
zonedict['MINTTL'] = _to_seconds(comps[9])
continue
if comps[0] == 'IN':
comps.insert(0, zonedict['ORIGIN'])
if not comps[0].endswith('.') and 'NS' not in line:
comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
if comps[2] == 'NS':
zonedict.setdefault('NS', []).append(comps[3])
elif comps[2] == 'MX':
if 'MX' not in zonedict:
zonedict.setdefault('MX', []).append({'priority': comps[3],
'host': comps[4]})
elif comps[3] in ('A', 'AAAA'):
zonedict.setdefault(comps[3], {})[comps[0]] = {
'TARGET': comps[4],
'TTL': comps[1],
}
else:
zonedict.setdefault(comps[2], {})[comps[0]] = comps[3]
return zonedict
def _to_seconds(timestr):
    '''
    Convert a zone-file time value to seconds.

    As per RFC1035 (page 45), max time is 1 week, so anything longer (or
    unreadable) is clamped to one week (604800 seconds).
    '''
    max_ttl = 604800
    value = timestr.upper()
    if 'H' in value:
        result = int(value.replace('H', '')) * 3600
    elif 'D' in value:
        result = int(value.replace('D', '')) * 86400
    elif 'W' in value:
        # Anything expressed in weeks is already at (or over) the cap
        result = max_ttl
    else:
        try:
            result = int(value)
        except ValueError:
            # Unparseable values fall back to the maximum TTL
            result = max_ttl
    return min(result, max_ttl)
def _has_dig():
    '''
    The dig-specific functions have been moved into their own module, but
    because they are also DNS utilities, a compatibility layer exists. This
    function helps add that layer.
    '''
    dig_path = salt.utils.path.which('dig')
    return dig_path is not None
def check_ip(ip_addr):
    '''
    Check that string ip_addr is a valid IP.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.check_ip 127.0.0.1
    '''
    # Guard clause: this wrapper only works when dig is installed
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.check_ip'](ip_addr)
def A(host, nameserver=None):
    '''
    Return the A record(s) for ``host``.

    Always returns a list on successful resolution.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.A www.google.com
    '''
    if _has_dig():
        return __salt__['dig.A'](host, nameserver)
    if nameserver is None:
        # No dig available, but the system resolver can answer when we do
        # not care which nameserver does the lookup
        try:
            info = socket.getaddrinfo(host, None, socket.AF_INET, 0,
                                      socket.SOCK_RAW)
        except socket.gaierror:
            return 'Unable to resolve {0}'.format(host)
        return [entry[4][0] for entry in info]
    return 'This function requires dig, which is not currently available'
def NS(domain, resolve=True, nameserver=None):
    '''
    Return a list of IPs of the nameservers for ``domain``.

    If ``resolve`` is False, don't resolve names.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.NS google.com
    '''
    # Guard clause: this wrapper only works when dig is installed
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.NS'](domain, resolve, nameserver)
def SPF(domain, record='SPF', nameserver=None):
    '''
    Return the allowed IPv4 ranges in the SPF record for ``domain``.

    If record is ``SPF`` and the SPF record is empty, the TXT record will be
    searched automatically. If you know the domain uses TXT and not SPF,
    specifying that will save a lookup.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.SPF google.com
    '''
    # Guard clause: this wrapper only works when dig is installed
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.SPF'](domain, record, nameserver)
def MX(domain, resolve=False, nameserver=None):
    '''
    Return a list of lists for the MX of ``domain``.

    If the ``resolve`` argument is True, resolve IPs for the servers.

    It's limited to one IP, because although in practice it's very rarely a
    round robin, it is an acceptable configuration and pulling just one IP
    lets the data be similar to the non-resolved version. If you think an MX
    has multiple IPs, don't use the resolver here; resolve them in a
    separate step.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.MX google.com
    '''
    # Guard clause: this wrapper only works when dig is installed
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.MX'](domain, resolve, nameserver)
def serial(zone='', update=False):
    '''
    Return, store and update a dns serial for your zone files.

    zone: a keyword for a specific zone

    update: store an updated version of the serial in a grain

    If ``update`` is False, the function will retrieve an existing serial or
    return the current date if no serial is stored. Nothing will be stored.

    If ``update`` is True, the function will set the serial to the current
    date if none exist or if the existing serial is for a previous date. If a
    serial for greater than the current date is already stored, the function
    will increment it.

    This module stores the serial in a grain; you can explicitly set the
    stored value as a grain named ``dnsserial_<zone_name>``.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.serial example.com
    '''
    # NOTE: a dead local ``grains = {}`` assignment was removed here; it was
    # never read.
    key = 'dnsserial'
    if zone:
        key += '_{0}'.format(zone)
    stored = __salt__['grains.get'](key=key)
    # Serial format: YYYYMMDDnn, starting at revision 01 for today
    present = time.strftime('%Y%m%d01')
    if not update:
        return stored or present
    if stored and stored >= present:
        # Stored serial is today's (or future); bump the revision counter
        current = six.text_type(int(stored) + 1)
    else:
        current = present
    __salt__['grains.setval'](key=key, val=current)
    return current
|
saltstack/salt
|
salt/modules/dnsutil.py
|
NS
|
python
|
def NS(domain, resolve=True, nameserver=None):
'''
Return a list of IPs of the nameservers for ``domain``
If 'resolve' is False, don't resolve names.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.NS google.com
'''
if _has_dig():
return __salt__['dig.NS'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available'
|
Return a list of IPs of the nameservers for ``domain``
If 'resolve' is False, don't resolve names.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.NS google.com
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dnsutil.py#L301-L317
|
[
"def _has_dig():\n '''\n The dig-specific functions have been moved into their own module, but\n because they are also DNS utilities, a compatibility layer exists. This\n function helps add that layer.\n '''\n return salt.utils.path.which('dig') is not None\n"
] |
# -*- coding: utf-8 -*-
'''
Compendium of generic DNS utilities.
.. note::
Some functions in the ``dnsutil`` execution module depend on ``dig``.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Always load: this module is generic and should work on any platform
    (including Windows). Functionality requiring dependencies outside of
    Python does not belong in this module.
    '''
    return True
def parse_hosts(hostsfile='/etc/hosts', hosts=None):
    '''
    Parse /etc/hosts file.

    Returns a dict mapping each IP address to its list of hostnames, or an
    error string if the file could not be read.

    CLI Example:

    .. code-block:: bash

        salt '*' dnsutil.parse_hosts
    '''
    if not hosts:
        try:
            with salt.utils.files.fopen(hostsfile, 'r') as fp_:
                hosts = salt.utils.stringutils.to_unicode(fp_.read())
        except Exception:
            return 'Error: hosts data was not found'
    hostsdict = {}
    for entry in hosts.splitlines():
        # Skip blank lines and comments
        if not entry or entry.startswith('#'):
            continue
        fields = entry.split()
        hostsdict.setdefault(fields[0], []).extend(fields[1:])
    return hostsdict
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
    '''
    Append a single line to the /etc/hosts file.

    hostsfile
        Path to the hosts file to modify.
    ip_addr
        IP address the new entries should resolve to.
    entries
        Comma-separated hostnames to add for ``ip_addr``.

    CLI Example:

    .. code-block:: bash

        salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
    '''
    host_list = entries.split(',')
    hosts = parse_hosts(hostsfile=hostsfile)
    if ip_addr in hosts:
        # Filter with a new list rather than removing from the list being
        # iterated: the old in-place removal skipped every other element,
        # so hosts already present could be appended again.
        host_list = [host for host in host_list
                     if host not in hosts[ip_addr]]
    if not host_list:
        return 'No additional hosts were added to {0}'.format(hostsfile)
    append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
    with salt.utils.files.fopen(hostsfile, 'a') as fp_:
        fp_.write(salt.utils.stringutils.to_str(append_line))
    return 'The following line was added to {0}:{1}'.format(hostsfile,
                                                            append_line)
def hosts_remove(hostsfile='/etc/hosts', entries=None):
    '''
    Remove a host from the /etc/hosts file. If doing so will leave a line
    containing only an IP address, then the line will be deleted. This
    function will leave comments and blank lines intact.

    CLI Examples:

    .. code-block:: bash

        salt '*' dnsutil.hosts_remove /etc/hosts ad1.yuk.co
        salt '*' dnsutil.hosts_remove /etc/hosts ad2.yuk.co,ad1.yuk.co
    '''
    with salt.utils.files.fopen(hostsfile, 'r') as fp_:
        contents = salt.utils.stringutils.to_unicode(fp_.read())
    targets = entries.split(',')
    with salt.utils.files.fopen(hostsfile, 'w') as out_file:
        for line in contents.splitlines():
            # Pass comments and blank lines through untouched
            if not line or line.strip().startswith('#'):
                out_file.write(salt.utils.stringutils.to_str(
                    '{0}\n'.format(line)))
                continue
            fields = line.split()
            for target in targets:
                if target in fields[1:]:
                    fields.remove(target)
            # Drop the line entirely if only the IP address remains
            if len(fields) > 1:
                out_file.write(salt.utils.stringutils.to_str(
                    ' '.join(fields)))
                out_file.write(salt.utils.stringutils.to_str('\n'))
def parse_zone(zonefile=None, zone=None):
'''
Parses a zone file. Can be passed raw zone data on the API level.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
'''
if zonefile:
try:
with salt.utils.files.fopen(zonefile, 'r') as fp_:
zone = salt.utils.stringutils.to_unicode(fp_.read())
except Exception:
pass
if not zone:
return 'Error: Zone data was not found'
zonedict = {}
mode = 'single'
for line in zone.splitlines():
comps = line.split(';')
line = comps[0].strip()
if not line:
continue
comps = line.split()
if line.startswith('$'):
zonedict[comps[0].replace('$', '')] = comps[1]
continue
if '(' in line and ')' not in line:
mode = 'multi'
multi = ''
if mode == 'multi':
multi += ' {0}'.format(line)
if ')' in line:
mode = 'single'
line = multi.replace('(', '').replace(')', '')
else:
continue
if 'ORIGIN' in zonedict:
comps = line.replace('@', zonedict['ORIGIN']).split()
else:
comps = line.split()
if 'SOA' in line:
if comps[1] != 'IN':
comps.pop(1)
zonedict['ORIGIN'] = comps[0]
zonedict['NETWORK'] = comps[1]
zonedict['SOURCE'] = comps[3]
zonedict['CONTACT'] = comps[4].replace('.', '@', 1)
zonedict['SERIAL'] = comps[5]
zonedict['REFRESH'] = _to_seconds(comps[6])
zonedict['RETRY'] = _to_seconds(comps[7])
zonedict['EXPIRE'] = _to_seconds(comps[8])
zonedict['MINTTL'] = _to_seconds(comps[9])
continue
if comps[0] == 'IN':
comps.insert(0, zonedict['ORIGIN'])
if not comps[0].endswith('.') and 'NS' not in line:
comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
if comps[2] == 'NS':
zonedict.setdefault('NS', []).append(comps[3])
elif comps[2] == 'MX':
if 'MX' not in zonedict:
zonedict.setdefault('MX', []).append({'priority': comps[3],
'host': comps[4]})
elif comps[3] in ('A', 'AAAA'):
zonedict.setdefault(comps[3], {})[comps[0]] = {
'TARGET': comps[4],
'TTL': comps[1],
}
else:
zonedict.setdefault(comps[2], {})[comps[0]] = comps[3]
return zonedict
def _to_seconds(timestr):
'''
Converts a time value to seconds.
As per RFC1035 (page 45), max time is 1 week, so anything longer (or
unreadable) will be set to one week (604800 seconds).
'''
timestr = timestr.upper()
if 'H' in timestr:
seconds = int(timestr.replace('H', '')) * 3600
elif 'D' in timestr:
seconds = int(timestr.replace('D', '')) * 86400
elif 'W' in timestr:
seconds = 604800
else:
try:
seconds = int(timestr)
except ValueError:
seconds = 604800
if seconds > 604800:
seconds = 604800
return seconds
def _has_dig():
'''
The dig-specific functions have been moved into their own module, but
because they are also DNS utilities, a compatibility layer exists. This
function helps add that layer.
'''
return salt.utils.path.which('dig') is not None
def check_ip(ip_addr):
'''
Check that string ip_addr is a valid IP
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.check_ip 127.0.0.1
'''
if _has_dig():
return __salt__['dig.check_ip'](ip_addr)
return 'This function requires dig, which is not currently available'
def A(host, nameserver=None):
'''
Return the A record(s) for ``host``.
Always returns a list.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.A www.google.com
'''
if _has_dig():
return __salt__['dig.A'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(host, None, socket.AF_INET, 0, socket.SOCK_RAW)]
return addresses
except socket.gaierror:
return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available'
def AAAA(host, nameserver=None):
    '''
    Return the AAAA record(s) for ``host``.

    Always returns a list on successful resolution.

    .. versionadded:: 2014.7.5

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.AAAA www.google.com
    '''
    if _has_dig():
        return __salt__['dig.AAAA'](host, nameserver)
    if nameserver is None:
        # No dig available, but the system resolver can answer when we do
        # not care which nameserver does the lookup
        try:
            info = socket.getaddrinfo(host, None, socket.AF_INET6, 0,
                                      socket.SOCK_RAW)
        except socket.gaierror:
            return 'Unable to resolve {0}'.format(host)
        return [entry[4][0] for entry in info]
    return 'This function requires dig, which is not currently available'
def SPF(domain, record='SPF', nameserver=None):
'''
Return the allowed IPv4 ranges in the SPF record for ``domain``.
If record is ``SPF`` and the SPF record is empty, the TXT record will be
searched automatically. If you know the domain uses TXT and not SPF,
specifying that will save a lookup.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.SPF google.com
'''
if _has_dig():
return __salt__['dig.SPF'](domain, record, nameserver)
return 'This function requires dig, which is not currently available'
def MX(domain, resolve=False, nameserver=None):
'''
Return a list of lists for the MX of ``domain``.
If the 'resolve' argument is True, resolve IPs for the servers.
It's limited to one IP, because although in practice it's very rarely a
round robin, it is an acceptable configuration and pulling just one IP lets
the data be similar to the non-resolved version. If you think an MX has
multiple IPs, don't use the resolver here, resolve them in a separate step.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.MX google.com
'''
if _has_dig():
return __salt__['dig.MX'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available'
def serial(zone='', update=False):
'''
Return, store and update a dns serial for your zone files.
zone: a keyword for a specific zone
update: store an updated version of the serial in a grain
If ``update`` is False, the function will retrieve an existing serial or
return the current date if no serial is stored. Nothing will be stored
If ``update`` is True, the function will set the serial to the current date
if none exist or if the existing serial is for a previous date. If a serial
for greater than the current date is already stored, the function will
increment it.
This module stores the serial in a grain, you can explicitly set the
stored value as a grain named ``dnsserial_<zone_name>``.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.serial example.com
'''
grains = {}
key = 'dnsserial'
if zone:
key += '_{0}'.format(zone)
stored = __salt__['grains.get'](key=key)
present = time.strftime('%Y%m%d01')
if not update:
return stored or present
if stored and stored >= present:
current = six.text_type(int(stored) + 1)
else:
current = present
__salt__['grains.setval'](key=key, val=current)
return current
|
saltstack/salt
|
salt/modules/dnsutil.py
|
MX
|
python
|
def MX(domain, resolve=False, nameserver=None):
'''
Return a list of lists for the MX of ``domain``.
If the 'resolve' argument is True, resolve IPs for the servers.
It's limited to one IP, because although in practice it's very rarely a
round robin, it is an acceptable configuration and pulling just one IP lets
the data be similar to the non-resolved version. If you think an MX has
multiple IPs, don't use the resolver here, resolve them in a separate step.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.MX google.com
'''
if _has_dig():
return __salt__['dig.MX'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available'
|
Return a list of lists for the MX of ``domain``.
If the 'resolve' argument is True, resolve IPs for the servers.
It's limited to one IP, because although in practice it's very rarely a
round robin, it is an acceptable configuration and pulling just one IP lets
the data be similar to the non-resolved version. If you think an MX has
multiple IPs, don't use the resolver here, resolve them in a separate step.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.MX google.com
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dnsutil.py#L340-L360
|
[
"def _has_dig():\n '''\n The dig-specific functions have been moved into their own module, but\n because they are also DNS utilities, a compatibility layer exists. This\n function helps add that layer.\n '''\n return salt.utils.path.which('dig') is not None\n"
] |
# -*- coding: utf-8 -*-
'''
Compendium of generic DNS utilities.
.. note::
Some functions in the ``dnsutil`` execution module depend on ``dig``.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
'''
Generic, should work on any platform (including Windows). Functionality
which requires dependencies outside of Python do not belong in this module.
'''
return True
def parse_hosts(hostsfile='/etc/hosts', hosts=None):
'''
Parse /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.parse_hosts
'''
if not hosts:
try:
with salt.utils.files.fopen(hostsfile, 'r') as fp_:
hosts = salt.utils.stringutils.to_unicode(fp_.read())
except Exception:
return 'Error: hosts data was not found'
hostsdict = {}
for line in hosts.splitlines():
if not line:
continue
if line.startswith('#'):
continue
comps = line.split()
ip = comps[0]
aliases = comps[1:]
hostsdict.setdefault(ip, []).extend(aliases)
return hostsdict
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
'''
Append a single line to the /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
'''
host_list = entries.split(',')
hosts = parse_hosts(hostsfile=hostsfile)
if ip_addr in hosts:
for host in host_list:
if host in hosts[ip_addr]:
host_list.remove(host)
if not host_list:
return 'No additional hosts were added to {0}'.format(hostsfile)
append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
with salt.utils.files.fopen(hostsfile, 'a') as fp_:
fp_.write(salt.utils.stringutils.to_str(append_line))
return 'The following line was added to {0}:{1}'.format(hostsfile,
append_line)
def hosts_remove(hostsfile='/etc/hosts', entries=None):
'''
Remove a host from the /etc/hosts file. If doing so will leave a line
containing only an IP address, then the line will be deleted. This function
will leave comments and blank lines intact.
CLI Examples:
.. code-block:: bash
salt '*' dnsutil.hosts_remove /etc/hosts ad1.yuk.co
salt '*' dnsutil.hosts_remove /etc/hosts ad2.yuk.co,ad1.yuk.co
'''
with salt.utils.files.fopen(hostsfile, 'r') as fp_:
hosts = salt.utils.stringutils.to_unicode(fp_.read())
host_list = entries.split(',')
with salt.utils.files.fopen(hostsfile, 'w') as out_file:
for line in hosts.splitlines():
if not line or line.strip().startswith('#'):
out_file.write(salt.utils.stringutils.to_str('{0}\n'.format(line)))
continue
comps = line.split()
for host in host_list:
if host in comps[1:]:
comps.remove(host)
if len(comps) > 1:
out_file.write(salt.utils.stringutils.to_str(' '.join(comps)))
out_file.write(salt.utils.stringutils.to_str('\n'))
def parse_zone(zonefile=None, zone=None):
'''
Parses a zone file. Can be passed raw zone data on the API level.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
'''
if zonefile:
try:
with salt.utils.files.fopen(zonefile, 'r') as fp_:
zone = salt.utils.stringutils.to_unicode(fp_.read())
except Exception:
pass
if not zone:
return 'Error: Zone data was not found'
zonedict = {}
mode = 'single'
for line in zone.splitlines():
comps = line.split(';')
line = comps[0].strip()
if not line:
continue
comps = line.split()
if line.startswith('$'):
zonedict[comps[0].replace('$', '')] = comps[1]
continue
if '(' in line and ')' not in line:
mode = 'multi'
multi = ''
if mode == 'multi':
multi += ' {0}'.format(line)
if ')' in line:
mode = 'single'
line = multi.replace('(', '').replace(')', '')
else:
continue
if 'ORIGIN' in zonedict:
comps = line.replace('@', zonedict['ORIGIN']).split()
else:
comps = line.split()
if 'SOA' in line:
if comps[1] != 'IN':
comps.pop(1)
zonedict['ORIGIN'] = comps[0]
zonedict['NETWORK'] = comps[1]
zonedict['SOURCE'] = comps[3]
zonedict['CONTACT'] = comps[4].replace('.', '@', 1)
zonedict['SERIAL'] = comps[5]
zonedict['REFRESH'] = _to_seconds(comps[6])
zonedict['RETRY'] = _to_seconds(comps[7])
zonedict['EXPIRE'] = _to_seconds(comps[8])
zonedict['MINTTL'] = _to_seconds(comps[9])
continue
if comps[0] == 'IN':
comps.insert(0, zonedict['ORIGIN'])
if not comps[0].endswith('.') and 'NS' not in line:
comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
if comps[2] == 'NS':
zonedict.setdefault('NS', []).append(comps[3])
elif comps[2] == 'MX':
if 'MX' not in zonedict:
zonedict.setdefault('MX', []).append({'priority': comps[3],
'host': comps[4]})
elif comps[3] in ('A', 'AAAA'):
zonedict.setdefault(comps[3], {})[comps[0]] = {
'TARGET': comps[4],
'TTL': comps[1],
}
else:
zonedict.setdefault(comps[2], {})[comps[0]] = comps[3]
return zonedict
def _to_seconds(timestr):
'''
Converts a time value to seconds.
As per RFC1035 (page 45), max time is 1 week, so anything longer (or
unreadable) will be set to one week (604800 seconds).
'''
timestr = timestr.upper()
if 'H' in timestr:
seconds = int(timestr.replace('H', '')) * 3600
elif 'D' in timestr:
seconds = int(timestr.replace('D', '')) * 86400
elif 'W' in timestr:
seconds = 604800
else:
try:
seconds = int(timestr)
except ValueError:
seconds = 604800
if seconds > 604800:
seconds = 604800
return seconds
def _has_dig():
'''
The dig-specific functions have been moved into their own module, but
because they are also DNS utilities, a compatibility layer exists. This
function helps add that layer.
'''
return salt.utils.path.which('dig') is not None
def check_ip(ip_addr):
'''
Check that string ip_addr is a valid IP
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.check_ip 127.0.0.1
'''
if _has_dig():
return __salt__['dig.check_ip'](ip_addr)
return 'This function requires dig, which is not currently available'
def A(host, nameserver=None):
'''
Return the A record(s) for ``host``.
Always returns a list.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.A www.google.com
'''
if _has_dig():
return __salt__['dig.A'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(host, None, socket.AF_INET, 0, socket.SOCK_RAW)]
return addresses
except socket.gaierror:
return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available'
def AAAA(host, nameserver=None):
'''
Return the AAAA record(s) for ``host``.
Always returns a list.
.. versionadded:: 2014.7.5
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.AAAA www.google.com
'''
if _has_dig():
return __salt__['dig.AAAA'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(host, None, socket.AF_INET6, 0, socket.SOCK_RAW)]
return addresses
except socket.gaierror:
return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available'
def NS(domain, resolve=True, nameserver=None):
'''
Return a list of IPs of the nameservers for ``domain``
If 'resolve' is False, don't resolve names.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.NS google.com
'''
if _has_dig():
return __salt__['dig.NS'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available'
def SPF(domain, record='SPF', nameserver=None):
'''
Return the allowed IPv4 ranges in the SPF record for ``domain``.
If record is ``SPF`` and the SPF record is empty, the TXT record will be
searched automatically. If you know the domain uses TXT and not SPF,
specifying that will save a lookup.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.SPF google.com
'''
if _has_dig():
return __salt__['dig.SPF'](domain, record, nameserver)
return 'This function requires dig, which is not currently available'
def serial(zone='', update=False):
'''
Return, store and update a dns serial for your zone files.
zone: a keyword for a specific zone
update: store an updated version of the serial in a grain
If ``update`` is False, the function will retrieve an existing serial or
return the current date if no serial is stored. Nothing will be stored
If ``update`` is True, the function will set the serial to the current date
if none exist or if the existing serial is for a previous date. If a serial
for greater than the current date is already stored, the function will
increment it.
This module stores the serial in a grain, you can explicitly set the
stored value as a grain named ``dnsserial_<zone_name>``.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.serial example.com
'''
grains = {}
key = 'dnsserial'
if zone:
key += '_{0}'.format(zone)
stored = __salt__['grains.get'](key=key)
present = time.strftime('%Y%m%d01')
if not update:
return stored or present
if stored and stored >= present:
current = six.text_type(int(stored) + 1)
else:
current = present
__salt__['grains.setval'](key=key, val=current)
return current
|
saltstack/salt
|
salt/modules/dnsutil.py
|
serial
|
python
|
def serial(zone='', update=False):
'''
Return, store and update a dns serial for your zone files.
zone: a keyword for a specific zone
update: store an updated version of the serial in a grain
If ``update`` is False, the function will retrieve an existing serial or
return the current date if no serial is stored. Nothing will be stored
If ``update`` is True, the function will set the serial to the current date
if none exist or if the existing serial is for a previous date. If a serial
for greater than the current date is already stored, the function will
increment it.
This module stores the serial in a grain, you can explicitly set the
stored value as a grain named ``dnsserial_<zone_name>``.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.serial example.com
'''
grains = {}
key = 'dnsserial'
if zone:
key += '_{0}'.format(zone)
stored = __salt__['grains.get'](key=key)
present = time.strftime('%Y%m%d01')
if not update:
return stored or present
if stored and stored >= present:
current = six.text_type(int(stored) + 1)
else:
current = present
__salt__['grains.setval'](key=key, val=current)
return current
|
Return, store and update a dns serial for your zone files.
zone: a keyword for a specific zone
update: store an updated version of the serial in a grain
If ``update`` is False, the function will retrieve an existing serial or
return the current date if no serial is stored. Nothing will be stored
If ``update`` is True, the function will set the serial to the current date
if none exist or if the existing serial is for a previous date. If a serial
for greater than the current date is already stored, the function will
increment it.
This module stores the serial in a grain, you can explicitly set the
stored value as a grain named ``dnsserial_<zone_name>``.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.serial example.com
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dnsutil.py#L363-L401
| null |
# -*- coding: utf-8 -*-
'''
Compendium of generic DNS utilities.
.. note::
Some functions in the ``dnsutil`` execution module depend on ``dig``.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
'''
Generic, should work on any platform (including Windows). Functionality
which requires dependencies outside of Python do not belong in this module.
'''
return True
def parse_hosts(hostsfile='/etc/hosts', hosts=None):
'''
Parse /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.parse_hosts
'''
if not hosts:
try:
with salt.utils.files.fopen(hostsfile, 'r') as fp_:
hosts = salt.utils.stringutils.to_unicode(fp_.read())
except Exception:
return 'Error: hosts data was not found'
hostsdict = {}
for line in hosts.splitlines():
if not line:
continue
if line.startswith('#'):
continue
comps = line.split()
ip = comps[0]
aliases = comps[1:]
hostsdict.setdefault(ip, []).extend(aliases)
return hostsdict
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
'''
Append a single line to the /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
'''
host_list = entries.split(',')
hosts = parse_hosts(hostsfile=hostsfile)
if ip_addr in hosts:
for host in host_list:
if host in hosts[ip_addr]:
host_list.remove(host)
if not host_list:
return 'No additional hosts were added to {0}'.format(hostsfile)
append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
with salt.utils.files.fopen(hostsfile, 'a') as fp_:
fp_.write(salt.utils.stringutils.to_str(append_line))
return 'The following line was added to {0}:{1}'.format(hostsfile,
append_line)
def hosts_remove(hostsfile='/etc/hosts', entries=None):
'''
Remove a host from the /etc/hosts file. If doing so will leave a line
containing only an IP address, then the line will be deleted. This function
will leave comments and blank lines intact.
CLI Examples:
.. code-block:: bash
salt '*' dnsutil.hosts_remove /etc/hosts ad1.yuk.co
salt '*' dnsutil.hosts_remove /etc/hosts ad2.yuk.co,ad1.yuk.co
'''
with salt.utils.files.fopen(hostsfile, 'r') as fp_:
hosts = salt.utils.stringutils.to_unicode(fp_.read())
host_list = entries.split(',')
with salt.utils.files.fopen(hostsfile, 'w') as out_file:
for line in hosts.splitlines():
if not line or line.strip().startswith('#'):
out_file.write(salt.utils.stringutils.to_str('{0}\n'.format(line)))
continue
comps = line.split()
for host in host_list:
if host in comps[1:]:
comps.remove(host)
if len(comps) > 1:
out_file.write(salt.utils.stringutils.to_str(' '.join(comps)))
out_file.write(salt.utils.stringutils.to_str('\n'))
def parse_zone(zonefile=None, zone=None):
'''
Parses a zone file. Can be passed raw zone data on the API level.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
'''
if zonefile:
try:
with salt.utils.files.fopen(zonefile, 'r') as fp_:
zone = salt.utils.stringutils.to_unicode(fp_.read())
except Exception:
pass
if not zone:
return 'Error: Zone data was not found'
zonedict = {}
mode = 'single'
for line in zone.splitlines():
comps = line.split(';')
line = comps[0].strip()
if not line:
continue
comps = line.split()
if line.startswith('$'):
zonedict[comps[0].replace('$', '')] = comps[1]
continue
if '(' in line and ')' not in line:
mode = 'multi'
multi = ''
if mode == 'multi':
multi += ' {0}'.format(line)
if ')' in line:
mode = 'single'
line = multi.replace('(', '').replace(')', '')
else:
continue
if 'ORIGIN' in zonedict:
comps = line.replace('@', zonedict['ORIGIN']).split()
else:
comps = line.split()
if 'SOA' in line:
if comps[1] != 'IN':
comps.pop(1)
zonedict['ORIGIN'] = comps[0]
zonedict['NETWORK'] = comps[1]
zonedict['SOURCE'] = comps[3]
zonedict['CONTACT'] = comps[4].replace('.', '@', 1)
zonedict['SERIAL'] = comps[5]
zonedict['REFRESH'] = _to_seconds(comps[6])
zonedict['RETRY'] = _to_seconds(comps[7])
zonedict['EXPIRE'] = _to_seconds(comps[8])
zonedict['MINTTL'] = _to_seconds(comps[9])
continue
if comps[0] == 'IN':
comps.insert(0, zonedict['ORIGIN'])
if not comps[0].endswith('.') and 'NS' not in line:
comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
if comps[2] == 'NS':
zonedict.setdefault('NS', []).append(comps[3])
elif comps[2] == 'MX':
if 'MX' not in zonedict:
zonedict.setdefault('MX', []).append({'priority': comps[3],
'host': comps[4]})
elif comps[3] in ('A', 'AAAA'):
zonedict.setdefault(comps[3], {})[comps[0]] = {
'TARGET': comps[4],
'TTL': comps[1],
}
else:
zonedict.setdefault(comps[2], {})[comps[0]] = comps[3]
return zonedict
def _to_seconds(timestr):
'''
Converts a time value to seconds.
As per RFC1035 (page 45), max time is 1 week, so anything longer (or
unreadable) will be set to one week (604800 seconds).
'''
timestr = timestr.upper()
if 'H' in timestr:
seconds = int(timestr.replace('H', '')) * 3600
elif 'D' in timestr:
seconds = int(timestr.replace('D', '')) * 86400
elif 'W' in timestr:
seconds = 604800
else:
try:
seconds = int(timestr)
except ValueError:
seconds = 604800
if seconds > 604800:
seconds = 604800
return seconds
def _has_dig():
    '''
    Detect whether the ``dig`` binary is available on the PATH.

    The dig-specific functions have been moved into their own module, but
    because they are also DNS utilities, a compatibility layer exists. This
    function helps add that layer.
    '''
    dig_path = salt.utils.path.which('dig')
    return dig_path is not None
def check_ip(ip_addr):
    '''
    Check that the string ``ip_addr`` is a valid IP address.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.check_ip 127.0.0.1
    '''
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.check_ip'](ip_addr)
def A(host, nameserver=None):
    '''
    Return the A record(s) for ``host``.

    Always returns a list.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.A www.google.com
    '''
    if _has_dig():
        return __salt__['dig.A'](host, nameserver)
    if nameserver is None:
        # No dig available, but the caller does not care which resolver
        # answers: fall back to the system resolver via the socket module.
        try:
            return [info[4][0] for info in
                    socket.getaddrinfo(host, None, socket.AF_INET,
                                       0, socket.SOCK_RAW)]
        except socket.gaierror:
            return 'Unable to resolve {0}'.format(host)
    return 'This function requires dig, which is not currently available'
def AAAA(host, nameserver=None):
    '''
    Return the AAAA record(s) for ``host``.

    Always returns a list.

    .. versionadded:: 2014.7.5

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.AAAA www.google.com
    '''
    if _has_dig():
        return __salt__['dig.AAAA'](host, nameserver)
    if nameserver is None:
        # No dig available, but the caller does not care which resolver
        # answers: fall back to the system resolver via the socket module.
        try:
            return [info[4][0] for info in
                    socket.getaddrinfo(host, None, socket.AF_INET6,
                                       0, socket.SOCK_RAW)]
        except socket.gaierror:
            return 'Unable to resolve {0}'.format(host)
    return 'This function requires dig, which is not currently available'
def NS(domain, resolve=True, nameserver=None):
    '''
    Return a list of IPs of the nameservers for ``domain``.

    If 'resolve' is False, don't resolve names.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.NS google.com
    '''
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.NS'](domain, resolve, nameserver)
def SPF(domain, record='SPF', nameserver=None):
    '''
    Return the allowed IPv4 ranges in the SPF record for ``domain``.

    If record is ``SPF`` and the SPF record is empty, the TXT record will be
    searched automatically. If you know the domain uses TXT and not SPF,
    specifying that will save a lookup.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.SPF google.com
    '''
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.SPF'](domain, record, nameserver)
def MX(domain, resolve=False, nameserver=None):
    '''
    Return a list of lists for the MX of ``domain``.

    If the 'resolve' argument is True, resolve IPs for the servers.

    It's limited to one IP, because although in practice it's very rarely a
    round robin, it is an acceptable configuration and pulling just one IP lets
    the data be similar to the non-resolved version. If you think an MX has
    multiple IPs, don't use the resolver here, resolve them in a separate step.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.MX google.com
    '''
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.MX'](domain, resolve, nameserver)
|
saltstack/salt
|
salt/master.py
|
Maintenance._post_fork_init
|
python
|
def _post_fork_init(self):
    '''
    Some things need to be init'd after the fork has completed

    The easiest example is that one of these module types creates a thread
    in the parent process, then once the fork happens you'll start getting
    errors like "WARNING: Mixing fork() and threads detected; memory leaked."
    '''
    # Load Runners
    # NOTE(review): 'quiet' presumably suppresses runner output for this
    # background process -- confirm against RunnerClient.
    ropts = dict(self.opts)
    ropts['quiet'] = True
    runner_client = salt.runner.RunnerClient(ropts)
    # Load Returners
    self.returners = salt.loader.returners(self.opts, {})
    # Init Scheduler -- scheduled jobs run runner functions and can push
    # results through the loaded returners.
    self.schedule = salt.utils.schedule.Schedule(self.opts,
                                                 runner_client.functions_dict(),
                                                 returners=self.returners)
    self.ckminions = salt.utils.minions.CkMinions(self.opts)
    # Make Event bus for firing
    self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
    # Init any values needed by the git ext pillar
    self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
    # Presence events are emitted by this process only when at least one
    # configured transport is not TCP.
    self.presence_events = False
    if self.opts.get('presence_events', False):
        tcp_only = True
        for transport, _ in iter_transport_opts(self.opts):
            if transport != 'tcp':
                tcp_only = False
        if not tcp_only:
            # For a TCP only transport, the presence events will be
            # handled in the transport code.
            self.presence_events = True
|
Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked."
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L179-L212
| null |
class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
    '''
    A generalized maintenance process which performs maintenance routines.
    '''
    def __init__(self, opts, **kwargs):
        '''
        Create a maintenance instance

        :param dict opts: The salt options
        '''
        super(Maintenance, self).__init__(**kwargs)
        self.opts = opts
        # How often do we perform the maintenance tasks
        self.loop_interval = int(self.opts['loop_interval'])
        # Track key rotation intervals
        self.rotate = int(time.time())
        # A serializer for general maint operations
        self.serial = salt.payload.Serial(self.opts)
    # __setstate__ and __getstate__ are only used on Windows.
    # We do this so that __init__ will be invoked on Windows in the child
    # process so that a register_after_fork() equivalent will work on Windows.
    def __setstate__(self, state):
        self._is_child = True
        self.__init__(
            state['opts'],
            log_queue=state['log_queue'],
            log_queue_level=state['log_queue_level']
        )
    def __getstate__(self):
        return {
            'opts': self.opts,
            'log_queue': self.log_queue,
            'log_queue_level': self.log_queue_level
        }
    def run(self):
        '''
        This is the general passive maintenance process controller for the Salt
        master.

        This is where any data that needs to be cleanly maintained from the
        master is maintained.
        '''
        salt.utils.process.appendproctitle(self.__class__.__name__)
        # init things that need to be done after the process is forked
        # NOTE(review): _post_fork_init is defined on the full class
        # elsewhere -- not visible in this excerpt.
        self._post_fork_init()
        # Make Start Times
        last = int(time.time())
        # Minion IDs seen as present on the previous pass;
        # handle_presence() updates this set in place.
        old_present = set()
        # Loop forever; this process only exits when killed.
        while True:
            now = int(time.time())
            if (now - last) >= self.loop_interval:
                salt.daemons.masterapi.clean_old_jobs(self.opts)
                salt.daemons.masterapi.clean_expired_tokens(self.opts)
                salt.daemons.masterapi.clean_pub_auth(self.opts)
                salt.daemons.masterapi.clean_proc_dir(self.opts)
                self.handle_git_pillar()
                self.handle_schedule()
                self.handle_key_cache()
                self.handle_presence(old_present)
                self.handle_key_rotate(now)
                salt.utils.verify.check_max_open_files(self.opts)
                last = now
            time.sleep(self.loop_interval)
    def handle_key_cache(self):
        '''
        Evaluate accepted keys and create a msgpack file
        which contains a list
        '''
        if self.opts['key_cache'] == 'sched':
            keys = []
            #TODO DRY from CKMinions
            if self.opts['transport'] in ('zeromq', 'tcp'):
                acc = 'minions'
            else:
                acc = 'accepted'
            for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)):
                if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)):
                    keys.append(fn_)
            log.debug('Writing master key cache')
            # Write a temporary file securely
            # The two branches differ only in the open mode ('w' vs 'wb').
            if six.PY2:
                with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file:
                    self.serial.dump(keys, cache_file)
            else:
                with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache'), mode='wb') as cache_file:
                    self.serial.dump(keys, cache_file)
    def handle_key_rotate(self, now):
        '''
        Rotate the AES key rotation
        '''
        to_rotate = False
        # The dropfile is an out-of-band signal requesting a key rotation.
        dfn = os.path.join(self.opts['cachedir'], '.dfn')
        try:
            stats = os.stat(dfn)
            # Basic Windows permissions don't distinguish between
            # user/group/all. Check for read-only state instead.
            if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
                to_rotate = True
                # Cannot delete read-only files on Windows.
                os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
            elif stats.st_mode == 0o100400:
                # 0o100400 == regular file with 0400 (owner read-only) perms.
                to_rotate = True
            else:
                log.error('Found dropfile with incorrect permissions, ignoring...')
            os.remove(dfn)
        except os.error:
            # No dropfile present -- nothing requested a rotation.
            pass
        if self.opts.get('publish_session'):
            # Also rotate periodically, every publish_session seconds.
            if now - self.rotate >= self.opts['publish_session']:
                to_rotate = True
        if to_rotate:
            log.info('Rotating master AES key')
            for secret_key, secret_map in six.iteritems(SMaster.secrets):
                # should be unnecessary-- since no one else should be modifying
                with secret_map['secret'].get_lock():
                    secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']())
                self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key')
            self.rotate = now
            if self.opts.get('ping_on_rotate'):
                # Ping all minions to get them to pick up the new key
                log.debug('Pinging all connected minions '
                          'due to key rotation')
                salt.utils.master.ping_all_connected_minions(self.opts)
    def handle_git_pillar(self):
        '''
        Update git pillar
        '''
        try:
            for pillar in self.git_pillar:
                pillar.fetch_remotes()
        except Exception as exc:
            # Log and carry on -- a fetch failure must not kill the loop.
            log.error('Exception caught while updating git_pillar',
                      exc_info=True)
    def handle_schedule(self):
        '''
        Evaluate the scheduler
        '''
        try:
            self.schedule.eval()
            # Check if scheduler requires lower loop interval than
            # the loop_interval setting
            if self.schedule.loop_interval < self.loop_interval:
                self.loop_interval = self.schedule.loop_interval
        except Exception as exc:
            log.error('Exception %s occurred in scheduled job', exc)
    def handle_presence(self, old_present):
        '''
        Fire presence events if enabled
        '''
        # On the first run it may need more time for the EventPublisher
        # to come up and be ready. Set the timeout to account for this.
        if self.presence_events and self.event.connect_pull(timeout=3):
            present = self.ckminions.connected_ids()
            new = present.difference(old_present)
            lost = old_present.difference(present)
            if new or lost:
                # Fire new minions present event
                data = {'new': list(new),
                        'lost': list(lost)}
                self.event.fire_event(data, tagify('change', 'presence'))
            data = {'present': list(present)}
            self.event.fire_event(data, tagify('present', 'presence'))
            # Mutate the caller's set in place so the next pass diffs
            # against this one.
            old_present.clear()
            old_present.update(present)
|
saltstack/salt
|
salt/master.py
|
Maintenance.run
|
python
|
def run(self):
    '''
    This is the general passive maintenance process controller for the Salt
    master.

    This is where any data that needs to be cleanly maintained from the
    master is maintained.
    '''
    salt.utils.process.appendproctitle(self.__class__.__name__)
    # init things that need to be done after the process is forked
    self._post_fork_init()
    # Make Start Times
    last = int(time.time())
    # Minion IDs seen as present on the previous pass; handle_presence()
    # updates this set in place between iterations.
    old_present = set()
    # Loop forever; this process only exits when it is killed.
    while True:
        now = int(time.time())
        if (now - last) >= self.loop_interval:
            salt.daemons.masterapi.clean_old_jobs(self.opts)
            salt.daemons.masterapi.clean_expired_tokens(self.opts)
            salt.daemons.masterapi.clean_pub_auth(self.opts)
            salt.daemons.masterapi.clean_proc_dir(self.opts)
            self.handle_git_pillar()
            self.handle_schedule()
            self.handle_key_cache()
            self.handle_presence(old_present)
            self.handle_key_rotate(now)
            salt.utils.verify.check_max_open_files(self.opts)
            last = now
        time.sleep(self.loop_interval)
|
This is the general passive maintenance process controller for the Salt
master.
This is where any data that needs to be cleanly maintained from the
master is maintained.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L214-L245
|
[
"def appendproctitle(name):\n '''\n Append \"name\" to the current process title\n '''\n if HAS_SETPROCTITLE:\n setproctitle.setproctitle(setproctitle.getproctitle() + ' ' + name)\n",
"def clean_old_jobs(opts):\n '''\n Clean out the old jobs from the job cache\n '''\n # TODO: better way to not require creating the masterminion every time?\n mminion = salt.minion.MasterMinion(\n opts,\n states=False,\n rend=False,\n )\n # If the master job cache has a clean_old_jobs, call it\n fstr = '{0}.clean_old_jobs'.format(opts['master_job_cache'])\n if fstr in mminion.returners:\n mminion.returners[fstr]()\n",
"def clean_expired_tokens(opts):\n '''\n Clean expired tokens from the master\n '''\n loadauth = salt.auth.LoadAuth(opts)\n for tok in loadauth.list_tokens():\n token_data = loadauth.get_tok(tok)\n if 'expire' not in token_data or token_data.get('expire', 0) < time.time():\n loadauth.rm_token(tok)\n",
"def clean_pub_auth(opts):\n try:\n auth_cache = os.path.join(opts['cachedir'], 'publish_auth')\n if not os.path.exists(auth_cache):\n return\n else:\n for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(auth_cache):\n for auth_file in filenames:\n auth_file_path = os.path.join(dirpath, auth_file)\n if not os.path.isfile(auth_file_path):\n continue\n if (time.time() - os.path.getmtime(auth_file_path) >\n (opts['keep_jobs'] * 3600)):\n os.remove(auth_file_path)\n except (IOError, OSError):\n log.error('Unable to delete pub auth file')\n",
"def clean_proc_dir(opts):\n '''\n Clean out old tracked jobs running on the master\n\n Generally, anything tracking a job should remove the job\n once the job has finished. However, this will remove any\n jobs that for some reason were not properly removed\n when finished or errored.\n '''\n serial = salt.payload.Serial(opts)\n proc_dir = os.path.join(opts['cachedir'], 'proc')\n for fn_ in os.listdir(proc_dir):\n proc_file = os.path.join(*[proc_dir, fn_])\n data = salt.utils.master.read_proc_file(proc_file, opts)\n if not data:\n try:\n log.warning(\n \"Found proc file %s without proper data. Removing from tracked proc files.\",\n proc_file\n )\n os.remove(proc_file)\n except (OSError, IOError) as err:\n log.error('Unable to remove proc file: %s.', err)\n continue\n if not salt.utils.master.is_pid_healthy(data['pid']):\n try:\n log.warning(\n \"PID %s not owned by salt or no longer running. Removing tracked proc file %s\",\n data['pid'],\n proc_file\n )\n os.remove(proc_file)\n except (OSError, IOError) as err:\n log.error('Unable to remove proc file: %s.', err)\n",
"def check_max_open_files(opts):\n '''\n Check the number of max allowed open files and adjust if needed\n '''\n mof_c = opts.get('max_open_files', 100000)\n if sys.platform.startswith('win'):\n # Check the Windows API for more detail on this\n # http://msdn.microsoft.com/en-us/library/xt874334(v=vs.71).aspx\n # and the python binding http://timgolden.me.uk/pywin32-docs/win32file.html\n mof_s = mof_h = win32file._getmaxstdio()\n else:\n mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)\n\n accepted_keys_dir = os.path.join(opts.get('pki_dir'), 'minions')\n accepted_count = len(os.listdir(accepted_keys_dir))\n\n log.debug(\n 'This salt-master instance has accepted %s minion keys.',\n accepted_count\n )\n\n level = logging.INFO\n\n if (accepted_count * 4) <= mof_s:\n # We check for the soft value of max open files here because that's the\n # value the user chose to raise to.\n #\n # The number of accepted keys multiplied by four(4) is lower than the\n # soft value, everything should be OK\n return\n\n msg = (\n 'The number of accepted minion keys({0}) should be lower than 1/4 '\n 'of the max open files soft setting({1}). '.format(\n accepted_count, mof_s\n )\n )\n\n if accepted_count >= mof_s:\n # This should never occur, it might have already crashed\n msg += 'salt-master will crash pretty soon! '\n level = logging.CRITICAL\n elif (accepted_count * 2) >= mof_s:\n # This is way too low, CRITICAL\n level = logging.CRITICAL\n elif (accepted_count * 3) >= mof_s:\n level = logging.WARNING\n # The accepted count is more than 3 time, WARN\n elif (accepted_count * 4) >= mof_s:\n level = logging.INFO\n\n if mof_c < mof_h:\n msg += ('According to the system\\'s hard limit, there\\'s still a '\n 'margin of {0} to raise the salt\\'s max_open_files '\n 'setting. ').format(mof_h - mof_c)\n\n msg += 'Please consider raising this value.'\n log.log(level=level, msg=msg)\n"
] |
class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
    '''
    A generalized maintenance process which performs maintenance routines.
    '''
    def __init__(self, opts, **kwargs):
        '''
        Create a maintenance instance

        :param dict opts: The salt options
        '''
        super(Maintenance, self).__init__(**kwargs)
        self.opts = opts
        # How often do we perform the maintenance tasks
        self.loop_interval = int(self.opts['loop_interval'])
        # Track key rotation intervals
        self.rotate = int(time.time())
        # A serializer for general maint operations
        self.serial = salt.payload.Serial(self.opts)
    # __setstate__ and __getstate__ are only used on Windows.
    # We do this so that __init__ will be invoked on Windows in the child
    # process so that a register_after_fork() equivalent will work on Windows.
    def __setstate__(self, state):
        self._is_child = True
        self.__init__(
            state['opts'],
            log_queue=state['log_queue'],
            log_queue_level=state['log_queue_level']
        )
    def __getstate__(self):
        return {
            'opts': self.opts,
            'log_queue': self.log_queue,
            'log_queue_level': self.log_queue_level
        }
    def _post_fork_init(self):
        '''
        Some things need to be init'd after the fork has completed

        The easiest example is that one of these module types creates a thread
        in the parent process, then once the fork happens you'll start getting
        errors like "WARNING: Mixing fork() and threads detected; memory leaked."
        '''
        # Load Runners
        ropts = dict(self.opts)
        ropts['quiet'] = True
        runner_client = salt.runner.RunnerClient(ropts)
        # Load Returners
        self.returners = salt.loader.returners(self.opts, {})
        # Init Scheduler -- scheduled jobs run runner functions and can
        # push results through the loaded returners.
        self.schedule = salt.utils.schedule.Schedule(self.opts,
                                                     runner_client.functions_dict(),
                                                     returners=self.returners)
        self.ckminions = salt.utils.minions.CkMinions(self.opts)
        # Make Event bus for firing
        self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
        # Init any values needed by the git ext pillar
        self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
        # Presence events are emitted by this process only when at least
        # one configured transport is not TCP.
        self.presence_events = False
        if self.opts.get('presence_events', False):
            tcp_only = True
            for transport, _ in iter_transport_opts(self.opts):
                if transport != 'tcp':
                    tcp_only = False
            if not tcp_only:
                # For a TCP only transport, the presence events will be
                # handled in the transport code.
                self.presence_events = True
    def handle_key_cache(self):
        '''
        Evaluate accepted keys and create a msgpack file
        which contains a list
        '''
        if self.opts['key_cache'] == 'sched':
            keys = []
            #TODO DRY from CKMinions
            if self.opts['transport'] in ('zeromq', 'tcp'):
                acc = 'minions'
            else:
                acc = 'accepted'
            for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)):
                if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)):
                    keys.append(fn_)
            log.debug('Writing master key cache')
            # Write a temporary file securely
            # The two branches differ only in the open mode ('w' vs 'wb').
            if six.PY2:
                with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file:
                    self.serial.dump(keys, cache_file)
            else:
                with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache'), mode='wb') as cache_file:
                    self.serial.dump(keys, cache_file)
    def handle_key_rotate(self, now):
        '''
        Rotate the AES key rotation
        '''
        to_rotate = False
        # The dropfile is an out-of-band signal requesting a key rotation.
        dfn = os.path.join(self.opts['cachedir'], '.dfn')
        try:
            stats = os.stat(dfn)
            # Basic Windows permissions don't distinguish between
            # user/group/all. Check for read-only state instead.
            if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
                to_rotate = True
                # Cannot delete read-only files on Windows.
                os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
            elif stats.st_mode == 0o100400:
                # 0o100400 == regular file with 0400 (owner read-only) perms.
                to_rotate = True
            else:
                log.error('Found dropfile with incorrect permissions, ignoring...')
            os.remove(dfn)
        except os.error:
            # No dropfile present -- nothing requested a rotation.
            pass
        if self.opts.get('publish_session'):
            # Also rotate periodically, every publish_session seconds.
            if now - self.rotate >= self.opts['publish_session']:
                to_rotate = True
        if to_rotate:
            log.info('Rotating master AES key')
            for secret_key, secret_map in six.iteritems(SMaster.secrets):
                # should be unnecessary-- since no one else should be modifying
                with secret_map['secret'].get_lock():
                    secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']())
                self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key')
            self.rotate = now
            if self.opts.get('ping_on_rotate'):
                # Ping all minions to get them to pick up the new key
                log.debug('Pinging all connected minions '
                          'due to key rotation')
                salt.utils.master.ping_all_connected_minions(self.opts)
    def handle_git_pillar(self):
        '''
        Update git pillar
        '''
        try:
            for pillar in self.git_pillar:
                pillar.fetch_remotes()
        except Exception as exc:
            # Log and carry on -- a fetch failure must not kill the loop.
            log.error('Exception caught while updating git_pillar',
                      exc_info=True)
    def handle_schedule(self):
        '''
        Evaluate the scheduler
        '''
        try:
            self.schedule.eval()
            # Check if scheduler requires lower loop interval than
            # the loop_interval setting
            if self.schedule.loop_interval < self.loop_interval:
                self.loop_interval = self.schedule.loop_interval
        except Exception as exc:
            log.error('Exception %s occurred in scheduled job', exc)
    def handle_presence(self, old_present):
        '''
        Fire presence events if enabled
        '''
        # On the first run it may need more time for the EventPublisher
        # to come up and be ready. Set the timeout to account for this.
        if self.presence_events and self.event.connect_pull(timeout=3):
            present = self.ckminions.connected_ids()
            new = present.difference(old_present)
            lost = old_present.difference(present)
            if new or lost:
                # Fire new minions present event
                data = {'new': list(new),
                        'lost': list(lost)}
                self.event.fire_event(data, tagify('change', 'presence'))
            data = {'present': list(present)}
            self.event.fire_event(data, tagify('present', 'presence'))
            # Mutate the caller's set in place so the next pass diffs
            # against this one.
            old_present.clear()
            old_present.update(present)
|
saltstack/salt
|
salt/master.py
|
Maintenance.handle_key_cache
|
python
|
def handle_key_cache(self):
    '''
    When key_cache is set to 'sched', scan the accepted-key directory and
    atomically write the list of key file names to a '.key_cache' msgpack
    file inside it.
    '''
    if self.opts['key_cache'] != 'sched':
        return
    #TODO DRY from CKMinions
    if self.opts['transport'] in ('zeromq', 'tcp'):
        acc = 'minions'
    else:
        acc = 'accepted'
    acc_dir = os.path.join(self.opts['pki_dir'], acc)
    keys = [fn_ for fn_ in os.listdir(acc_dir)
            if not fn_.startswith('.')
            and os.path.isfile(os.path.join(acc_dir, fn_))]
    log.debug('Writing master key cache')
    # Write a temporary file securely
    cache_path = os.path.join(acc_dir, '.key_cache')
    open_mode = 'w' if six.PY2 else 'wb'
    with salt.utils.atomicfile.atomic_open(cache_path, mode=open_mode) as cache_file:
        self.serial.dump(keys, cache_file)
|
Evaluate accepted keys and create a msgpack file
which contains a list
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L247-L270
|
[
"def atomic_open(filename, mode='w'):\n '''\n Works like a regular `open()` but writes updates into a temporary\n file instead of the given file and moves it over when the file is\n closed. The file returned behaves as if it was a regular Python\n '''\n if mode in ('r', 'rb', 'r+', 'rb+', 'a', 'ab'):\n raise TypeError('Read or append modes don\\'t work with atomic_open')\n kwargs = {\n 'prefix': '.___atomic_write',\n 'dir': os.path.dirname(filename),\n 'delete': False,\n }\n if six.PY3 and 'b' not in mode:\n kwargs['newline'] = ''\n ntf = tempfile.NamedTemporaryFile(mode, **kwargs)\n return _AtomicWFile(ntf, ntf.name, filename)\n"
] |
class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
    '''
    A generalized maintenance process which performs maintenance routines.
    '''
    def __init__(self, opts, **kwargs):
        '''
        Create a maintenance instance

        :param dict opts: The salt options
        '''
        super(Maintenance, self).__init__(**kwargs)
        self.opts = opts
        # How often do we perform the maintenance tasks
        self.loop_interval = int(self.opts['loop_interval'])
        # Track key rotation intervals
        self.rotate = int(time.time())
        # A serializer for general maint operations
        self.serial = salt.payload.Serial(self.opts)
    # __setstate__ and __getstate__ are only used on Windows.
    # We do this so that __init__ will be invoked on Windows in the child
    # process so that a register_after_fork() equivalent will work on Windows.
    def __setstate__(self, state):
        self._is_child = True
        self.__init__(
            state['opts'],
            log_queue=state['log_queue'],
            log_queue_level=state['log_queue_level']
        )
    def __getstate__(self):
        return {
            'opts': self.opts,
            'log_queue': self.log_queue,
            'log_queue_level': self.log_queue_level
        }
    def _post_fork_init(self):
        '''
        Some things need to be init'd after the fork has completed

        The easiest example is that one of these module types creates a thread
        in the parent process, then once the fork happens you'll start getting
        errors like "WARNING: Mixing fork() and threads detected; memory leaked."
        '''
        # Load Runners
        ropts = dict(self.opts)
        ropts['quiet'] = True
        runner_client = salt.runner.RunnerClient(ropts)
        # Load Returners
        self.returners = salt.loader.returners(self.opts, {})
        # Init Scheduler -- scheduled jobs run runner functions and can
        # push results through the loaded returners.
        self.schedule = salt.utils.schedule.Schedule(self.opts,
                                                     runner_client.functions_dict(),
                                                     returners=self.returners)
        self.ckminions = salt.utils.minions.CkMinions(self.opts)
        # Make Event bus for firing
        self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
        # Init any values needed by the git ext pillar
        self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
        # Presence events are emitted by this process only when at least
        # one configured transport is not TCP.
        self.presence_events = False
        if self.opts.get('presence_events', False):
            tcp_only = True
            for transport, _ in iter_transport_opts(self.opts):
                if transport != 'tcp':
                    tcp_only = False
            if not tcp_only:
                # For a TCP only transport, the presence events will be
                # handled in the transport code.
                self.presence_events = True
    def run(self):
        '''
        This is the general passive maintenance process controller for the Salt
        master.

        This is where any data that needs to be cleanly maintained from the
        master is maintained.
        '''
        salt.utils.process.appendproctitle(self.__class__.__name__)
        # init things that need to be done after the process is forked
        self._post_fork_init()
        # Make Start Times
        last = int(time.time())
        # Minion IDs seen as present on the previous pass;
        # handle_presence() updates this set in place.
        old_present = set()
        # Loop forever; this process only exits when killed.
        while True:
            now = int(time.time())
            if (now - last) >= self.loop_interval:
                salt.daemons.masterapi.clean_old_jobs(self.opts)
                salt.daemons.masterapi.clean_expired_tokens(self.opts)
                salt.daemons.masterapi.clean_pub_auth(self.opts)
                salt.daemons.masterapi.clean_proc_dir(self.opts)
                self.handle_git_pillar()
                self.handle_schedule()
                self.handle_key_cache()
                self.handle_presence(old_present)
                self.handle_key_rotate(now)
                salt.utils.verify.check_max_open_files(self.opts)
                last = now
            time.sleep(self.loop_interval)
    def handle_key_rotate(self, now):
        '''
        Rotate the AES key rotation
        '''
        to_rotate = False
        # The dropfile is an out-of-band signal requesting a key rotation.
        dfn = os.path.join(self.opts['cachedir'], '.dfn')
        try:
            stats = os.stat(dfn)
            # Basic Windows permissions don't distinguish between
            # user/group/all. Check for read-only state instead.
            if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
                to_rotate = True
                # Cannot delete read-only files on Windows.
                os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
            elif stats.st_mode == 0o100400:
                # 0o100400 == regular file with 0400 (owner read-only) perms.
                to_rotate = True
            else:
                log.error('Found dropfile with incorrect permissions, ignoring...')
            os.remove(dfn)
        except os.error:
            # No dropfile present -- nothing requested a rotation.
            pass
        if self.opts.get('publish_session'):
            # Also rotate periodically, every publish_session seconds.
            if now - self.rotate >= self.opts['publish_session']:
                to_rotate = True
        if to_rotate:
            log.info('Rotating master AES key')
            for secret_key, secret_map in six.iteritems(SMaster.secrets):
                # should be unnecessary-- since no one else should be modifying
                with secret_map['secret'].get_lock():
                    secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']())
                self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key')
            self.rotate = now
            if self.opts.get('ping_on_rotate'):
                # Ping all minions to get them to pick up the new key
                log.debug('Pinging all connected minions '
                          'due to key rotation')
                salt.utils.master.ping_all_connected_minions(self.opts)
    def handle_git_pillar(self):
        '''
        Update git pillar
        '''
        try:
            for pillar in self.git_pillar:
                pillar.fetch_remotes()
        except Exception as exc:
            # Log and carry on -- a fetch failure must not kill the loop.
            log.error('Exception caught while updating git_pillar',
                      exc_info=True)
    def handle_schedule(self):
        '''
        Evaluate the scheduler
        '''
        try:
            self.schedule.eval()
            # Check if scheduler requires lower loop interval than
            # the loop_interval setting
            if self.schedule.loop_interval < self.loop_interval:
                self.loop_interval = self.schedule.loop_interval
        except Exception as exc:
            log.error('Exception %s occurred in scheduled job', exc)
    def handle_presence(self, old_present):
        '''
        Fire presence events if enabled
        '''
        # On the first run it may need more time for the EventPublisher
        # to come up and be ready. Set the timeout to account for this.
        if self.presence_events and self.event.connect_pull(timeout=3):
            present = self.ckminions.connected_ids()
            new = present.difference(old_present)
            lost = old_present.difference(present)
            if new or lost:
                # Fire new minions present event
                data = {'new': list(new),
                        'lost': list(lost)}
                self.event.fire_event(data, tagify('change', 'presence'))
            data = {'present': list(present)}
            self.event.fire_event(data, tagify('present', 'presence'))
            # Mutate the caller's set in place so the next pass diffs
            # against this one.
            old_present.clear()
            old_present.update(present)
|
saltstack/salt
|
salt/master.py
|
Maintenance.handle_key_rotate
|
python
|
def handle_key_rotate(self, now):
    '''
    Rotate the AES key rotation
    '''
    to_rotate = False
    # The dropfile is an out-of-band signal requesting a key rotation.
    dfn = os.path.join(self.opts['cachedir'], '.dfn')
    try:
        stats = os.stat(dfn)
        # Basic Windows permissions don't distinguish between
        # user/group/all. Check for read-only state instead.
        if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
            to_rotate = True
            # Cannot delete read-only files on Windows.
            os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
        elif stats.st_mode == 0o100400:
            # 0o100400 == regular file with 0400 (owner read-only) perms.
            to_rotate = True
        else:
            log.error('Found dropfile with incorrect permissions, ignoring...')
        os.remove(dfn)
    except os.error:
        # No dropfile present -- nothing requested a rotation.
        pass
    if self.opts.get('publish_session'):
        # Also rotate periodically, every publish_session seconds.
        if now - self.rotate >= self.opts['publish_session']:
            to_rotate = True
    if to_rotate:
        log.info('Rotating master AES key')
        for secret_key, secret_map in six.iteritems(SMaster.secrets):
            # should be unnecessary-- since no one else should be modifying
            with secret_map['secret'].get_lock():
                secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']())
            self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key')
        self.rotate = now
        if self.opts.get('ping_on_rotate'):
            # Ping all minions to get them to pick up the new key
            log.debug('Pinging all connected minions '
                      'due to key rotation')
            salt.utils.master.ping_all_connected_minions(self.opts)
|
Rotate the AES key rotation
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L272-L310
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n"
] |
class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A generalized maintenance process which performs maintenance routines.
'''
def __init__(self, opts, **kwargs):
'''
Create a maintenance instance
:param dict opts: The salt options
'''
super(Maintenance, self).__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts['loop_interval'])
# Track key rotation intervals
self.rotate = int(time.time())
# A serializer for general maint operations
self.serial = salt.payload.Serial(self.opts)
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _post_fork_init(self):
'''
Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked."
'''
# Load Runners
ropts = dict(self.opts)
ropts['quiet'] = True
runner_client = salt.runner.RunnerClient(ropts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(self.opts,
runner_client.functions_dict(),
returners=self.returners)
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Init any values needed by the git ext pillar
self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
self.presence_events = False
if self.opts.get('presence_events', False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != 'tcp':
tcp_only = False
if not tcp_only:
# For a TCP only transport, the presence events will be
# handled in the transport code.
self.presence_events = True
def run(self):
'''
This is the general passive maintenance process controller for the Salt
master.
This is where any data that needs to be cleanly maintained from the
master is maintained.
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# init things that need to be done after the process is forked
self._post_fork_init()
# Make Start Times
last = int(time.time())
old_present = set()
while True:
now = int(time.time())
if (now - last) >= self.loop_interval:
salt.daemons.masterapi.clean_old_jobs(self.opts)
salt.daemons.masterapi.clean_expired_tokens(self.opts)
salt.daemons.masterapi.clean_pub_auth(self.opts)
salt.daemons.masterapi.clean_proc_dir(self.opts)
self.handle_git_pillar()
self.handle_schedule()
self.handle_key_cache()
self.handle_presence(old_present)
self.handle_key_rotate(now)
salt.utils.verify.check_max_open_files(self.opts)
last = now
time.sleep(self.loop_interval)
def handle_key_cache(self):
'''
Evaluate accepted keys and create a msgpack file
which contains a list
'''
if self.opts['key_cache'] == 'sched':
keys = []
#TODO DRY from CKMinions
if self.opts['transport'] in ('zeromq', 'tcp'):
acc = 'minions'
else:
acc = 'accepted'
for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)):
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)):
keys.append(fn_)
log.debug('Writing master key cache')
# Write a temporary file securely
if six.PY2:
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file:
self.serial.dump(keys, cache_file)
else:
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache'), mode='wb') as cache_file:
self.serial.dump(keys, cache_file)
def handle_git_pillar(self):
'''
Update git pillar
'''
try:
for pillar in self.git_pillar:
pillar.fetch_remotes()
except Exception as exc:
log.error('Exception caught while updating git_pillar',
exc_info=True)
def handle_schedule(self):
'''
Evaluate the scheduler
'''
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc:
log.error('Exception %s occurred in scheduled job', exc)
def handle_presence(self, old_present):
'''
Fire presence events if enabled
'''
# On the first run it may need more time for the EventPublisher
# to come up and be ready. Set the timeout to account for this.
if self.presence_events and self.event.connect_pull(timeout=3):
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {'new': list(new),
'lost': list(lost)}
self.event.fire_event(data, tagify('change', 'presence'))
data = {'present': list(present)}
self.event.fire_event(data, tagify('present', 'presence'))
old_present.clear()
old_present.update(present)
|
saltstack/salt
|
salt/master.py
|
Maintenance.handle_git_pillar
|
python
|
def handle_git_pillar(self):
'''
Update git pillar
'''
try:
for pillar in self.git_pillar:
pillar.fetch_remotes()
except Exception as exc:
log.error('Exception caught while updating git_pillar',
exc_info=True)
|
Update git pillar
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L312-L321
| null |
class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A generalized maintenance process which performs maintenance routines.
'''
def __init__(self, opts, **kwargs):
'''
Create a maintenance instance
:param dict opts: The salt options
'''
super(Maintenance, self).__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts['loop_interval'])
# Track key rotation intervals
self.rotate = int(time.time())
# A serializer for general maint operations
self.serial = salt.payload.Serial(self.opts)
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _post_fork_init(self):
'''
Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked."
'''
# Load Runners
ropts = dict(self.opts)
ropts['quiet'] = True
runner_client = salt.runner.RunnerClient(ropts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(self.opts,
runner_client.functions_dict(),
returners=self.returners)
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Init any values needed by the git ext pillar
self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
self.presence_events = False
if self.opts.get('presence_events', False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != 'tcp':
tcp_only = False
if not tcp_only:
# For a TCP only transport, the presence events will be
# handled in the transport code.
self.presence_events = True
def run(self):
'''
This is the general passive maintenance process controller for the Salt
master.
This is where any data that needs to be cleanly maintained from the
master is maintained.
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# init things that need to be done after the process is forked
self._post_fork_init()
# Make Start Times
last = int(time.time())
old_present = set()
while True:
now = int(time.time())
if (now - last) >= self.loop_interval:
salt.daemons.masterapi.clean_old_jobs(self.opts)
salt.daemons.masterapi.clean_expired_tokens(self.opts)
salt.daemons.masterapi.clean_pub_auth(self.opts)
salt.daemons.masterapi.clean_proc_dir(self.opts)
self.handle_git_pillar()
self.handle_schedule()
self.handle_key_cache()
self.handle_presence(old_present)
self.handle_key_rotate(now)
salt.utils.verify.check_max_open_files(self.opts)
last = now
time.sleep(self.loop_interval)
def handle_key_cache(self):
'''
Evaluate accepted keys and create a msgpack file
which contains a list
'''
if self.opts['key_cache'] == 'sched':
keys = []
#TODO DRY from CKMinions
if self.opts['transport'] in ('zeromq', 'tcp'):
acc = 'minions'
else:
acc = 'accepted'
for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)):
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)):
keys.append(fn_)
log.debug('Writing master key cache')
# Write a temporary file securely
if six.PY2:
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file:
self.serial.dump(keys, cache_file)
else:
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache'), mode='wb') as cache_file:
self.serial.dump(keys, cache_file)
def handle_key_rotate(self, now):
'''
Rotate the AES key rotation
'''
to_rotate = False
dfn = os.path.join(self.opts['cachedir'], '.dfn')
try:
stats = os.stat(dfn)
# Basic Windows permissions don't distinguish between
# user/group/all. Check for read-only state instead.
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
to_rotate = True
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
elif stats.st_mode == 0o100400:
to_rotate = True
else:
log.error('Found dropfile with incorrect permissions, ignoring...')
os.remove(dfn)
except os.error:
pass
if self.opts.get('publish_session'):
if now - self.rotate >= self.opts['publish_session']:
to_rotate = True
if to_rotate:
log.info('Rotating master AES key')
for secret_key, secret_map in six.iteritems(SMaster.secrets):
# should be unnecessary-- since no one else should be modifying
with secret_map['secret'].get_lock():
secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']())
self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key')
self.rotate = now
if self.opts.get('ping_on_rotate'):
# Ping all minions to get them to pick up the new key
log.debug('Pinging all connected minions '
'due to key rotation')
salt.utils.master.ping_all_connected_minions(self.opts)
def handle_schedule(self):
'''
Evaluate the scheduler
'''
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc:
log.error('Exception %s occurred in scheduled job', exc)
def handle_presence(self, old_present):
'''
Fire presence events if enabled
'''
# On the first run it may need more time for the EventPublisher
# to come up and be ready. Set the timeout to account for this.
if self.presence_events and self.event.connect_pull(timeout=3):
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {'new': list(new),
'lost': list(lost)}
self.event.fire_event(data, tagify('change', 'presence'))
data = {'present': list(present)}
self.event.fire_event(data, tagify('present', 'presence'))
old_present.clear()
old_present.update(present)
|
saltstack/salt
|
salt/master.py
|
Maintenance.handle_schedule
|
python
|
def handle_schedule(self):
'''
Evaluate the scheduler
'''
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc:
log.error('Exception %s occurred in scheduled job', exc)
|
Evaluate the scheduler
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L323-L334
| null |
class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A generalized maintenance process which performs maintenance routines.
'''
def __init__(self, opts, **kwargs):
'''
Create a maintenance instance
:param dict opts: The salt options
'''
super(Maintenance, self).__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts['loop_interval'])
# Track key rotation intervals
self.rotate = int(time.time())
# A serializer for general maint operations
self.serial = salt.payload.Serial(self.opts)
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _post_fork_init(self):
'''
Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked."
'''
# Load Runners
ropts = dict(self.opts)
ropts['quiet'] = True
runner_client = salt.runner.RunnerClient(ropts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(self.opts,
runner_client.functions_dict(),
returners=self.returners)
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Init any values needed by the git ext pillar
self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
self.presence_events = False
if self.opts.get('presence_events', False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != 'tcp':
tcp_only = False
if not tcp_only:
# For a TCP only transport, the presence events will be
# handled in the transport code.
self.presence_events = True
def run(self):
'''
This is the general passive maintenance process controller for the Salt
master.
This is where any data that needs to be cleanly maintained from the
master is maintained.
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# init things that need to be done after the process is forked
self._post_fork_init()
# Make Start Times
last = int(time.time())
old_present = set()
while True:
now = int(time.time())
if (now - last) >= self.loop_interval:
salt.daemons.masterapi.clean_old_jobs(self.opts)
salt.daemons.masterapi.clean_expired_tokens(self.opts)
salt.daemons.masterapi.clean_pub_auth(self.opts)
salt.daemons.masterapi.clean_proc_dir(self.opts)
self.handle_git_pillar()
self.handle_schedule()
self.handle_key_cache()
self.handle_presence(old_present)
self.handle_key_rotate(now)
salt.utils.verify.check_max_open_files(self.opts)
last = now
time.sleep(self.loop_interval)
def handle_key_cache(self):
'''
Evaluate accepted keys and create a msgpack file
which contains a list
'''
if self.opts['key_cache'] == 'sched':
keys = []
#TODO DRY from CKMinions
if self.opts['transport'] in ('zeromq', 'tcp'):
acc = 'minions'
else:
acc = 'accepted'
for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)):
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)):
keys.append(fn_)
log.debug('Writing master key cache')
# Write a temporary file securely
if six.PY2:
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file:
self.serial.dump(keys, cache_file)
else:
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache'), mode='wb') as cache_file:
self.serial.dump(keys, cache_file)
def handle_key_rotate(self, now):
'''
Rotate the AES key rotation
'''
to_rotate = False
dfn = os.path.join(self.opts['cachedir'], '.dfn')
try:
stats = os.stat(dfn)
# Basic Windows permissions don't distinguish between
# user/group/all. Check for read-only state instead.
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
to_rotate = True
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
elif stats.st_mode == 0o100400:
to_rotate = True
else:
log.error('Found dropfile with incorrect permissions, ignoring...')
os.remove(dfn)
except os.error:
pass
if self.opts.get('publish_session'):
if now - self.rotate >= self.opts['publish_session']:
to_rotate = True
if to_rotate:
log.info('Rotating master AES key')
for secret_key, secret_map in six.iteritems(SMaster.secrets):
# should be unnecessary-- since no one else should be modifying
with secret_map['secret'].get_lock():
secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']())
self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key')
self.rotate = now
if self.opts.get('ping_on_rotate'):
# Ping all minions to get them to pick up the new key
log.debug('Pinging all connected minions '
'due to key rotation')
salt.utils.master.ping_all_connected_minions(self.opts)
def handle_git_pillar(self):
'''
Update git pillar
'''
try:
for pillar in self.git_pillar:
pillar.fetch_remotes()
except Exception as exc:
log.error('Exception caught while updating git_pillar',
exc_info=True)
def handle_presence(self, old_present):
'''
Fire presence events if enabled
'''
# On the first run it may need more time for the EventPublisher
# to come up and be ready. Set the timeout to account for this.
if self.presence_events and self.event.connect_pull(timeout=3):
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {'new': list(new),
'lost': list(lost)}
self.event.fire_event(data, tagify('change', 'presence'))
data = {'present': list(present)}
self.event.fire_event(data, tagify('present', 'presence'))
old_present.clear()
old_present.update(present)
|
saltstack/salt
|
salt/master.py
|
Maintenance.handle_presence
|
python
|
def handle_presence(self, old_present):
'''
Fire presence events if enabled
'''
# On the first run it may need more time for the EventPublisher
# to come up and be ready. Set the timeout to account for this.
if self.presence_events and self.event.connect_pull(timeout=3):
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {'new': list(new),
'lost': list(lost)}
self.event.fire_event(data, tagify('change', 'presence'))
data = {'present': list(present)}
self.event.fire_event(data, tagify('present', 'presence'))
old_present.clear()
old_present.update(present)
|
Fire presence events if enabled
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L336-L354
|
[
"def tagify(suffix='', prefix='', base=SALT):\n '''\n convenience function to build a namespaced event tag string\n from joining with the TABPART character the base, prefix and suffix\n\n If string prefix is a valid key in TAGS Then use the value of key prefix\n Else use prefix string\n\n If suffix is a list Then join all string elements of suffix individually\n Else use string suffix\n\n '''\n parts = [base, TAGS.get(prefix, prefix)]\n if hasattr(suffix, 'append'): # list so extend parts\n parts.extend(suffix)\n else: # string so append\n parts.append(suffix)\n\n for index, _ in enumerate(parts):\n try:\n parts[index] = salt.utils.stringutils.to_str(parts[index])\n except TypeError:\n parts[index] = str(parts[index])\n return TAGPARTER.join([part for part in parts if part])\n"
] |
class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A generalized maintenance process which performs maintenance routines.
'''
def __init__(self, opts, **kwargs):
'''
Create a maintenance instance
:param dict opts: The salt options
'''
super(Maintenance, self).__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts['loop_interval'])
# Track key rotation intervals
self.rotate = int(time.time())
# A serializer for general maint operations
self.serial = salt.payload.Serial(self.opts)
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _post_fork_init(self):
'''
Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked."
'''
# Load Runners
ropts = dict(self.opts)
ropts['quiet'] = True
runner_client = salt.runner.RunnerClient(ropts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(self.opts,
runner_client.functions_dict(),
returners=self.returners)
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Init any values needed by the git ext pillar
self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
self.presence_events = False
if self.opts.get('presence_events', False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != 'tcp':
tcp_only = False
if not tcp_only:
# For a TCP only transport, the presence events will be
# handled in the transport code.
self.presence_events = True
def run(self):
'''
This is the general passive maintenance process controller for the Salt
master.
This is where any data that needs to be cleanly maintained from the
master is maintained.
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# init things that need to be done after the process is forked
self._post_fork_init()
# Make Start Times
last = int(time.time())
old_present = set()
while True:
now = int(time.time())
if (now - last) >= self.loop_interval:
salt.daemons.masterapi.clean_old_jobs(self.opts)
salt.daemons.masterapi.clean_expired_tokens(self.opts)
salt.daemons.masterapi.clean_pub_auth(self.opts)
salt.daemons.masterapi.clean_proc_dir(self.opts)
self.handle_git_pillar()
self.handle_schedule()
self.handle_key_cache()
self.handle_presence(old_present)
self.handle_key_rotate(now)
salt.utils.verify.check_max_open_files(self.opts)
last = now
time.sleep(self.loop_interval)
def handle_key_cache(self):
'''
Evaluate accepted keys and create a msgpack file
which contains a list
'''
if self.opts['key_cache'] == 'sched':
keys = []
#TODO DRY from CKMinions
if self.opts['transport'] in ('zeromq', 'tcp'):
acc = 'minions'
else:
acc = 'accepted'
for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)):
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)):
keys.append(fn_)
log.debug('Writing master key cache')
# Write a temporary file securely
if six.PY2:
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file:
self.serial.dump(keys, cache_file)
else:
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache'), mode='wb') as cache_file:
self.serial.dump(keys, cache_file)
def handle_key_rotate(self, now):
'''
Rotate the AES key rotation
'''
to_rotate = False
dfn = os.path.join(self.opts['cachedir'], '.dfn')
try:
stats = os.stat(dfn)
# Basic Windows permissions don't distinguish between
# user/group/all. Check for read-only state instead.
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
to_rotate = True
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
elif stats.st_mode == 0o100400:
to_rotate = True
else:
log.error('Found dropfile with incorrect permissions, ignoring...')
os.remove(dfn)
except os.error:
pass
if self.opts.get('publish_session'):
if now - self.rotate >= self.opts['publish_session']:
to_rotate = True
if to_rotate:
log.info('Rotating master AES key')
for secret_key, secret_map in six.iteritems(SMaster.secrets):
# should be unnecessary-- since no one else should be modifying
with secret_map['secret'].get_lock():
secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']())
self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key')
self.rotate = now
if self.opts.get('ping_on_rotate'):
# Ping all minions to get them to pick up the new key
log.debug('Pinging all connected minions '
'due to key rotation')
salt.utils.master.ping_all_connected_minions(self.opts)
def handle_git_pillar(self):
'''
Update git pillar
'''
try:
for pillar in self.git_pillar:
pillar.fetch_remotes()
except Exception as exc:
log.error('Exception caught while updating git_pillar',
exc_info=True)
def handle_schedule(self):
'''
Evaluate the scheduler
'''
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc:
log.error('Exception %s occurred in scheduled job', exc)
|
saltstack/salt
|
salt/master.py
|
FileserverUpdate.fill_buckets
|
python
|
def fill_buckets(self):
'''
Get the configured backends and the intervals for any backend which
supports them, and set up the update "buckets". There will be one
bucket for each thing being updated at a given interval.
'''
update_intervals = self.fileserver.update_intervals()
self.buckets = {}
for backend in self.fileserver.backends():
fstr = '{0}.update'.format(backend)
try:
update_func = self.fileserver.servers[fstr]
except KeyError:
log.debug(
'No update function for the %s filserver backend',
backend
)
continue
if backend in update_intervals:
# Variable intervals are supported for this backend
for id_, interval in six.iteritems(update_intervals[backend]):
if not interval:
# Don't allow an interval of 0
interval = DEFAULT_INTERVAL
log.debug(
'An update_interval of 0 is not supported, '
'falling back to %s', interval
)
i_ptr = self.buckets.setdefault(interval, OrderedDict())
# Backend doesn't technically need to be present in the
# key, all we *really* need is the function reference, but
# having it there makes it easier to provide meaningful
# debug logging in the update threads.
i_ptr.setdefault((backend, update_func), []).append(id_)
else:
# Variable intervals are not supported for this backend, so
# fall back to the global interval for that fileserver. Since
# this backend doesn't support variable updates, we have
# nothing to pass to the backend's update func, so we'll just
# set the value to None.
try:
interval_key = '{0}_update_interval'.format(backend)
interval = self.opts[interval_key]
except KeyError:
interval = DEFAULT_INTERVAL
log.warning(
'%s key missing from configuration. Falling back to '
'default interval of %d seconds',
interval_key, interval
)
self.buckets.setdefault(
interval, OrderedDict())[(backend, update_func)] = None
|
Get the configured backends and the intervals for any backend which
supports them, and set up the update "buckets". There will be one
bucket for each thing being updated at a given interval.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L385-L436
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n"
] |
class FileserverUpdate(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A process from which to update any dynamic fileserver backends
'''
def __init__(self, opts, **kwargs):
super(FileserverUpdate, self).__init__(**kwargs)
self.opts = opts
self.update_threads = {}
# Avoid circular import
import salt.fileserver
self.fileserver = salt.fileserver.Fileserver(self.opts)
self.fill_buckets()
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
log_queue=state['log_queue'],
)
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue,
}
def update_fileserver(self, interval, backends):
'''
Threading target which handles all updates for a given wait interval
'''
def _do_update():
log.debug(
'Performing fileserver updates for items with an update '
'interval of %d', interval
)
for backend, update_args in six.iteritems(backends):
backend_name, update_func = backend
try:
if update_args:
log.debug(
'Updating %s fileserver cache for the following '
'targets: %s', backend_name, update_args
)
args = (update_args,)
else:
log.debug('Updating %s fileserver cache', backend_name)
args = ()
update_func(*args)
except Exception as exc:
log.exception(
'Uncaught exception while updating %s fileserver '
'cache', backend_name
)
log.debug(
'Completed fileserver updates for items with an update '
'interval of %d, waiting %d seconds', interval, interval
)
condition = threading.Condition()
_do_update()
while True:
with condition:
condition.wait(interval)
_do_update()
def run(self):
'''
Start the update threads
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# Clean out the fileserver backend cache
salt.daemons.masterapi.clean_fsbackend(self.opts)
for interval in self.buckets:
self.update_threads[interval] = threading.Thread(
target=self.update_fileserver,
args=(interval, self.buckets[interval]),
)
self.update_threads[interval].start()
# Keep the process alive
while True:
time.sleep(60)
|
saltstack/salt
|
salt/master.py
|
FileserverUpdate.update_fileserver
|
python
|
def update_fileserver(self, interval, backends):
'''
Threading target which handles all updates for a given wait interval
'''
def _do_update():
log.debug(
'Performing fileserver updates for items with an update '
'interval of %d', interval
)
for backend, update_args in six.iteritems(backends):
backend_name, update_func = backend
try:
if update_args:
log.debug(
'Updating %s fileserver cache for the following '
'targets: %s', backend_name, update_args
)
args = (update_args,)
else:
log.debug('Updating %s fileserver cache', backend_name)
args = ()
update_func(*args)
except Exception as exc:
log.exception(
'Uncaught exception while updating %s fileserver '
'cache', backend_name
)
log.debug(
'Completed fileserver updates for items with an update '
'interval of %d, waiting %d seconds', interval, interval
)
condition = threading.Condition()
_do_update()
while True:
with condition:
condition.wait(interval)
_do_update()
|
Threading target which handles all updates for a given wait interval
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L438-L477
|
[
"def _do_update():\n log.debug(\n 'Performing fileserver updates for items with an update '\n 'interval of %d', interval\n )\n for backend, update_args in six.iteritems(backends):\n backend_name, update_func = backend\n try:\n if update_args:\n log.debug(\n 'Updating %s fileserver cache for the following '\n 'targets: %s', backend_name, update_args\n )\n args = (update_args,)\n else:\n log.debug('Updating %s fileserver cache', backend_name)\n args = ()\n\n update_func(*args)\n except Exception as exc:\n log.exception(\n 'Uncaught exception while updating %s fileserver '\n 'cache', backend_name\n )\n\n log.debug(\n 'Completed fileserver updates for items with an update '\n 'interval of %d, waiting %d seconds', interval, interval\n )\n"
] |
class FileserverUpdate(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A process from which to update any dynamic fileserver backends
'''
def __init__(self, opts, **kwargs):
super(FileserverUpdate, self).__init__(**kwargs)
self.opts = opts
self.update_threads = {}
# Avoid circular import
import salt.fileserver
self.fileserver = salt.fileserver.Fileserver(self.opts)
self.fill_buckets()
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
log_queue=state['log_queue'],
)
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue,
}
def fill_buckets(self):
'''
Get the configured backends and the intervals for any backend which
supports them, and set up the update "buckets". There will be one
bucket for each thing being updated at a given interval.
'''
update_intervals = self.fileserver.update_intervals()
self.buckets = {}
for backend in self.fileserver.backends():
fstr = '{0}.update'.format(backend)
try:
update_func = self.fileserver.servers[fstr]
except KeyError:
log.debug(
'No update function for the %s filserver backend',
backend
)
continue
if backend in update_intervals:
# Variable intervals are supported for this backend
for id_, interval in six.iteritems(update_intervals[backend]):
if not interval:
# Don't allow an interval of 0
interval = DEFAULT_INTERVAL
log.debug(
'An update_interval of 0 is not supported, '
'falling back to %s', interval
)
i_ptr = self.buckets.setdefault(interval, OrderedDict())
# Backend doesn't technically need to be present in the
# key, all we *really* need is the function reference, but
# having it there makes it easier to provide meaningful
# debug logging in the update threads.
i_ptr.setdefault((backend, update_func), []).append(id_)
else:
# Variable intervals are not supported for this backend, so
# fall back to the global interval for that fileserver. Since
# this backend doesn't support variable updates, we have
# nothing to pass to the backend's update func, so we'll just
# set the value to None.
try:
interval_key = '{0}_update_interval'.format(backend)
interval = self.opts[interval_key]
except KeyError:
interval = DEFAULT_INTERVAL
log.warning(
'%s key missing from configuration. Falling back to '
'default interval of %d seconds',
interval_key, interval
)
self.buckets.setdefault(
interval, OrderedDict())[(backend, update_func)] = None
def run(self):
'''
Start the update threads
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# Clean out the fileserver backend cache
salt.daemons.masterapi.clean_fsbackend(self.opts)
for interval in self.buckets:
self.update_threads[interval] = threading.Thread(
target=self.update_fileserver,
args=(interval, self.buckets[interval]),
)
self.update_threads[interval].start()
# Keep the process alive
while True:
time.sleep(60)
|
saltstack/salt
|
salt/master.py
|
FileserverUpdate.run
|
python
|
def run(self):
'''
Start the update threads
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# Clean out the fileserver backend cache
salt.daemons.masterapi.clean_fsbackend(self.opts)
for interval in self.buckets:
self.update_threads[interval] = threading.Thread(
target=self.update_fileserver,
args=(interval, self.buckets[interval]),
)
self.update_threads[interval].start()
# Keep the process alive
while True:
time.sleep(60)
|
Start the update threads
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L479-L496
|
[
"def appendproctitle(name):\n '''\n Append \"name\" to the current process title\n '''\n if HAS_SETPROCTITLE:\n setproctitle.setproctitle(setproctitle.getproctitle() + ' ' + name)\n",
"def clean_fsbackend(opts):\n '''\n Clean out the old fileserver backends\n '''\n # Clear remote fileserver backend caches so they get recreated\n for backend in ('git', 'hg', 'svn'):\n if backend in opts['fileserver_backend']:\n env_cache = os.path.join(\n opts['cachedir'],\n '{0}fs'.format(backend),\n 'envs.p'\n )\n if os.path.isfile(env_cache):\n log.debug('Clearing %sfs env cache', backend)\n try:\n os.remove(env_cache)\n except OSError as exc:\n log.critical(\n 'Unable to clear env cache file %s: %s',\n env_cache, exc\n )\n\n file_lists_dir = os.path.join(\n opts['cachedir'],\n 'file_lists',\n '{0}fs'.format(backend)\n )\n try:\n file_lists_caches = os.listdir(file_lists_dir)\n except OSError:\n continue\n for file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'):\n cache_file = os.path.join(file_lists_dir, file_lists_cache)\n try:\n os.remove(cache_file)\n except OSError as exc:\n log.critical(\n 'Unable to file_lists cache file %s: %s',\n cache_file, exc\n )\n"
] |
class FileserverUpdate(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A process from which to update any dynamic fileserver backends
'''
def __init__(self, opts, **kwargs):
super(FileserverUpdate, self).__init__(**kwargs)
self.opts = opts
self.update_threads = {}
# Avoid circular import
import salt.fileserver
self.fileserver = salt.fileserver.Fileserver(self.opts)
self.fill_buckets()
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
log_queue=state['log_queue'],
)
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue,
}
def fill_buckets(self):
'''
Get the configured backends and the intervals for any backend which
supports them, and set up the update "buckets". There will be one
bucket for each thing being updated at a given interval.
'''
update_intervals = self.fileserver.update_intervals()
self.buckets = {}
for backend in self.fileserver.backends():
fstr = '{0}.update'.format(backend)
try:
update_func = self.fileserver.servers[fstr]
except KeyError:
log.debug(
'No update function for the %s filserver backend',
backend
)
continue
if backend in update_intervals:
# Variable intervals are supported for this backend
for id_, interval in six.iteritems(update_intervals[backend]):
if not interval:
# Don't allow an interval of 0
interval = DEFAULT_INTERVAL
log.debug(
'An update_interval of 0 is not supported, '
'falling back to %s', interval
)
i_ptr = self.buckets.setdefault(interval, OrderedDict())
# Backend doesn't technically need to be present in the
# key, all we *really* need is the function reference, but
# having it there makes it easier to provide meaningful
# debug logging in the update threads.
i_ptr.setdefault((backend, update_func), []).append(id_)
else:
# Variable intervals are not supported for this backend, so
# fall back to the global interval for that fileserver. Since
# this backend doesn't support variable updates, we have
# nothing to pass to the backend's update func, so we'll just
# set the value to None.
try:
interval_key = '{0}_update_interval'.format(backend)
interval = self.opts[interval_key]
except KeyError:
interval = DEFAULT_INTERVAL
log.warning(
'%s key missing from configuration. Falling back to '
'default interval of %d seconds',
interval_key, interval
)
self.buckets.setdefault(
interval, OrderedDict())[(backend, update_func)] = None
def update_fileserver(self, interval, backends):
'''
Threading target which handles all updates for a given wait interval
'''
def _do_update():
log.debug(
'Performing fileserver updates for items with an update '
'interval of %d', interval
)
for backend, update_args in six.iteritems(backends):
backend_name, update_func = backend
try:
if update_args:
log.debug(
'Updating %s fileserver cache for the following '
'targets: %s', backend_name, update_args
)
args = (update_args,)
else:
log.debug('Updating %s fileserver cache', backend_name)
args = ()
update_func(*args)
except Exception as exc:
log.exception(
'Uncaught exception while updating %s fileserver '
'cache', backend_name
)
log.debug(
'Completed fileserver updates for items with an update '
'interval of %d, waiting %d seconds', interval, interval
)
condition = threading.Condition()
_do_update()
while True:
with condition:
condition.wait(interval)
_do_update()
|
saltstack/salt
|
salt/master.py
|
Master._pre_flight
|
python
|
def _pre_flight(self):
'''
Run pre flight checks. If anything in this method fails then the master
should not start up.
'''
errors = []
critical_errors = []
try:
os.chdir('/')
except OSError as err:
errors.append(
'Cannot change to root directory ({0})'.format(err)
)
if self.opts.get('fileserver_verify_config', True):
# Avoid circular import
import salt.fileserver
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
'Failed to load fileserver backends, the configured backends '
'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
)
else:
# Run init() for all backends which support the function, to
# double-check configuration
try:
fileserver.init()
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append('{0}'.format(exc))
if not self.opts['fileserver_backend']:
errors.append('No fileserver backends are configured')
# Check to see if we need to create a pillar cache dir
if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')):
try:
with salt.utils.files.set_umask(0o077):
os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
except OSError:
pass
if self.opts.get('git_pillar_verify_config', True):
try:
git_pillars = [
x for x in self.opts.get('ext_pillar', [])
if 'git' in x
and not isinstance(x['git'], six.string_types)
]
except TypeError:
git_pillars = []
critical_errors.append(
'Invalid ext_pillar configuration. It is likely that the '
'external pillar type was not specified for one or more '
'external pillars.'
)
if git_pillars:
try:
new_opts = copy.deepcopy(self.opts)
import salt.pillar.git_pillar
for repo in git_pillars:
new_opts['ext_pillar'] = [repo]
try:
git_pillar = salt.utils.gitfs.GitPillar(
new_opts,
repo['git'],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
del new_opts
if errors or critical_errors:
for error in errors:
log.error(error)
for error in critical_errors:
log.critical(error)
log.critical('Master failed pre flight checks, exiting\n')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
Run pre flight checks. If anything in this method fails then the master
should not start up.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L563-L644
| null |
class Master(SMaster):
'''
The salt master server
'''
def __init__(self, opts):
'''
Create a salt master server instance
:param dict: The salt options
'''
if zmq and ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
SMaster.__init__(self, opts)
def __set_max_open_files(self):
if not HAS_RESOURCE:
return
# Let's check to see how our max open files(ulimit -n) setting is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
if mof_h == resource.RLIM_INFINITY:
# Unclear what to do with infinity... macOS reports RLIM_INFINITY as
# hard limit,but raising to anything above soft limit fails...
mof_h = mof_s
log.info(
'Current values for max open files soft/hard setting: %s/%s',
mof_s, mof_h
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts['max_open_files']
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.info(
'The value for the \'max_open_files\' setting, %s, is higher '
'than the highest value the user running salt is allowed to '
'set (%s). Defaulting to %s.', mof_c, mof_h, mof_h
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.info('Raising max open files value to %s', mof_c)
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
try:
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
'New values for max open files soft/hard values: %s/%s',
mof_s, mof_h
)
except ValueError:
# https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
# A user under macOS reported that our 100000 default value is
# still too high.
log.critical(
'Failed to raise max open files setting to %s. If this '
'value is too low, the salt-master will most likely fail '
'to run properly.', mof_c
)
def start(self):
'''
Turn on the master server components
'''
self._pre_flight()
log.info('salt-master is starting as user \'%s\'', salt.utils.user.get_user())
enable_sigusr1_handler()
enable_sigusr2_handler()
self.__set_max_open_files()
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
# Setup the secrets here because the PubServerChannel may need
# them as well.
SMaster.secrets['aes'] = {
'secret': multiprocessing.Array(
ctypes.c_char,
salt.utils.stringutils.to_bytes(
salt.crypt.Crypticle.generate_key_string()
)
),
'reload': salt.crypt.Crypticle.generate_key_string
}
log.info('Creating master process manager')
# Since there are children having their own ProcessManager we should wait for kill more time.
self.process_manager = salt.utils.process.ProcessManager(wait_for_kill=5)
pub_channels = []
log.info('Creating master publisher process')
log_queue = salt.log.setup.get_multiprocessing_logging_queue()
for _, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.pre_fork(self.process_manager, kwargs={'log_queue': log_queue})
pub_channels.append(chan)
log.info('Creating master event publisher process')
self.process_manager.add_process(salt.utils.event.EventPublisher, args=(self.opts,))
if self.opts.get('reactor'):
if isinstance(self.opts['engines'], list):
rine = False
for item in self.opts['engines']:
if 'reactor' in item:
rine = True
break
if not rine:
self.opts['engines'].append({'reactor': {}})
else:
if 'reactor' not in self.opts['engines']:
log.info('Enabling the reactor engine')
self.opts['engines']['reactor'] = {}
salt.engines.start_engines(self.opts, self.process_manager)
# must be after channels
log.info('Creating master maintenance process')
self.process_manager.add_process(Maintenance, args=(self.opts,))
if self.opts.get('event_return'):
log.info('Creating master event return process')
self.process_manager.add_process(salt.utils.event.EventReturn, args=(self.opts,))
ext_procs = self.opts.get('ext_processes', [])
for proc in ext_procs:
log.info('Creating ext_processes process: %s', proc)
try:
mod = '.'.join(proc.split('.')[:-1])
cls = proc.split('.')[-1]
_tmp = __import__(mod, globals(), locals(), [cls], -1)
cls = _tmp.__getattribute__(cls)
self.process_manager.add_process(cls, args=(self.opts,))
except Exception:
log.error('Error creating ext_processes process: %s', proc)
if HAS_HALITE and 'halite' in self.opts:
log.info('Creating master halite process')
self.process_manager.add_process(Halite, args=(self.opts['halite'],))
# TODO: remove, or at least push into the transport stuff (pre-fork probably makes sense there)
if self.opts['con_cache']:
log.info('Creating master concache process')
self.process_manager.add_process(salt.utils.master.ConnectedCache, args=(self.opts,))
# workaround for issue #16315, race condition
log.debug('Sleeping for two seconds to let concache rest')
time.sleep(2)
log.info('Creating master request server process')
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = log_queue
kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()
kwargs['secrets'] = SMaster.secrets
self.process_manager.add_process(
ReqServer,
args=(self.opts, self.key, self.master_key),
kwargs=kwargs,
name='ReqServer')
self.process_manager.add_process(
FileserverUpdate,
args=(self.opts,))
# Fire up SSDP discovery publisher
if self.opts['discovery']:
if salt.utils.ssdp.SSDPDiscoveryServer.is_available():
self.process_manager.add_process(salt.utils.ssdp.SSDPDiscoveryServer(
port=self.opts['discovery']['port'],
listen_ip=self.opts['interface'],
answer={'mapping': self.opts['discovery'].get('mapping', {})}).run)
else:
log.error('Unable to load SSDP: asynchronous IO is not available.')
if sys.version_info.major == 2:
log.error('You are using Python 2, please install "trollius" module to enable SSDP discovery.')
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
self.process_manager.run()
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
|
saltstack/salt
|
salt/master.py
|
Master.start
|
python
|
def start(self):
'''
Turn on the master server components
'''
self._pre_flight()
log.info('salt-master is starting as user \'%s\'', salt.utils.user.get_user())
enable_sigusr1_handler()
enable_sigusr2_handler()
self.__set_max_open_files()
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
# Setup the secrets here because the PubServerChannel may need
# them as well.
SMaster.secrets['aes'] = {
'secret': multiprocessing.Array(
ctypes.c_char,
salt.utils.stringutils.to_bytes(
salt.crypt.Crypticle.generate_key_string()
)
),
'reload': salt.crypt.Crypticle.generate_key_string
}
log.info('Creating master process manager')
# Since there are children having their own ProcessManager we should wait for kill more time.
self.process_manager = salt.utils.process.ProcessManager(wait_for_kill=5)
pub_channels = []
log.info('Creating master publisher process')
log_queue = salt.log.setup.get_multiprocessing_logging_queue()
for _, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.pre_fork(self.process_manager, kwargs={'log_queue': log_queue})
pub_channels.append(chan)
log.info('Creating master event publisher process')
self.process_manager.add_process(salt.utils.event.EventPublisher, args=(self.opts,))
if self.opts.get('reactor'):
if isinstance(self.opts['engines'], list):
rine = False
for item in self.opts['engines']:
if 'reactor' in item:
rine = True
break
if not rine:
self.opts['engines'].append({'reactor': {}})
else:
if 'reactor' not in self.opts['engines']:
log.info('Enabling the reactor engine')
self.opts['engines']['reactor'] = {}
salt.engines.start_engines(self.opts, self.process_manager)
# must be after channels
log.info('Creating master maintenance process')
self.process_manager.add_process(Maintenance, args=(self.opts,))
if self.opts.get('event_return'):
log.info('Creating master event return process')
self.process_manager.add_process(salt.utils.event.EventReturn, args=(self.opts,))
ext_procs = self.opts.get('ext_processes', [])
for proc in ext_procs:
log.info('Creating ext_processes process: %s', proc)
try:
mod = '.'.join(proc.split('.')[:-1])
cls = proc.split('.')[-1]
_tmp = __import__(mod, globals(), locals(), [cls], -1)
cls = _tmp.__getattribute__(cls)
self.process_manager.add_process(cls, args=(self.opts,))
except Exception:
log.error('Error creating ext_processes process: %s', proc)
if HAS_HALITE and 'halite' in self.opts:
log.info('Creating master halite process')
self.process_manager.add_process(Halite, args=(self.opts['halite'],))
# TODO: remove, or at least push into the transport stuff (pre-fork probably makes sense there)
if self.opts['con_cache']:
log.info('Creating master concache process')
self.process_manager.add_process(salt.utils.master.ConnectedCache, args=(self.opts,))
# workaround for issue #16315, race condition
log.debug('Sleeping for two seconds to let concache rest')
time.sleep(2)
log.info('Creating master request server process')
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = log_queue
kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()
kwargs['secrets'] = SMaster.secrets
self.process_manager.add_process(
ReqServer,
args=(self.opts, self.key, self.master_key),
kwargs=kwargs,
name='ReqServer')
self.process_manager.add_process(
FileserverUpdate,
args=(self.opts,))
# Fire up SSDP discovery publisher
if self.opts['discovery']:
if salt.utils.ssdp.SSDPDiscoveryServer.is_available():
self.process_manager.add_process(salt.utils.ssdp.SSDPDiscoveryServer(
port=self.opts['discovery']['port'],
listen_ip=self.opts['interface'],
answer={'mapping': self.opts['discovery'].get('mapping', {})}).run)
else:
log.error('Unable to load SSDP: asynchronous IO is not available.')
if sys.version_info.major == 2:
log.error('You are using Python 2, please install "trollius" module to enable SSDP discovery.')
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
self.process_manager.run()
|
Turn on the master server components
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L646-L774
|
[
"def get_user():\n '''\n Get the current user\n '''\n if HAS_PWD:\n ret = pwd.getpwuid(os.geteuid()).pw_name\n elif HAS_WIN_FUNCTIONS and salt.utils.win_functions.HAS_WIN32:\n ret = salt.utils.win_functions.get_current_user()\n else:\n raise CommandExecutionError(\n 'Required external library (pwd or win32api) not installed')\n return salt.utils.stringutils.to_unicode(ret)\n",
"def iter_transport_opts(opts):\n '''\n Yield transport, opts for all master configured transports\n '''\n transports = set()\n\n for transport, opts_overrides in six.iteritems(opts.get('transport_opts', {})):\n t_opts = dict(opts)\n t_opts.update(opts_overrides)\n t_opts['transport'] = transport\n transports.add(transport)\n yield transport, t_opts\n\n if opts['transport'] not in transports:\n yield opts['transport'], opts\n",
"def start_engines(opts, proc_mgr, proxy=None):\n '''\n Fire up the configured engines!\n '''\n utils = salt.loader.utils(opts, proxy=proxy)\n if opts['__role'] == 'master':\n runners = salt.loader.runner(opts, utils=utils)\n else:\n runners = []\n funcs = salt.loader.minion_mods(opts, utils=utils, proxy=proxy)\n engines = salt.loader.engines(opts, funcs, runners, utils, proxy=proxy)\n\n engines_opt = opts.get('engines', [])\n if isinstance(engines_opt, dict):\n engines_opt = [{k: v} for k, v in engines_opt.items()]\n\n # Function references are not picklable. Windows needs to pickle when\n # spawning processes. On Windows, these will need to be recalculated\n # in the spawned child process.\n if salt.utils.platform.is_windows():\n runners = None\n utils = None\n funcs = None\n\n for engine in engines_opt:\n if isinstance(engine, dict):\n engine, engine_opts = next(iter(engine.items()))\n else:\n engine_opts = None\n engine_name = None\n if engine_opts is not None and 'engine_module' in engine_opts:\n fun = '{0}.start'.format(engine_opts['engine_module'])\n engine_name = engine\n del engine_opts['engine_module']\n else:\n fun = '{0}.start'.format(engine)\n if fun in engines:\n start_func = engines[fun]\n if engine_name:\n name = '{0}.Engine({1}-{2})'.format(__name__,\n start_func.__module__,\n engine_name)\n else:\n name = '{0}.Engine({1})'.format(__name__,\n start_func.__module__)\n log.info('Starting Engine %s', name)\n proc_mgr.add_process(\n Engine,\n args=(\n opts,\n fun,\n engine_opts,\n funcs,\n runners,\n proxy\n ),\n name=name\n )\n",
"def enable_sigusr1_handler():\n '''\n Pretty print a stack trace to the console or a debug log under /tmp\n when any of the salt daemons such as salt-master are sent a SIGUSR1\n '''\n enable_sig_handler('SIGUSR1', _handle_sigusr1)\n # Also canonical BSD-way of printing progress is SIGINFO\n # which on BSD-derivatives can be sent via Ctrl+T\n enable_sig_handler('SIGINFO', _handle_sigusr1)\n",
"def enable_sigusr2_handler():\n '''\n Toggle YAPPI profiler\n '''\n enable_sig_handler('SIGUSR2', _handle_sigusr2)\n",
"def get_multiprocessing_logging_queue():\n global __MP_LOGGING_QUEUE\n\n if __MP_IN_MAINPROCESS is False:\n # We're not in the MainProcess, return! No Queue shall be instantiated\n return __MP_LOGGING_QUEUE\n\n if __MP_LOGGING_QUEUE is None:\n __MP_LOGGING_QUEUE = multiprocessing.Queue()\n return __MP_LOGGING_QUEUE\n",
"def generate_key_string(cls, key_size=192):\n key = os.urandom(key_size // 8 + cls.SIG_SIZE)\n b64key = base64.b64encode(key)\n if six.PY3:\n b64key = b64key.decode('utf-8')\n # Return data must be a base64-encoded string, not a unicode type\n return b64key.replace('\\n', '')\n",
"def __set_max_open_files(self):\n if not HAS_RESOURCE:\n return\n # Let's check to see how our max open files(ulimit -n) setting is\n mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)\n if mof_h == resource.RLIM_INFINITY:\n # Unclear what to do with infinity... macOS reports RLIM_INFINITY as\n # hard limit,but raising to anything above soft limit fails...\n mof_h = mof_s\n log.info(\n 'Current values for max open files soft/hard setting: %s/%s',\n mof_s, mof_h\n )\n # Let's grab, from the configuration file, the value to raise max open\n # files to\n mof_c = self.opts['max_open_files']\n if mof_c > mof_h:\n # The configured value is higher than what's allowed\n log.info(\n 'The value for the \\'max_open_files\\' setting, %s, is higher '\n 'than the highest value the user running salt is allowed to '\n 'set (%s). Defaulting to %s.', mof_c, mof_h, mof_h\n )\n mof_c = mof_h\n\n if mof_s < mof_c:\n # There's room to raise the value. Raise it!\n log.info('Raising max open files value to %s', mof_c)\n resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))\n try:\n mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)\n log.info(\n 'New values for max open files soft/hard values: %s/%s',\n mof_s, mof_h\n )\n except ValueError:\n # https://github.com/saltstack/salt/issues/1991#issuecomment-13025595\n # A user under macOS reported that our 100000 default value is\n # still too high.\n log.critical(\n 'Failed to raise max open files setting to %s. If this '\n 'value is too low, the salt-master will most likely fail '\n 'to run properly.', mof_c\n )\n",
"def _pre_flight(self):\n '''\n Run pre flight checks. If anything in this method fails then the master\n should not start up.\n '''\n errors = []\n critical_errors = []\n\n try:\n os.chdir('/')\n except OSError as err:\n errors.append(\n 'Cannot change to root directory ({0})'.format(err)\n )\n\n if self.opts.get('fileserver_verify_config', True):\n # Avoid circular import\n import salt.fileserver\n fileserver = salt.fileserver.Fileserver(self.opts)\n if not fileserver.servers:\n errors.append(\n 'Failed to load fileserver backends, the configured backends '\n 'are: {0}'.format(', '.join(self.opts['fileserver_backend']))\n )\n else:\n # Run init() for all backends which support the function, to\n # double-check configuration\n try:\n fileserver.init()\n except salt.exceptions.FileserverConfigError as exc:\n critical_errors.append('{0}'.format(exc))\n\n if not self.opts['fileserver_backend']:\n errors.append('No fileserver backends are configured')\n\n # Check to see if we need to create a pillar cache dir\n if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')):\n try:\n with salt.utils.files.set_umask(0o077):\n os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))\n except OSError:\n pass\n\n if self.opts.get('git_pillar_verify_config', True):\n try:\n git_pillars = [\n x for x in self.opts.get('ext_pillar', [])\n if 'git' in x\n and not isinstance(x['git'], six.string_types)\n ]\n except TypeError:\n git_pillars = []\n critical_errors.append(\n 'Invalid ext_pillar configuration. 
It is likely that the '\n 'external pillar type was not specified for one or more '\n 'external pillars.'\n )\n if git_pillars:\n try:\n new_opts = copy.deepcopy(self.opts)\n import salt.pillar.git_pillar\n for repo in git_pillars:\n new_opts['ext_pillar'] = [repo]\n try:\n git_pillar = salt.utils.gitfs.GitPillar(\n new_opts,\n repo['git'],\n per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,\n per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,\n global_only=salt.pillar.git_pillar.GLOBAL_ONLY)\n except salt.exceptions.FileserverConfigError as exc:\n critical_errors.append(exc.strerror)\n finally:\n del new_opts\n\n if errors or critical_errors:\n for error in errors:\n log.error(error)\n for error in critical_errors:\n log.critical(error)\n log.critical('Master failed pre flight checks, exiting\\n')\n sys.exit(salt.defaults.exitcodes.EX_GENERIC)\n",
"def add_process(self, tgt, args=None, kwargs=None, name=None):\n '''\n Create a processes and args + kwargs\n This will deterimine if it is a Process class, otherwise it assumes\n it is a function\n '''\n if args is None:\n args = []\n\n if kwargs is None:\n kwargs = {}\n\n if salt.utils.platform.is_windows():\n # Need to ensure that 'log_queue' and 'log_queue_level' is\n # correctly transferred to processes that inherit from\n # 'MultiprocessingProcess'.\n if type(MultiprocessingProcess) is type(tgt) and (\n issubclass(tgt, MultiprocessingProcess)):\n need_log_queue = True\n else:\n need_log_queue = False\n\n if need_log_queue:\n if 'log_queue' not in kwargs:\n if hasattr(self, 'log_queue'):\n kwargs['log_queue'] = self.log_queue\n else:\n kwargs['log_queue'] = (\n salt.log.setup.get_multiprocessing_logging_queue()\n )\n if 'log_queue_level' not in kwargs:\n if hasattr(self, 'log_queue_level'):\n kwargs['log_queue_level'] = self.log_queue_level\n else:\n kwargs['log_queue_level'] = (\n salt.log.setup.get_multiprocessing_logging_level()\n )\n\n # create a nicer name for the debug log\n if name is None:\n if isinstance(tgt, types.FunctionType):\n name = '{0}.{1}'.format(\n tgt.__module__,\n tgt.__name__,\n )\n else:\n name = '{0}{1}.{2}'.format(\n tgt.__module__,\n '.{0}'.format(tgt.__class__) if six.text_type(tgt.__class__) != \"<type 'type'>\" else '',\n tgt.__name__,\n )\n\n if type(multiprocessing.Process) is type(tgt) and issubclass(tgt, multiprocessing.Process):\n process = tgt(*args, **kwargs)\n else:\n process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs, name=name)\n\n if isinstance(process, SignalHandlingMultiprocessingProcess):\n with default_signals(signal.SIGINT, signal.SIGTERM):\n process.start()\n else:\n process.start()\n log.debug(\"Started '%s' with pid %s\", name, process.pid)\n self._process_map[process.pid] = {'tgt': tgt,\n 'args': args,\n 'kwargs': kwargs,\n 'Process': process}\n return process\n",
"def factory(opts, **kwargs):\n # Default to ZeroMQ for now\n ttype = 'zeromq'\n\n # determine the ttype\n if 'transport' in opts:\n ttype = opts['transport']\n elif 'transport' in opts.get('pillar', {}).get('master', {}):\n ttype = opts['pillar']['master']['transport']\n\n # switch on available ttypes\n if ttype == 'zeromq':\n import salt.transport.zeromq\n return salt.transport.zeromq.ZeroMQPubServerChannel(opts, **kwargs)\n elif ttype == 'tcp':\n import salt.transport.tcp\n return salt.transport.tcp.TCPPubServerChannel(opts)\n elif ttype == 'local': # TODO:\n import salt.transport.local\n return salt.transport.local.LocalPubServerChannel(opts, **kwargs)\n else:\n raise Exception('Channels are only defined for ZeroMQ and TCP')\n"
] |
class Master(SMaster):
'''
The salt master server
'''
def __init__(self, opts):
'''
Create a salt master server instance
:param dict: The salt options
'''
if zmq and ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
SMaster.__init__(self, opts)
def __set_max_open_files(self):
if not HAS_RESOURCE:
return
# Let's check to see how our max open files(ulimit -n) setting is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
if mof_h == resource.RLIM_INFINITY:
# Unclear what to do with infinity... macOS reports RLIM_INFINITY as
# hard limit,but raising to anything above soft limit fails...
mof_h = mof_s
log.info(
'Current values for max open files soft/hard setting: %s/%s',
mof_s, mof_h
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts['max_open_files']
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.info(
'The value for the \'max_open_files\' setting, %s, is higher '
'than the highest value the user running salt is allowed to '
'set (%s). Defaulting to %s.', mof_c, mof_h, mof_h
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.info('Raising max open files value to %s', mof_c)
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
try:
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
'New values for max open files soft/hard values: %s/%s',
mof_s, mof_h
)
except ValueError:
# https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
# A user under macOS reported that our 100000 default value is
# still too high.
log.critical(
'Failed to raise max open files setting to %s. If this '
'value is too low, the salt-master will most likely fail '
'to run properly.', mof_c
)
def _pre_flight(self):
'''
Run pre flight checks. If anything in this method fails then the master
should not start up.
'''
errors = []
critical_errors = []
try:
os.chdir('/')
except OSError as err:
errors.append(
'Cannot change to root directory ({0})'.format(err)
)
if self.opts.get('fileserver_verify_config', True):
# Avoid circular import
import salt.fileserver
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
'Failed to load fileserver backends, the configured backends '
'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
)
else:
# Run init() for all backends which support the function, to
# double-check configuration
try:
fileserver.init()
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append('{0}'.format(exc))
if not self.opts['fileserver_backend']:
errors.append('No fileserver backends are configured')
# Check to see if we need to create a pillar cache dir
if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')):
try:
with salt.utils.files.set_umask(0o077):
os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
except OSError:
pass
if self.opts.get('git_pillar_verify_config', True):
try:
git_pillars = [
x for x in self.opts.get('ext_pillar', [])
if 'git' in x
and not isinstance(x['git'], six.string_types)
]
except TypeError:
git_pillars = []
critical_errors.append(
'Invalid ext_pillar configuration. It is likely that the '
'external pillar type was not specified for one or more '
'external pillars.'
)
if git_pillars:
try:
new_opts = copy.deepcopy(self.opts)
import salt.pillar.git_pillar
for repo in git_pillars:
new_opts['ext_pillar'] = [repo]
try:
git_pillar = salt.utils.gitfs.GitPillar(
new_opts,
repo['git'],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
del new_opts
if errors or critical_errors:
for error in errors:
log.error(error)
for error in critical_errors:
log.critical(error)
log.critical('Master failed pre flight checks, exiting\n')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
|
saltstack/salt
|
salt/master.py
|
Halite.run
|
python
|
def run(self):
    '''
    Start the Halite web UI server in this process.
    '''
    # Tag the process title so the Halite child is identifiable in `ps`.
    proc_name = type(self).__name__
    salt.utils.process.appendproctitle(proc_name)
    # Hand control to halite; this blocks for the lifetime of the server.
    halite.start(self.hopts)
|
Fire up halite!
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L817-L822
|
[
"def appendproctitle(name):\n '''\n Append \"name\" to the current process title\n '''\n if HAS_SETPROCTITLE:\n setproctitle.setproctitle(setproctitle.getproctitle() + ' ' + name)\n"
] |
class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
Manage the Halite server
'''
def __init__(self, hopts, **kwargs):
'''
Create a halite instance
:param dict hopts: The halite options
'''
super(Halite, self).__init__(**kwargs)
self.hopts = hopts
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['hopts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'hopts': self.hopts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
|
saltstack/salt
|
salt/master.py
|
ReqServer.__bind
|
python
|
def __bind(self):
    '''
    Bind the reply server: re-attach multiprocessing logging, restore the
    shared master secrets, set up one request channel per configured
    transport, and run the process manager that supervises the MWorker pool.
    '''
    # Re-attach multiprocessing logging in this child process.
    if self.log_queue is not None:
        salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
    if self.log_queue_level is not None:
        salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
    salt.log.setup.setup_multiprocessing_logging(self.log_queue)

    # Restore the shared master secrets handed over from the parent.
    if self.secrets is not None:
        SMaster.secrets = self.secrets

    # Remove a stale .dfn key-rotation drop file if one is lying around.
    dfn_path = os.path.join(self.opts['cachedir'], '.dfn')
    if os.path.isfile(dfn_path):
        try:
            if salt.utils.platform.is_windows() and not os.access(dfn_path, os.W_OK):
                # Windows refuses to delete read-only files; make it
                # writable first.
                os.chmod(dfn_path, stat.S_IRUSR | stat.S_IWUSR)
            os.remove(dfn_path)
        except os.error:
            pass

    # Wait-for-kill must stay shorter than the parent ProcessManager's.
    self.process_manager = salt.utils.process.ProcessManager(
        name='ReqServer_ProcessManager',
        wait_for_kill=1)

    req_channels = []
    tcp_only = True
    for transport, transport_opts in iter_transport_opts(self.opts):
        channel = salt.transport.server.ReqServerChannel.factory(transport_opts)
        channel.pre_fork(self.process_manager)
        req_channels.append(channel)
        tcp_only = tcp_only and transport == 'tcp'

    worker_kwargs = {}
    if salt.utils.platform.is_windows():
        worker_kwargs['log_queue'] = self.log_queue
        worker_kwargs['log_queue_level'] = self.log_queue_level
        # Python 2 on Windows has no load-balancer support for the TCP
        # transport, so force a single worker in that combination.
        if tcp_only and six.PY2 and int(self.opts['worker_threads']) != 1:
            log.warning('TCP transport supports only 1 worker on Windows '
                        'when using Python 2.')
            self.opts['worker_threads'] = 1

    # Spawn workers under default signal handlers so the children do not
    # inherit this process's custom handlers.
    with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
        for idx in range(int(self.opts['worker_threads'])):
            worker_name = 'MWorker-{0}'.format(idx)
            self.process_manager.add_process(
                MWorker,
                args=(self.opts,
                      self.master_key,
                      self.key,
                      req_channels,
                      worker_name),
                kwargs=worker_kwargs,
                name=worker_name)
    self.process_manager.run()
|
Binds the reply server
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L876-L937
| null |
class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
Starts up the master request server, minions send results to this
interface.
'''
def __init__(self, opts, key, mkey, secrets=None, **kwargs):
'''
Create a request server
:param dict opts: The salt options dictionary
:key dict: The user starting the server and the AES key
:mkey dict: The user starting the server and the RSA key
:rtype: ReqServer
:returns: Request server
'''
super(ReqServer, self).__init__(**kwargs)
self.opts = opts
self.master_key = mkey
# Prepare the AES key
self.key = key
self.secrets = secrets
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
state['key'],
state['mkey'],
secrets=state['secrets'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'key': self.key,
'mkey': self.master_key,
'secrets': self.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self.destroy(signum)
super(ReqServer, self)._handle_signals(signum, sigframe)
def run(self):
'''
Start up the ReqServer
'''
self.__bind()
def destroy(self, signum=signal.SIGTERM):
if hasattr(self, 'process_manager'):
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
self.process_manager.kill_children()
def __del__(self):
self.destroy()
|
saltstack/salt
|
salt/master.py
|
MWorker.__bind
|
python
|
def __bind(self):
    '''
    Attach every request channel to a fresh IO loop and serve until the
    process is interrupted.
    '''
    # A ZMQ-aware IO loop is installed because zmq sockets may be involved.
    install_zmq()
    self.io_loop = ZMQDefaultLoop()
    self.io_loop.make_current()
    for channel in self.req_channels:
        # TODO: cleaner? Maybe lazily?
        channel.post_fork(self._handle_payload, io_loop=self.io_loop)
    try:
        self.io_loop.start()
    except (KeyboardInterrupt, SystemExit):
        # Normal shutdown paths; Tornado performs its own cleanup.
        pass
|
Bind to the local port
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1024-L1038
| null |
class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
req_channels,
name,
**kwargs):
'''
Create a salt master worker process
:param dict opts: The salt options
:param dict mkey: The user running the salt master and the AES key
:param dict key: The user running the salt master and the RSA key
:rtype: MWorker
:return: Master worker
'''
kwargs['name'] = name
self.name = name
super(MWorker, self).__init__(**kwargs)
self.opts = opts
self.req_channels = req_channels
self.mkey = mkey
self.key = key
self.k_mtime = 0
self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
self.stat_clock = time.time()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self._is_child = True
super(MWorker, self).__init__(
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
self.key = state['key']
self.k_mtime = state['k_mtime']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {
'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'secrets': SMaster.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, 'req_channels', ()):
channel.close()
super(MWorker, self)._handle_signals(signum, sigframe)
@tornado.gen.coroutine
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
Example cleartext payload generated for 'salt myminion test.ping':
{'enc': 'clear',
'load': {'arg': [],
'cmd': 'publish',
'fun': 'test.ping',
'jid': '',
'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
'kwargs': {'show_jid': False, 'show_timeout': False},
'ret': '',
'tgt': 'myminion',
'tgt_type': 'glob',
'user': 'root'}}
:param dict payload: The payload route to the appropriate handler
'''
key = payload['enc']
load = payload['load']
ret = {'aes': self._handle_aes,
'clear': self._handle_clear}[key](load)
raise tornado.gen.Return(ret)
def _post_stats(self, stats):
'''
Fire events with stat info if it's time
'''
end_time = time.time()
if end_time - self.stat_clock > self.opts['master_stats_event_iter']:
# Fire the event with the stats and wipe the tracker
self.aes_funcs.event.fire_event({'time': end_time - self.stat_clock, 'worker': self.name, 'stats': stats}, tagify(self.name, 'stats'))
self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
self.stat_clock = end_time
def _handle_clear(self, load):
'''
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
log.trace('Clear payload received with command %s', load['cmd'])
cmd = load['cmd']
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
ret = getattr(self.clear_funcs, cmd)(load), {'fun': 'send_clear'}
if self.opts['master_stats']:
stats = salt.utils.event.update_stats(self.stats, start, load)
self._post_stats(stats)
return ret
def _handle_aes(self, data):
'''
Process a command sent via an AES key
:param str load: Encrypted payload
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
if 'cmd' not in data:
log.error('Received malformed command %s', data)
return {}
cmd = data['cmd']
log.trace('AES payload received with command %s', data['cmd'])
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
def run_func(data):
return self.aes_funcs.run_func(data['cmd'], data)
with StackContext(functools.partial(RequestContext,
{'data': data,
'opts': self.opts})):
ret = run_func(data)
if self.opts['master_stats']:
stats = salt.utils.event.update_stats(self.stats, start, data)
self._post_stats(stats)
return ret
def run(self):
'''
Start a Master Worker
'''
salt.utils.process.appendproctitle(self.name)
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
|
saltstack/salt
|
salt/master.py
|
MWorker._handle_payload
|
python
|
def _handle_payload(self, payload):
    '''
    Route an incoming payload to the handler matching its encryption kind.

    Example cleartext payload generated for 'salt myminion test.ping':

    {'enc': 'clear',
     'load': {'arg': [],
              'cmd': 'publish',
              'fun': 'test.ping',
              'jid': '',
              'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
              'kwargs': {'show_jid': False, 'show_timeout': False},
              'ret': '',
              'tgt': 'myminion',
              'tgt_type': 'glob',
              'user': 'root'}}

    :param dict payload: The payload to route to the appropriate handler
    '''
    handlers = {
        'aes': self._handle_aes,
        'clear': self._handle_clear,
    }
    # 'enc' selects the handler; anything else raises KeyError upstream.
    dispatch = handlers[payload['enc']]
    raise tornado.gen.Return(dispatch(payload['load']))
|
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
Example cleartext payload generated for 'salt myminion test.ping':
{'enc': 'clear',
'load': {'arg': [],
'cmd': 'publish',
'fun': 'test.ping',
'jid': '',
'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
'kwargs': {'show_jid': False, 'show_timeout': False},
'ret': '',
'tgt': 'myminion',
'tgt_type': 'glob',
'user': 'root'}}
:param dict payload: The payload route to the appropriate handler
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1041-L1066
| null |
class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
req_channels,
name,
**kwargs):
'''
Create a salt master worker process
:param dict opts: The salt options
:param dict mkey: The user running the salt master and the AES key
:param dict key: The user running the salt master and the RSA key
:rtype: MWorker
:return: Master worker
'''
kwargs['name'] = name
self.name = name
super(MWorker, self).__init__(**kwargs)
self.opts = opts
self.req_channels = req_channels
self.mkey = mkey
self.key = key
self.k_mtime = 0
self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
self.stat_clock = time.time()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self._is_child = True
super(MWorker, self).__init__(
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
self.key = state['key']
self.k_mtime = state['k_mtime']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {
'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'secrets': SMaster.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, 'req_channels', ()):
channel.close()
super(MWorker, self)._handle_signals(signum, sigframe)
def __bind(self):
'''
Bind to the local port
'''
# using ZMQIOLoop since we *might* need zmq in there
install_zmq()
self.io_loop = ZMQDefaultLoop()
self.io_loop.make_current()
for req_channel in self.req_channels:
req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily?
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
# Tornado knows what to do
pass
@tornado.gen.coroutine
def _post_stats(self, stats):
'''
Fire events with stat info if it's time
'''
end_time = time.time()
if end_time - self.stat_clock > self.opts['master_stats_event_iter']:
# Fire the event with the stats and wipe the tracker
self.aes_funcs.event.fire_event({'time': end_time - self.stat_clock, 'worker': self.name, 'stats': stats}, tagify(self.name, 'stats'))
self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
self.stat_clock = end_time
def _handle_clear(self, load):
'''
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
log.trace('Clear payload received with command %s', load['cmd'])
cmd = load['cmd']
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
ret = getattr(self.clear_funcs, cmd)(load), {'fun': 'send_clear'}
if self.opts['master_stats']:
stats = salt.utils.event.update_stats(self.stats, start, load)
self._post_stats(stats)
return ret
def _handle_aes(self, data):
'''
Process a command sent via an AES key
:param str load: Encrypted payload
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
if 'cmd' not in data:
log.error('Received malformed command %s', data)
return {}
cmd = data['cmd']
log.trace('AES payload received with command %s', data['cmd'])
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
def run_func(data):
return self.aes_funcs.run_func(data['cmd'], data)
with StackContext(functools.partial(RequestContext,
{'data': data,
'opts': self.opts})):
ret = run_func(data)
if self.opts['master_stats']:
stats = salt.utils.event.update_stats(self.stats, start, data)
self._post_stats(stats)
return ret
def run(self):
'''
Start a Master Worker
'''
salt.utils.process.appendproctitle(self.name)
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
|
saltstack/salt
|
salt/master.py
|
MWorker._post_stats
|
python
|
def _post_stats(self, stats):
    '''
    Fire a stats event when the reporting interval has elapsed, then reset
    the per-worker counters and the interval clock.
    '''
    now = time.time()
    elapsed = now - self.stat_clock
    if elapsed <= self.opts['master_stats_event_iter']:
        # Interval not yet reached; keep accumulating.
        return
    payload = {'time': elapsed, 'worker': self.name, 'stats': stats}
    self.aes_funcs.event.fire_event(payload, tagify(self.name, 'stats'))
    # Wipe the tracker and restart the interval clock.
    self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
    self.stat_clock = now
|
Fire events with stat info if it's time
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1068-L1077
| null |
class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
req_channels,
name,
**kwargs):
'''
Create a salt master worker process
:param dict opts: The salt options
:param dict mkey: The user running the salt master and the AES key
:param dict key: The user running the salt master and the RSA key
:rtype: MWorker
:return: Master worker
'''
kwargs['name'] = name
self.name = name
super(MWorker, self).__init__(**kwargs)
self.opts = opts
self.req_channels = req_channels
self.mkey = mkey
self.key = key
self.k_mtime = 0
self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
self.stat_clock = time.time()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self._is_child = True
super(MWorker, self).__init__(
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
self.key = state['key']
self.k_mtime = state['k_mtime']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {
'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'secrets': SMaster.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, 'req_channels', ()):
channel.close()
super(MWorker, self)._handle_signals(signum, sigframe)
def __bind(self):
'''
Bind to the local port
'''
# using ZMQIOLoop since we *might* need zmq in there
install_zmq()
self.io_loop = ZMQDefaultLoop()
self.io_loop.make_current()
for req_channel in self.req_channels:
req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily?
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
# Tornado knows what to do
pass
@tornado.gen.coroutine
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
Example cleartext payload generated for 'salt myminion test.ping':
{'enc': 'clear',
'load': {'arg': [],
'cmd': 'publish',
'fun': 'test.ping',
'jid': '',
'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
'kwargs': {'show_jid': False, 'show_timeout': False},
'ret': '',
'tgt': 'myminion',
'tgt_type': 'glob',
'user': 'root'}}
:param dict payload: The payload route to the appropriate handler
'''
key = payload['enc']
load = payload['load']
ret = {'aes': self._handle_aes,
'clear': self._handle_clear}[key](load)
raise tornado.gen.Return(ret)
def _handle_clear(self, load):
'''
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
log.trace('Clear payload received with command %s', load['cmd'])
cmd = load['cmd']
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
ret = getattr(self.clear_funcs, cmd)(load), {'fun': 'send_clear'}
if self.opts['master_stats']:
stats = salt.utils.event.update_stats(self.stats, start, load)
self._post_stats(stats)
return ret
def _handle_aes(self, data):
'''
Process a command sent via an AES key
:param str load: Encrypted payload
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
if 'cmd' not in data:
log.error('Received malformed command %s', data)
return {}
cmd = data['cmd']
log.trace('AES payload received with command %s', data['cmd'])
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
def run_func(data):
return self.aes_funcs.run_func(data['cmd'], data)
with StackContext(functools.partial(RequestContext,
{'data': data,
'opts': self.opts})):
ret = run_func(data)
if self.opts['master_stats']:
stats = salt.utils.event.update_stats(self.stats, start, data)
self._post_stats(stats)
return ret
def run(self):
'''
Start a Master Worker
'''
salt.utils.process.appendproctitle(self.name)
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
|
saltstack/salt
|
salt/master.py
|
MWorker._handle_clear
|
python
|
def _handle_clear(self, load):
    '''
    Process a cleartext command.

    :param dict load: Cleartext payload
    :return: Tuple of the result of passing the load to the ClearFuncs
        method named by the load's 'cmd' key and a {'fun': 'send_clear'}
        marker, or False for private (dunder-prefixed) commands.
    '''
    log.trace('Clear payload received with command %s', load['cmd'])
    cmd = load['cmd']
    if cmd.startswith('__'):
        # Dunder-prefixed names are private helpers; never remotely callable.
        return False
    collect_stats = self.opts['master_stats']
    if collect_stats:
        start = time.time()
    method = getattr(self.clear_funcs, cmd)
    ret = method(load), {'fun': 'send_clear'}
    if collect_stats:
        self._post_stats(salt.utils.event.update_stats(self.stats, start, load))
    return ret
|
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1079-L1097
| null |
class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
req_channels,
name,
**kwargs):
'''
Create a salt master worker process
:param dict opts: The salt options
:param dict mkey: The user running the salt master and the AES key
:param dict key: The user running the salt master and the RSA key
:rtype: MWorker
:return: Master worker
'''
kwargs['name'] = name
self.name = name
super(MWorker, self).__init__(**kwargs)
self.opts = opts
self.req_channels = req_channels
self.mkey = mkey
self.key = key
self.k_mtime = 0
self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
self.stat_clock = time.time()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self._is_child = True
super(MWorker, self).__init__(
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
self.key = state['key']
self.k_mtime = state['k_mtime']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {
'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'secrets': SMaster.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, 'req_channels', ()):
channel.close()
super(MWorker, self)._handle_signals(signum, sigframe)
def __bind(self):
'''
Bind to the local port
'''
# using ZMQIOLoop since we *might* need zmq in there
install_zmq()
self.io_loop = ZMQDefaultLoop()
self.io_loop.make_current()
for req_channel in self.req_channels:
req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily?
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
# Tornado knows what to do
pass
@tornado.gen.coroutine
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
Example cleartext payload generated for 'salt myminion test.ping':
{'enc': 'clear',
'load': {'arg': [],
'cmd': 'publish',
'fun': 'test.ping',
'jid': '',
'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
'kwargs': {'show_jid': False, 'show_timeout': False},
'ret': '',
'tgt': 'myminion',
'tgt_type': 'glob',
'user': 'root'}}
:param dict payload: The payload route to the appropriate handler
'''
key = payload['enc']
load = payload['load']
ret = {'aes': self._handle_aes,
'clear': self._handle_clear}[key](load)
raise tornado.gen.Return(ret)
def _post_stats(self, stats):
'''
Fire events with stat info if it's time
'''
end_time = time.time()
if end_time - self.stat_clock > self.opts['master_stats_event_iter']:
# Fire the event with the stats and wipe the tracker
self.aes_funcs.event.fire_event({'time': end_time - self.stat_clock, 'worker': self.name, 'stats': stats}, tagify(self.name, 'stats'))
self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
self.stat_clock = end_time
def _handle_aes(self, data):
'''
Process a command sent via an AES key
:param str load: Encrypted payload
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
if 'cmd' not in data:
log.error('Received malformed command %s', data)
return {}
cmd = data['cmd']
log.trace('AES payload received with command %s', data['cmd'])
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
def run_func(data):
return self.aes_funcs.run_func(data['cmd'], data)
with StackContext(functools.partial(RequestContext,
{'data': data,
'opts': self.opts})):
ret = run_func(data)
if self.opts['master_stats']:
stats = salt.utils.event.update_stats(self.stats, start, data)
self._post_stats(stats)
return ret
def run(self):
'''
Start a Master Worker
'''
salt.utils.process.appendproctitle(self.name)
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
|
saltstack/salt
|
salt/master.py
|
MWorker._handle_aes
|
python
|
def _handle_aes(self, data):
    '''
    Process a command sent via an AES key.

    :param dict data: The decrypted payload; must contain a 'cmd' key
        naming the AESFuncs method to run.
    :return: The result of passing the load to the AESFuncs method named
        by the 'cmd' key, an empty dict for malformed input, or False for
        private (dunder-prefixed) commands.
    '''
    if 'cmd' not in data:
        log.error('Received malformed command %s', data)
        return {}
    cmd = data['cmd']
    log.trace('AES payload received with command %s', cmd)
    if cmd.startswith('__'):
        # Dunder-prefixed names are private helpers; never remotely callable.
        return False
    if self.opts['master_stats']:
        start = time.time()

    def run_func(data):
        # Indirection kept so StackContext wraps the actual call.
        return self.aes_funcs.run_func(data['cmd'], data)

    # Expose the request data and opts through the tornado RequestContext
    # for code running below run_func.
    with StackContext(functools.partial(RequestContext,
                                        {'data': data,
                                         'opts': self.opts})):
        ret = run_func(data)
    if self.opts['master_stats']:
        stats = salt.utils.event.update_stats(self.stats, start, data)
        self._post_stats(stats)
    return ret
|
Process a command sent via an AES key
:param str load: Encrypted payload
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1099-L1128
| null |
class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
req_channels,
name,
**kwargs):
'''
Create a salt master worker process
:param dict opts: The salt options
:param dict mkey: The user running the salt master and the AES key
:param dict key: The user running the salt master and the RSA key
:rtype: MWorker
:return: Master worker
'''
kwargs['name'] = name
self.name = name
super(MWorker, self).__init__(**kwargs)
self.opts = opts
self.req_channels = req_channels
self.mkey = mkey
self.key = key
self.k_mtime = 0
self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
self.stat_clock = time.time()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self._is_child = True
super(MWorker, self).__init__(
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
self.key = state['key']
self.k_mtime = state['k_mtime']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {
'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'secrets': SMaster.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, 'req_channels', ()):
channel.close()
super(MWorker, self)._handle_signals(signum, sigframe)
def __bind(self):
'''
Bind to the local port
'''
# using ZMQIOLoop since we *might* need zmq in there
install_zmq()
self.io_loop = ZMQDefaultLoop()
self.io_loop.make_current()
for req_channel in self.req_channels:
req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily?
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
# Tornado knows what to do
pass
@tornado.gen.coroutine
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
Example cleartext payload generated for 'salt myminion test.ping':
{'enc': 'clear',
'load': {'arg': [],
'cmd': 'publish',
'fun': 'test.ping',
'jid': '',
'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
'kwargs': {'show_jid': False, 'show_timeout': False},
'ret': '',
'tgt': 'myminion',
'tgt_type': 'glob',
'user': 'root'}}
:param dict payload: The payload route to the appropriate handler
'''
key = payload['enc']
load = payload['load']
ret = {'aes': self._handle_aes,
'clear': self._handle_clear}[key](load)
raise tornado.gen.Return(ret)
def _post_stats(self, stats):
'''
Fire events with stat info if it's time
'''
end_time = time.time()
if end_time - self.stat_clock > self.opts['master_stats_event_iter']:
# Fire the event with the stats and wipe the tracker
self.aes_funcs.event.fire_event({'time': end_time - self.stat_clock, 'worker': self.name, 'stats': stats}, tagify(self.name, 'stats'))
self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
self.stat_clock = end_time
def _handle_clear(self, load):
'''
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
log.trace('Clear payload received with command %s', load['cmd'])
cmd = load['cmd']
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
ret = getattr(self.clear_funcs, cmd)(load), {'fun': 'send_clear'}
if self.opts['master_stats']:
stats = salt.utils.event.update_stats(self.stats, start, load)
self._post_stats(stats)
return ret
def run(self):
'''
Start a Master Worker
'''
salt.utils.process.appendproctitle(self.name)
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
|
saltstack/salt
|
salt/master.py
|
MWorker.run
|
python
|
def run(self):
    '''
    Entry point for a Master Worker process.

    Constructs the cleartext and AES payload handlers, re-seeds the
    crypto RNG for this process, and binds to the request channels.
    '''
    # Make the worker identifiable in the process table
    salt.utils.process.appendproctitle(self.name)
    # Cleartext and AES-encrypted payload handlers, respectively
    self.clear_funcs = ClearFuncs(self.opts, self.key)
    self.aes_funcs = AESFuncs(self.opts)
    # pycrypto must be re-initialized in the child after fork()
    salt.utils.crypt.reinit_crypto()
    self.__bind()
|
Start a Master Worker
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1130-L1141
|
[
"def reinit_crypto():\n '''\n When a fork arises, pycrypto needs to reinit\n From its doc::\n\n Caveat: For the random number generator to work correctly,\n you must call Random.atfork() in both the parent and\n child processes after using os.fork()\n\n '''\n if HAS_CRYPTO:\n Crypto.Random.atfork()\n",
"def appendproctitle(name):\n '''\n Append \"name\" to the current process title\n '''\n if HAS_SETPROCTITLE:\n setproctitle.setproctitle(setproctitle.getproctitle() + ' ' + name)\n"
] |
class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
req_channels,
name,
**kwargs):
'''
Create a salt master worker process
:param dict opts: The salt options
:param dict mkey: The user running the salt master and the AES key
:param dict key: The user running the salt master and the RSA key
:rtype: MWorker
:return: Master worker
'''
kwargs['name'] = name
self.name = name
super(MWorker, self).__init__(**kwargs)
self.opts = opts
self.req_channels = req_channels
self.mkey = mkey
self.key = key
self.k_mtime = 0
self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
self.stat_clock = time.time()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self._is_child = True
super(MWorker, self).__init__(
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
self.key = state['key']
self.k_mtime = state['k_mtime']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {
'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'secrets': SMaster.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, 'req_channels', ()):
channel.close()
super(MWorker, self)._handle_signals(signum, sigframe)
def __bind(self):
'''
Bind to the local port
'''
# using ZMQIOLoop since we *might* need zmq in there
install_zmq()
self.io_loop = ZMQDefaultLoop()
self.io_loop.make_current()
for req_channel in self.req_channels:
req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily?
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
# Tornado knows what to do
pass
@tornado.gen.coroutine
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
Example cleartext payload generated for 'salt myminion test.ping':
{'enc': 'clear',
'load': {'arg': [],
'cmd': 'publish',
'fun': 'test.ping',
'jid': '',
'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
'kwargs': {'show_jid': False, 'show_timeout': False},
'ret': '',
'tgt': 'myminion',
'tgt_type': 'glob',
'user': 'root'}}
:param dict payload: The payload route to the appropriate handler
'''
key = payload['enc']
load = payload['load']
ret = {'aes': self._handle_aes,
'clear': self._handle_clear}[key](load)
raise tornado.gen.Return(ret)
def _post_stats(self, stats):
'''
Fire events with stat info if it's time
'''
end_time = time.time()
if end_time - self.stat_clock > self.opts['master_stats_event_iter']:
# Fire the event with the stats and wipe the tracker
self.aes_funcs.event.fire_event({'time': end_time - self.stat_clock, 'worker': self.name, 'stats': stats}, tagify(self.name, 'stats'))
self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
self.stat_clock = end_time
def _handle_clear(self, load):
'''
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
log.trace('Clear payload received with command %s', load['cmd'])
cmd = load['cmd']
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
ret = getattr(self.clear_funcs, cmd)(load), {'fun': 'send_clear'}
if self.opts['master_stats']:
stats = salt.utils.event.update_stats(self.stats, start, load)
self._post_stats(stats)
return ret
def _handle_aes(self, data):
'''
Process a command sent via an AES key
:param str load: Encrypted payload
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
if 'cmd' not in data:
log.error('Received malformed command %s', data)
return {}
cmd = data['cmd']
log.trace('AES payload received with command %s', data['cmd'])
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
def run_func(data):
return self.aes_funcs.run_func(data['cmd'], data)
with StackContext(functools.partial(RequestContext,
{'data': data,
'opts': self.opts})):
ret = run_func(data)
if self.opts['master_stats']:
stats = salt.utils.event.update_stats(self.stats, start, data)
self._post_stats(stats)
return ret
|
saltstack/salt
|
salt/master.py
|
AESFuncs.__setup_fileserver
|
python
|
def __setup_fileserver(self):
    '''
    Bind the fileserver interface callables onto this instance so they
    can be invoked directly as AES functions.
    '''
    # Imported here rather than at module level to avoid a circular import
    import salt.fileserver
    self.fs_ = salt.fileserver.Fileserver(self.opts)
    # (local attribute name, Fileserver attribute name) pairs
    bindings = (
        ('_serve_file', 'serve_file'),
        ('_file_find', '_find_file'),
        ('_file_hash', 'file_hash'),
        ('_file_hash_and_stat', 'file_hash_and_stat'),
        ('_file_list', 'file_list'),
        ('_file_list_emptydirs', 'file_list_emptydirs'),
        ('_dir_list', 'dir_list'),
        ('_symlink_list', 'symlink_list'),
        ('_file_envs', 'file_envs'),
    )
    for local_name, fs_name in bindings:
        setattr(self, local_name, getattr(self.fs_, fs_name))
Set the local file objects from the file server interface
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1176-L1191
| null |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
'Salt minion claiming to be %s attempted to communicate with '
'master, but key could not be read and verification was denied.',
id_
)
return False
except (ValueError, IndexError, TypeError) as err:
log.error('Unable to load public key "%s": %s', pub_path, err)
try:
if salt.crypt.public_decrypt(pub, token) == b'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: %s', err)
log.error(
'Salt minion claiming to be %s has attempted to communicate with '
'the master and could not be verified', id_
)
return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# have been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'Drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
if the requesting minion also initiated the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict
:return: If the load is invalid, it may be returned. No key operation is performed.
:rtype: bool
:return: True if key was revoked, False if not
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
'''
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call %s took %s seconds',
func, time.time() - start
)
except Exception:
ret = ''
log.error('Error in function %s:\n', func, exc_info=True)
else:
log.error(
'Received function %s which is unavailable on the master, '
'returning False', func
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs.__verify_minion
|
python
|
def __verify_minion(self, id_, token):
    '''
    Take a minion id and a string signed with the minion private key
    The string needs to verify as 'salt' with the minion public key

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key
    :rtype: bool
    :return: Boolean indicating whether or not the token can be verified.
    '''
    if not salt.utils.verify.valid_id(self.opts, id_):
        return False
    pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
    try:
        pub = salt.crypt.get_rsa_pub_key(pub_path)
    except (IOError, OSError):
        log.warning(
            'Salt minion claiming to be %s attempted to communicate with '
            'master, but key could not be read and verification was denied.',
            id_
        )
        return False
    except (ValueError, IndexError, TypeError) as err:
        log.error('Unable to load public key "%s": %s', pub_path, err)
        # Bug fix: previously control fell through with 'pub' unbound,
        # raising UnboundLocalError below. An unparsable key must simply
        # fail verification.
        return False
    try:
        if salt.crypt.public_decrypt(pub, token) == b'salt':
            return True
    except ValueError as err:
        log.error('Unable to decrypt token: %s', err)
    log.error(
        'Salt minion claiming to be %s has attempted to communicate with '
        'the master and could not be verified', id_
    )
    return False
|
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1193-L1229
|
[
"def valid_id(opts, id_):\n '''\n Returns if the passed id is valid\n '''\n try:\n if any(x in id_ for x in ('/', '\\\\', str('\\0'))):\n return False\n return bool(clean_path(opts['pki_dir'], id_))\n except (AttributeError, KeyError, TypeError, UnicodeDecodeError):\n return False\n"
] |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# have been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'Drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
if the requesting minion also initialted the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion pay
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
.. code-block:: bash
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict
:return: If the load is invalid, it may be returned. No key operation is performed.
:rtype: bool
:return: True if key was revoked, False if not
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
'''
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call %s took %s seconds',
func, time.time() - start
)
except Exception:
ret = ''
log.error('Error in function %s:\n', func, exc_info=True)
else:
log.error(
'Received function %s which is unavailable on the master, '
'returning False', func
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs.__verify_minion_publish
|
python
|
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
|
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1244-L1294
|
[
"def __verify_minion(self, id_, token):\n '''\n Take a minion id and a string signed with the minion private key\n The string needs to verify as 'salt' with the minion public key\n\n :param str id_: A minion ID\n :param str token: A string signed with the minion private key\n\n :rtype: bool\n :return: Boolean indicating whether or not the token can be verified.\n '''\n if not salt.utils.verify.valid_id(self.opts, id_):\n return False\n pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)\n\n try:\n pub = salt.crypt.get_rsa_pub_key(pub_path)\n except (IOError, OSError):\n log.warning(\n 'Salt minion claiming to be %s attempted to communicate with '\n 'master, but key could not be read and verification was denied.',\n id_\n )\n return False\n except (ValueError, IndexError, TypeError) as err:\n log.error('Unable to load public key \"%s\": %s', pub_path, err)\n try:\n if salt.crypt.public_decrypt(pub, token) == b'salt':\n return True\n except ValueError as err:\n log.error('Unable to decrypt token: %s', err)\n\n log.error(\n 'Salt minion claiming to be %s has attempted to communicate with '\n 'the master and could not be verified', id_\n )\n return False\n"
] |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
'Salt minion claiming to be %s attempted to communicate with '
'master, but key could not be read and verification was denied.',
id_
)
return False
except (ValueError, IndexError, TypeError) as err:
log.error('Unable to load public key "%s": %s', pub_path, err)
try:
if salt.crypt.public_decrypt(pub, token) == b'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: %s', err)
log.error(
'Salt minion claiming to be %s has attempted to communicate with '
'the master and could not be verified', id_
)
return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# have been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
    '''
    Act on specific events from minions

    Logs '_salt_error' events and records the minions targeted by any
    publish events carried in the payload so their job returns can be
    accounted for.

    :param dict load: The minion payload
    '''
    id_ = load['id']
    if load.get('tag', '') == '_salt_error':
        log.error(
            'Received minion error from [%s]: %s',
            id_, load['data']['message']
        )

    for event in load.get('events', []):
        event_data = event.get('data', {})
        if 'minions' in event_data:
            jid = event_data.get('jid')
            if not jid:
                # A minion list without a jid cannot be attributed to a job.
                continue
            minions = event_data['minions']
            try:
                # syndic_id marks that these minions were reported via
                # this (syndic) connection.
                salt.utils.job.store_minions(
                    self.opts,
                    jid,
                    minions,
                    mminion=self.mminion,
                    syndic_id=id_)
            except (KeyError, salt.exceptions.SaltCacheError) as exc:
                log.error(
                    'Could not add minion(s) %s for job %s: %s',
                    minions, jid, exc
                )
def _return(self, load):
    '''
    Handle the return data sent from the minions.

    Takes the return, verifies it and fires it on the master event bus.
    Typically, this event is consumed by the Salt CLI waiting on the other
    end of the event bus but could be heard by any listener on the bus.

    :param dict load: The minion payload
    '''
    # Optionally require every return to carry a signature.
    if self.opts['require_minion_sign_messages'] and 'sig' not in load:
        log.critical(
            '_return: Master is requiring minions to sign their '
            'messages, but there is no signature in this payload from '
            '%s.', load['id']
        )
        return False

    if 'sig' in load:
        log.trace('Verifying signed event publish from minion')
        # The signature covers the serialized load WITHOUT the 'sig'
        # key, so pop it before serializing for verification.
        sig = load.pop('sig')
        this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
        serialized_load = salt.serializers.msgpack.serialize(load)
        if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
            log.info('Failed to verify event signature from minion %s.', load['id'])
            if self.opts['drop_messages_signature_fail']:
                log.critical(
                    'Drop_messages_signature_fail is enabled, dropping '
                    'message from %s', load['id']
                )
                return False
            else:
                log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
        # Restore the signature so downstream consumers still see it.
        load['sig'] = sig

    try:
        salt.utils.job.store_job(
            self.opts, load, event=self.event, mminion=self.mminion)
    except salt.exceptions.SaltCacheError:
        log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
    '''
    Receive a syndic minion return and format it to look like returns from
    individual minions.

    :param dict load: The minion payload
    '''
    loads = load.get('load')
    if not isinstance(loads, list):
        loads = [load]  # support old syndics not aggregating returns
    for load in loads:
        # Verify the load
        if any(key not in load for key in ('return', 'jid', 'id')):
            continue
        # if we have a load, save it
        if load.get('load'):
            fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
            self.mminion.returners[fstr](load['jid'], load['load'])

        # Register the syndic
        # An empty per-syndic marker file records that this syndic id
        # has reported in at least once.
        syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
        if not os.path.exists(syndic_cache_path):
            path_name = os.path.split(syndic_cache_path)[0]
            if not os.path.exists(path_name):
                os.makedirs(path_name)
            with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
                wfh.write('')

        # Format individual return loads
        # Re-emit each per-minion return as if it came straight from
        # that minion, preserving metadata carried on the syndic load.
        for key, item in six.iteritems(load['return']):
            ret = {'jid': load['jid'],
                   'id': key}
            ret.update(item)
            if 'master_id' in load:
                ret['master_id'] = load['master_id']
            if 'fun' in load:
                ret['fun'] = load['fun']
            if 'arg' in load:
                ret['fun_args'] = load['arg']
            if 'out' in load:
                ret['out'] = load['out']
            if 'sig' in load:
                ret['sig'] = load['sig']
            self._return(ret)
def minion_runner(self, clear_load):
    '''
    Execute a runner from a minion, return the runner's function data.

    :param dict clear_load: The minion payload

    :rtype: dict
    :return: The runner function data, or an empty dict when the
        payload fails verification
    '''
    verified = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
    if verified is not False:
        return self.masterapi.minion_runner(clear_load)
    return {}
def pub_ret(self, load):
    '''
    Request the return data from a specific jid, only allowed
    if the requesting minion also initiated the execution.

    :param dict load: The minion payload

    :rtype: dict
    :return: Return data corresponding to a given JID, or an empty dict
        when the request is not authorized
    '''
    load = self.__verify_load(load, ('jid', 'id', 'tok'))
    if load is False:
        return {}
    # Check that this minion can access this data
    auth_cache = os.path.join(
        self.opts['cachedir'],
        'publish_auth')
    if not os.path.isdir(auth_cache):
        os.makedirs(auth_cache)
    jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
    # The jid comes off the wire: refuse any value that would resolve
    # outside the publish_auth directory (e.g. '../../...').
    if not os.path.normpath(jid_fn).startswith(os.path.normpath(auth_cache)):
        log.warning(
            'Minion %s requested return data with an invalid jid %s',
            load['id'], load['jid']
        )
        return {}
    # No authorization record means this minion never published that jid;
    # deny instead of raising IOError on the missing file.
    if not os.path.isfile(jid_fn):
        return {}
    with salt.utils.files.fopen(jid_fn, 'r') as fp_:
        if not load['id'] == fp_.read():
            return {}
    # Grab the latest and return
    return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
    '''
    Publish a command initiated from a minion.

    Minion publication only works when enabled through the master
    ``peer`` configuration, which maps minion-id regexes to the salt
    functions they may publish::

        peer:
          .*:
            - .*

    enables all minions to execute all commands, while::

        peer:
          foo.example.com:
            - test.*

    only allows foo.example.com to run functions from the test module.

    :param dict clear_load: The minion payload
    '''
    if self.__verify_minion_publish(clear_load):
        return self.masterapi.minion_pub(clear_load)
    return {}
def minion_publish(self, clear_load):
    '''
    Publish a command initiated from a minion.

    Subject to the same master ``peer`` configuration as
    :py:meth:`minion_pub`: each key is a minion-id regex mapped to the
    salt functions that minion may publish, e.g.::

        peer:
          foo.example.com:
            - test.*

    :param dict clear_load: The minion payload
    '''
    if self.__verify_minion_publish(clear_load):
        return self.masterapi.minion_publish(clear_load)
    return {}
def revoke_auth(self, load):
    '''
    Allow a minion to request revocation of its own key

    :param dict load: The minion payload

    :rtype: dict
    :return: If the load is invalid, it may be returned. No key operation is performed.

    :rtype: bool
    :return: True if key was revoked, False if not
    '''
    load = self.__verify_load(load, ('id', 'tok'))
    # Bail out before touching load['id']: __verify_load returns False
    # for an invalid payload, and the original order raised TypeError
    # by indexing False in the warning below.
    if load is False:
        return load
    if not self.opts.get('allow_minion_key_revoke', False):
        log.warning(
            'Minion %s requested key revoke, but allow_minion_key_revoke '
            'is set to False', load['id']
        )
        return load
    return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
    '''
    Wrapper for running functions executed with AES encryption

    :param str func: Name of the AESFuncs method to dispatch to
    :param dict load: The decrypted minion payload passed to the method
    :return: A (result, instructions) pair; the second element tells the
        transport how to send the result back ('send' or 'send_private')
    '''
    # Don't honor private functions
    if func.startswith('__'):
        # TODO: return some error? Seems odd to return {}
        return {}, {'fun': 'send'}
    # Run the func
    if hasattr(self, func):
        try:
            start = time.time()
            ret = getattr(self, func)(load)
            log.trace(
                'Master function call %s took %s seconds',
                func, time.time() - start
            )
        except Exception:
            # Never let a handler error kill the dispatcher; log with
            # traceback and return an empty result instead.
            ret = ''
            log.error('Error in function %s:\n', func, exc_info=True)
    else:
        log.error(
            'Received function %s which is unavailable on the master, '
            'returning False', func
        )
        return False, {'fun': 'send'}
    # Don't encrypt the return value for the _return func
    # (we don't care about the return value, so why encrypt it?)
    if func == '_return':
        return ret, {'fun': 'send'}
    if func == '_pillar' and 'id' in load:
        if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
            # Authorized to return old pillar proto
            return ret, {'fun': 'send'}
        # Pillar data is sensitive: send it encrypted so only the
        # requesting minion can decrypt it.
        return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
    # Encrypt the return
    return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs.__verify_load
|
python
|
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
|
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1296-L1326
|
[
"def inspect_stack():\n '''\n Return a string of which function we are currently in.\n '''\n return {'co_name': inspect.stack()[1][3]}\n",
"def __verify_minion(self, id_, token):\n '''\n Take a minion id and a string signed with the minion private key\n The string needs to verify as 'salt' with the minion public key\n\n :param str id_: A minion ID\n :param str token: A string signed with the minion private key\n\n :rtype: bool\n :return: Boolean indicating whether or not the token can be verified.\n '''\n if not salt.utils.verify.valid_id(self.opts, id_):\n return False\n pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)\n\n try:\n pub = salt.crypt.get_rsa_pub_key(pub_path)\n except (IOError, OSError):\n log.warning(\n 'Salt minion claiming to be %s attempted to communicate with '\n 'master, but key could not be read and verification was denied.',\n id_\n )\n return False\n except (ValueError, IndexError, TypeError) as err:\n log.error('Unable to load public key \"%s\": %s', pub_path, err)\n try:\n if salt.crypt.public_decrypt(pub, token) == b'salt':\n return True\n except ValueError as err:\n log.error('Unable to decrypt token: %s', err)\n\n log.error(\n 'Salt minion claiming to be %s has attempted to communicate with '\n 'the master and could not be verified', id_\n )\n return False\n"
] |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
    '''
    Create a new AESFuncs

    :param dict opts: The salt options

    :rtype: AESFuncs
    :returns: Instance for handling AES operations
    '''
    self.opts = opts
    # Fire-only handle on the master event bus (listen=False).
    self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
    self.serial = salt.payload.Serial(opts)
    # Minion matcher/authorizer used by the peer-publish checks.
    self.ckminions = salt.utils.minions.CkMinions(opts)
    # Make a client
    self.local = salt.client.get_local_client(self.opts['conf_file'])
    # Create the master minion to access the external job cache
    self.mminion = salt.minion.MasterMinion(
        self.opts,
        states=False,
        rend=False,
        ignore_config_errors=True
    )
    self.__setup_fileserver()
    self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
    '''
    Set the local file objects from the file server interface
    '''
    # Avoid circular import
    import salt.fileserver
    self.fs_ = salt.fileserver.Fileserver(self.opts)
    # Expose the fileserver entry points as attributes on this object so
    # the request dispatcher can reach them by name.
    self._serve_file = self.fs_.serve_file
    self._file_find = self.fs_._find_file
    self._file_hash = self.fs_.file_hash
    self._file_hash_and_stat = self.fs_.file_hash_and_stat
    self._file_list = self.fs_.file_list
    self._file_list_emptydirs = self.fs_.file_list_emptydirs
    self._dir_list = self.fs_.dir_list
    self._symlink_list = self.fs_.symlink_list
    self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
    '''
    Take a minion id and a string signed with the minion private key
    The string needs to verify as 'salt' with the minion public key

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key

    :rtype: bool
    :return: Boolean indicating whether or not the token can be verified.
    '''
    if not salt.utils.verify.valid_id(self.opts, id_):
        return False
    pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)

    try:
        pub = salt.crypt.get_rsa_pub_key(pub_path)
    except (IOError, OSError):
        log.warning(
            'Salt minion claiming to be %s attempted to communicate with '
            'master, but key could not be read and verification was denied.',
            id_
        )
        return False
    except (ValueError, IndexError, TypeError) as err:
        log.error('Unable to load public key "%s": %s', pub_path, err)
        # Without a usable key the token cannot be checked. The original
        # fell through here and hit an UnboundLocalError on 'pub' below;
        # deny verification instead.
        return False
    try:
        if salt.crypt.public_decrypt(pub, token) == b'salt':
            return True
    except ValueError as err:
        log.error('Unable to decrypt token: %s', err)

    log.error(
        'Salt minion claiming to be %s has attempted to communicate with '
        'the master and could not be verified', id_
    )
    return False
def verify_minion(self, id_, token):
    '''
    Take a minion id and a string signed with the minion private key
    The string needs to verify as 'salt' with the minion public key

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key

    :rtype: bool
    :return: Boolean indicating whether or not the token can be verified.
    '''
    # Public entry point; the real work happens in __verify_minion.
    return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
    '''
    Verify that the passed information authorized a minion to execute

    :param dict clear_load: A publication load from a minion

    :rtype: bool
    :return: A boolean indicating if the minion is allowed to publish the command in the load
    '''
    # Verify that the load is valid
    if 'peer' not in self.opts:
        return False
    if not isinstance(self.opts['peer'], dict):
        return False
    if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
        return False
    # If the command will make a recursive publish don't run
    if clear_load['fun'].startswith('publish.'):
        return False
    # Check the permissions for this minion
    if not self.__verify_minion(clear_load['id'], clear_load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning(
            'Minion id %s is not who it says it is and is attempting '
            'to issue a peer command', clear_load['id']
        )
        return False
    clear_load.pop('tok')
    # Collect the functions this minion may publish: each 'peer' key is
    # a regex matched against the minion id.
    perms = []
    for match in self.opts['peer']:
        if re.match(match, clear_load['id']):
            # This is the list of funcs/modules!
            if isinstance(self.opts['peer'][match], list):
                perms.extend(self.opts['peer'][match])
    if ',' in clear_load['fun']:
        # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
        # Compound call: split the comma-joined function names and the
        # per-function argument strings accordingly.
        clear_load['fun'] = clear_load['fun'].split(',')
        arg_ = []
        for arg in clear_load['arg']:
            arg_.append(arg.split())
        clear_load['arg'] = arg_
    # finally, check the auth of the load
    return self.ckminions.auth_check(
        perms,
        clear_load['fun'],
        clear_load['arg'],
        clear_load['tgt'],
        clear_load.get('tgt_type', 'glob'),
        publish_validate=True)
def _master_tops(self, load):
    '''
    Return the results from an external node classifier if one is
    specified.

    :param dict load: A payload received from a minion
    :return: The results from an external node classifier, or an empty
        dict when the payload fails verification
    '''
    verified = self.__verify_load(load, ('id', 'tok'))
    if verified is False:
        return {}
    return self.masterapi._master_tops(verified, skip_verify=True)

# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
    '''
    Gathers the data from the specified minions' mine.

    :param dict load: A payload received from a minion
    :rtype: dict
    :return: Mine data from the specified minions, or an empty dict
        when the payload fails verification
    '''
    verified = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
    if verified is not False:
        return self.masterapi._mine_get(verified, skip_verify=True)
    return {}
def _mine(self, load):
    '''
    Store the mine data.

    :param dict load: A payload received from a minion
    :rtype: bool
    :return: True if the data has been stored in the mine
    '''
    verified = self.__verify_load(load, ('id', 'data', 'tok'))
    if verified is False:
        return {}
    return self.masterapi._mine(verified, skip_verify=True)
def _mine_delete(self, load):
    '''
    Allow the minion to delete a specific function from its own mine.

    :param dict load: A payload received from a minion
    :rtype: bool
    :return: Boolean indicating whether or not the given function was
        deleted from the mine
    '''
    verified = self.__verify_load(load, ('id', 'fun', 'tok'))
    if verified is not False:
        return self.masterapi._mine_delete(verified)
    return {}
def _mine_flush(self, load):
    '''
    Allow the minion to delete all of its own mine contents.

    :param dict load: A payload received from a minion
    '''
    verified = self.__verify_load(load, ('id', 'tok'))
    if verified is not False:
        return self.masterapi._mine_flush(verified, skip_verify=True)
    return {}
def _file_recv(self, load):
    '''
    Allows minions to send files to the master, files are sent to the
    master file cache

    :param dict load: The minion payload
    :rtype: bool
    :return: True when the chunk was written to the cache, False (or an
        empty dict on failed identity verification) otherwise
    '''
    # 'data' is read below (size check and write), so require it up
    # front instead of crashing with a KeyError on a malformed payload.
    if any(key not in load for key in ('id', 'path', 'loc', 'data')):
        return False

    if not isinstance(load['path'], list):
        return False

    if not self.opts['file_recv']:
        return False

    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False

    if 'loc' in load and load['loc'] < 0:
        log.error('Invalid file pointer: load[loc] < 0')
        return False

    if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
        log.error(
            'file_recv_max_size limit of %d MB exceeded! %s will be '
            'truncated. To successfully push this file, adjust '
            'file_recv_max_size to an integer (in MB) large enough to '
            'accommodate it.', self.opts['file_recv_max_size'], load['path']
        )
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from %s for \'%s\', missing \'%s\'',
            load['id'], inspect_stack()['co_name'], 'tok'
        )
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning('Minion id %s is not who it says it is!', load['id'])
        return {}
    load.pop('tok')

    # Join path
    sep_path = os.sep.join(load['path'])

    # Path normalization should have been done by the sending
    # minion but we can't guarantee it. Re-do it here.
    normpath = os.path.normpath(sep_path)

    # Ensure that this safety check is done after the path
    # have been normalized.
    if os.path.isabs(normpath) or '../' in load['path']:
        # Can overwrite master files!!
        return False

    cpath = os.path.join(
        self.opts['cachedir'],
        'minions',
        load['id'],
        'files',
        normpath)
    # One last safety check here
    if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
        log.warning(
            'Attempt to write received file outside of master cache '
            'directory! Requested path: %s. Access denied.', cpath
        )
        return False
    cdir = os.path.dirname(cpath)
    if not os.path.isdir(cdir):
        try:
            os.makedirs(cdir)
        except os.error:
            # Another writer may have created it concurrently.
            pass
    # Append for continuation chunks (loc != 0); truncate for the first.
    if os.path.isfile(cpath) and load['loc'] != 0:
        mode = 'ab'
    else:
        mode = 'wb'
    with salt.utils.files.fopen(cpath, mode) as fp_:
        if load['loc']:
            fp_.seek(load['loc'])
        fp_.write(salt.utils.stringutils.to_bytes(load['data']))
    return True
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'Drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
if the requesting minion also initialted the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion pay
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
.. code-block:: bash
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict
:return: If the load is invalid, it may be returned. No key operation is performed.
:rtype: bool
:return: True if key was revoked, False if not
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
'''
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call %s took %s seconds',
func, time.time() - start
)
except Exception:
ret = ''
log.error('Error in function %s:\n', func, exc_info=True)
else:
log.error(
'Received function %s which is unavailable on the master, '
'returning False', func
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs._master_tops
|
python
|
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
|
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1328-L1339
| null |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
'Salt minion claiming to be %s attempted to communicate with '
'master, but key could not be read and verification was denied.',
id_
)
return False
except (ValueError, IndexError, TypeError) as err:
log.error('Unable to load public key "%s": %s', pub_path, err)
try:
if salt.crypt.public_decrypt(pub, token) == b'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: %s', err)
log.error(
'Salt minion claiming to be %s has attempted to communicate with '
'the master and could not be verified', id_
)
return False
def verify_minion(self, id_, token):
    '''
    Public entry point for minion token verification: check that *token*
    was signed with the private key matching the public key registered
    for minion *id_* (the decrypted value must be 'salt').

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key
    :rtype: bool
    :return: Boolean indicating whether or not the token can be verified.
    '''
    verified = self.__verify_minion(id_, token)
    return verified
def __verify_minion_publish(self, clear_load):
    '''
    Verify that the passed information authorized a minion to execute

    :param dict clear_load: A publication load from a minion
    :rtype: bool
    :return: A boolean indicating if the minion is allowed to publish the command in the load
    '''
    # Verify that the load is valid
    if 'peer' not in self.opts:
        # Peer publishing is disabled entirely when no 'peer' config exists
        return False
    if not isinstance(self.opts['peer'], dict):
        return False
    if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
        return False
    # If the command will make a recursive publish don't run
    if clear_load['fun'].startswith('publish.'):
        return False
    # Check the permissions for this minion
    if not self.__verify_minion(clear_load['id'], clear_load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning(
            'Minion id %s is not who it says it is and is attempting '
            'to issue a peer command', clear_load['id']
        )
        return False
    clear_load.pop('tok')
    # Collect every function/module pattern granted to this minion id;
    # the keys of the 'peer' config are regexes matched against the id.
    perms = []
    for match in self.opts['peer']:
        if re.match(match, clear_load['id']):
            # This is the list of funcs/modules!
            if isinstance(self.opts['peer'][match], list):
                perms.extend(self.opts['peer'][match])
    if ',' in clear_load['fun']:
        # Compound call: split the comma-joined function list and split
        # each argument string into its own argument vector, e.g.
        # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
        clear_load['fun'] = clear_load['fun'].split(',')
        arg_ = []
        for arg in clear_load['arg']:
            arg_.append(arg.split())
        clear_load['arg'] = arg_
    # finally, check the auth of the load
    return self.ckminions.auth_check(
        perms,
        clear_load['fun'],
        clear_load['arg'],
        clear_load['tgt'],
        clear_load.get('tgt_type', 'glob'),
        publish_validate=True)
def __verify_load(self, load, verify_keys):
    '''
    Common verification for AES payloads: require every key in
    *verify_keys* plus a 'tok' entry, and check that the token proves the
    sender owns the registered key for minion load['id'].

    :param dict load: A payload received from a minion
    :param list verify_keys: Keys that must be present in the load
    :return: The load (with 'tok' removed) when verification succeeds,
        otherwise ``False``.
    '''
    for key in verify_keys:
        if key not in load:
            return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from %s for \'%s\', missing \'%s\'',
            load['id'], inspect_stack()['co_name'], 'tok'
        )
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The token did not check out against the minion's registered
        # key, so refuse to act on this payload.
        log.warning('Minion id %s is not who it says it is!', load['id'])
        return False
    # 'tok' is guaranteed present here; strip it before handing the load
    # back to the caller.
    load.pop('tok')
    return load
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
    '''
    Gathers the data from the specified minions' mine

    :param dict load: A payload received from a minion
    :rtype: dict
    :return: Mine data from the specified minions
    '''
    verified = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
    if verified is False:
        return {}
    # Already verified above, so let masterapi skip its own check
    return self.masterapi._mine_get(verified, skip_verify=True)
def _mine(self, load):
    '''
    Store the mine data

    :param dict load: A payload received from a minion
    :rtype: bool
    :return: True if the data has been stored in the mine
    '''
    verified = self.__verify_load(load, ('id', 'data', 'tok'))
    if verified is False:
        return {}
    # Already verified above, so let masterapi skip its own check
    return self.masterapi._mine(verified, skip_verify=True)
def _mine_delete(self, load):
    '''
    Allow the minion to delete a specific function from its own mine

    :param dict load: A payload received from a minion
    :rtype: bool
    :return: Boolean indicating whether or not the given function was deleted from the mine
    '''
    verified = self.__verify_load(load, ('id', 'fun', 'tok'))
    if verified is False:
        return {}
    return self.masterapi._mine_delete(verified)
def _mine_flush(self, load):
    '''
    Allow the minion to delete all of its own mine contents

    :param dict load: A payload received from a minion
    '''
    verified = self.__verify_load(load, ('id', 'tok'))
    if verified is False:
        return {}
    # Already verified above, so let masterapi skip its own check
    return self.masterapi._mine_flush(verified, skip_verify=True)
def _file_recv(self, load):
    '''
    Allows minions to send files to the master, files are sent to the
    master file cache

    :param dict load: The minion payload; must carry 'id', 'path' (a list
        of path components), 'loc' (byte offset) and 'data'.
    :return: True when the chunk was written, False/{} on any rejection.
    '''
    if any(key not in load for key in ('id', 'path', 'loc')):
        return False
    if not isinstance(load['path'], list):
        return False
    if not self.opts['file_recv']:
        # Feature disabled in the master config
        return False
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    if 'loc' in load and load['loc'] < 0:
        log.error('Invalid file pointer: load[loc] < 0')
        return False
    # Reject chunks that would push the file past file_recv_max_size
    # (configured in MB; 0x100000 bytes per MB)
    if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
        log.error(
            'file_recv_max_size limit of %d MB exceeded! %s will be '
            'truncated. To successfully push this file, adjust '
            'file_recv_max_size to an integer (in MB) large enough to '
            'accommodate it.', self.opts['file_recv_max_size'], load['path']
        )
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from %s for \'%s\', missing \'%s\'',
            load['id'], inspect_stack()['co_name'], 'tok'
        )
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning('Minion id %s is not who it says it is!', load['id'])
        return {}
    load.pop('tok')

    # Join path
    sep_path = os.sep.join(load['path'])

    # Path normalization should have been done by the sending
    # minion but we can't guarantee it. Re-do it here.
    normpath = os.path.normpath(sep_path)

    # Ensure that this safety check is done after the path
    # have been normalized.
    # NOTE(review): load['path'] is a list, so `'../' in load['path']`
    # tests for a literal '../' element rather than a substring; actual
    # traversal is caught by the isabs/normpath/startswith checks.
    if os.path.isabs(normpath) or '../' in load['path']:
        # Can overwrite master files!!
        return False

    cpath = os.path.join(
        self.opts['cachedir'],
        'minions',
        load['id'],
        'files',
        normpath)
    # One last safety check here
    if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
        log.warning(
            'Attempt to write received file outside of master cache '
            'directory! Requested path: %s. Access denied.', cpath
        )
        return False
    cdir = os.path.dirname(cpath)
    if not os.path.isdir(cdir):
        try:
            os.makedirs(cdir)
        except os.error:
            # Another worker may have created it concurrently
            pass
    # Append when resuming mid-file, otherwise (re)create from scratch
    if os.path.isfile(cpath) and load['loc'] != 0:
        mode = 'ab'
    else:
        mode = 'wb'
    with salt.utils.files.fopen(cpath, mode) as fp_:
        if load['loc']:
            fp_.seek(load['loc'])

        fp_.write(salt.utils.stringutils.to_bytes(load['data']))
    return True
def _pillar(self, load):
    '''
    Return the pillar data for the minion

    :param dict load: Minion payload; must contain 'id' and 'grains'
    :rtype: dict
    :return: The pillar data for the minion

    NOTE(review): unlike most AES handlers this method does not require
    or verify a 'tok' entry -- confirm that is intentional.
    '''
    if any(key not in load for key in ('id', 'grains')):
        return False
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    load['grains']['id'] = load['id']

    pillar = salt.pillar.get_pillar(
        self.opts,
        load['grains'],
        load['id'],
        # 'env' is the legacy name for 'saltenv'
        load.get('saltenv', load.get('env')),
        ext=load.get('ext'),
        pillar_override=load.get('pillar_override', {}),
        pillarenv=load.get('pillarenv'),
        extra_minion_data=load.get('extra_minion_data'))
    data = pillar.compile_pillar()
    # Refresh the fileserver's view of the opts after pillar compilation
    self.fs_.update_opts()
    if self.opts.get('minion_data_cache', False):
        # Cache grains and pillar for this minion so other subsystems
        # (targeting, mine, etc.) can read them from the master cache
        self.masterapi.cache.store('minions/{0}'.format(load['id']),
                                   'data',
                                   {'grains': load['grains'],
                                    'pillar': data})
        if self.opts.get('minion_data_cache_events') is True:
            self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
    return data
def _minion_event(self, load):
    '''
    Receive an event from the minion and fire it on the master event
    interface

    :param dict load: The minion payload
    '''
    verified = self.__verify_load(load, ('id', 'tok'))
    if verified is False:
        return {}
    # Route to master event bus
    self.masterapi._minion_event(verified)
    # Process locally
    self._handle_minion_event(verified)
def _handle_minion_event(self, load):
    '''
    React locally to events forwarded by a minion: surface '_salt_error'
    tags in the master log, and record the minion lists attached to
    (syndic-forwarded) job events in the job cache.
    '''
    id_ = load['id']
    if load.get('tag', '') == '_salt_error':
        log.error(
            'Received minion error from [%s]: %s',
            id_, load['data']['message']
        )
    for event in load.get('events', []):
        event_data = event.get('data', {})
        if 'minions' not in event_data:
            continue
        jid = event_data.get('jid')
        if not jid:
            continue
        try:
            salt.utils.job.store_minions(
                self.opts,
                jid,
                event_data['minions'],
                mminion=self.mminion,
                syndic_id=id_)
        except (KeyError, salt.exceptions.SaltCacheError) as exc:
            log.error(
                'Could not add minion(s) %s for job %s: %s',
                event_data['minions'], jid, exc
            )
def _return(self, load):
    '''
    Handle the return data sent from the minions.
    Takes the return, verifies it and fires it on the master event bus.
    Typically, this event is consumed by the Salt CLI waiting on the other
    end of the event bus but could be heard by any listener on the bus.

    :param dict load: The minion payload
    '''
    # Enforce mandatory signing first: if the master requires signed
    # messages and this payload carries none, drop it outright.
    if self.opts['require_minion_sign_messages'] and 'sig' not in load:
        log.critical(
            '_return: Master is requiring minions to sign their '
            'messages, but there is no signature in this payload from '
            '%s.', load['id']
        )
        return False

    if 'sig' in load:
        log.trace('Verifying signed event publish from minion')
        # The signature must be checked against the payload *without*
        # its 'sig' key, hence the pop before serializing.
        sig = load.pop('sig')
        this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
        serialized_load = salt.serializers.msgpack.serialize(load)
        if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
            log.info('Failed to verify event signature from minion %s.', load['id'])
            if self.opts['drop_messages_signature_fail']:
                log.critical(
                    'Drop_messages_signature_fail is enabled, dropping '
                    'message from %s', load['id']
                )
                return False
            else:
                log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
        # Restore the signature so downstream consumers see the full load
        load['sig'] = sig

    try:
        salt.utils.job.store_job(
            self.opts, load, event=self.event, mminion=self.mminion)
    except salt.exceptions.SaltCacheError:
        log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
    '''
    Receive a syndic minion return and format it to look like returns from
    individual minions.

    :param dict load: The minion payload
    '''
    loads = load.get('load')
    if not isinstance(loads, list):
        loads = [load]  # support old syndics not aggregating returns
    for load in loads:
        # Verify the load
        if any(key not in load for key in ('return', 'jid', 'id')):
            continue
        # if we have a load, save it
        if load.get('load'):
            fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
            self.mminion.returners[fstr](load['jid'], load['load'])

        # Register the syndic
        # An empty marker file under cachedir/syndics/<id> records that
        # this syndic has been seen by the master.
        syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
        if not os.path.exists(syndic_cache_path):
            path_name = os.path.split(syndic_cache_path)[0]
            if not os.path.exists(path_name):
                os.makedirs(path_name)
            with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
                wfh.write('')

        # Format individual return loads
        # load['return'] is keyed by the real minion id; rebuild one
        # per-minion return dict per entry and feed it through _return()
        for key, item in six.iteritems(load['return']):
            ret = {'jid': load['jid'],
                   'id': key}
            ret.update(item)
            if 'master_id' in load:
                ret['master_id'] = load['master_id']
            if 'fun' in load:
                ret['fun'] = load['fun']
            if 'arg' in load:
                ret['fun_args'] = load['arg']
            if 'out' in load:
                ret['out'] = load['out']
            if 'sig' in load:
                ret['sig'] = load['sig']
            self._return(ret)
def minion_runner(self, clear_load):
    '''
    Execute a runner from a minion, return the runner's function data

    :param dict clear_load: The minion payload
    :rtype: dict
    :return: The runner function data
    '''
    if self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok')) is False:
        return {}
    # Verification stripped 'tok' from clear_load in place; hand the
    # remaining payload to the master API.
    return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
    '''
    Request the return data from a specific jid, only allowed
    if the requesting minion also initiated the execution.

    :param dict load: The minion payload
    :rtype: dict
    :return: Return data corresponding to a given JID
    '''
    load = self.__verify_load(load, ('jid', 'id', 'tok'))
    if load is False:
        return {}
    # Check that this minion can access this data
    auth_cache = os.path.join(
        self.opts['cachedir'],
        'publish_auth')
    if not os.path.isdir(auth_cache):
        os.makedirs(auth_cache)
    # The auth file for a jid stores the id of the minion that published
    # it; only that same minion may read the returns back.
    jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
    # NOTE(review): if no auth file exists for this jid the open() below
    # raises IOError rather than returning {} -- confirm callers expect that.
    with salt.utils.files.fopen(jid_fn, 'r') as fp_:
        if not load['id'] == fp_.read():
            return {}
    # Grab the latest and return
    return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.
    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions
    The config will look like this:

    .. code-block:: bash

        peer:
          .*:
            - .*

    This configuration will enable all minions to execute all commands:

    .. code-block:: bash

        peer:
          foo.example.com:
            - test.*

    The above configuration will only allow the minion foo.example.com to
    execute commands from the test module.

    :param dict clear_load: The minion payload
    '''
    if self.__verify_minion_publish(clear_load):
        return self.masterapi.minion_pub(clear_load)
    # Not authorized by the 'peer' configuration
    return {}
def minion_publish(self, clear_load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.
    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions
    The config will look like this:

    .. code-block:: bash

        peer:
          .*:
            - .*

    This configuration will enable all minions to execute all commands.

    .. code-block:: bash

        peer:
          foo.example.com:
            - test.*

    The above configuration will only allow the minion foo.example.com to
    execute commands from the test module.

    :param dict clear_load: The minion payload
    '''
    if self.__verify_minion_publish(clear_load):
        return self.masterapi.minion_publish(clear_load)
    # Not authorized by the 'peer' configuration
    return {}
def revoke_auth(self, load):
    '''
    Allow a minion to request revocation of its own key

    :param dict load: The minion payload
    :rtype: dict
    :return: If the load is invalid, it may be returned. No key operation is performed.
    :rtype: bool
    :return: True if key was revoked, False if not
    '''
    load = self.__verify_load(load, ('id', 'tok'))
    # BUG FIX: the failed-verification check must come before the config
    # check below -- the original consulted allow_minion_key_revoke first
    # and then indexed load['id'] in the log call, which raised TypeError
    # when load was False.
    if load is False:
        return load
    if not self.opts.get('allow_minion_key_revoke', False):
        log.warning(
            'Minion %s requested key revoke, but allow_minion_key_revoke '
            'is set to False', load['id']
        )
        return load
    return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
    '''
    Wrapper for running functions executed with AES encryption

    :param str func: Name of the AESFuncs method to dispatch to
    :param dict load: The minion payload passed to that method
    :return: A 2-tuple of (result, routing-dict); the routing dict tells
        the transport layer how to send (and whether to encrypt) the reply.
    '''
    # Don't honor private functions
    if func.startswith('__'):
        # TODO: return some error? Seems odd to return {}
        return {}, {'fun': 'send'}
    # Run the func
    if hasattr(self, func):
        try:
            start = time.time()
            ret = getattr(self, func)(load)
            log.trace(
                'Master function call %s took %s seconds',
                func, time.time() - start
            )
        except Exception:
            # Swallow the exception so one broken handler cannot kill
            # the worker; the traceback is preserved in the master log.
            ret = ''
            log.error('Error in function %s:\n', func, exc_info=True)
    else:
        log.error(
            'Received function %s which is unavailable on the master, '
            'returning False', func
        )
        return False, {'fun': 'send'}
    # Don't encrypt the return value for the _return func
    # (we don't care about the return value, so why encrypt it?)
    if func == '_return':
        return ret, {'fun': 'send'}
    if func == '_pillar' and 'id' in load:
        if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
            # Authorized to return old pillar proto
            return ret, {'fun': 'send'}
        # Pillar may contain secrets: route it back encrypted with a key
        # only the requesting minion ('tgt') can use.
        return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
    # Encrypt the return
    return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs._mine_get
|
python
|
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
|
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1379-L1392
| null |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
'Salt minion claiming to be %s attempted to communicate with '
'master, but key could not be read and verification was denied.',
id_
)
return False
except (ValueError, IndexError, TypeError) as err:
log.error('Unable to load public key "%s": %s', pub_path, err)
try:
if salt.crypt.public_decrypt(pub, token) == b'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: %s', err)
log.error(
'Salt minion claiming to be %s has attempted to communicate with '
'the master and could not be verified', id_
)
return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# have been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'Drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
if the requesting minion also initialted the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion pay
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
.. code-block:: bash
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
    '''
    Allow a minion to request revocation of its own key

    :param dict load: The minion payload

    :rtype: bool or dict
    :return: False if the load could not be verified, the (token-free)
        load if key revocation is disabled on this master, otherwise the
        result of the master API's key revocation (True on success).
    '''
    load = self.__verify_load(load, ('id', 'tok'))
    # Check validity FIRST: if verification failed, ``load`` is False and
    # the ``load['id']`` argument to log.warning below would raise a
    # TypeError (log args are evaluated eagerly at the call site).
    if load is False:
        return load
    if not self.opts.get('allow_minion_key_revoke', False):
        log.warning(
            'Minion %s requested key revoke, but allow_minion_key_revoke '
            'is set to False', load['id']
        )
        return load
    return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
'''
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call %s took %s seconds',
func, time.time() - start
)
except Exception:
ret = ''
log.error('Error in function %s:\n', func, exc_info=True)
else:
log.error(
'Received function %s which is unavailable on the master, '
'returning False', func
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs._mine
|
python
|
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
|
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1394-L1406
| null |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
    '''
    Verify that ``token`` was signed by the minion claiming to be ``id_``.

    The token must decrypt to the literal bytes ``salt`` using the
    minion's cached public key (``<pki_dir>/minions/<id_>``).

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key

    :rtype: bool
    :return: True only if the token can be verified, False otherwise
    '''
    # Reject invalid IDs before touching the filesystem, since ``id_``
    # is used to build a path below.
    if not salt.utils.verify.valid_id(self.opts, id_):
        return False
    pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
    try:
        pub = salt.crypt.get_rsa_pub_key(pub_path)
    except (IOError, OSError):
        log.warning(
            'Salt minion claiming to be %s attempted to communicate with '
            'master, but key could not be read and verification was denied.',
            id_
        )
        return False
    except (ValueError, IndexError, TypeError) as err:
        log.error('Unable to load public key "%s": %s', pub_path, err)
        # The key could not be parsed, so there is nothing to verify
        # against. Previously this fell through and hit a NameError on
        # the unbound ``pub`` below.
        return False
    try:
        if salt.crypt.public_decrypt(pub, token) == b'salt':
            return True
    except ValueError as err:
        log.error('Unable to decrypt token: %s', err)
    log.error(
        'Salt minion claiming to be %s has attempted to communicate with '
        'the master and could not be verified', id_
    )
    return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# have been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
    '''
    Receive an event from the minion and fire it on the master event
    interface

    :param dict load: The minion payload
    :return: ``{}`` if the payload could not be verified; otherwise
        implicitly None after the event has been routed and processed
    '''
    load = self.__verify_load(load, ('id', 'tok'))
    if load is False:
        return {}
    # Route to master event bus
    self.masterapi._minion_event(load)
    # Process locally
    self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
    '''
    Handle the return data sent from the minions.

    Takes the return, verifies it and fires it on the master event bus.
    Typically, this event is consumed by the Salt CLI waiting on the other
    end of the event bus but could be heard by any listener on the bus.

    :param dict load: The minion payload
    :return: False if a required/valid signature is missing and the master
        is configured to drop such messages; otherwise implicitly None
    '''
    if self.opts['require_minion_sign_messages'] and 'sig' not in load:
        log.critical(
            '_return: Master is requiring minions to sign their '
            'messages, but there is no signature in this payload from '
            '%s.', load['id']
        )
        return False
    if 'sig' in load:
        log.trace('Verifying signed event publish from minion')
        # The signature covers the serialized load WITHOUT the 'sig' key,
        # so pop it before serializing and restore it afterwards.
        sig = load.pop('sig')
        this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
        serialized_load = salt.serializers.msgpack.serialize(load)
        if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
            log.info('Failed to verify event signature from minion %s.', load['id'])
            if self.opts['drop_messages_signature_fail']:
                log.critical(
                    'Drop_messages_signature_fail is enabled, dropping '
                    'message from %s', load['id']
                )
                return False
            else:
                log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
        load['sig'] = sig
    try:
        salt.utils.job.store_job(
            self.opts, load, event=self.event, mminion=self.mminion)
    except salt.exceptions.SaltCacheError:
        # Best-effort: a cache failure must not take down the master
        log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
    '''
    Request the return data from a specific jid, only allowed
    if the requesting minion also initiated the execution.

    :param dict load: The minion payload

    :rtype: dict
    :return: Return data corresponding to a given JID; empty dict if the
        payload could not be verified or the minion did not publish the job
    '''
    load = self.__verify_load(load, ('jid', 'id', 'tok'))
    if load is False:
        return {}
    # Check that this minion can access this data
    auth_cache = os.path.join(
        self.opts['cachedir'],
        'publish_auth')
    if not os.path.isdir(auth_cache):
        os.makedirs(auth_cache)
    jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
    # The auth file records the id of the minion that published the job;
    # only that minion may read the returns back.
    # NOTE(review): if no auth file exists for this jid the open() below
    # raises IOError -- presumably the file is always written at publish
    # time; verify against the publishing path.
    with salt.utils.files.fopen(jid_fn, 'r') as fp_:
        if not load['id'] == fp_.read():
            return {}
    # Grab the latest and return
    return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion pay
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
.. code-block:: bash
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict
:return: If the load is invalid, it may be returned. No key operation is performed.
:rtype: bool
:return: True if key was revoked, False if not
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
    '''
    Wrapper for running functions executed with AES encryption

    :param str func: The name of the method on this class to run
    :param dict load: The decrypted minion payload passed to the method
    :return: A 2-tuple of (result, instructions), where the instructions
        dict tells the transport layer how to send the result back
        ('send' broadcasts; 'send_private' encrypts for one minion only)
    '''
    # Don't honor private functions
    if func.startswith('__'):
        # TODO: return some error? Seems odd to return {}
        return {}, {'fun': 'send'}
    # Run the func
    if hasattr(self, func):
        try:
            start = time.time()
            ret = getattr(self, func)(load)
            log.trace(
                'Master function call %s took %s seconds',
                func, time.time() - start
            )
        except Exception:
            # Broad catch at the dispatch boundary: a failing handler
            # must not take down the master; the traceback is logged.
            ret = ''
            log.error('Error in function %s:\n', func, exc_info=True)
    else:
        log.error(
            'Received function %s which is unavailable on the master, '
            'returning False', func
        )
        return False, {'fun': 'send'}
    # Don't encrypt the return value for the _return func
    # (we don't care about the return value, so why encrypt it?)
    if func == '_return':
        return ret, {'fun': 'send'}
    if func == '_pillar' and 'id' in load:
        if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
            # Authorized to return old pillar proto
            return ret, {'fun': 'send'}
        # Pillar data is encrypted so only the requesting minion
        # (tgt == load['id']) can read it
        return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
    # Encrypt the return
    return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs._mine_delete
|
python
|
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
|
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1408-L1421
| null |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
    '''
    Verify that ``token`` was signed by the minion claiming to be ``id_``.

    The token must decrypt to the literal bytes ``salt`` using the
    minion's cached public key (``<pki_dir>/minions/<id_>``).

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key

    :rtype: bool
    :return: True only if the token can be verified, False otherwise
    '''
    # Reject invalid IDs before touching the filesystem, since ``id_``
    # is used to build a path below.
    if not salt.utils.verify.valid_id(self.opts, id_):
        return False
    pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
    try:
        pub = salt.crypt.get_rsa_pub_key(pub_path)
    except (IOError, OSError):
        log.warning(
            'Salt minion claiming to be %s attempted to communicate with '
            'master, but key could not be read and verification was denied.',
            id_
        )
        return False
    except (ValueError, IndexError, TypeError) as err:
        log.error('Unable to load public key "%s": %s', pub_path, err)
        # The key could not be parsed, so there is nothing to verify
        # against. Previously this fell through and hit a NameError on
        # the unbound ``pub`` below.
        return False
    try:
        if salt.crypt.public_decrypt(pub, token) == b'salt':
            return True
    except ValueError as err:
        log.error('Unable to decrypt token: %s', err)
    log.error(
        'Salt minion claiming to be %s has attempted to communicate with '
        'the master and could not be verified', id_
    )
    return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
    '''
    Gathers the data from the specified minions' mine

    :param dict load: A payload received from a minion

    :rtype: dict
    :return: Mine data from the specified minions
    '''
    verified = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
    if verified is False:
        return {}
    # The token has already been checked; the master API can skip it.
    return self.masterapi._mine_get(verified, skip_verify=True)
def _mine(self, load):
    '''
    Store the mine data

    :param dict load: A payload received from a minion

    :rtype: bool
    :return: True if the data has been stored in the mine
    '''
    verified = self.__verify_load(load, ('id', 'data', 'tok'))
    # NOTE: an empty dict (not False) is handed back on a failed
    # verification, matching the other mine handlers in this class.
    if verified is False:
        return {}
    return self.masterapi._mine(verified, skip_verify=True)
def _mine_flush(self, load):
    '''
    Allow the minion to delete all of its own mine contents

    :param dict load: A payload received from a minion
    '''
    verified = self.__verify_load(load, ('id', 'tok'))
    if verified is False:
        return {}
    return self.masterapi._mine_flush(verified, skip_verify=True)
def _file_recv(self, load):
    '''
    Allows minions to send files to the master, files are sent to the
    master file cache

    :param dict load: Minion payload; must contain ``id``, ``path`` (a
        list of path components), ``loc`` (byte offset into the file),
        ``data`` and ``tok``
    :rtype: bool
    :return: True once the chunk has been written; False (or ``{}`` on a
        failed token check) when the push is rejected
    '''
    if any(key not in load for key in ('id', 'path', 'loc')):
        return False
    if not isinstance(load['path'], list):
        return False
    if not self.opts['file_recv']:
        # File pushing is disabled master-side; reject outright.
        return False
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    if 'loc' in load and load['loc'] < 0:
        log.error('Invalid file pointer: load[loc] < 0')
        return False
    # file_recv_max_size is expressed in MB; 0x100000 bytes == 1 MB.
    if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
        log.error(
            'file_recv_max_size limit of %d MB exceeded! %s will be '
            'truncated. To successfully push this file, adjust '
            'file_recv_max_size to an integer (in MB) large enough to '
            'accommodate it.', self.opts['file_recv_max_size'], load['path']
        )
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from %s for \'%s\', missing \'%s\'',
            load['id'], inspect_stack()['co_name'], 'tok'
        )
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning('Minion id %s is not who it says it is!', load['id'])
        return {}
    load.pop('tok')
    # Join path
    sep_path = os.sep.join(load['path'])
    # Path normalization should have been done by the sending
    # minion but we can't guarantee it. Re-do it here.
    normpath = os.path.normpath(sep_path)
    # Ensure that this safety check is done after the path
    # have been normalized.
    if os.path.isabs(normpath) or '../' in load['path']:
        # Can overwrite master files!!
        return False
    cpath = os.path.join(
        self.opts['cachedir'],
        'minions',
        load['id'],
        'files',
        normpath)
    # One last safety check here
    if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
        log.warning(
            'Attempt to write received file outside of master cache '
            'directory! Requested path: %s. Access denied.', cpath
        )
        return False
    cdir = os.path.dirname(cpath)
    if not os.path.isdir(cdir):
        try:
            os.makedirs(cdir)
        except os.error:
            # Another process may have created it concurrently.
            pass
    # Append when the minion continues a chunked upload, else truncate.
    if os.path.isfile(cpath) and load['loc'] != 0:
        mode = 'ab'
    else:
        mode = 'wb'
    with salt.utils.files.fopen(cpath, mode) as fp_:
        if load['loc']:
            fp_.seek(load['loc'])
        fp_.write(salt.utils.stringutils.to_bytes(load['data']))
    return True
def _pillar(self, load):
    '''
    Return the pillar data for the minion

    :param dict load: Minion payload; must contain ``id`` and ``grains``
    :rtype: dict
    :return: The pillar data for the minion (False if the load is invalid)
    '''
    if any(key not in load for key in ('id', 'grains')):
        return False
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    load['grains']['id'] = load['id']
    pillar = salt.pillar.get_pillar(
        self.opts,
        load['grains'],
        load['id'],
        # fall back to the legacy 'env' key when 'saltenv' is absent
        load.get('saltenv', load.get('env')),
        ext=load.get('ext'),
        pillar_override=load.get('pillar_override', {}),
        pillarenv=load.get('pillarenv'),
        extra_minion_data=load.get('extra_minion_data'))
    data = pillar.compile_pillar()
    # Pick up any fileserver option changes made while rendering pillar.
    self.fs_.update_opts()
    if self.opts.get('minion_data_cache', False):
        # Cache grains and pillar master-side under minions/<id>.
        self.masterapi.cache.store('minions/{0}'.format(load['id']),
                                   'data',
                                   {'grains': load['grains'],
                                    'pillar': data})
        if self.opts.get('minion_data_cache_events') is True:
            self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
    return data
def _minion_event(self, load):
    '''
    Receive an event from the minion and fire it on the master event
    interface

    :param dict load: The minion payload
    '''
    verified = self.__verify_load(load, ('id', 'tok'))
    if verified is False:
        return {}
    # Forward to the master event bus, then act on the event locally.
    self.masterapi._minion_event(verified)
    self._handle_minion_event(verified)
def _handle_minion_event(self, load):
    '''
    Act on specific events from minions

    :param dict load: The minion payload
    '''
    id_ = load['id']
    if load.get('tag', '') == '_salt_error':
        log.error(
            'Received minion error from [%s]: %s',
            id_, load['data']['message']
        )
    for event in load.get('events', []):
        event_data = event.get('data', {})
        if 'minions' in event_data:
            # An event carrying a 'minions' list announces the targets
            # of a job; record them against the jid in the job cache.
            jid = event_data.get('jid')
            if not jid:
                continue
            minions = event_data['minions']
            try:
                salt.utils.job.store_minions(
                    self.opts,
                    jid,
                    minions,
                    mminion=self.mminion,
                    syndic_id=id_)
            except (KeyError, salt.exceptions.SaltCacheError) as exc:
                # Best-effort: failing to record targets must not kill
                # event processing.
                log.error(
                    'Could not add minion(s) %s for job %s: %s',
                    minions, jid, exc
                )
def _return(self, load):
    '''
    Handle the return data sent from the minions.
    Takes the return, verifies it and fires it on the master event bus.
    Typically, this event is consumed by the Salt CLI waiting on the other
    end of the event bus but could be heard by any listener on the bus.

    :param dict load: The minion payload
    '''
    if self.opts['require_minion_sign_messages'] and 'sig' not in load:
        log.critical(
            '_return: Master is requiring minions to sign their '
            'messages, but there is no signature in this payload from '
            '%s.', load['id']
        )
        return False
    if 'sig' in load:
        log.trace('Verifying signed event publish from minion')
        # The signature covers the load *without* the 'sig' key, so it
        # is popped before serializing and restored afterwards.
        sig = load.pop('sig')
        this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
        serialized_load = salt.serializers.msgpack.serialize(load)
        if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
            log.info('Failed to verify event signature from minion %s.', load['id'])
            if self.opts['drop_messages_signature_fail']:
                log.critical(
                    'Drop_messages_signature_fail is enabled, dropping '
                    'message from %s', load['id']
                )
                return False
            else:
                log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
        load['sig'] = sig
    try:
        salt.utils.job.store_job(
            self.opts, load, event=self.event, mminion=self.mminion)
    except salt.exceptions.SaltCacheError:
        log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
    '''
    Receive a syndic minion return and format it to look like returns from
    individual minions.

    :param dict load: The minion payload
    '''
    loads = load.get('load')
    if not isinstance(loads, list):
        loads = [load]  # support old syndics not aggregating returns
    for load in loads:
        # Verify the load
        if any(key not in load for key in ('return', 'jid', 'id')):
            continue
        # if we have a load, save it
        if load.get('load'):
            fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
            self.mminion.returners[fstr](load['jid'], load['load'])
        # Register the syndic
        # (an empty marker file named after the syndic id is enough)
        syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
        if not os.path.exists(syndic_cache_path):
            path_name = os.path.split(syndic_cache_path)[0]
            if not os.path.exists(path_name):
                os.makedirs(path_name)
            with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
                wfh.write('')
        # Format individual return loads
        for key, item in six.iteritems(load['return']):
            # Rebuild a per-minion return dict from the aggregated
            # syndic return, carrying over the shared job metadata.
            ret = {'jid': load['jid'],
                   'id': key}
            ret.update(item)
            if 'master_id' in load:
                ret['master_id'] = load['master_id']
            if 'fun' in load:
                ret['fun'] = load['fun']
            if 'arg' in load:
                ret['fun_args'] = load['arg']
            if 'out' in load:
                ret['out'] = load['out']
            if 'sig' in load:
                ret['sig'] = load['sig']
            self._return(ret)
def minion_runner(self, clear_load):
    '''
    Execute a runner from a minion, return the runner's function data

    :param dict clear_load: The minion payload

    :rtype: dict
    :return: The runner function data
    '''
    # __verify_load also strips 'tok' out of clear_load in place.
    if self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok')) is False:
        return {}
    return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
    '''
    Request the return data from a specific jid, only allowed
    if the requesting minion also initiated the execution.

    :param dict load: The minion payload

    :rtype: dict
    :return: Return data corresponding to a given JID
    '''
    load = self.__verify_load(load, ('jid', 'id', 'tok'))
    if load is False:
        return {}
    # Check that this minion can access this data
    auth_cache = os.path.join(
        self.opts['cachedir'],
        'publish_auth')
    if not os.path.isdir(auth_cache):
        os.makedirs(auth_cache)
    # The per-jid file holds the id of the minion allowed to fetch it;
    # any other requester gets an empty result.
    jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
    with salt.utils.files.fopen(jid_fn, 'r') as fp_:
        if not load['id'] == fp_.read():
            return {}
    # Grab the latest and return
    return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.

    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions

    The config will look like this:

    .. code-block:: bash

        peer:
          .*:
            - .*

    This configuration will enable all minions to execute all commands:

    .. code-block:: bash

        peer:
          foo.example.com:
            - test.*

    The above configuration will only allow the minion foo.example.com to
    execute commands from the test module.

    :param dict clear_load: The minion payload
    '''
    if not self.__verify_minion_publish(clear_load):
        return {}
    else:
        return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.

    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions

    The config will look like this:

    .. code-block:: bash

        peer:
          .*:
            - .*

    This configuration will enable all minions to execute all commands.

    .. code-block:: bash

        peer:
          foo.example.com:
            - test.*

    The above configuration will only allow the minion foo.example.com to
    execute commands from the test module.

    :param dict clear_load: The minion payload
    '''
    if not self.__verify_minion_publish(clear_load):
        return {}
    else:
        return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
    '''
    Allow a minion to request revocation of its own key

    :param dict load: The minion payload

    :rtype: bool
    :return: False if the load is invalid or revocation is disallowed,
        otherwise the result of the master API revocation (True if the
        key was revoked, False if not)
    '''
    load = self.__verify_load(load, ('id', 'tok'))
    # BUGFIX: bail out before touching load['id'] below. __verify_load
    # returns False (not a dict) on a failed verification, and the
    # original order subscripted it in the warning path, raising
    # TypeError instead of rejecting the request cleanly.
    if load is False:
        return load
    if not self.opts.get('allow_minion_key_revoke', False):
        log.warning(
            'Minion %s requested key revoke, but allow_minion_key_revoke '
            'is set to False', load['id']
        )
        return load
    return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
    '''
    Wrapper for running functions executed with AES encryption

    :param function func: The name of the handler method to run
    :param dict load: The payload handed to the handler

    :return: A 2-tuple of (result, transport instructions) where the
        instructions tell the transport layer how to send the result back
    '''
    # Don't honor private functions
    if func.startswith('__'):
        # TODO: return some error? Seems odd to return {}
        return {}, {'fun': 'send'}
    # Run the func
    if hasattr(self, func):
        try:
            start = time.time()
            ret = getattr(self, func)(load)
            log.trace(
                'Master function call %s took %s seconds',
                func, time.time() - start
            )
        except Exception:
            # Never let a handler exception propagate to the transport;
            # log it and hand back an empty result instead.
            ret = ''
            log.error('Error in function %s:\n', func, exc_info=True)
    else:
        log.error(
            'Received function %s which is unavailable on the master, '
            'returning False', func
        )
        return False, {'fun': 'send'}
    # Don't encrypt the return value for the _return func
    # (we don't care about the return value, so why encrypt it?)
    if func == '_return':
        return ret, {'fun': 'send'}
    if func == '_pillar' and 'id' in load:
        if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
            # Authorized to return old pillar proto
            return ret, {'fun': 'send'}
        # Route pillar data back privately, targeted at the requesting
        # minion only.
        return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
    # Encrypt the return
    return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs._mine_flush
|
python
|
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
|
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1423-L1433
| null |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
    '''
    Create a new AESFuncs

    :param dict opts: The salt options

    :rtype: AESFuncs
    :returns: Instance for handling AES operations
    '''
    self.opts = opts
    # Master-side event bus handle; publish-only (listen=False).
    self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
    self.serial = salt.payload.Serial(opts)
    self.ckminions = salt.utils.minions.CkMinions(opts)
    # Make a client
    self.local = salt.client.get_local_client(self.opts['conf_file'])
    # Create the master minion to access the external job cache
    self.mminion = salt.minion.MasterMinion(
        self.opts,
        states=False,
        rend=False,
        ignore_config_errors=True
    )
    self.__setup_fileserver()
    self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
    '''
    Set the local file objects from the file server interface
    '''
    # Avoid circular import
    import salt.fileserver
    self.fs_ = salt.fileserver.Fileserver(self.opts)
    # Re-export the fileserver entry points as instance attributes so
    # minion requests can reach them through run_func's getattr dispatch.
    self._serve_file = self.fs_.serve_file
    self._file_find = self.fs_._find_file
    self._file_hash = self.fs_.file_hash
    self._file_hash_and_stat = self.fs_.file_hash_and_stat
    self._file_list = self.fs_.file_list
    self._file_list_emptydirs = self.fs_.file_list_emptydirs
    self._dir_list = self.fs_.dir_list
    self._symlink_list = self.fs_.symlink_list
    self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
    '''
    Take a minion id and a string signed with the minion private key
    The string needs to verify as 'salt' with the minion public key

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key

    :rtype: bool
    :return: Boolean indicating whether or not the token can be verified.
    '''
    if not salt.utils.verify.valid_id(self.opts, id_):
        return False
    pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
    try:
        pub = salt.crypt.get_rsa_pub_key(pub_path)
    except (IOError, OSError):
        log.warning(
            'Salt minion claiming to be %s attempted to communicate with '
            'master, but key could not be read and verification was denied.',
            id_
        )
        return False
    except (ValueError, IndexError, TypeError) as err:
        log.error('Unable to load public key "%s": %s', pub_path, err)
        # BUGFIX: without this return, execution fell through to the
        # decrypt attempt below with 'pub' unbound, raising
        # UnboundLocalError instead of cleanly rejecting the minion.
        return False
    try:
        if salt.crypt.public_decrypt(pub, token) == b'salt':
            return True
    except ValueError as err:
        log.error('Unable to decrypt token: %s', err)
    log.error(
        'Salt minion claiming to be %s has attempted to communicate with '
        'the master and could not be verified', id_
    )
    return False
def verify_minion(self, id_, token):
    '''
    Public wrapper around the private minion-token verification routine:
    the token must decrypt to 'salt' with the minion's public key.

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key

    :rtype: bool
    :return: Boolean indicating whether or not the token can be verified.
    '''
    return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
    '''
    Verify that the passed information authorized a minion to execute

    :param dict clear_load: A publication load from a minion

    :rtype: bool
    :return: A boolean indicating if the minion is allowed to publish the command in the load
    '''
    # Verify that the load is valid
    if 'peer' not in self.opts:
        return False
    if not isinstance(self.opts['peer'], dict):
        return False
    if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
        return False
    # If the command will make a recursive publish don't run
    if clear_load['fun'].startswith('publish.'):
        return False
    # Check the permissions for this minion
    if not self.__verify_minion(clear_load['id'], clear_load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning(
            'Minion id %s is not who it says it is and is attempting '
            'to issue a peer command', clear_load['id']
        )
        return False
    clear_load.pop('tok')
    # Collect every allowed-function pattern whose minion-id regex
    # matches the requesting minion.
    perms = []
    for match in self.opts['peer']:
        if re.match(match, clear_load['id']):
            # This is the list of funcs/modules!
            if isinstance(self.opts['peer'][match], list):
                perms.extend(self.opts['peer'][match])
    if ',' in clear_load['fun']:
        # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
        # Compound command: split the function names and the matching
        # argument strings into parallel lists.
        clear_load['fun'] = clear_load['fun'].split(',')
        arg_ = []
        for arg in clear_load['arg']:
            arg_.append(arg.split())
        clear_load['arg'] = arg_
    # finally, check the auth of the load
    return self.ckminions.auth_check(
        perms,
        clear_load['fun'],
        clear_load['arg'],
        clear_load['tgt'],
        clear_load.get('tgt_type', 'glob'),
        publish_validate=True)
def __verify_load(self, load, verify_keys):
    '''
    A utility function to perform common verification steps.

    :param dict load: A payload received from a minion
    :param list verify_keys: A list of strings that should be present in
        a given load

    :rtype: bool
    :rtype: dict
    :return: The original load (except for the token) if the load can be
        verified. False if the load is invalid.
    '''
    if any(key not in load for key in verify_keys):
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from %s for \'%s\', missing \'%s\'',
            load['id'], inspect_stack()['co_name'], 'tok'
        )
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning('Minion id %s is not who it says it is!', load['id'])
        return False
    # 'tok' is guaranteed present here (checked above), so pop it
    # unconditionally; the token must not leak to downstream consumers.
    load.pop('tok')
    return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
    '''
    Allow the minion to delete a specific function from its own mine

    :param dict load: A payload received from a minion

    :rtype: bool
    :return: Boolean indicating whether or not the given function was deleted from the mine
    '''
    verified = self.__verify_load(load, ('id', 'fun', 'tok'))
    if verified is False:
        return {}
    return self.masterapi._mine_delete(verified)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# have been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'Drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
if the requesting minion also initiated the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.

.. code-block:: bash

    peer:
      foo.example.com:
        - test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict
:return: If the load is invalid, it may be returned. No key operation is performed.
:rtype: bool
:return: True if key was revoked, False if not
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
'''
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call %s took %s seconds',
func, time.time() - start
)
except Exception:
ret = ''
log.error('Error in function %s:\n', func, exc_info=True)
else:
log.error(
'Received function %s which is unavailable on the master, '
'returning False', func
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs._file_recv
|
python
|
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# have been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
|
Allows minions to send files to the master, files are sent to the
master file cache
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1435-L1514
| null |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
'Salt minion claiming to be %s attempted to communicate with '
'master, but key could not be read and verification was denied.',
id_
)
return False
except (ValueError, IndexError, TypeError) as err:
log.error('Unable to load public key "%s": %s', pub_path, err)
try:
if salt.crypt.public_decrypt(pub, token) == b'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: %s', err)
log.error(
'Salt minion claiming to be %s has attempted to communicate with '
'the master and could not be verified', id_
)
return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'Drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
if the requesting minion also initialted the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion pay
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
.. code-block:: bash
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict
:return: If the load is invalid, it may be returned. No key operation is performed.
:rtype: bool
:return: True if key was revoked, False if not
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
'''
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call %s took %s seconds',
func, time.time() - start
)
except Exception:
ret = ''
log.error('Error in function %s:\n', func, exc_info=True)
else:
log.error(
'Received function %s which is unavailable on the master, '
'returning False', func
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs._pillar
|
python
|
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
|
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1516-L1549
| null |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
'Salt minion claiming to be %s attempted to communicate with '
'master, but key could not be read and verification was denied.',
id_
)
return False
except (ValueError, IndexError, TypeError) as err:
log.error('Unable to load public key "%s": %s', pub_path, err)
try:
if salt.crypt.public_decrypt(pub, token) == b'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: %s', err)
log.error(
'Salt minion claiming to be %s has attempted to communicate with '
'the master and could not be verified', id_
)
return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# have been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
    '''
    Act on specific events from minions.

    Logs ``_salt_error`` events and, for any carried sub-event that names
    a set of minions for a jid, records those minions against the job.

    :param dict load: The minion payload (expects at least ``id``; may
        carry ``tag``, ``data`` and an ``events`` list)
    '''
    id_ = load['id']
    if load.get('tag', '') == '_salt_error':
        log.error(
            'Received minion error from [%s]: %s',
            id_, load['data']['message']
        )
    for event in load.get('events', []):
        event_data = event.get('data', {})
        if 'minions' in event_data:
            jid = event_data.get('jid')
            if not jid:
                # Minion list without a job id is meaningless; skip it
                continue
            minions = event_data['minions']
            try:
                # Record which minions are expected to return for this jid
                # (syndic_id marks returns routed through this syndic)
                salt.utils.job.store_minions(
                    self.opts,
                    jid,
                    minions,
                    mminion=self.mminion,
                    syndic_id=id_)
            except (KeyError, salt.exceptions.SaltCacheError) as exc:
                # Best-effort: a cache failure must not kill event handling
                log.error(
                    'Could not add minion(s) %s for job %s: %s',
                    minions, jid, exc
                )
def _return(self, load):
    '''
    Handle the return data sent from the minions.
    Takes the return, verifies it and fires it on the master event bus.
    Typically, this event is consumed by the Salt CLI waiting on the other
    end of the event bus but could be heard by any listener on the bus.
    :param dict load: The minion payload
    '''
    # Enforce signed returns when the master is configured to require them
    if self.opts['require_minion_sign_messages'] and 'sig' not in load:
        log.critical(
            '_return: Master is requiring minions to sign their '
            'messages, but there is no signature in this payload from '
            '%s.', load['id']
        )
        return False

    if 'sig' in load:
        log.trace('Verifying signed event publish from minion')
        # The signature covers the serialized load WITHOUT the 'sig' key,
        # so it must be popped before serializing and restored afterwards
        sig = load.pop('sig')
        this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
        serialized_load = salt.serializers.msgpack.serialize(load)
        if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
            log.info('Failed to verify event signature from minion %s.', load['id'])
            if self.opts['drop_messages_signature_fail']:
                # Hard-fail mode: discard unverifiable returns entirely
                log.critical(
                    'Drop_messages_signature_fail is enabled, dropping '
                    'message from %s', load['id']
                )
                return False
            else:
                log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
        load['sig'] = sig

    try:
        salt.utils.job.store_job(
            self.opts, load, event=self.event, mminion=self.mminion)
    except salt.exceptions.SaltCacheError:
        # Storage failure is logged but deliberately not re-raised
        log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
    '''
    Receive a syndic minion return and format it to look like returns from
    individual minions.

    :param dict load: The syndic payload; either a single aggregated load
        carrying a ``load`` list, or (older syndics) one bare return.
    '''
    loads = load.get('load')
    if not isinstance(loads, list):
        loads = [load]  # support old syndics not aggregating returns
    for load in loads:
        # Verify the load
        if any(key not in load for key in ('return', 'jid', 'id')):
            continue
        # if we have a load, save it
        if load.get('load'):
            fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
            self.mminion.returners[fstr](load['jid'], load['load'])

        # Register the syndic: an empty marker file keyed by syndic id
        syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
        if not os.path.exists(syndic_cache_path):
            path_name = os.path.split(syndic_cache_path)[0]
            if not os.path.exists(path_name):
                os.makedirs(path_name)
            with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
                wfh.write('')

        # Fan the aggregated 'return' dict out into one per-minion return,
        # then feed each through the normal single-minion return path
        for key, item in six.iteritems(load['return']):
            ret = {'jid': load['jid'],
                   'id': key}
            ret.update(item)
            if 'master_id' in load:
                ret['master_id'] = load['master_id']
            if 'fun' in load:
                ret['fun'] = load['fun']
            if 'arg' in load:
                ret['fun_args'] = load['arg']
            if 'out' in load:
                ret['out'] = load['out']
            if 'sig' in load:
                ret['sig'] = load['sig']
            self._return(ret)
def minion_runner(self, clear_load):
    '''
    Execute a runner from a minion, return the runner's function data
    :param dict clear_load: The minion payload
    :rtype: dict
    :return: The runner function data
    '''
    # Guard clause: an unverifiable payload yields nothing
    if self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok')) is False:
        return {}
    return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
    '''
    Request the return data from a specific jid, only allowed
    if the requesting minion also initiated the execution.
    :param dict load: The minion payload
    :rtype: dict
    :return: Return data corresponding to a given JID
    '''
    load = self.__verify_load(load, ('jid', 'id', 'tok'))
    if load is False:
        return {}
    # Check that this minion can access this data: the publish_auth cache
    # stores, per jid, the id of the minion that initiated the publish
    auth_cache = os.path.join(
        self.opts['cachedir'],
        'publish_auth')
    if not os.path.isdir(auth_cache):
        os.makedirs(auth_cache)
    jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
    # NOTE(review): if no publish_auth entry exists for this jid the open
    # raises IOError rather than returning {} — confirm callers expect that
    with salt.utils.files.fopen(jid_fn, 'r') as fp_:
        if not load['id'] == fp_.read():
            return {}
    # Grab the latest and return
    return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.
    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions
    The config will look like this:
    .. code-block:: bash
        peer:
          .*:
            - .*
    This configuration will enable all minions to execute all commands:
    .. code-block:: bash
        peer:
          foo.example.com:
            - test.*
    The above configuration will only allow the minion foo.example.com to
    execute commands from the test module.
    :param dict clear_load: The minion pay
    '''
    # Only forward to the master API once the peer config authorizes it
    if self.__verify_minion_publish(clear_load):
        return self.masterapi.minion_pub(clear_load)
    return {}
def minion_publish(self, clear_load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.
    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions
    The config will look like this:
    .. code-block:: bash
        peer:
          .*:
            - .*
    This configuration will enable all minions to execute all commands.
    peer:
    .. code-block:: bash
        foo.example.com:
            - test.*
    The above configuration will only allow the minion foo.example.com to
    execute commands from the test module.
    :param dict clear_load: The minion payload
    '''
    # Only forward to the master API once the peer config authorizes it
    if self.__verify_minion_publish(clear_load):
        return self.masterapi.minion_publish(clear_load)
    return {}
def revoke_auth(self, load):
    '''
    Allow a minion to request revocation of its own key
    :param dict load: The minion payload
    :rtype: dict
    :return: If the load is invalid, it may be returned. No key operation is performed.
    :rtype: bool
    :return: True if key was revoked, False if not
    '''
    load = self.__verify_load(load, ('id', 'tok'))
    # Bail out before touching load['id']: __verify_load returns False for
    # an unverifiable payload, and False is not subscriptable. (Previously
    # this check ran after the warning below, so a failed verification with
    # revocation disabled raised TypeError on load['id'].)
    if load is False:
        return load
    if not self.opts.get('allow_minion_key_revoke', False):
        log.warning(
            'Minion %s requested key revoke, but allow_minion_key_revoke '
            'is set to False', load['id']
        )
        return load
    return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
    '''
    Wrapper for running functions executed with AES encryption

    Dispatches ``func`` by name to a method on this instance and returns
    the result plus routing instructions for the reply.

    :param function func: The function to run
    :return: The result of the master function that was called
    '''
    # Don't honor private functions
    if func.startswith('__'):
        # TODO: return some error? Seems odd to return {}
        return {}, {'fun': 'send'}
    # Run the func
    if hasattr(self, func):
        try:
            start = time.time()
            ret = getattr(self, func)(load)
            log.trace(
                'Master function call %s took %s seconds',
                func, time.time() - start
            )
        except Exception:
            # Boundary handler: any failure in a dispatched function is
            # logged and flattened to an empty-string return
            ret = ''
            log.error('Error in function %s:\n', func, exc_info=True)
    else:
        log.error(
            'Received function %s which is unavailable on the master, '
            'returning False', func
        )
        return False, {'fun': 'send'}
    # Don't encrypt the return value for the _return func
    # (we don't care about the return value, so why encrypt it?)
    if func == '_return':
        return ret, {'fun': 'send'}
    if func == '_pillar' and 'id' in load:
        if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
            # Authorized to return old pillar proto
            return ret, {'fun': 'send'}
        # Pillar data is private to the requesting minion
        return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
    # Encrypt the return
    return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs._minion_event
|
python
|
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
|
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1551-L1564
| null |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
    '''
    Create a new AESFuncs
    :param dict opts: The salt options
    :rtype: AESFuncs
    :returns: Instance for handling AES operations
    '''
    self.opts = opts
    # Master event bus handle (publish-only; listen=False)
    self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
    self.serial = salt.payload.Serial(opts)
    self.ckminions = salt.utils.minions.CkMinions(opts)
    # Make a client
    self.local = salt.client.get_local_client(self.opts['conf_file'])
    # Create the master minion to access the external job cache
    self.mminion = salt.minion.MasterMinion(
        self.opts,
        states=False,
        rend=False,
        ignore_config_errors=True
    )
    # Bind fileserver callables as attributes on this instance
    self.__setup_fileserver()
    self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
    '''
    Set the local file objects from the file server interface.

    Each ``self._<name>`` attribute becomes a direct alias for the
    corresponding Fileserver method so `run_func` can dispatch to them.
    '''
    # Avoid circular import
    import salt.fileserver
    self.fs_ = salt.fileserver.Fileserver(self.opts)
    self._serve_file = self.fs_.serve_file
    self._file_find = self.fs_._find_file
    self._file_hash = self.fs_.file_hash
    self._file_hash_and_stat = self.fs_.file_hash_and_stat
    self._file_list = self.fs_.file_list
    self._file_list_emptydirs = self.fs_.file_list_emptydirs
    self._dir_list = self.fs_.dir_list
    self._symlink_list = self.fs_.symlink_list
    self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
    '''
    Take a minion id and a string signed with the minion private key
    The string needs to verify as 'salt' with the minion public key

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key
    :rtype: bool
    :return: Boolean indicating whether or not the token can be verified.
    '''
    if not salt.utils.verify.valid_id(self.opts, id_):
        return False
    pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)

    try:
        pub = salt.crypt.get_rsa_pub_key(pub_path)
    except (IOError, OSError):
        log.warning(
            'Salt minion claiming to be %s attempted to communicate with '
            'master, but key could not be read and verification was denied.',
            id_
        )
        return False
    except (ValueError, IndexError, TypeError) as err:
        log.error('Unable to load public key "%s": %s', pub_path, err)
        # Without a parseable key the token cannot be checked. (Previously
        # this branch fell through, raising NameError on the unbound 'pub'
        # in the decrypt below.)
        return False
    try:
        if salt.crypt.public_decrypt(pub, token) == b'salt':
            return True
    except ValueError as err:
        log.error('Unable to decrypt token: %s', err)

    log.error(
        'Salt minion claiming to be %s has attempted to communicate with '
        'the master and could not be verified', id_
    )
    return False
def verify_minion(self, id_, token):
    '''
    Take a minion id and a string signed with the minion private key
    The string needs to verify as 'salt' with the minion public key

    Public, dispatchable wrapper around the private ``__verify_minion``.

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key
    :rtype: bool
    :return: Boolean indicating whether or not the token can be verified.
    '''
    return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
    '''
    Verify that the passed information authorized a minion to execute

    :param dict clear_load: A publication load from a minion
    :rtype: bool
    :return: A boolean indicating if the minion is allowed to publish the command in the load
    '''
    # Verify that the load is valid
    if 'peer' not in self.opts:
        return False
    if not isinstance(self.opts['peer'], dict):
        return False
    if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
        return False
    # If the command will make a recursive publish don't run
    if clear_load['fun'].startswith('publish.'):
        return False
    # Check the permissions for this minion
    if not self.__verify_minion(clear_load['id'], clear_load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning(
            'Minion id %s is not who it says it is and is attempting '
            'to issue a peer command', clear_load['id']
        )
        return False
    clear_load.pop('tok')
    # Collect every allowed function pattern whose peer-config regex
    # matches this minion id
    perms = []
    for match in self.opts['peer']:
        if re.match(match, clear_load['id']):
            # This is the list of funcs/modules!
            if isinstance(self.opts['peer'][match], list):
                perms.extend(self.opts['peer'][match])
    if ',' in clear_load['fun']:
        # Compound call: split the comma-joined function names and the
        # whitespace-joined argument strings into parallel lists, e.g.
        # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
        clear_load['fun'] = clear_load['fun'].split(',')
        arg_ = []
        for arg in clear_load['arg']:
            arg_.append(arg.split())
        clear_load['arg'] = arg_

    # finally, check the auth of the load
    return self.ckminions.auth_check(
        perms,
        clear_load['fun'],
        clear_load['arg'],
        clear_load['tgt'],
        clear_load.get('tgt_type', 'glob'),
        publish_validate=True)
def __verify_load(self, load, verify_keys):
    '''
    A utility function to perform common verification steps.

    :param dict load: A payload received from a minion
    :param list verify_keys: A list of strings that should be present in a
        given load
    :rtype: dict
    :rtype: bool
    :return: The original load (except for the token) if the load can be
        verified. False if the load is invalid.
    '''
    if any(key not in load for key in verify_keys):
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from %s for \'%s\', missing \'%s\'',
            load['id'], inspect_stack()['co_name'], 'tok'
        )
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning('Minion id %s is not who it says it is!', load['id'])
        return False

    # 'tok' is guaranteed present here (checked above), so pop it
    # unconditionally; the token must never leak past verification
    load.pop('tok')

    return load
def _master_tops(self, load):
    '''
    Return the results from an external node classifier if one is
    specified

    :param dict load: A payload received from a minion
    :return: The results from an external node classifier
    '''
    load = self.__verify_load(load, ('id', 'tok'))
    if load is False:
        return {}
    # Minion identity already verified above, so the master API may skip it
    return self.masterapi._master_tops(load, skip_verify=True)

# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
    '''
    Gathers the data from the specified minions' mine
    :param dict load: A payload received from a minion
    :rtype: dict
    :return: Mine data from the specified minions
    '''
    verified = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
    if verified is False:
        return {}
    # Identity already checked, so the master API may skip verification
    return self.masterapi._mine_get(verified, skip_verify=True)
def _mine(self, load):
    '''
    Store the mine data
    :param dict load: A payload received from a minion
    :rtype: bool
    :return: True if the data has been stored in the mine
    '''
    verified = self.__verify_load(load, ('id', 'data', 'tok'))
    if verified is False:
        return {}
    # Identity already checked, so the master API may skip verification
    return self.masterapi._mine(verified, skip_verify=True)
def _mine_delete(self, load):
    '''
    Allow the minion to delete a specific function from its own mine
    :param dict load: A payload received from a minion
    :rtype: bool
    :return: Boolean indicating whether or not the given function was deleted from the mine
    '''
    verified = self.__verify_load(load, ('id', 'fun', 'tok'))
    if verified is False:
        return {}
    return self.masterapi._mine_delete(verified)
def _mine_flush(self, load):
    '''
    Allow the minion to delete all of its own mine contents
    :param dict load: A payload received from a minion
    '''
    verified = self.__verify_load(load, ('id', 'tok'))
    if verified is False:
        return {}
    # Identity already checked, so the master API may skip verification
    return self.masterapi._mine_flush(verified, skip_verify=True)
def _file_recv(self, load):
    '''
    Allows minions to send files to the master, files are sent to the
    master file cache

    :param dict load: expects ``id``, ``path`` (list of path components),
        ``loc`` (write offset) and ``data`` (chunk bytes)
    :rtype: bool
    :return: True when the chunk was written, False on any validation failure
    '''
    if any(key not in load for key in ('id', 'path', 'loc')):
        return False
    # NOTE(review): 'data' is accessed below but not checked here — a load
    # missing 'data' would raise KeyError; confirm upstream guarantees it
    if not isinstance(load['path'], list):
        return False
    if not self.opts['file_recv']:
        return False
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    if 'loc' in load and load['loc'] < 0:
        log.error('Invalid file pointer: load[loc] < 0')
        return False
    if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
        # NOTE(review): message says the file "will be truncated" but the
        # chunk is actually rejected outright (return False)
        log.error(
            'file_recv_max_size limit of %d MB exceeded! %s will be '
            'truncated. To successfully push this file, adjust '
            'file_recv_max_size to an integer (in MB) large enough to '
            'accommodate it.', self.opts['file_recv_max_size'], load['path']
        )
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from %s for \'%s\', missing \'%s\'',
            load['id'], inspect_stack()['co_name'], 'tok'
        )
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning('Minion id %s is not who it says it is!', load['id'])
        return {}
    load.pop('tok')

    # Join path
    sep_path = os.sep.join(load['path'])

    # Path normalization should have been done by the sending
    # minion but we can't guarantee it. Re-do it here.
    normpath = os.path.normpath(sep_path)

    # Ensure that this safety check is done after the path
    # have been normalized.
    if os.path.isabs(normpath) or '../' in load['path']:
        # Can overwrite master files!!
        return False

    cpath = os.path.join(
        self.opts['cachedir'],
        'minions',
        load['id'],
        'files',
        normpath)
    # One last safety check here: the final path must stay inside cachedir
    # (defends against traversal that survived the checks above)
    if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
        log.warning(
            'Attempt to write received file outside of master cache '
            'directory! Requested path: %s. Access denied.', cpath
        )
        return False
    cdir = os.path.dirname(cpath)
    if not os.path.isdir(cdir):
        try:
            os.makedirs(cdir)
        except os.error:
            # Directory may have been created concurrently; the open below
            # will fail if it truly could not be made
            pass
    # loc == 0 starts a fresh file; any later chunk appends at its offset
    if os.path.isfile(cpath) and load['loc'] != 0:
        mode = 'ab'
    else:
        mode = 'wb'
    with salt.utils.files.fopen(cpath, mode) as fp_:
        if load['loc']:
            fp_.seek(load['loc'])

        fp_.write(salt.utils.stringutils.to_bytes(load['data']))
    return True
def _pillar(self, load):
    '''
    Return the pillar data for the minion

    :param dict load: Minion payload (requires ``id`` and ``grains``)
    :rtype: dict
    :return: The pillar data for the minion (False on an invalid load)
    '''
    if any(key not in load for key in ('id', 'grains')):
        return False
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    # Make sure the grains carry the authoritative minion id
    load['grains']['id'] = load['id']

    pillar = salt.pillar.get_pillar(
        self.opts,
        load['grains'],
        load['id'],
        load.get('saltenv', load.get('env')),
        ext=load.get('ext'),
        pillar_override=load.get('pillar_override', {}),
        pillarenv=load.get('pillarenv'),
        extra_minion_data=load.get('extra_minion_data'))
    data = pillar.compile_pillar()
    # Pillar compilation may have refreshed fileserver backends; resync
    self.fs_.update_opts()
    if self.opts.get('minion_data_cache', False):
        # Cache grains+pillar per minion for targeting and the mine
        self.masterapi.cache.store('minions/{0}'.format(load['id']),
                                   'data',
                                   {'grains': load['grains'],
                                    'pillar': data})
        if self.opts.get('minion_data_cache_events') is True:
            self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
    return data
def _handle_minion_event(self, load):
    '''
    Act on specific events from minions.

    Logs ``_salt_error`` events and, for any carried sub-event that names
    a set of minions for a jid, records those minions against the job.

    :param dict load: The minion payload (expects at least ``id``; may
        carry ``tag``, ``data`` and an ``events`` list)
    '''
    id_ = load['id']
    if load.get('tag', '') == '_salt_error':
        log.error(
            'Received minion error from [%s]: %s',
            id_, load['data']['message']
        )
    for event in load.get('events', []):
        event_data = event.get('data', {})
        if 'minions' in event_data:
            jid = event_data.get('jid')
            if not jid:
                # Minion list without a job id is meaningless; skip it
                continue
            minions = event_data['minions']
            try:
                # Record which minions are expected to return for this jid
                salt.utils.job.store_minions(
                    self.opts,
                    jid,
                    minions,
                    mminion=self.mminion,
                    syndic_id=id_)
            except (KeyError, salt.exceptions.SaltCacheError) as exc:
                # Best-effort: a cache failure must not kill event handling
                log.error(
                    'Could not add minion(s) %s for job %s: %s',
                    minions, jid, exc
                )
def _return(self, load):
    '''
    Handle the return data sent from the minions.
    Takes the return, verifies it and fires it on the master event bus.
    Typically, this event is consumed by the Salt CLI waiting on the other
    end of the event bus but could be heard by any listener on the bus.
    :param dict load: The minion payload
    '''
    # Enforce signed returns when the master is configured to require them
    if self.opts['require_minion_sign_messages'] and 'sig' not in load:
        log.critical(
            '_return: Master is requiring minions to sign their '
            'messages, but there is no signature in this payload from '
            '%s.', load['id']
        )
        return False

    if 'sig' in load:
        log.trace('Verifying signed event publish from minion')
        # The signature covers the serialized load WITHOUT the 'sig' key,
        # so it must be popped before serializing and restored afterwards
        sig = load.pop('sig')
        this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
        serialized_load = salt.serializers.msgpack.serialize(load)
        if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
            log.info('Failed to verify event signature from minion %s.', load['id'])
            if self.opts['drop_messages_signature_fail']:
                # Hard-fail mode: discard unverifiable returns entirely
                log.critical(
                    'Drop_messages_signature_fail is enabled, dropping '
                    'message from %s', load['id']
                )
                return False
            else:
                log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
        load['sig'] = sig

    try:
        salt.utils.job.store_job(
            self.opts, load, event=self.event, mminion=self.mminion)
    except salt.exceptions.SaltCacheError:
        # Storage failure is logged but deliberately not re-raised
        log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
    '''
    Receive a syndic minion return and format it to look like returns from
    individual minions.

    :param dict load: The syndic payload; either a single aggregated load
        carrying a ``load`` list, or (older syndics) one bare return.
    '''
    loads = load.get('load')
    if not isinstance(loads, list):
        loads = [load]  # support old syndics not aggregating returns
    for load in loads:
        # Verify the load
        if any(key not in load for key in ('return', 'jid', 'id')):
            continue
        # if we have a load, save it
        if load.get('load'):
            fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
            self.mminion.returners[fstr](load['jid'], load['load'])

        # Register the syndic: an empty marker file keyed by syndic id
        syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
        if not os.path.exists(syndic_cache_path):
            path_name = os.path.split(syndic_cache_path)[0]
            if not os.path.exists(path_name):
                os.makedirs(path_name)
            with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
                wfh.write('')

        # Fan the aggregated 'return' dict out into one per-minion return,
        # then feed each through the normal single-minion return path
        for key, item in six.iteritems(load['return']):
            ret = {'jid': load['jid'],
                   'id': key}
            ret.update(item)
            if 'master_id' in load:
                ret['master_id'] = load['master_id']
            if 'fun' in load:
                ret['fun'] = load['fun']
            if 'arg' in load:
                ret['fun_args'] = load['arg']
            if 'out' in load:
                ret['out'] = load['out']
            if 'sig' in load:
                ret['sig'] = load['sig']
            self._return(ret)
def minion_runner(self, clear_load):
    '''
    Execute a runner from a minion, return the runner's function data
    :param dict clear_load: The minion payload
    :rtype: dict
    :return: The runner function data
    '''
    # Guard clause: an unverifiable payload yields nothing
    if self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok')) is False:
        return {}
    return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
    '''
    Request the return data from a specific jid, only allowed
    if the requesting minion also initiated the execution.
    :param dict load: The minion payload
    :rtype: dict
    :return: Return data corresponding to a given JID
    '''
    load = self.__verify_load(load, ('jid', 'id', 'tok'))
    if load is False:
        return {}
    # Check that this minion can access this data: the publish_auth cache
    # stores, per jid, the id of the minion that initiated the publish
    auth_cache = os.path.join(
        self.opts['cachedir'],
        'publish_auth')
    if not os.path.isdir(auth_cache):
        os.makedirs(auth_cache)
    jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
    # NOTE(review): if no publish_auth entry exists for this jid the open
    # raises IOError rather than returning {} — confirm callers expect that
    with salt.utils.files.fopen(jid_fn, 'r') as fp_:
        if not load['id'] == fp_.read():
            return {}
    # Grab the latest and return
    return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.
    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions
    The config will look like this:
    .. code-block:: bash
        peer:
          .*:
            - .*
    This configuration will enable all minions to execute all commands:
    .. code-block:: bash
        peer:
          foo.example.com:
            - test.*
    The above configuration will only allow the minion foo.example.com to
    execute commands from the test module.
    :param dict clear_load: The minion pay
    '''
    # Only forward to the master API once the peer config authorizes it
    if self.__verify_minion_publish(clear_load):
        return self.masterapi.minion_pub(clear_load)
    return {}
def minion_publish(self, clear_load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.
    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions
    The config will look like this:
    .. code-block:: bash
        peer:
          .*:
            - .*
    This configuration will enable all minions to execute all commands.
    peer:
    .. code-block:: bash
        foo.example.com:
            - test.*
    The above configuration will only allow the minion foo.example.com to
    execute commands from the test module.
    :param dict clear_load: The minion payload
    '''
    # Only forward to the master API once the peer config authorizes it
    if self.__verify_minion_publish(clear_load):
        return self.masterapi.minion_publish(clear_load)
    return {}
def revoke_auth(self, load):
    '''
    Allow a minion to request revocation of its own key
    :param dict load: The minion payload
    :rtype: dict
    :return: If the load is invalid, it may be returned. No key operation is performed.
    :rtype: bool
    :return: True if key was revoked, False if not
    '''
    load = self.__verify_load(load, ('id', 'tok'))
    # Bail out before touching load['id']: __verify_load returns False for
    # an unverifiable payload, and False is not subscriptable. (Previously
    # this check ran after the warning below, so a failed verification with
    # revocation disabled raised TypeError on load['id'].)
    if load is False:
        return load
    if not self.opts.get('allow_minion_key_revoke', False):
        log.warning(
            'Minion %s requested key revoke, but allow_minion_key_revoke '
            'is set to False', load['id']
        )
        return load
    return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
    '''
    Wrapper for running functions executed with AES encryption

    Dispatches ``func`` by name to a method on this instance and returns
    the result plus routing instructions for the reply.

    :param function func: The function to run
    :return: The result of the master function that was called
    '''
    # Don't honor private functions
    if func.startswith('__'):
        # TODO: return some error? Seems odd to return {}
        return {}, {'fun': 'send'}
    # Run the func
    if hasattr(self, func):
        try:
            start = time.time()
            ret = getattr(self, func)(load)
            log.trace(
                'Master function call %s took %s seconds',
                func, time.time() - start
            )
        except Exception:
            # Boundary handler: any failure in a dispatched function is
            # logged and flattened to an empty-string return
            ret = ''
            log.error('Error in function %s:\n', func, exc_info=True)
    else:
        log.error(
            'Received function %s which is unavailable on the master, '
            'returning False', func
        )
        return False, {'fun': 'send'}
    # Don't encrypt the return value for the _return func
    # (we don't care about the return value, so why encrypt it?)
    if func == '_return':
        return ret, {'fun': 'send'}
    if func == '_pillar' and 'id' in load:
        if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
            # Authorized to return old pillar proto
            return ret, {'fun': 'send'}
        # Pillar data is private to the requesting minion
        return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
    # Encrypt the return
    return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs._handle_minion_event
|
python
|
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
|
Act on specific events from minions
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1566-L1595
| null |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
    '''
    Create a new AESFuncs
    :param dict opts: The salt options
    :rtype: AESFuncs
    :returns: Instance for handling AES operations
    '''
    self.opts = opts
    # Master event bus handle (publish-only; listen=False)
    self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
    self.serial = salt.payload.Serial(opts)
    self.ckminions = salt.utils.minions.CkMinions(opts)
    # Make a client
    self.local = salt.client.get_local_client(self.opts['conf_file'])
    # Create the master minion to access the external job cache
    self.mminion = salt.minion.MasterMinion(
        self.opts,
        states=False,
        rend=False,
        ignore_config_errors=True
    )
    # Bind fileserver callables as attributes on this instance
    self.__setup_fileserver()
    self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
    '''
    Set the local file objects from the file server interface.

    Each ``self._<name>`` attribute becomes a direct alias for the
    corresponding Fileserver method so `run_func` can dispatch to them.
    '''
    # Avoid circular import
    import salt.fileserver
    self.fs_ = salt.fileserver.Fileserver(self.opts)
    self._serve_file = self.fs_.serve_file
    self._file_find = self.fs_._find_file
    self._file_hash = self.fs_.file_hash
    self._file_hash_and_stat = self.fs_.file_hash_and_stat
    self._file_list = self.fs_.file_list
    self._file_list_emptydirs = self.fs_.file_list_emptydirs
    self._dir_list = self.fs_.dir_list
    self._symlink_list = self.fs_.symlink_list
    self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
    '''
    Take a minion id and a string signed with the minion private key
    The string needs to verify as 'salt' with the minion public key

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key
    :rtype: bool
    :return: Boolean indicating whether or not the token can be verified.
    '''
    if not salt.utils.verify.valid_id(self.opts, id_):
        return False
    pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)

    try:
        pub = salt.crypt.get_rsa_pub_key(pub_path)
    except (IOError, OSError):
        log.warning(
            'Salt minion claiming to be %s attempted to communicate with '
            'master, but key could not be read and verification was denied.',
            id_
        )
        return False
    except (ValueError, IndexError, TypeError) as err:
        log.error('Unable to load public key "%s": %s', pub_path, err)
        # Without a parseable key the token cannot be checked. (Previously
        # this branch fell through, raising NameError on the unbound 'pub'
        # in the decrypt below.)
        return False
    try:
        if salt.crypt.public_decrypt(pub, token) == b'salt':
            return True
    except ValueError as err:
        log.error('Unable to decrypt token: %s', err)

    log.error(
        'Salt minion claiming to be %s has attempted to communicate with '
        'the master and could not be verified', id_
    )
    return False
def verify_minion(self, id_, token):
    '''
    Take a minion id and a string signed with the minion private key
    The string needs to verify as 'salt' with the minion public key

    Public, dispatchable wrapper around the private ``__verify_minion``.

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key
    :rtype: bool
    :return: Boolean indicating whether or not the token can be verified.
    '''
    return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
    '''
    Verify that the passed information authorized a minion to execute

    :param dict clear_load: A publication load from a minion
    :rtype: bool
    :return: A boolean indicating if the minion is allowed to publish the
        command in the load
    '''
    # Verify that the load is valid
    if 'peer' not in self.opts:
        return False
    if not isinstance(self.opts['peer'], dict):
        return False
    if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
        return False
    # If the command will make a recursive publish don't run
    if clear_load['fun'].startswith('publish.'):
        return False
    # Check the permissions for this minion
    if not self.__verify_minion(clear_load['id'], clear_load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning(
            'Minion id %s is not who it says it is and is attempting '
            'to issue a peer command', clear_load['id']
        )
        return False
    clear_load.pop('tok')
    # Gather every function pattern the 'peer' config grants to minions
    # whose id matches the configured regex
    perms = []
    for match in self.opts['peer']:
        if re.match(match, clear_load['id']):
            # This is the list of funcs/modules!
            if isinstance(self.opts['peer'][match], list):
                perms.extend(self.opts['peer'][match])
    if ',' in clear_load['fun']:
        # Compound call: split the comma-joined function names and split
        # each argument string into a per-function argument list, e.g.
        # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
        clear_load['fun'] = clear_load['fun'].split(',')
        arg_ = []
        for arg in clear_load['arg']:
            arg_.append(arg.split())
        clear_load['arg'] = arg_
    # finally, check the auth of the load
    return self.ckminions.auth_check(
        perms,
        clear_load['fun'],
        clear_load['arg'],
        clear_load['tgt'],
        clear_load.get('tgt_type', 'glob'),
        publish_validate=True)
def __verify_load(self, load, verify_keys):
    '''
    A utility function to perform common verification steps.

    :param dict load: A payload received from a minion
    :param list verify_keys: A list of strings that should be present in a
        given load
    :rtype: bool or dict
    :return: The original load (except for the token) if the load can be
        verified. False if the load is invalid.
    '''
    if any(key not in load for key in verify_keys):
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from %s for \'%s\', missing \'%s\'',
            load['id'], inspect_stack()['co_name'], 'tok'
        )
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning('Minion id %s is not who it says it is!', load['id'])
        return False
    # 'tok' is guaranteed present at this point (checked above), so pop
    # directly instead of re-testing membership.
    load.pop('tok')
    return load
def _master_tops(self, load):
    '''
    Return the results from an external node classifier if one is
    specified

    :param dict load: A payload received from a minion
    :return: The results from an external node classifier
    '''
    verified = self.__verify_load(load, ('id', 'tok'))
    if verified is False:
        return {}
    return self.masterapi._master_tops(verified, skip_verify=True)

# Older minions still request master_tops under its historical name
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
    '''
    Gathers the data from the specified minions' mine

    :param dict load: A payload received from a minion
    :rtype: dict
    :return: Mine data from the specified minions
    '''
    verified = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
    if verified is False:
        return {}
    return self.masterapi._mine_get(verified, skip_verify=True)
def _mine(self, load):
    '''
    Store the mine data

    :param dict load: A payload received from a minion
    :rtype: bool
    :return: True if the data has been stored in the mine
    '''
    verified = self.__verify_load(load, ('id', 'data', 'tok'))
    if verified is False:
        return {}
    return self.masterapi._mine(verified, skip_verify=True)
def _mine_delete(self, load):
    '''
    Allow the minion to delete a specific function from its own mine

    :param dict load: A payload received from a minion
    :rtype: bool
    :return: Boolean indicating whether or not the given function was
        deleted from the mine
    '''
    verified = self.__verify_load(load, ('id', 'fun', 'tok'))
    if verified is False:
        return {}
    # NOTE(review): unlike the other mine handlers this call does not pass
    # skip_verify=True -- confirm whether that is intentional.
    return self.masterapi._mine_delete(verified)
def _mine_flush(self, load):
    '''
    Allow the minion to delete all of its own mine contents

    :param dict load: A payload received from a minion
    '''
    verified = self.__verify_load(load, ('id', 'tok'))
    if verified is False:
        return {}
    return self.masterapi._mine_flush(verified, skip_verify=True)
def _file_recv(self, load):
    '''
    Allows minions to send files to the master, files are sent to the
    master file cache

    :param dict load: The minion payload; must carry 'id', 'path', 'loc'
        and 'data' keys
    :rtype: bool
    :return: True once the chunk is written to the master file cache;
        False (or {}) when the payload is rejected
    '''
    # Bug fix: 'data' is dereferenced below for the size check, so it must
    # be part of the required-key validation instead of raising KeyError
    # on a malformed payload.
    if any(key not in load for key in ('id', 'path', 'loc', 'data')):
        return False
    if not isinstance(load['path'], list):
        return False
    if not self.opts['file_recv']:
        return False
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    if 'loc' in load and load['loc'] < 0:
        log.error('Invalid file pointer: load[loc] < 0')
        return False
    # Reject chunks that would push the file past the configured cap
    if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
        log.error(
            'file_recv_max_size limit of %d MB exceeded! %s will be '
            'truncated. To successfully push this file, adjust '
            'file_recv_max_size to an integer (in MB) large enough to '
            'accommodate it.', self.opts['file_recv_max_size'], load['path']
        )
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from %s for \'%s\', missing \'%s\'',
            load['id'], inspect_stack()['co_name'], 'tok'
        )
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning('Minion id %s is not who it says it is!', load['id'])
        return {}
    load.pop('tok')
    # Join path
    sep_path = os.sep.join(load['path'])
    # Path normalization should have been done by the sending
    # minion but we can't guarantee it. Re-do it here.
    normpath = os.path.normpath(sep_path)
    # Ensure that this safety check is done after the path
    # have been normalized.
    if os.path.isabs(normpath) or '../' in load['path']:
        # Can overwrite master files!!
        return False
    cpath = os.path.join(
        self.opts['cachedir'],
        'minions',
        load['id'],
        'files',
        normpath)
    # One last safety check here
    if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
        log.warning(
            'Attempt to write received file outside of master cache '
            'directory! Requested path: %s. Access denied.', cpath
        )
        return False
    cdir = os.path.dirname(cpath)
    if not os.path.isdir(cdir):
        try:
            os.makedirs(cdir)
        except os.error:
            # Another process may have created it concurrently
            pass
    # Append when this is a continuation chunk of an in-progress upload
    if os.path.isfile(cpath) and load['loc'] != 0:
        mode = 'ab'
    else:
        mode = 'wb'
    with salt.utils.files.fopen(cpath, mode) as fp_:
        if load['loc']:
            fp_.seek(load['loc'])
        fp_.write(salt.utils.stringutils.to_bytes(load['data']))
    return True
def _pillar(self, load):
    '''
    Return the pillar data for the minion

    :param dict load: Minion payload
    :rtype: dict
    :return: The pillar data for the minion
    '''
    if any(key not in load for key in ('id', 'grains')):
        return False
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    load['grains']['id'] = load['id']
    # 'saltenv' is preferred; 'env' is the legacy key name
    pillar = salt.pillar.get_pillar(
        self.opts,
        load['grains'],
        load['id'],
        load.get('saltenv', load.get('env')),
        ext=load.get('ext'),
        pillar_override=load.get('pillar_override', {}),
        pillarenv=load.get('pillarenv'),
        extra_minion_data=load.get('extra_minion_data'))
    data = pillar.compile_pillar()
    # Refresh the fileserver's view of the master opts after compilation
    self.fs_.update_opts()
    if self.opts.get('minion_data_cache', False):
        # Cache grains and pillar so other master processes can reuse them
        self.masterapi.cache.store('minions/{0}'.format(load['id']),
                                   'data',
                                   {'grains': load['grains'],
                                    'pillar': data})
        if self.opts.get('minion_data_cache_events') is True:
            self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
    return data
def _minion_event(self, load):
    '''
    Receive an event from the minion and fire it on the master event
    interface

    :param dict load: The minion payload
    '''
    load = self.__verify_load(load, ('id', 'tok'))
    if load is False:
        return {}
    # Route to master event bus
    self.masterapi._minion_event(load)
    # Process locally
    self._handle_minion_event(load)
def _return(self, load):
    '''
    Handle the return data sent from the minions.

    Takes the return, verifies it and fires it on the master event bus.
    Typically, this event is consumed by the Salt CLI waiting on the other
    end of the event bus but could be heard by any listener on the bus.

    :param dict load: The minion payload
    '''
    if self.opts['require_minion_sign_messages'] and 'sig' not in load:
        log.critical(
            '_return: Master is requiring minions to sign their '
            'messages, but there is no signature in this payload from '
            '%s.', load['id']
        )
        return False
    if 'sig' in load:
        log.trace('Verifying signed event publish from minion')
        sig = load.pop('sig')
        this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
        # The signature covers the serialized load *without* the 'sig' key
        serialized_load = salt.serializers.msgpack.serialize(load)
        if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
            log.info('Failed to verify event signature from minion %s.', load['id'])
            if self.opts['drop_messages_signature_fail']:
                log.critical(
                    'Drop_messages_signature_fail is enabled, dropping '
                    'message from %s', load['id']
                )
                return False
            else:
                log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
        # Restore the signature so downstream consumers see the full load
        load['sig'] = sig
    try:
        salt.utils.job.store_job(
            self.opts, load, event=self.event, mminion=self.mminion)
    except salt.exceptions.SaltCacheError:
        log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
    '''
    Receive a syndic minion return and format it to look like returns from
    individual minions.

    :param dict load: The minion payload
    '''
    loads = load.get('load')
    if not isinstance(loads, list):
        loads = [load]  # support old syndics not aggregating returns
    for load in loads:
        # Verify the load
        if any(key not in load for key in ('return', 'jid', 'id')):
            continue
        # if we have a load, save it
        if load.get('load'):
            fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
            self.mminion.returners[fstr](load['jid'], load['load'])
        # Register the syndic by touching a marker file under the cachedir
        syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
        if not os.path.exists(syndic_cache_path):
            path_name = os.path.split(syndic_cache_path)[0]
            if not os.path.exists(path_name):
                os.makedirs(path_name)
            with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
                wfh.write('')
        # Format individual return loads: one synthetic return per minion
        # id aggregated in the syndic's 'return' mapping
        for key, item in six.iteritems(load['return']):
            ret = {'jid': load['jid'],
                   'id': key}
            ret.update(item)
            if 'master_id' in load:
                ret['master_id'] = load['master_id']
            if 'fun' in load:
                ret['fun'] = load['fun']
            if 'arg' in load:
                ret['fun_args'] = load['arg']
            if 'out' in load:
                ret['out'] = load['out']
            if 'sig' in load:
                ret['sig'] = load['sig']
            self._return(ret)
def minion_runner(self, clear_load):
    '''
    Execute a runner from a minion, return the runner's function data

    :param dict clear_load: The minion payload
    :rtype: dict
    :return: The runner function data
    '''
    if self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok')) is False:
        return {}
    # __verify_load popped 'tok' from clear_load in place
    return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
    '''
    Request the return data from a specific jid, only allowed
    if the requesting minion also initiated the execution.

    :param dict load: The minion payload
    :rtype: dict
    :return: Return data corresponding to a given JID
    '''
    load = self.__verify_load(load, ('jid', 'id', 'tok'))
    if load is False:
        return {}
    # Check that this minion can access this data
    auth_cache = os.path.join(
        self.opts['cachedir'],
        'publish_auth')
    if not os.path.isdir(auth_cache):
        os.makedirs(auth_cache)
    # NOTE(review): if the jid file does not exist this raises IOError --
    # confirm callers expect a publish_auth entry to always be present.
    jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
    with salt.utils.files.fopen(jid_fn, 'r') as fp_:
        if not load['id'] == fp_.read():
            return {}
    # Grab the latest and return
    return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.

    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions

    The config will look like this:

    .. code-block:: bash

        peer:
          .*:
            - .*

    This configuration will enable all minions to execute all commands:

    .. code-block:: bash

        peer:
          foo.example.com:
            - test.*

    The above configuration will only allow the minion foo.example.com to
    execute commands from the test module.

    :param dict clear_load: The minion payload
    '''
    if not self.__verify_minion_publish(clear_load):
        return {}
    else:
        return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.

    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions

    The config will look like this:

    .. code-block:: bash

        peer:
          .*:
            - .*

    This configuration will enable all minions to execute all commands:

    .. code-block:: bash

        peer:
          foo.example.com:
            - test.*

    The above configuration will only allow the minion foo.example.com to
    execute commands from the test module.

    :param dict clear_load: The minion payload
    '''
    if not self.__verify_minion_publish(clear_load):
        return {}
    else:
        return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
    '''
    Allow a minion to request revocation of its own key

    :param dict load: The minion payload
    :rtype: bool or dict
    :return: False when the load cannot be verified; the verified load
        when revocation is disallowed by configuration; otherwise the
        result of the master API revocation call
    '''
    load = self.__verify_load(load, ('id', 'tok'))
    # Bug fix: check for failed verification *before* touching load['id']
    # below -- indexing False raised TypeError here.
    if load is False:
        return load
    if not self.opts.get('allow_minion_key_revoke', False):
        log.warning(
            'Minion %s requested key revoke, but allow_minion_key_revoke '
            'is set to False', load['id']
        )
        return load
    return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
    '''
    Wrapper for running functions executed with AES encryption

    :param str func: The name of the AESFuncs method to run
    :param dict load: The payload passed to the function
    :return: Tuple of (result, routing instructions for sending the
        result back to the requester)
    '''
    # Don't honor private functions
    if func.startswith('__'):
        # TODO: return some error? Seems odd to return {}
        return {}, {'fun': 'send'}
    # Run the func
    if hasattr(self, func):
        try:
            start = time.time()
            ret = getattr(self, func)(load)
            log.trace(
                'Master function call %s took %s seconds',
                func, time.time() - start
            )
        except Exception:
            # Swallow handler errors so one bad request can't kill the
            # worker; the error is logged with a traceback
            ret = ''
            log.error('Error in function %s:\n', func, exc_info=True)
    else:
        log.error(
            'Received function %s which is unavailable on the master, '
            'returning False', func
        )
        return False, {'fun': 'send'}
    # Don't encrypt the return value for the _return func
    # (we don't care about the return value, so why encrypt it?)
    if func == '_return':
        return ret, {'fun': 'send'}
    if func == '_pillar' and 'id' in load:
        if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
            # Authorized to return old pillar proto
            return ret, {'fun': 'send'}
        # Pillar is sensitive: route it through the minion-private channel
        return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
    # Encrypt the return
    return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs._return
|
python
|
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'Drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
|
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1597-L1636
| null |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
    '''
    Create a new AESFuncs

    :param dict opts: The salt options
    :rtype: AESFuncs
    :returns: Instance for handling AES operations
    '''
    self.opts = opts
    # Master event bus handle, used to fire returns and cache refreshes
    self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
    self.serial = salt.payload.Serial(opts)
    self.ckminions = salt.utils.minions.CkMinions(opts)
    # Make a client
    self.local = salt.client.get_local_client(self.opts['conf_file'])
    # Create the master minion to access the external job cache
    self.mminion = salt.minion.MasterMinion(
        self.opts,
        states=False,
        rend=False,
        ignore_config_errors=True
    )
    self.__setup_fileserver()
    self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
    '''
    Bind the fileserver interface methods onto this instance so the AES
    payload handlers can call them directly.
    '''
    # Avoid circular import
    import salt.fileserver
    fs_ = salt.fileserver.Fileserver(self.opts)
    self.fs_ = fs_
    self._serve_file = fs_.serve_file
    self._file_find = fs_._find_file
    self._file_hash = fs_.file_hash
    self._file_hash_and_stat = fs_.file_hash_and_stat
    self._file_list = fs_.file_list
    self._file_list_emptydirs = fs_.file_list_emptydirs
    self._dir_list = fs_.dir_list
    self._symlink_list = fs_.symlink_list
    self._file_envs = fs_.file_envs
def __verify_minion(self, id_, token):
    '''
    Take a minion id and a string signed with the minion private key
    The string needs to verify as 'salt' with the minion public key

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key
    :rtype: bool
    :return: Boolean indicating whether or not the token can be verified.
    '''
    if not salt.utils.verify.valid_id(self.opts, id_):
        return False
    pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
    try:
        pub = salt.crypt.get_rsa_pub_key(pub_path)
    except (IOError, OSError):
        log.warning(
            'Salt minion claiming to be %s attempted to communicate with '
            'master, but key could not be read and verification was denied.',
            id_
        )
        return False
    except (ValueError, IndexError, TypeError) as err:
        log.error('Unable to load public key "%s": %s', pub_path, err)
        # Bug fix: previously this branch fell through with 'pub' unbound,
        # raising NameError below. An unloadable key is a verification
        # failure.
        return False
    try:
        # The token must decrypt to the literal bytes b'salt'
        if salt.crypt.public_decrypt(pub, token) == b'salt':
            return True
    except ValueError as err:
        log.error('Unable to decrypt token: %s', err)
    log.error(
        'Salt minion claiming to be %s has attempted to communicate with '
        'the master and could not be verified', id_
    )
    return False
def verify_minion(self, id_, token):
    '''
    Public wrapper around the private minion-verification routine.

    Take a minion id and a string signed with the minion private key;
    the string needs to verify as 'salt' with the minion public key.

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key
    :rtype: bool
    :return: Boolean indicating whether or not the token can be verified.
    '''
    verified = self.__verify_minion(id_, token)
    return verified
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
    '''
    A utility function to perform common verification steps.

    :param dict load: A payload received from a minion
    :param list verify_keys: A list of strings that should be present in a
        given load
    :rtype: bool or dict
    :return: The original load (except for the token) if the load can be
        verified. False if the load is invalid.
    '''
    if any(key not in load for key in verify_keys):
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from %s for \'%s\', missing \'%s\'',
            load['id'], inspect_stack()['co_name'], 'tok'
        )
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning('Minion id %s is not who it says it is!', load['id'])
        return False
    # 'tok' is guaranteed present at this point (checked above), so pop
    # directly instead of re-testing membership.
    load.pop('tok')
    return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# have been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
    '''
    Act on specific events from minions

    :param dict load: The minion event payload
    '''
    id_ = load['id']
    if load.get('tag', '') == '_salt_error':
        log.error(
            'Received minion error from [%s]: %s',
            id_, load['data']['message']
        )
    for event in load.get('events', []):
        event_data = event.get('data', {})
        if 'minions' in event_data:
            jid = event_data.get('jid')
            if not jid:
                continue
            minions = event_data['minions']
            try:
                # Record which minions are expected to return for this jid;
                # syndic_id ties the minions to the syndic that saw them
                salt.utils.job.store_minions(
                    self.opts,
                    jid,
                    minions,
                    mminion=self.mminion,
                    syndic_id=id_)
            except (KeyError, salt.exceptions.SaltCacheError) as exc:
                log.error(
                    'Could not add minion(s) %s for job %s: %s',
                    minions, jid, exc
                )
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
if the requesting minion also initiated the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method enforces minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method enforces minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict
:return: If the load is invalid, it may be returned. No key operation is performed.
:rtype: bool
:return: True if key was revoked, False if not
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
'''
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call %s took %s seconds',
func, time.time() - start
)
except Exception:
ret = ''
log.error('Error in function %s:\n', func, exc_info=True)
else:
log.error(
'Received function %s which is unavailable on the master, '
'returning False', func
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs._syndic_return
|
python
|
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
|
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1638-L1681
| null |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
'Salt minion claiming to be %s attempted to communicate with '
'master, but key could not be read and verification was denied.',
id_
)
return False
except (ValueError, IndexError, TypeError) as err:
log.error('Unable to load public key "%s": %s', pub_path, err)
try:
if salt.crypt.public_decrypt(pub, token) == b'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: %s', err)
log.error(
'Salt minion claiming to be %s has attempted to communicate with '
'the master and could not be verified', id_
)
return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorizes a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# have been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'Drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
if the requesting minion also initiated the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method enforces minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict
:return: If the load is invalid, it may be returned. No key operation is performed.
:rtype: bool
:return: True if key was revoked, False if not
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
'''
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call %s took %s seconds',
func, time.time() - start
)
except Exception:
ret = ''
log.error('Error in function %s:\n', func, exc_info=True)
else:
log.error(
'Received function %s which is unavailable on the master, '
'returning False', func
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs.minion_runner
|
python
|
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
|
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1683-L1696
|
[
"def __verify_load(self, load, verify_keys):\n '''\n A utility function to perform common verification steps.\n\n :param dict load: A payload received from a minion\n :param list verify_keys: A list of strings that should be present in a\n given load\n\n :rtype: bool\n :rtype: dict\n :return: The original load (except for the token) if the load can be\n verified. False if the load is invalid.\n '''\n if any(key not in load for key in verify_keys):\n return False\n if 'tok' not in load:\n log.error(\n 'Received incomplete call from %s for \\'%s\\', missing \\'%s\\'',\n load['id'], inspect_stack()['co_name'], 'tok'\n )\n return False\n if not self.__verify_minion(load['id'], load['tok']):\n # The minion is not who it says it is!\n # We don't want to listen to it!\n log.warning('Minion id %s is not who it says it is!', load['id'])\n return False\n\n if 'tok' in load:\n load.pop('tok')\n\n return load\n"
] |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
'Salt minion claiming to be %s attempted to communicate with '
'master, but key could not be read and verification was denied.',
id_
)
return False
except (ValueError, IndexError, TypeError) as err:
log.error('Unable to load public key "%s": %s', pub_path, err)
try:
if salt.crypt.public_decrypt(pub, token) == b'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: %s', err)
log.error(
'Salt minion claiming to be %s has attempted to communicate with '
'the master and could not be verified', id_
)
return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorizes a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
    '''
    Gathers the data from the specified minions' mine

    :param dict load: A payload received from a minion
    :rtype: dict
    :return: Mine data from the specified minions (empty dict when the
        payload cannot be verified)
    '''
    # Authenticate before touching the mine; a failed check yields an
    # empty result rather than an error.
    verified = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
    if verified is False:
        return {}
    return self.masterapi._mine_get(verified, skip_verify=True)
def _mine(self, load):
    '''
    Store the mine data

    :param dict load: A payload received from a minion
    :rtype: bool
    :return: True if the data has been stored in the mine (empty dict
        when the payload cannot be verified)
    '''
    verified = self.__verify_load(load, ('id', 'data', 'tok'))
    if verified is False:
        return {}
    # Already verified above, so the master API can skip its own check.
    return self.masterapi._mine(verified, skip_verify=True)
def _mine_delete(self, load):
    '''
    Allow the minion to delete a specific function from its own mine

    :param dict load: A payload received from a minion
    :rtype: bool
    :return: Boolean indicating whether or not the given function was
        deleted from the mine (empty dict when verification fails)
    '''
    verified = self.__verify_load(load, ('id', 'fun', 'tok'))
    if verified is False:
        return {}
    return self.masterapi._mine_delete(verified)
def _mine_flush(self, load):
    '''
    Allow the minion to delete all of its own mine contents

    :param dict load: A payload received from a minion
    '''
    verified = self.__verify_load(load, ('id', 'tok'))
    if verified is False:
        return {}
    return self.masterapi._mine_flush(verified, skip_verify=True)
def _file_recv(self, load):
    '''
    Allows minions to send files to the master, files are sent to the
    master file cache

    :param dict load: Minion payload; must carry 'id', 'path', 'loc'
        and 'data' keys
    :rtype: bool
    :return: True when the chunk was written to the cache; False on any
        validation failure (empty dict on a failed identity check)
    '''
    # 'data' is read below when enforcing file_recv_max_size, so it
    # must be required up front or that size check would raise KeyError
    # instead of cleanly rejecting the payload.
    if any(key not in load for key in ('id', 'path', 'loc', 'data')):
        return False
    if not isinstance(load['path'], list):
        return False
    if not self.opts['file_recv']:
        return False
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    if 'loc' in load and load['loc'] < 0:
        log.error('Invalid file pointer: load[loc] < 0')
        return False

    # Enforce the configured cap; file_recv_max_size is expressed in MB
    # (0x100000 bytes per MB).
    if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
        log.error(
            'file_recv_max_size limit of %d MB exceeded! %s will be '
            'truncated. To successfully push this file, adjust '
            'file_recv_max_size to an integer (in MB) large enough to '
            'accommodate it.', self.opts['file_recv_max_size'], load['path']
        )
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from %s for \'%s\', missing \'%s\'',
            load['id'], inspect_stack()['co_name'], 'tok'
        )
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning('Minion id %s is not who it says it is!', load['id'])
        return {}
    load.pop('tok')

    # Join path
    sep_path = os.sep.join(load['path'])

    # Path normalization should have been done by the sending
    # minion but we can't guarantee it. Re-do it here.
    normpath = os.path.normpath(sep_path)

    # Ensure that this safety check is done after the path
    # have been normalized.
    if os.path.isabs(normpath) or '../' in load['path']:
        # Can overwrite master files!!
        return False

    cpath = os.path.join(
        self.opts['cachedir'],
        'minions',
        load['id'],
        'files',
        normpath)
    # One last safety check here: the final path must stay inside the
    # master's cache directory.
    if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
        log.warning(
            'Attempt to write received file outside of master cache '
            'directory! Requested path: %s. Access denied.', cpath
        )
        return False
    cdir = os.path.dirname(cpath)
    if not os.path.isdir(cdir):
        try:
            os.makedirs(cdir)
        except os.error:
            # Another writer may have created it concurrently; the
            # fopen below will fail if the directory is truly missing.
            pass
    # Append for continuation chunks (loc != 0), truncate otherwise.
    if os.path.isfile(cpath) and load['loc'] != 0:
        mode = 'ab'
    else:
        mode = 'wb'
    with salt.utils.files.fopen(cpath, mode) as fp_:
        if load['loc']:
            fp_.seek(load['loc'])

        fp_.write(salt.utils.stringutils.to_bytes(load['data']))
    return True
def _pillar(self, load):
    '''
    Return the pillar data for the minion

    :param dict load: Minion payload; requires 'id' and 'grains', and
        may carry 'saltenv'/'env', 'ext', 'pillar_override',
        'pillarenv' and 'extra_minion_data'
    :rtype: dict
    :return: The compiled pillar data for the minion (False when the
        payload is incomplete or the minion id is invalid)
    '''
    if any(key not in load for key in ('id', 'grains')):
        return False
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    # Make sure the minion's own id is present in the grains used for
    # pillar rendering.
    load['grains']['id'] = load['id']

    pillar = salt.pillar.get_pillar(
        self.opts,
        load['grains'],
        load['id'],
        # 'env' is the legacy key name, kept for older minions
        load.get('saltenv', load.get('env')),
        ext=load.get('ext'),
        pillar_override=load.get('pillar_override', {}),
        pillarenv=load.get('pillarenv'),
        extra_minion_data=load.get('extra_minion_data'))
    data = pillar.compile_pillar()
    # Refresh the fileserver's view of the opts after pillar compilation
    self.fs_.update_opts()
    if self.opts.get('minion_data_cache', False):
        # Persist grains + pillar in the minion data cache ...
        self.masterapi.cache.store('minions/{0}'.format(load['id']),
                                   'data',
                                   {'grains': load['grains'],
                                    'pillar': data})
        # ... and optionally announce the refresh on the event bus
        if self.opts.get('minion_data_cache_events') is True:
            self.event.fire_event({'Minion data cache refresh': load['id']},
                                  tagify(load['id'], 'refresh', 'minion'))
    return data
def _minion_event(self, load):
    '''
    Receive an event from the minion and fire it on the master event
    interface

    :param dict load: The minion payload
    '''
    verified = self.__verify_load(load, ('id', 'tok'))
    if verified is False:
        return {}
    # Route to master event bus
    self.masterapi._minion_event(verified)
    # Process locally
    self._handle_minion_event(verified)
def _handle_minion_event(self, load):
    '''
    Act on specific events from minions

    :param dict load: The minion payload; must carry 'id', may carry
        'tag', 'data' and a list of 'events'
    '''
    id_ = load['id']
    # Surface minion-side errors in the master log
    if load.get('tag', '') == '_salt_error':
        log.error(
            'Received minion error from [%s]: %s',
            id_, load['data']['message']
        )
    for event in load.get('events', []):
        event_data = event.get('data', {})
        if 'minions' in event_data:
            # Events carrying a 'minions' list are stored with this
            # sender recorded as the syndic id
            jid = event_data.get('jid')
            if not jid:
                continue
            minions = event_data['minions']
            try:
                salt.utils.job.store_minions(
                    self.opts,
                    jid,
                    minions,
                    mminion=self.mminion,
                    syndic_id=id_)
            except (KeyError, salt.exceptions.SaltCacheError) as exc:
                # Best effort: log and keep processing remaining events
                log.error(
                    'Could not add minion(s) %s for job %s: %s',
                    minions, jid, exc
                )
def _return(self, load):
    '''
    Handle the return data sent from the minions.

    Takes the return, verifies it and fires it on the master event bus.
    Typically, this event is consumed by the Salt CLI waiting on the other
    end of the event bus but could be heard by any listener on the bus.

    :param dict load: The minion payload
    '''
    # If the master demands signed returns, refuse unsigned payloads
    if self.opts['require_minion_sign_messages'] and 'sig' not in load:
        log.critical(
            '_return: Master is requiring minions to sign their '
            'messages, but there is no signature in this payload from '
            '%s.', load['id']
        )
        return False

    if 'sig' in load:
        log.trace('Verifying signed event publish from minion')
        # The signature covers the serialized load *without* the 'sig'
        # key, so pop it before serializing
        sig = load.pop('sig')
        this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
        serialized_load = salt.serializers.msgpack.serialize(load)
        if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
            log.info('Failed to verify event signature from minion %s.', load['id'])
            if self.opts['drop_messages_signature_fail']:
                # Hard-fail mode: discard unverifiable returns
                log.critical(
                    'Drop_messages_signature_fail is enabled, dropping '
                    'message from %s', load['id']
                )
                return False
            else:
                log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
        # Restore the signature so downstream consumers still see it
        load['sig'] = sig

    try:
        salt.utils.job.store_job(
            self.opts, load, event=self.event, mminion=self.mminion)
    except salt.exceptions.SaltCacheError:
        log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
    '''
    Receive a syndic minion return and format it to look like returns from
    individual minions.

    :param dict load: The minion payload (either a single return or an
        aggregate carrying a list under 'load')
    '''
    loads = load.get('load')
    if not isinstance(loads, list):
        loads = [load]  # support old syndics not aggregating returns
    for load in loads:
        # Verify the load
        if any(key not in load for key in ('return', 'jid', 'id')):
            continue
        # if we have a load, save it
        if load.get('load'):
            fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
            self.mminion.returners[fstr](load['jid'], load['load'])

        # Register the syndic: an empty marker file per syndic id
        # records which syndics have reported in
        syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
        if not os.path.exists(syndic_cache_path):
            path_name = os.path.split(syndic_cache_path)[0]
            if not os.path.exists(path_name):
                os.makedirs(path_name)
            with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
                wfh.write('')

        # Format individual return loads
        for key, item in six.iteritems(load['return']):
            ret = {'jid': load['jid'],
                   'id': key}
            ret.update(item)
            # Propagate optional metadata from the aggregate load onto
            # each individual return
            if 'master_id' in load:
                ret['master_id'] = load['master_id']
            if 'fun' in load:
                ret['fun'] = load['fun']
            if 'arg' in load:
                ret['fun_args'] = load['arg']
            if 'out' in load:
                ret['out'] = load['out']
            if 'sig' in load:
                ret['sig'] = load['sig']

            self._return(ret)
def pub_ret(self, load):
    '''
    Request the return data from a specific jid, only allowed
    if the requesting minion also initiated the execution.

    :param dict load: The minion payload
    :rtype: dict
    :return: Return data corresponding to a given JID (empty dict when
        verification fails or the minion did not publish the jid)
    '''
    load = self.__verify_load(load, ('jid', 'id', 'tok'))
    if load is False:
        return {}
    # Check that this minion can access this data
    auth_cache = os.path.join(
        self.opts['cachedir'],
        'publish_auth')
    if not os.path.isdir(auth_cache):
        os.makedirs(auth_cache)
    jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
    try:
        with salt.utils.files.fopen(jid_fn, 'r') as fp_:
            if not load['id'] == fp_.read():
                return {}
    except (IOError, OSError):
        # No auth record exists for this jid: the requesting minion
        # never published it, so deny access instead of tracing back
        # on the missing file.
        return {}
    # Grab the latest and return
    return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.

    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions

    The config will look like this:

    .. code-block:: bash

        peer:
          .*:
            - .*

    This configuration will enable all minions to execute all commands:

    .. code-block:: bash

        peer:
          foo.example.com:
            - test.*

    The above configuration will only allow the minion foo.example.com to
    execute commands from the test module.

    :param dict clear_load: The minion pay
    '''
    # Only forward to the master API once the peer-publish permission
    # check has passed.
    if self.__verify_minion_publish(clear_load):
        return self.masterapi.minion_pub(clear_load)
    return {}
def minion_publish(self, clear_load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.

    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions

    The config will look like this:

    .. code-block:: bash

        peer:
          .*:
            - .*

    This configuration will enable all minions to execute all commands.
    peer:

    .. code-block:: bash

        foo.example.com:
            - test.*

    The above configuration will only allow the minion foo.example.com to
    execute commands from the test module.

    :param dict clear_load: The minion payload
    '''
    # Only forward to the master API once the peer-publish permission
    # check has passed.
    if self.__verify_minion_publish(clear_load):
        return self.masterapi.minion_publish(clear_load)
    return {}
def revoke_auth(self, load):
    '''
    Allow a minion to request revocation of its own key

    :param dict load: The minion payload

    :rtype: dict
    :return: If the load is invalid, it may be returned. No key operation is performed.

    :rtype: bool
    :return: True if key was revoked, False if not
    '''
    load = self.__verify_load(load, ('id', 'tok'))
    # Bail out before touching load['id'] below: __verify_load returns
    # False for an unverifiable payload, and False['id'] would raise
    # TypeError.
    if load is False:
        return load
    if not self.opts.get('allow_minion_key_revoke', False):
        log.warning(
            'Minion %s requested key revoke, but allow_minion_key_revoke '
            'is set to False', load['id']
        )
        return load
    return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
    '''
    Wrapper for running functions executed with AES encryption

    :param function func: The name of the method on this object to run
    :param dict load: The payload to pass to the method
    :return: 2-tuple of (result, routing dict); the routing dict tells
        the caller how to send the result back ('send' or
        'send_private')
    '''
    # Don't honor private functions
    if func.startswith('__'):
        # TODO: return some error? Seems odd to return {}
        return {}, {'fun': 'send'}
    # Run the func
    if hasattr(self, func):
        try:
            start = time.time()
            ret = getattr(self, func)(load)
            log.trace(
                'Master function call %s took %s seconds',
                func, time.time() - start
            )
        except Exception:
            # Any failure in the handler yields an empty-string result
            # rather than propagating to the transport layer
            ret = ''
            log.error('Error in function %s:\n', func, exc_info=True)
    else:
        log.error(
            'Received function %s which is unavailable on the master, '
            'returning False', func
        )
        return False, {'fun': 'send'}
    # Don't encrypt the return value for the _return func
    # (we don't care about the return value, so why encrypt it?)
    if func == '_return':
        return ret, {'fun': 'send'}
    if func == '_pillar' and 'id' in load:
        if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
            # Authorized to return old pillar proto
            return ret, {'fun': 'send'}
        # Pillar data is sent back encrypted specifically for the
        # requesting minion
        return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
    # Encrypt the return
    return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs.pub_ret
|
python
|
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
if the requesting minion also initialted the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
|
Request the return data from a specific jid, only allowed
if the requesting minion also initiated the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1698-L1722
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def __verify_load(self, load, verify_keys):\n '''\n A utility function to perform common verification steps.\n\n :param dict load: A payload received from a minion\n :param list verify_keys: A list of strings that should be present in a\n given load\n\n :rtype: bool\n :rtype: dict\n :return: The original load (except for the token) if the load can be\n verified. False if the load is invalid.\n '''\n if any(key not in load for key in verify_keys):\n return False\n if 'tok' not in load:\n log.error(\n 'Received incomplete call from %s for \\'%s\\', missing \\'%s\\'',\n load['id'], inspect_stack()['co_name'], 'tok'\n )\n return False\n if not self.__verify_minion(load['id'], load['tok']):\n # The minion is not who it says it is!\n # We don't want to listen to it!\n log.warning('Minion id %s is not who it says it is!', load['id'])\n return False\n\n if 'tok' in load:\n load.pop('tok')\n\n return load\n"
] |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
    '''
    Create a new AESFuncs

    :param dict opts: The salt options

    :rtype: AESFuncs
    :returns: Instance for handling AES operations
    '''
    self.opts = opts
    # Master event bus handle (publish only; listen=False)
    self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
    self.serial = salt.payload.Serial(opts)
    self.ckminions = salt.utils.minions.CkMinions(opts)
    # Make a client
    self.local = salt.client.get_local_client(self.opts['conf_file'])
    # Create the master minion to access the external job cache
    self.mminion = salt.minion.MasterMinion(
        self.opts,
        states=False,
        rend=False,
        ignore_config_errors=True
    )
    self.__setup_fileserver()
    self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
    '''
    Set the local file objects from the file server interface
    '''
    # Avoid circular import
    import salt.fileserver
    self.fs_ = salt.fileserver.Fileserver(self.opts)
    # Mirror the fileserver entry points as private attributes on this
    # object so run_func can dispatch to them by name.
    for attr, target in (
            ('_serve_file', 'serve_file'),
            ('_file_find', '_find_file'),
            ('_file_hash', 'file_hash'),
            ('_file_hash_and_stat', 'file_hash_and_stat'),
            ('_file_list', 'file_list'),
            ('_file_list_emptydirs', 'file_list_emptydirs'),
            ('_dir_list', 'dir_list'),
            ('_symlink_list', 'symlink_list'),
            ('_file_envs', 'file_envs')):
        setattr(self, attr, getattr(self.fs_, target))
def __verify_minion(self, id_, token):
    '''
    Take a minion id and a string signed with the minion private key
    The string needs to verify as 'salt' with the minion public key

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key

    :rtype: bool
    :return: Boolean indicating whether or not the token can be verified.
    '''
    if not salt.utils.verify.valid_id(self.opts, id_):
        return False
    pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)

    try:
        pub = salt.crypt.get_rsa_pub_key(pub_path)
    except (IOError, OSError):
        log.warning(
            'Salt minion claiming to be %s attempted to communicate with '
            'master, but key could not be read and verification was denied.',
            id_
        )
        return False
    except (ValueError, IndexError, TypeError) as err:
        log.error('Unable to load public key "%s": %s', pub_path, err)
        # Bail out here: without a loaded key the decrypt below would
        # hit an unbound 'pub' and raise NameError instead of failing
        # the verification cleanly.
        return False
    try:
        if salt.crypt.public_decrypt(pub, token) == b'salt':
            return True
    except ValueError as err:
        log.error('Unable to decrypt token: %s', err)

    log.error(
        'Salt minion claiming to be %s has attempted to communicate with '
        'the master and could not be verified', id_
    )
    return False
def verify_minion(self, id_, token):
    '''
    Take a minion id and a string signed with the minion private key
    The string needs to verify as 'salt' with the minion public key

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key

    :rtype: bool
    :return: Boolean indicating whether or not the token can be verified.
    '''
    # Public entry point; the actual work happens in the private helper.
    return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
    '''
    Verify that the passed information authorized a minion to execute

    :param dict clear_load: A publication load from a minion

    :rtype: bool
    :return: A boolean indicating if the minion is allowed to publish the command in the load
    '''
    # Verify that the load is valid
    if 'peer' not in self.opts:
        return False
    if not isinstance(self.opts['peer'], dict):
        return False
    if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
        return False
    # If the command will make a recursive publish don't run
    if clear_load['fun'].startswith('publish.'):
        return False
    # Check the permissions for this minion
    if not self.__verify_minion(clear_load['id'], clear_load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning(
            'Minion id %s is not who it says it is and is attempting '
            'to issue a peer command', clear_load['id']
        )
        return False
    clear_load.pop('tok')
    # Collect the functions this minion id is permitted to publish,
    # from every 'peer' pattern that matches it.
    perms = []
    for match in self.opts['peer']:
        if re.match(match, clear_load['id']):
            # This is the list of funcs/modules!
            if isinstance(self.opts['peer'][match], list):
                perms.extend(self.opts['peer'][match])
    if ',' in clear_load['fun']:
        # Compound call: split the comma-joined function names and the
        # whitespace-joined argument groups back into lists, e.g.
        # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
        clear_load['fun'] = clear_load['fun'].split(',')
        arg_ = []
        for arg in clear_load['arg']:
            arg_.append(arg.split())
        clear_load['arg'] = arg_

    # finally, check the auth of the load
    return self.ckminions.auth_check(
        perms,
        clear_load['fun'],
        clear_load['arg'],
        clear_load['tgt'],
        clear_load.get('tgt_type', 'glob'),
        publish_validate=True)
def __verify_load(self, load, verify_keys):
    '''
    A utility function to perform common verification steps.

    :param dict load: A payload received from a minion
    :param list verify_keys: A list of strings that should be present in a
        given load

    :rtype: bool
    :rtype: dict
    :return: The original load (except for the token) if the load can be
        verified. False if the load is invalid.
    '''
    # Reject payloads missing any of the required keys.
    missing = [key for key in verify_keys if key not in load]
    if missing:
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from %s for \'%s\', missing \'%s\'',
            load['id'], inspect_stack()['co_name'], 'tok'
        )
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning('Minion id %s is not who it says it is!', load['id'])
        return False

    # Strip the token before handing the payload back.
    load.pop('tok', None)
    return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# have been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'Drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
def minion_runner(self, clear_load):
    '''
    Execute a runner from a minion, return the runner's function data

    :param dict clear_load: The minion payload

    :rtype: dict
    :return: The runner function data
    '''
    # __verify_load pops 'tok' from clear_load in place; only the
    # pass/fail result matters here.
    if self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok')) is False:
        return {}
    return self.masterapi.minion_runner(clear_load)
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion pay
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
.. code-block:: bash
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict
:return: If the load is invalid, it may be returned. No key operation is performed.
:rtype: bool
:return: True if key was revoked, False if not
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
'''
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call %s took %s seconds',
func, time.time() - start
)
except Exception:
ret = ''
log.error('Error in function %s:\n', func, exc_info=True)
else:
log.error(
'Received function %s which is unavailable on the master, '
'returning False', func
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs.minion_pub
|
python
|
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion pay
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
|
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion pay
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1724-L1757
|
[
"def __verify_minion_publish(self, clear_load):\n '''\n Verify that the passed information authorized a minion to execute\n\n :param dict clear_load: A publication load from a minion\n\n :rtype: bool\n :return: A boolean indicating if the minion is allowed to publish the command in the load\n '''\n # Verify that the load is valid\n if 'peer' not in self.opts:\n return False\n if not isinstance(self.opts['peer'], dict):\n return False\n if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):\n return False\n # If the command will make a recursive publish don't run\n if clear_load['fun'].startswith('publish.'):\n return False\n # Check the permissions for this minion\n if not self.__verify_minion(clear_load['id'], clear_load['tok']):\n # The minion is not who it says it is!\n # We don't want to listen to it!\n log.warning(\n 'Minion id %s is not who it says it is and is attempting '\n 'to issue a peer command', clear_load['id']\n )\n return False\n clear_load.pop('tok')\n perms = []\n for match in self.opts['peer']:\n if re.match(match, clear_load['id']):\n # This is the list of funcs/modules!\n if isinstance(self.opts['peer'][match], list):\n perms.extend(self.opts['peer'][match])\n if ',' in clear_load['fun']:\n # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]\n clear_load['fun'] = clear_load['fun'].split(',')\n arg_ = []\n for arg in clear_load['arg']:\n arg_.append(arg.split())\n clear_load['arg'] = arg_\n\n # finally, check the auth of the load\n return self.ckminions.auth_check(\n perms,\n clear_load['fun'],\n clear_load['arg'],\n clear_load['tgt'],\n clear_load.get('tgt_type', 'glob'),\n publish_validate=True)\n"
] |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
'Salt minion claiming to be %s attempted to communicate with '
'master, but key could not be read and verification was denied.',
id_
)
return False
except (ValueError, IndexError, TypeError) as err:
log.error('Unable to load public key "%s": %s', pub_path, err)
try:
if salt.crypt.public_decrypt(pub, token) == b'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: %s', err)
log.error(
'Salt minion claiming to be %s has attempted to communicate with '
'the master and could not be verified', id_
)
return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# have been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'Drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
if the requesting minion also initialted the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
.. code-block:: bash
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict
:return: If the load is invalid, it may be returned. No key operation is performed.
:rtype: bool
:return: True if key was revoked, False if not
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
'''
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call %s took %s seconds',
func, time.time() - start
)
except Exception:
ret = ''
log.error('Error in function %s:\n', func, exc_info=True)
else:
log.error(
'Received function %s which is unavailable on the master, '
'returning False', func
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs.minion_publish
|
python
|
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
.. code-block:: bash
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
|
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
.. code-block:: bash
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1759-L1792
|
[
"def __verify_minion_publish(self, clear_load):\n '''\n Verify that the passed information authorized a minion to execute\n\n :param dict clear_load: A publication load from a minion\n\n :rtype: bool\n :return: A boolean indicating if the minion is allowed to publish the command in the load\n '''\n # Verify that the load is valid\n if 'peer' not in self.opts:\n return False\n if not isinstance(self.opts['peer'], dict):\n return False\n if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):\n return False\n # If the command will make a recursive publish don't run\n if clear_load['fun'].startswith('publish.'):\n return False\n # Check the permissions for this minion\n if not self.__verify_minion(clear_load['id'], clear_load['tok']):\n # The minion is not who it says it is!\n # We don't want to listen to it!\n log.warning(\n 'Minion id %s is not who it says it is and is attempting '\n 'to issue a peer command', clear_load['id']\n )\n return False\n clear_load.pop('tok')\n perms = []\n for match in self.opts['peer']:\n if re.match(match, clear_load['id']):\n # This is the list of funcs/modules!\n if isinstance(self.opts['peer'][match], list):\n perms.extend(self.opts['peer'][match])\n if ',' in clear_load['fun']:\n # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]\n clear_load['fun'] = clear_load['fun'].split(',')\n arg_ = []\n for arg in clear_load['arg']:\n arg_.append(arg.split())\n clear_load['arg'] = arg_\n\n # finally, check the auth of the load\n return self.ckminions.auth_check(\n perms,\n clear_load['fun'],\n clear_load['arg'],\n clear_load['tgt'],\n clear_load.get('tgt_type', 'glob'),\n publish_validate=True)\n"
] |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
'Salt minion claiming to be %s attempted to communicate with '
'master, but key could not be read and verification was denied.',
id_
)
return False
except (ValueError, IndexError, TypeError) as err:
log.error('Unable to load public key "%s": %s', pub_path, err)
try:
if salt.crypt.public_decrypt(pub, token) == b'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: %s', err)
log.error(
'Salt minion claiming to be %s has attempted to communicate with '
'the master and could not be verified', id_
)
return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
    '''
    Verify that the passed information authorized a minion to execute

    :param dict clear_load: A publication load from a minion

    :rtype: bool
    :return: A boolean indicating if the minion is allowed to publish the command in the load
    '''
    # Verify that the load is valid
    # Peer publishing is opt-in: without a 'peer' dict in the master
    # config, no minion may publish at all.
    if 'peer' not in self.opts:
        return False
    if not isinstance(self.opts['peer'], dict):
        return False
    # The payload must be complete before any auth work is attempted.
    if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
        return False
    # If the command will make a recursive publish don't run
    if clear_load['fun'].startswith('publish.'):
        return False
    # Check the permissions for this minion
    # Cryptographically verify the sender before consulting the peer rules.
    if not self.__verify_minion(clear_load['id'], clear_load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning(
            'Minion id %s is not who it says it is and is attempting '
            'to issue a peer command', clear_load['id']
        )
        return False
    # Token has served its purpose; strip it from the load.
    clear_load.pop('tok')
    perms = []
    # Collect allowed function patterns from every peer rule whose regex
    # matches this minion's id.
    for match in self.opts['peer']:
        if re.match(match, clear_load['id']):
            # This is the list of funcs/modules!
            if isinstance(self.opts['peer'][match], list):
                perms.extend(self.opts['peer'][match])
    if ',' in clear_load['fun']:
        # Compound call: split the comma-joined function names and regroup
        # each whitespace-joined arg string into a per-function arg list.
        # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
        clear_load['fun'] = clear_load['fun'].split(',')
        arg_ = []
        for arg in clear_load['arg']:
            arg_.append(arg.split())
        clear_load['arg'] = arg_
    # finally, check the auth of the load
    return self.ckminions.auth_check(
        perms,
        clear_load['fun'],
        clear_load['arg'],
        clear_load['tgt'],
        clear_load.get('tgt_type', 'glob'),
        publish_validate=True)
def __verify_load(self, load, verify_keys):
    '''
    A utility function to perform common verification steps.

    :param dict load: A payload received from a minion
    :param list verify_keys: A list of strings that should be present in a
        given load

    :rtype: bool
    :rtype: dict
    :return: The original load (except for the token) if the load can be
        verified. False if the load is invalid.
    '''
    # Every caller-required key must be present.
    for key in verify_keys:
        if key not in load:
            return False
    # On top of that, a signed token is always mandatory.
    if 'tok' not in load:
        log.error(
            'Received incomplete call from %s for \'%s\', missing \'%s\'',
            load['id'], inspect_stack()['co_name'], 'tok'
        )
        return False
    # Prove the sender owns the private key matching its accepted public key.
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning('Minion id %s is not who it says it is!', load['id'])
        return False
    # The token has been consumed by verification; strip it before
    # handing the load back.
    load.pop('tok', None)
    return load
def _master_tops(self, load):
    '''
    Return the results from an external node classifier if one is
    specified

    :param dict load: A payload received from a minion
    :return: The results from an external node classifier
    '''
    # Authenticate the request; an invalid load yields no tops data.
    load = self.__verify_load(load, ('id', 'tok'))
    if load is False:
        return {}
    # Verification already happened above, so masterapi may skip it.
    return self.masterapi._master_tops(load, skip_verify=True)

# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
    '''
    Gathers the data from the specified minions' mine

    :param dict load: A payload received from a minion
    :rtype: dict
    :return: Mine data from the specified minions
    '''
    verified = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
    if verified is False:
        return {}
    # Already authenticated here; tell masterapi to skip re-verification.
    return self.masterapi._mine_get(verified, skip_verify=True)
def _mine(self, load):
    '''
    Store the mine data

    :param dict load: A payload received from a minion
    :rtype: bool
    :return: True if the data has been stored in the mine
    '''
    verified = self.__verify_load(load, ('id', 'data', 'tok'))
    if verified is False:
        return {}
    # Already authenticated here; tell masterapi to skip re-verification.
    return self.masterapi._mine(verified, skip_verify=True)
def _mine_delete(self, load):
    '''
    Allow the minion to delete a specific function from its own mine

    :param dict load: A payload received from a minion
    :rtype: bool
    :return: Boolean indicating whether or not the given function was deleted from the mine
    '''
    verified = self.__verify_load(load, ('id', 'fun', 'tok'))
    if verified is False:
        return {}
    # NOTE(review): unlike the other mine handlers this call does not pass
    # skip_verify; behavior preserved as-is.
    return self.masterapi._mine_delete(verified)
def _mine_flush(self, load):
    '''
    Allow the minion to delete all of its own mine contents

    :param dict load: A payload received from a minion
    '''
    verified = self.__verify_load(load, ('id', 'tok'))
    if verified is False:
        return {}
    # Already authenticated here; tell masterapi to skip re-verification.
    return self.masterapi._mine_flush(verified, skip_verify=True)
def _file_recv(self, load):
    '''
    Allows minions to send files to the master, files are sent to the
    master file cache

    :param dict load: A payload received from a minion; must carry 'id',
        'path', 'loc' and 'data' plus a signed 'tok'
    :rtype: bool
    :return: True if the chunk was written into the master file cache
    '''
    # BUGFIX: 'data' is read by the size check below, so require it up
    # front instead of raising KeyError on a malformed payload.
    if any(key not in load for key in ('id', 'path', 'loc', 'data')):
        return False
    if not isinstance(load['path'], list):
        return False
    # file_recv is opt-in on the master.
    if not self.opts['file_recv']:
        return False
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    if load['loc'] < 0:
        log.error('Invalid file pointer: load[loc] < 0')
        return False

    if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
        log.error(
            'file_recv_max_size limit of %d MB exceeded! %s will be '
            'truncated. To successfully push this file, adjust '
            'file_recv_max_size to an integer (in MB) large enough to '
            'accommodate it.', self.opts['file_recv_max_size'], load['path']
        )
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from %s for \'%s\', missing \'%s\'',
            load['id'], inspect_stack()['co_name'], 'tok'
        )
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning('Minion id %s is not who it says it is!', load['id'])
        return {}
    load.pop('tok')

    # Join path
    sep_path = os.sep.join(load['path'])

    # Path normalization should have been done by the sending
    # minion but we can't guarantee it. Re-do it here.
    normpath = os.path.normpath(sep_path)

    # Ensure that this safety check is done after the path
    # have been normalized.
    if os.path.isabs(normpath) or '../' in load['path']:
        # Can overwrite master files!!
        return False

    cpath = os.path.join(
        self.opts['cachedir'],
        'minions',
        load['id'],
        'files',
        normpath)
    # One last safety check here
    if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
        log.warning(
            'Attempt to write received file outside of master cache '
            'directory! Requested path: %s. Access denied.', cpath
        )
        return False
    cdir = os.path.dirname(cpath)
    if not os.path.isdir(cdir):
        try:
            os.makedirs(cdir)
        except os.error:
            # Another process may have created it concurrently.
            pass
    # Append when continuing a multi-chunk transfer, otherwise start fresh.
    if os.path.isfile(cpath) and load['loc'] != 0:
        mode = 'ab'
    else:
        mode = 'wb'
    with salt.utils.files.fopen(cpath, mode) as fp_:
        if load['loc']:
            fp_.seek(load['loc'])

        fp_.write(salt.utils.stringutils.to_bytes(load['data']))
    return True
def _pillar(self, load):
    '''
    Return the pillar data for the minion

    :param dict load: Minion payload; must carry 'id' and 'grains'
    :rtype: dict
    :return: The pillar data for the minion
    '''
    if any(key not in load for key in ('id', 'grains')):
        return False
    # Refuse ids with unsafe characters before they reach the cache paths.
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    load['grains']['id'] = load['id']

    pillar = salt.pillar.get_pillar(
        self.opts,
        load['grains'],
        load['id'],
        # 'env' is the legacy spelling of 'saltenv'
        load.get('saltenv', load.get('env')),
        ext=load.get('ext'),
        pillar_override=load.get('pillar_override', {}),
        pillarenv=load.get('pillarenv'),
        extra_minion_data=load.get('extra_minion_data'))
    data = pillar.compile_pillar()
    # Pillar compilation can change fileserver state; refresh its options.
    self.fs_.update_opts()
    if self.opts.get('minion_data_cache', False):
        # Cache grains and pillar so other master processes can reuse them.
        self.masterapi.cache.store('minions/{0}'.format(load['id']),
                                   'data',
                                   {'grains': load['grains'],
                                    'pillar': data})
        if self.opts.get('minion_data_cache_events') is True:
            self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
    return data
def _minion_event(self, load):
    '''
    Receive an event from the minion and fire it on the master event
    interface

    :param dict load: The minion payload
    '''
    verified = self.__verify_load(load, ('id', 'tok'))
    if verified is False:
        return {}
    # Forward onto the master event bus, then act on any master-side
    # side effects locally.
    self.masterapi._minion_event(verified)
    self._handle_minion_event(verified)
def _handle_minion_event(self, load):
    '''
    Act on specific events from minions
    '''
    id_ = load['id']
    # Surface minion-reported errors in the master log.
    if load.get('tag', '') == '_salt_error':
        log.error(
            'Received minion error from [%s]: %s',
            id_, load['data']['message']
        )

    for event in load.get('events', []):
        event_data = event.get('data', {})
        if 'minions' in event_data:
            jid = event_data.get('jid')
            if not jid:
                continue
            minions = event_data['minions']
            try:
                # Record which minions were targeted for this job so the
                # job cache knows whose returns to expect.
                salt.utils.job.store_minions(
                    self.opts,
                    jid,
                    minions,
                    mminion=self.mminion,
                    syndic_id=id_)
            except (KeyError, salt.exceptions.SaltCacheError) as exc:
                # Best-effort: a cache failure must not abort event handling.
                log.error(
                    'Could not add minion(s) %s for job %s: %s',
                    minions, jid, exc
                )
def _return(self, load):
    '''
    Handle the return data sent from the minions.
    Takes the return, verifies it and fires it on the master event bus.
    Typically, this event is consumed by the Salt CLI waiting on the other
    end of the event bus but could be heard by any listener on the bus.

    :param dict load: The minion payload
    '''
    # Enforce the signed-messages policy when configured.
    if self.opts['require_minion_sign_messages'] and 'sig' not in load:
        log.critical(
            '_return: Master is requiring minions to sign their '
            'messages, but there is no signature in this payload from '
            '%s.', load['id']
        )
        return False

    if 'sig' in load:
        log.trace('Verifying signed event publish from minion')
        # The signature covers the load WITHOUT the 'sig' key, so pop it
        # before serializing for verification.
        sig = load.pop('sig')
        this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
        serialized_load = salt.serializers.msgpack.serialize(load)
        if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
            log.info('Failed to verify event signature from minion %s.', load['id'])
            if self.opts['drop_messages_signature_fail']:
                log.critical(
                    'Drop_messages_signature_fail is enabled, dropping '
                    'message from %s', load['id']
                )
                return False
            else:
                log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
        # Restore the signature so downstream consumers still see it.
        load['sig'] = sig

    try:
        salt.utils.job.store_job(
            self.opts, load, event=self.event, mminion=self.mminion)
    except salt.exceptions.SaltCacheError:
        log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
    '''
    Receive a syndic minion return and format it to look like returns from
    individual minions.

    :param dict load: The minion payload
    '''
    loads = load.get('load')
    if not isinstance(loads, list):
        loads = [load]  # support old syndics not aggregating returns
    for load in loads:
        # Verify the load
        if any(key not in load for key in ('return', 'jid', 'id')):
            continue
        # if we have a load, save it
        if load.get('load'):
            fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
            self.mminion.returners[fstr](load['jid'], load['load'])

        # Register the syndic
        # An empty marker file per syndic id records that it has checked in.
        syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
        if not os.path.exists(syndic_cache_path):
            path_name = os.path.split(syndic_cache_path)[0]
            if not os.path.exists(path_name):
                os.makedirs(path_name)
            with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
                wfh.write('')

        # Format individual return loads
        # Fan the aggregated {minion_id: data} mapping out into one
        # per-minion return each, carrying over job metadata.
        for key, item in six.iteritems(load['return']):
            ret = {'jid': load['jid'],
                   'id': key}
            ret.update(item)
            if 'master_id' in load:
                ret['master_id'] = load['master_id']
            if 'fun' in load:
                ret['fun'] = load['fun']
            if 'arg' in load:
                ret['fun_args'] = load['arg']
            if 'out' in load:
                ret['out'] = load['out']
            if 'sig' in load:
                ret['sig'] = load['sig']
            self._return(ret)
def minion_runner(self, clear_load):
    '''
    Execute a runner from a minion, return the runner's function data

    :param dict clear_load: The minion payload
    :rtype: dict
    :return: The runner function data
    '''
    # Authenticate before handing the runner request to masterapi.
    if self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok')) is False:
        return {}
    return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
    '''
    Request the return data from a specific jid, only allowed
    if the requesting minion also initiated the execution.

    :param dict load: The minion payload
    :rtype: dict
    :return: Return data corresponding to a given JID, or an empty dict
        when the request is unauthorized or the jid is unknown
    '''
    load = self.__verify_load(load, ('jid', 'id', 'tok'))
    if load is False:
        return {}
    # Check that this minion can access this data
    auth_cache = os.path.join(
        self.opts['cachedir'],
        'publish_auth')
    if not os.path.isdir(auth_cache):
        os.makedirs(auth_cache)
    jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
    # BUGFIX: a request for an unknown jid used to raise IOError because
    # no auth record exists; treat a missing/unreadable record as
    # "not authorized" instead of crashing the handler.
    try:
        with salt.utils.files.fopen(jid_fn, 'r') as fp_:
            if not load['id'] == fp_.read():
                return {}
    except (IOError, OSError):
        return {}
    # Grab the latest and return
    return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.

    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions

    The config will look like this:

    .. code-block:: bash

        peer:
          .*:
            - .*

    This configuration will enable all minions to execute all commands:

    .. code-block:: bash

        peer:
          foo.example.com:
            - test.*

    The above configuration will only allow the minion foo.example.com to
    execute commands from the test module.

    :param dict clear_load: The minion pay
    '''
    # Deny unless the peer-publishing rules explicitly authorize this call.
    if self.__verify_minion_publish(clear_load):
        return self.masterapi.minion_pub(clear_load)
    return {}
def revoke_auth(self, load):
    '''
    Allow a minion to request revocation of its own key

    :param dict load: The minion payload
    :return: False (the failed load) if the load is invalid; the load
        itself (no key operation performed) when allow_minion_key_revoke
        is disabled; otherwise the masterapi result (True if the key was
        revoked, False if not)
    '''
    load = self.__verify_load(load, ('id', 'tok'))
    # BUGFIX: check for failed verification BEFORE touching load['id'];
    # the old ordering raised TypeError ('bool' is not subscriptable)
    # whenever verification failed and revocation was disallowed.
    if load is False:
        return load
    if not self.opts.get('allow_minion_key_revoke', False):
        log.warning(
            'Minion %s requested key revoke, but allow_minion_key_revoke '
            'is set to False', load['id']
        )
        return load
    return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
    '''
    Wrapper for running functions executed with AES encryption

    :param function func: The function to run
    :return: The result of the master function that was called
    '''
    # Don't honor private functions
    if func.startswith('__'):
        # TODO: return some error? Seems odd to return {}
        return {}, {'fun': 'send'}
    # Run the func
    if hasattr(self, func):
        try:
            start = time.time()
            ret = getattr(self, func)(load)
            log.trace(
                'Master function call %s took %s seconds',
                func, time.time() - start
            )
        except Exception:
            # Boundary handler: any failure in a master function is
            # logged with traceback and reported as an empty string.
            ret = ''
            log.error('Error in function %s:\n', func, exc_info=True)
    else:
        log.error(
            'Received function %s which is unavailable on the master, '
            'returning False', func
        )
        return False, {'fun': 'send'}
    # Don't encrypt the return value for the _return func
    # (we don't care about the return value, so why encrypt it?)
    if func == '_return':
        return ret, {'fun': 'send'}
    if func == '_pillar' and 'id' in load:
        if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
            # Authorized to return old pillar proto
            return ret, {'fun': 'send'}
        # Pillar may contain secrets: encrypt it for this minion only.
        return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
    # Encrypt the return
    return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs.revoke_auth
|
python
|
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict
:return: If the load is invalid, it may be returned. No key operation is performed.
:rtype: bool
:return: True if key was revoked, False if not
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
|
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict
:return: If the load is invalid, it may be returned. No key operation is performed.
:rtype: bool
:return: True if key was revoked, False if not
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1794-L1818
|
[
"def __verify_load(self, load, verify_keys):\n '''\n A utility function to perform common verification steps.\n\n :param dict load: A payload received from a minion\n :param list verify_keys: A list of strings that should be present in a\n given load\n\n :rtype: bool\n :rtype: dict\n :return: The original load (except for the token) if the load can be\n verified. False if the load is invalid.\n '''\n if any(key not in load for key in verify_keys):\n return False\n if 'tok' not in load:\n log.error(\n 'Received incomplete call from %s for \\'%s\\', missing \\'%s\\'',\n load['id'], inspect_stack()['co_name'], 'tok'\n )\n return False\n if not self.__verify_minion(load['id'], load['tok']):\n # The minion is not who it says it is!\n # We don't want to listen to it!\n log.warning('Minion id %s is not who it says it is!', load['id'])\n return False\n\n if 'tok' in load:\n load.pop('tok')\n\n return load\n"
] |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
    '''
    Take a minion id and a string signed with the minion private key
    The string needs to verify as 'salt' with the minion public key

    :param str id_: A minion ID
    :param str token: A string signed with the minion private key

    :rtype: bool
    :return: Boolean indicating whether or not the token can be verified.
    '''
    # Reject ids with unsafe characters before using them to build a path.
    if not salt.utils.verify.valid_id(self.opts, id_):
        return False
    pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)

    try:
        pub = salt.crypt.get_rsa_pub_key(pub_path)
    except (IOError, OSError):
        log.warning(
            'Salt minion claiming to be %s attempted to communicate with '
            'master, but key could not be read and verification was denied.',
            id_
        )
        return False
    except (ValueError, IndexError, TypeError) as err:
        log.error('Unable to load public key "%s": %s', pub_path, err)
        # BUGFIX: previously this clause fell through with 'pub' unbound,
        # raising NameError below. A key that cannot be loaded can never
        # verify the token, so fail the verification explicitly.
        return False
    try:
        # The token must decrypt to the literal b'salt' under the minion's
        # accepted public key.
        if salt.crypt.public_decrypt(pub, token) == b'salt':
            return True
    except ValueError as err:
        log.error('Unable to decrypt token: %s', err)

    log.error(
        'Salt minion claiming to be %s has attempted to communicate with '
        'the master and could not be verified', id_
    )
    return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
    '''
    Allows minions to send files to the master, files are sent to the
    master file cache

    :param dict load: A payload received from a minion; must carry 'id',
        'path', 'loc' and 'data' plus a signed 'tok'
    :rtype: bool
    :return: True if the chunk was written into the master file cache
    '''
    # BUGFIX: 'data' is read by the size check below, so require it up
    # front instead of raising KeyError on a malformed payload.
    if any(key not in load for key in ('id', 'path', 'loc', 'data')):
        return False
    if not isinstance(load['path'], list):
        return False
    # file_recv is opt-in on the master.
    if not self.opts['file_recv']:
        return False
    if not salt.utils.verify.valid_id(self.opts, load['id']):
        return False
    if load['loc'] < 0:
        log.error('Invalid file pointer: load[loc] < 0')
        return False

    if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
        log.error(
            'file_recv_max_size limit of %d MB exceeded! %s will be '
            'truncated. To successfully push this file, adjust '
            'file_recv_max_size to an integer (in MB) large enough to '
            'accommodate it.', self.opts['file_recv_max_size'], load['path']
        )
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from %s for \'%s\', missing \'%s\'',
            load['id'], inspect_stack()['co_name'], 'tok'
        )
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warning('Minion id %s is not who it says it is!', load['id'])
        return {}
    load.pop('tok')

    # Join path
    sep_path = os.sep.join(load['path'])

    # Path normalization should have been done by the sending
    # minion but we can't guarantee it. Re-do it here.
    normpath = os.path.normpath(sep_path)

    # Ensure that this safety check is done after the path
    # have been normalized.
    if os.path.isabs(normpath) or '../' in load['path']:
        # Can overwrite master files!!
        return False

    cpath = os.path.join(
        self.opts['cachedir'],
        'minions',
        load['id'],
        'files',
        normpath)
    # One last safety check here
    if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
        log.warning(
            'Attempt to write received file outside of master cache '
            'directory! Requested path: %s. Access denied.', cpath
        )
        return False
    cdir = os.path.dirname(cpath)
    if not os.path.isdir(cdir):
        try:
            os.makedirs(cdir)
        except os.error:
            # Another process may have created it concurrently.
            pass
    # Append when continuing a multi-chunk transfer, otherwise start fresh.
    if os.path.isfile(cpath) and load['loc'] != 0:
        mode = 'ab'
    else:
        mode = 'wb'
    with salt.utils.files.fopen(cpath, mode) as fp_:
        if load['loc']:
            fp_.seek(load['loc'])

        fp_.write(salt.utils.stringutils.to_bytes(load['data']))
    return True
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'Drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
    '''
    Receive a syndic minion return and format it to look like returns from
    individual minions.

    :param dict load: The minion payload (or an aggregate of payloads
        under the ``load`` key for newer syndics)
    '''
    loads = load.get('load')
    if not isinstance(loads, list):
        loads = [load]  # support old syndics not aggregating returns
    for load in loads:
        # Verify the load
        if any(key not in load for key in ('return', 'jid', 'id')):
            continue
        # if we have a load, save it
        if load.get('load'):
            fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
            self.mminion.returners[fstr](load['jid'], load['load'])

        # Register the syndic
        # NOTE(review): the cache entry is an empty marker file; its mere
        # presence appears to record that this syndic has been seen.
        syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
        if not os.path.exists(syndic_cache_path):
            path_name = os.path.split(syndic_cache_path)[0]
            if not os.path.exists(path_name):
                os.makedirs(path_name)
            with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
                wfh.write('')

        # Format individual return loads
        for key, item in six.iteritems(load['return']):
            ret = {'jid': load['jid'],
                   'id': key}
            ret.update(item)
            # Propagate optional syndic-level metadata down to each
            # per-minion return before firing it as a normal return.
            if 'master_id' in load:
                ret['master_id'] = load['master_id']
            if 'fun' in load:
                ret['fun'] = load['fun']
            if 'arg' in load:
                ret['fun_args'] = load['arg']
            if 'out' in load:
                ret['out'] = load['out']
            if 'sig' in load:
                ret['sig'] = load['sig']
            self._return(ret)
def minion_runner(self, clear_load):
    '''
    Execute a runner from a minion, return the runner's function data

    :param dict clear_load: The minion payload
    :rtype: dict
    :return: The runner function data, or an empty dict when the payload
        is incomplete or the minion fails verification
    '''
    # Guard clause: drop unverifiable requests before touching the API.
    verified = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
    if verified is False:
        return {}
    return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
    '''
    Request the return data from a specific jid, only allowed
    if the requesting minion also initiated the execution.

    :param dict load: The minion payload
    :rtype: dict
    :return: Return data corresponding to a given JID, or an empty dict
        when verification fails or the requester did not start the job
    '''
    load = self.__verify_load(load, ('jid', 'id', 'tok'))
    if load is False:
        return {}
    # Check that this minion can access this data
    auth_cache = os.path.join(
        self.opts['cachedir'],
        'publish_auth')
    if not os.path.isdir(auth_cache):
        os.makedirs(auth_cache)
    jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
    # The auth file holds the id of the minion that initiated the job;
    # only that minion may read the returns.
    # NOTE(review): if the file does not exist this raises IOError rather
    # than returning {} -- confirm callers tolerate that.
    with salt.utils.files.fopen(jid_fn, 'r') as fp_:
        if not load['id'] == fp_.read():
            return {}
    # Grab the latest and return
    return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
    '''
    Publish a command initiated from a minion; this only succeeds when
    minion-to-minion publishing is enabled in the master configuration.

    Minions are matched to the salt functions they may publish via the
    ``peer`` configuration, e.g.:

    .. code-block:: yaml

        peer:
          .*:
            - .*

    enables every minion to publish every function, while:

    .. code-block:: yaml

        peer:
          foo.example.com:
            - test.*

    only allows foo.example.com to publish functions from the test module.

    :param dict clear_load: The minion payload
    :rtype: dict
    :return: The publication result, or an empty dict when the minion is
        not authorized to publish
    '''
    if self.__verify_minion_publish(clear_load):
        return self.masterapi.minion_pub(clear_load)
    return {}
def minion_publish(self, clear_load):
    '''
    Publish a command initiated from a minion; this only succeeds when
    minion-to-minion publishing is enabled in the master configuration.

    Minions are matched to the salt functions they may publish via the
    ``peer`` configuration, e.g.:

    .. code-block:: yaml

        peer:
          .*:
            - .*

    enables every minion to publish every function, while:

    .. code-block:: yaml

        peer:
          foo.example.com:
            - test.*

    only allows foo.example.com to publish functions from the test module.

    :param dict clear_load: The minion payload
    :rtype: dict
    :return: The publication result, or an empty dict when the minion is
        not authorized to publish
    '''
    if self.__verify_minion_publish(clear_load):
        return self.masterapi.minion_publish(clear_load)
    return {}
def run_func(self, func, load):
    '''
    Wrapper for running functions executed with AES encryption

    :param str func: The name of the AESFuncs method to run
    :param dict load: The decrypted minion payload passed to the method
    :return: A two-tuple ``(result, opts)`` where ``opts`` tells the
        caller how to route the response: ``{'fun': 'send'}`` for a
        normal encrypted reply, or ``{'fun': 'send_private', ...}`` for
        a reply encrypted for a single minion
    '''
    # Don't honor private functions
    if func.startswith('__'):
        # TODO: return some error? Seems odd to return {}
        return {}, {'fun': 'send'}
    # Run the func
    if hasattr(self, func):
        try:
            start = time.time()
            ret = getattr(self, func)(load)
            log.trace(
                'Master function call %s took %s seconds',
                func, time.time() - start
            )
        except Exception:
            # Deliberately broad: a failing handler must not kill the
            # master worker; the traceback is logged via exc_info.
            ret = ''
            log.error('Error in function %s:\n', func, exc_info=True)
    else:
        log.error(
            'Received function %s which is unavailable on the master, '
            'returning False', func
        )
        return False, {'fun': 'send'}
    # Don't encrypt the return value for the _return func
    # (we don't care about the return value, so why encrypt it?)
    if func == '_return':
        return ret, {'fun': 'send'}
    if func == '_pillar' and 'id' in load:
        if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
            # Authorized to return old pillar proto
            return ret, {'fun': 'send'}
        # Pillar data is sensitive: route it encrypted for this minion only.
        return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
    # Encrypt the return
    return ret, {'fun': 'send'}
|
saltstack/salt
|
salt/master.py
|
AESFuncs.run_func
|
python
|
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
'''
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call %s took %s seconds',
func, time.time() - start
)
except Exception:
ret = ''
log.error('Error in function %s:\n', func, exc_info=True)
else:
log.error(
'Received function %s which is unavailable on the master, '
'returning False', func
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
|
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1820-L1859
| null |
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
'Salt minion claiming to be %s attempted to communicate with '
'master, but key could not be read and verification was denied.',
id_
)
return False
except (ValueError, IndexError, TypeError) as err:
log.error('Unable to load public key "%s": %s', pub_path, err)
try:
if salt.crypt.public_decrypt(pub, token) == b'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: %s', err)
log.error(
'Salt minion claiming to be %s has attempted to communicate with '
'the master and could not be verified', id_
)
return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool
:rtype: dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# have been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'Drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
if the requesting minion also initialted the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
.. code-block:: bash
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict
:return: If the load is invalid, it may be returned. No key operation is performed.
:rtype: bool
:return: True if key was revoked, False if not
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
|
saltstack/salt
|
salt/master.py
|
ClearFuncs.runner
|
python
|
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
runner_check = self.ckminions.runner_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not runner_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(runner_check, dict) and 'error' in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(fun,
clear_load.get('kwarg', {}),
username)
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
return {'error': {'name': exc.__class__.__name__,
'args': exc.args,
'message': six.text_type(exc)}}
|
Send a master control function back to the runner system
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1894-L1947
|
[
"def get_user():\n '''\n Get the current user\n '''\n if HAS_PWD:\n ret = pwd.getpwuid(os.geteuid()).pw_name\n elif HAS_WIN_FUNCTIONS and salt.utils.win_functions.HAS_WIN32:\n ret = salt.utils.win_functions.get_current_user()\n else:\n raise CommandExecutionError(\n 'Required external library (pwd or win32api) not installed')\n return salt.utils.stringutils.to_unicode(ret)\n",
"def is_sudo(self):\n '''\n Determines if the user is running with sudo\n\n Returns True if the user is running with sudo and False if the\n user is not running with sudo\n '''\n return self.user.startswith('sudo_')\n",
"def _prep_auth_info(self, clear_load):\n sensitive_load_keys = []\n key = None\n if 'token' in clear_load:\n auth_type = 'token'\n err_name = 'TokenAuthenticationError'\n sensitive_load_keys = ['token']\n elif 'eauth' in clear_load:\n auth_type = 'eauth'\n err_name = 'EauthAuthenticationError'\n sensitive_load_keys = ['username', 'password']\n else:\n auth_type = 'user'\n err_name = 'UserAuthenticationError'\n key = self.key\n\n return auth_type, err_name, key, sensitive_load_keys\n",
"def asynchronous(self, fun, low, user='UNKNOWN', pub=None):\n '''\n Execute the function in a multiprocess and return the event tag to use\n to watch for the return\n '''\n async_pub = pub if pub is not None else self._gen_async_pub()\n\n proc = salt.utils.process.SignalHandlingMultiprocessingProcess(\n target=self._proc_function,\n args=(fun, low, user, async_pub['tag'], async_pub['jid']))\n with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):\n # Reset current signals before starting the process in\n # order not to inherit the current signal handlers\n proc.start()\n proc.join() # MUST join, otherwise we leave zombies all over\n return async_pub\n"
] |
class ClearFuncs(object):
'''
Set up functions that are safe to execute when commands sent to the master
without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
    '''
    Create a new ClearFuncs instance.

    :param dict opts: The master options
    :param key: The master's local user keys, used for "user"
        authentication (passed through to LocalFuncs -- exact shape
        defined by salt.daemons.masterapi; TODO confirm)
    '''
    self.opts = opts
    self.key = key
    # Create the event manager
    self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
    # Make a client
    self.local = salt.client.get_local_client(self.opts['conf_file'])
    # Make a minion checker object
    self.ckminions = salt.utils.minions.CkMinions(opts)
    # Make an Auth object
    self.loadauth = salt.auth.LoadAuth(opts)
    # Stand up the master Minion to access returner data
    self.mminion = salt.minion.MasterMinion(
        self.opts,
        states=False,
        rend=False,
        ignore_config_errors=True
    )
    # Make a wheel object
    self.wheel_ = salt.wheel.Wheel(opts)
    # Make a masterapi object
    self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def wheel(self, clear_load):
    '''
    Send a master control function back to the wheel system

    :param dict clear_load: The decoded payload; must contain ``fun``
        plus the credentials consumed by the configured auth backend
    :rtype: dict
    :return: ``{'error': ...}`` on an authentication/authorization
        failure, otherwise ``{'tag': ..., 'data': ...}`` describing the
        wheel job (``data['success']`` is False when the call raised)
    '''
    # All wheel ops pass through eauth
    auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)

    # Authenticate
    auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
    error = auth_check.get('error')
    if error:
        # Authentication error occurred: do not continue.
        return {'error': error}

    # Authorize
    username = auth_check.get('username')
    if auth_type != 'user':
        wheel_check = self.ckminions.wheel_check(
            auth_check.get('auth_list', []),
            clear_load['fun'],
            clear_load.get('kwarg', {})
        )
        if not wheel_check:
            return {'error': {'name': err_name,
                              'message': 'Authentication failure of type "{0}" occurred for '
                                         'user {1}.'.format(auth_type, username)}}
        elif isinstance(wheel_check, dict) and 'error' in wheel_check:
            # A dictionary with an error name/message was handled by ckminions.wheel_check
            return wheel_check

        # No error occurred, consume sensitive settings from the clear_load if passed.
        for item in sensitive_load_keys:
            clear_load.pop(item, None)
    else:
        if 'user' in clear_load:
            username = clear_load['user']
            if salt.auth.AuthUser(username).is_sudo():
                username = self.opts.get('user', 'root')
        else:
            username = salt.utils.user.get_user()

    # Authorized. Do the job!
    # BUGFIX: compute the job metadata *before* entering the try block.
    # Previously jid/fun/tag/data were assigned inside the try, so a
    # failure in gen_jid() or clear_load.pop('fun') made the except
    # handler itself raise NameError/UnboundLocalError while building
    # its error response instead of returning it.
    jid = salt.utils.jid.gen_jid(self.opts)
    fun = clear_load.pop('fun', '')
    tag = tagify(jid, prefix='wheel')
    data = {'fun': "wheel.{0}".format(fun),
            'jid': jid,
            'tag': tag,
            'user': username}
    try:
        self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
        ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
        data['return'] = ret['return']
        data['success'] = ret['success']
        self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
        return {'tag': tag,
                'data': data}
    except Exception as exc:
        log.error('Exception occurred while introspecting %s: %s', fun, exc)
        data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
            fun,
            exc.__class__.__name__,
            exc,
        )
        data['success'] = False
        self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
        return {'tag': tag,
                'data': data}
def mk_token(self, clear_load):
    '''
    Create and return an authentication token, the clear load needs to
    contain the eauth key and the needed authentication creds.

    :param dict clear_load: The payload carrying the eauth credentials
    :return: The token on success, or an empty string on auth failure
    '''
    token = self.loadauth.mk_token(clear_load)
    if token:
        return token
    # Auth backend rejected the credentials -- log and signal failure.
    log.warning('Authentication failure of type "eauth" occurred.')
    return ''
def get_token(self, clear_load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in clear_load:
return False
return self.loadauth.get_tok(clear_load['token'])
def publish_batch(self, clear_load, extra, minions, missing):
batch_load = {}
batch_load.update(clear_load)
import salt.cli.batch_async
batch = salt.cli.batch_async.BatchAsync(
self.local.opts,
functools.partial(self._prep_jid, clear_load, {}),
batch_load
)
ioloop = tornado.ioloop.IOLoop.current()
self._prep_pub(minions, batch.batch_jid, clear_load, extra, missing)
ioloop.add_callback(batch.start)
return {
'enc': 'clear',
'load': {
'jid': batch.batch_jid,
'minions': minions,
'missing': missing
}
}
def publish(self, clear_load):
'''
This method sends out publications to the minions, it can only be used
by the LocalClient.
'''
extra = clear_load.get('kwargs', {})
publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist'])
if publisher_acl.user_is_blacklisted(clear_load['user']) or \
publisher_acl.cmd_is_blacklisted(clear_load['fun']):
log.error(
'%s does not have permissions to run %s. Please contact '
'your local administrator if you believe this is in '
'error.\n', clear_load['user'], clear_load['fun']
)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Retrieve the minions list
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
_res = self.ckminions.check_minions(
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
delimiter
)
minions = _res.get('minions', list())
missing = _res.get('missing', list())
ssh_minions = _res.get('ssh_minions', False)
# Check for external auth calls and authenticate
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
if auth_type == 'user':
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
else:
auth_check = self.loadauth.check_authentication(extra, auth_type)
# Setup authorization list variable and error information
auth_list = auth_check.get('auth_list', [])
err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type)
if auth_check.get('error'):
# Authentication error occurred: do not continue.
log.warning(err_msg)
return {'error': {'name': 'AuthenticationError',
'message': 'Authentication error occurred.'}}
# All Token, Eauth, and non-root users must pass the authorization check
if auth_type != 'user' or (auth_type == 'user' and auth_list):
# Authorize the request
authorized = self.ckminions.auth_check(
auth_list,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
minions=minions,
# always accept find_job
whitelist=['saltutil.find_job'],
)
if not authorized:
# Authorization error occurred. Do not continue.
if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra:
log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username'])
log.warning(err_msg)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Perform some specific auth_type tasks after the authorization check
if auth_type == 'token':
username = auth_check.get('username')
clear_load['user'] = username
log.debug('Minion tokenized user = "%s"', username)
elif auth_type == 'eauth':
# The username we are attempting to auth with
clear_load['user'] = self.loadauth.load_name(extra)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not minions:
return {
'enc': 'clear',
'load': {
'jid': None,
'minions': minions,
'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt'])
}
}
if extra.get('batch', None):
return self.publish_batch(clear_load, extra, minions, missing)
jid = self._prep_jid(clear_load, extra)
if jid is None:
return {'enc': 'clear',
'load': {'error': 'Master failed to assign jid'}}
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
# Send it!
self._send_ssh_pub(payload, ssh_minions=ssh_minions)
self._send_pub(payload)
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions,
'missing': missing
}
}
def _prep_auth_info(self, clear_load):
sensitive_load_keys = []
key = None
if 'token' in clear_load:
auth_type = 'token'
err_name = 'TokenAuthenticationError'
sensitive_load_keys = ['token']
elif 'eauth' in clear_load:
auth_type = 'eauth'
err_name = 'EauthAuthenticationError'
sensitive_load_keys = ['username', 'password']
else:
auth_type = 'user'
err_name = 'UserAuthenticationError'
key = self.key
return auth_type, err_name, key, sensitive_load_keys
def _prep_jid(self, clear_load, extra):
'''
Return a jid for this publication
'''
# the jid in clear_load can be None, '', or something else. this is an
# attempt to clean up the value before passing to plugins
passed_jid = clear_load['jid'] if clear_load.get('jid') else None
nocache = extra.get('nocache', False)
# Retrieve the jid
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
try:
# Retrieve the jid
jid = self.mminion.returners[fstr](nocache=nocache,
passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
'Failed to allocate a jid. The requested returner \'{0}\' '
'could not be loaded.'.format(fstr.split('.')[0])
)
log.error(msg)
return {'error': msg}
return jid
def _send_pub(self, load):
'''
Take a load and send it across the network to connected minions
'''
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.publish(load)
@property
def ssh_client(self):
if not hasattr(self, '_ssh_client'):
self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
return self._ssh_client
def _send_ssh_pub(self, load, ssh_minions=False):
'''
Take a load and send it across the network to ssh minions
'''
if self.opts['enable_ssh_minions'] is True and ssh_minions is True:
log.debug('Send payload to ssh minions')
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
def _prep_pub(self, minions, jid, clear_load, extra, missing):
'''
Take a given load and perform the necessary steps
to prepare a publication.
TODO: This is really only bound by temporal cohesion
and thus should be refactored even further.
'''
clear_load['jid'] = jid
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
# TODO Error reporting over the master event bus
self.event.fire_event({'minions': minions}, clear_load['jid'])
new_job_load = {
'jid': clear_load['jid'],
'tgt_type': clear_load['tgt_type'],
'tgt': clear_load['tgt'],
'user': clear_load['user'],
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'minions': minions,
'missing': missing,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
if self.opts['ext_job_cache']:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
save_load_func = True
# Get the returner's save_load arg_spec.
try:
arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr])
# Check if 'minions' is included in returner's save_load arg_spec.
# This may be missing in custom returners, which we should warn about.
if 'minions' not in arg_spec.args:
log.critical(
'The specified returner used for the external job cache '
'\'%s\' does not have a \'minions\' kwarg in the returner\'s '
'save_load function.', self.opts['ext_job_cache']
)
except (AttributeError, KeyError):
save_load_func = False
log.critical(
'The specified returner used for the external job cache '
'"%s" does not have a save_load function!',
self.opts['ext_job_cache']
)
if save_load_func:
try:
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# always write out to the master job caches
try:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions)
except KeyError:
log.critical(
'The specified returner used for the master job cache '
'"%s" does not have a save_load function!',
self.opts['master_job_cache']
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
# if you specified a master id, lets put that in the load
if 'master_id' in self.opts:
load['master_id'] = self.opts['master_id']
# if someone passed us one, use that
if 'master_id' in extra:
load['master_id'] = extra['master_id']
# Only add the delimiter to the pub data if it is non-default
if delimiter != DEFAULT_TARGET_DELIM:
load['delimiter'] = delimiter
if 'id' in extra:
load['id'] = extra['id']
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'kwargs' in clear_load:
if 'ret_config' in clear_load['kwargs']:
load['ret_config'] = clear_load['kwargs'].get('ret_config')
if 'metadata' in clear_load['kwargs']:
load['metadata'] = clear_load['kwargs'].get('metadata')
if 'module_executors' in clear_load['kwargs']:
load['module_executors'] = clear_load['kwargs'].get('module_executors')
if 'executor_opts' in clear_load['kwargs']:
load['executor_opts'] = clear_load['kwargs'].get('executor_opts')
if 'ret_kwargs' in clear_load['kwargs']:
load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs')
if 'user' in clear_load:
log.info(
'User %s Published command %s with jid %s',
clear_load['user'], clear_load['fun'], clear_load['jid']
)
load['user'] = clear_load['user']
else:
log.info(
'Published command %s with jid %s',
clear_load['fun'], clear_load['jid']
)
log.debug('Published command details %s', load)
return load
def ping(self, clear_load):
'''
Send the load back to the sender.
'''
return clear_load
|
saltstack/salt
|
salt/master.py
|
ClearFuncs.wheel
|
python
|
def wheel(self, clear_load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
wheel_check = self.ckminions.wheel_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not wheel_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(wheel_check, dict) and 'error' in wheel_check:
# A dictionary with an error name/message was handled by ckminions.wheel_check
return wheel_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
jid = salt.utils.jid.gen_jid(self.opts)
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': username}
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
data['return'] = ret['return']
data['success'] = ret['success']
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
data['success'] = False
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
|
Send a master control function back to the wheel system
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1949-L2018
|
[
"def get_user():\n '''\n Get the current user\n '''\n if HAS_PWD:\n ret = pwd.getpwuid(os.geteuid()).pw_name\n elif HAS_WIN_FUNCTIONS and salt.utils.win_functions.HAS_WIN32:\n ret = salt.utils.win_functions.get_current_user()\n else:\n raise CommandExecutionError(\n 'Required external library (pwd or win32api) not installed')\n return salt.utils.stringutils.to_unicode(ret)\n",
"def tagify(suffix='', prefix='', base=SALT):\n '''\n convenience function to build a namespaced event tag string\n from joining with the TABPART character the base, prefix and suffix\n\n If string prefix is a valid key in TAGS Then use the value of key prefix\n Else use prefix string\n\n If suffix is a list Then join all string elements of suffix individually\n Else use string suffix\n\n '''\n parts = [base, TAGS.get(prefix, prefix)]\n if hasattr(suffix, 'append'): # list so extend parts\n parts.extend(suffix)\n else: # string so append\n parts.append(suffix)\n\n for index, _ in enumerate(parts):\n try:\n parts[index] = salt.utils.stringutils.to_str(parts[index])\n except TypeError:\n parts[index] = str(parts[index])\n return TAGPARTER.join([part for part in parts if part])\n",
"def gen_jid(opts=None):\n '''\n Generate a jid\n '''\n if opts is None:\n salt.utils.versions.warn_until(\n 'Sodium',\n 'The `opts` argument was not passed into salt.utils.jid.gen_jid(). '\n 'This will be required starting in {version}.'\n )\n opts = {}\n global LAST_JID_DATETIME # pylint: disable=global-statement\n\n if opts.get('utc_jid', False):\n jid_dt = datetime.datetime.utcnow()\n else:\n jid_dt = datetime.datetime.now()\n if not opts.get('unique_jid', False):\n return '{0:%Y%m%d%H%M%S%f}'.format(jid_dt)\n if LAST_JID_DATETIME and LAST_JID_DATETIME >= jid_dt:\n jid_dt = LAST_JID_DATETIME + datetime.timedelta(microseconds=1)\n LAST_JID_DATETIME = jid_dt\n return '{0:%Y%m%d%H%M%S%f}_{1}'.format(jid_dt, os.getpid())\n",
"def is_sudo(self):\n '''\n Determines if the user is running with sudo\n\n Returns True if the user is running with sudo and False if the\n user is not running with sudo\n '''\n return self.user.startswith('sudo_')\n",
"def _prep_auth_info(self, clear_load):\n sensitive_load_keys = []\n key = None\n if 'token' in clear_load:\n auth_type = 'token'\n err_name = 'TokenAuthenticationError'\n sensitive_load_keys = ['token']\n elif 'eauth' in clear_load:\n auth_type = 'eauth'\n err_name = 'EauthAuthenticationError'\n sensitive_load_keys = ['username', 'password']\n else:\n auth_type = 'user'\n err_name = 'UserAuthenticationError'\n key = self.key\n\n return auth_type, err_name, key, sensitive_load_keys\n"
] |
class ClearFuncs(object):
'''
Set up functions that are safe to execute when commands sent to the master
without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.key = key
# Create the event manager
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Make an minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
# Make a masterapi object
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
runner_check = self.ckminions.runner_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not runner_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(runner_check, dict) and 'error' in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(fun,
clear_load.get('kwarg', {}),
username)
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
return {'error': {'name': exc.__class__.__name__,
'args': exc.args,
'message': six.text_type(exc)}}
def mk_token(self, clear_load):
'''
Create and return an authentication token, the clear load needs to
contain the eauth key and the needed authentication creds.
'''
token = self.loadauth.mk_token(clear_load)
if not token:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
return token
def get_token(self, clear_load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in clear_load:
return False
return self.loadauth.get_tok(clear_load['token'])
def publish_batch(self, clear_load, extra, minions, missing):
batch_load = {}
batch_load.update(clear_load)
import salt.cli.batch_async
batch = salt.cli.batch_async.BatchAsync(
self.local.opts,
functools.partial(self._prep_jid, clear_load, {}),
batch_load
)
ioloop = tornado.ioloop.IOLoop.current()
self._prep_pub(minions, batch.batch_jid, clear_load, extra, missing)
ioloop.add_callback(batch.start)
return {
'enc': 'clear',
'load': {
'jid': batch.batch_jid,
'minions': minions,
'missing': missing
}
}
def publish(self, clear_load):
'''
This method sends out publications to the minions, it can only be used
by the LocalClient.
'''
extra = clear_load.get('kwargs', {})
publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist'])
if publisher_acl.user_is_blacklisted(clear_load['user']) or \
publisher_acl.cmd_is_blacklisted(clear_load['fun']):
log.error(
'%s does not have permissions to run %s. Please contact '
'your local administrator if you believe this is in '
'error.\n', clear_load['user'], clear_load['fun']
)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Retrieve the minions list
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
_res = self.ckminions.check_minions(
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
delimiter
)
minions = _res.get('minions', list())
missing = _res.get('missing', list())
ssh_minions = _res.get('ssh_minions', False)
# Check for external auth calls and authenticate
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
if auth_type == 'user':
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
else:
auth_check = self.loadauth.check_authentication(extra, auth_type)
# Setup authorization list variable and error information
auth_list = auth_check.get('auth_list', [])
err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type)
if auth_check.get('error'):
# Authentication error occurred: do not continue.
log.warning(err_msg)
return {'error': {'name': 'AuthenticationError',
'message': 'Authentication error occurred.'}}
# All Token, Eauth, and non-root users must pass the authorization check
if auth_type != 'user' or (auth_type == 'user' and auth_list):
# Authorize the request
authorized = self.ckminions.auth_check(
auth_list,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
minions=minions,
# always accept find_job
whitelist=['saltutil.find_job'],
)
if not authorized:
# Authorization error occurred. Do not continue.
if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra:
log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username'])
log.warning(err_msg)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Perform some specific auth_type tasks after the authorization check
if auth_type == 'token':
username = auth_check.get('username')
clear_load['user'] = username
log.debug('Minion tokenized user = "%s"', username)
elif auth_type == 'eauth':
# The username we are attempting to auth with
clear_load['user'] = self.loadauth.load_name(extra)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not minions:
return {
'enc': 'clear',
'load': {
'jid': None,
'minions': minions,
'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt'])
}
}
if extra.get('batch', None):
return self.publish_batch(clear_load, extra, minions, missing)
jid = self._prep_jid(clear_load, extra)
if jid is None:
return {'enc': 'clear',
'load': {'error': 'Master failed to assign jid'}}
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
# Send it!
self._send_ssh_pub(payload, ssh_minions=ssh_minions)
self._send_pub(payload)
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions,
'missing': missing
}
}
def _prep_auth_info(self, clear_load):
sensitive_load_keys = []
key = None
if 'token' in clear_load:
auth_type = 'token'
err_name = 'TokenAuthenticationError'
sensitive_load_keys = ['token']
elif 'eauth' in clear_load:
auth_type = 'eauth'
err_name = 'EauthAuthenticationError'
sensitive_load_keys = ['username', 'password']
else:
auth_type = 'user'
err_name = 'UserAuthenticationError'
key = self.key
return auth_type, err_name, key, sensitive_load_keys
def _prep_jid(self, clear_load, extra):
'''
Return a jid for this publication
'''
# the jid in clear_load can be None, '', or something else. this is an
# attempt to clean up the value before passing to plugins
passed_jid = clear_load['jid'] if clear_load.get('jid') else None
nocache = extra.get('nocache', False)
# Retrieve the jid
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
try:
# Retrieve the jid
jid = self.mminion.returners[fstr](nocache=nocache,
passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
'Failed to allocate a jid. The requested returner \'{0}\' '
'could not be loaded.'.format(fstr.split('.')[0])
)
log.error(msg)
return {'error': msg}
return jid
def _send_pub(self, load):
'''
Take a load and send it across the network to connected minions
'''
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.publish(load)
@property
def ssh_client(self):
if not hasattr(self, '_ssh_client'):
self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
return self._ssh_client
def _send_ssh_pub(self, load, ssh_minions=False):
'''
Take a load and send it across the network to ssh minions
'''
if self.opts['enable_ssh_minions'] is True and ssh_minions is True:
log.debug('Send payload to ssh minions')
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
def _prep_pub(self, minions, jid, clear_load, extra, missing):
'''
Take a given load and perform the necessary steps
to prepare a publication.
TODO: This is really only bound by temporal cohesion
and thus should be refactored even further.
'''
clear_load['jid'] = jid
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
# TODO Error reporting over the master event bus
self.event.fire_event({'minions': minions}, clear_load['jid'])
new_job_load = {
'jid': clear_load['jid'],
'tgt_type': clear_load['tgt_type'],
'tgt': clear_load['tgt'],
'user': clear_load['user'],
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'minions': minions,
'missing': missing,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
if self.opts['ext_job_cache']:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
save_load_func = True
# Get the returner's save_load arg_spec.
try:
arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr])
# Check if 'minions' is included in returner's save_load arg_spec.
# This may be missing in custom returners, which we should warn about.
if 'minions' not in arg_spec.args:
log.critical(
'The specified returner used for the external job cache '
'\'%s\' does not have a \'minions\' kwarg in the returner\'s '
'save_load function.', self.opts['ext_job_cache']
)
except (AttributeError, KeyError):
save_load_func = False
log.critical(
'The specified returner used for the external job cache '
'"%s" does not have a save_load function!',
self.opts['ext_job_cache']
)
if save_load_func:
try:
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# always write out to the master job caches
try:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions)
except KeyError:
log.critical(
'The specified returner used for the master job cache '
'"%s" does not have a save_load function!',
self.opts['master_job_cache']
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
# if you specified a master id, lets put that in the load
if 'master_id' in self.opts:
load['master_id'] = self.opts['master_id']
# if someone passed us one, use that
if 'master_id' in extra:
load['master_id'] = extra['master_id']
# Only add the delimiter to the pub data if it is non-default
if delimiter != DEFAULT_TARGET_DELIM:
load['delimiter'] = delimiter
if 'id' in extra:
load['id'] = extra['id']
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'kwargs' in clear_load:
if 'ret_config' in clear_load['kwargs']:
load['ret_config'] = clear_load['kwargs'].get('ret_config')
if 'metadata' in clear_load['kwargs']:
load['metadata'] = clear_load['kwargs'].get('metadata')
if 'module_executors' in clear_load['kwargs']:
load['module_executors'] = clear_load['kwargs'].get('module_executors')
if 'executor_opts' in clear_load['kwargs']:
load['executor_opts'] = clear_load['kwargs'].get('executor_opts')
if 'ret_kwargs' in clear_load['kwargs']:
load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs')
if 'user' in clear_load:
log.info(
'User %s Published command %s with jid %s',
clear_load['user'], clear_load['fun'], clear_load['jid']
)
load['user'] = clear_load['user']
else:
log.info(
'Published command %s with jid %s',
clear_load['fun'], clear_load['jid']
)
log.debug('Published command details %s', load)
return load
def ping(self, clear_load):
'''
Send the load back to the sender.
'''
return clear_load
|
saltstack/salt
|
salt/master.py
|
ClearFuncs.mk_token
|
python
|
def mk_token(self, clear_load):
'''
Create and return an authentication token, the clear load needs to
contain the eauth key and the needed authentication creds.
'''
token = self.loadauth.mk_token(clear_load)
if not token:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
return token
|
Create and return an authentication token, the clear load needs to
contain the eauth key and the needed authentication creds.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L2020-L2029
| null |
class ClearFuncs(object):
'''
Set up functions that are safe to execute when commands sent to the master
without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.key = key
# Create the event manager
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Make an minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
# Make a masterapi object
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
runner_check = self.ckminions.runner_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not runner_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(runner_check, dict) and 'error' in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(fun,
clear_load.get('kwarg', {}),
username)
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
return {'error': {'name': exc.__class__.__name__,
'args': exc.args,
'message': six.text_type(exc)}}
def wheel(self, clear_load):
    '''
    Send a master control function back to the wheel system

    clear_load: dict from the clear channel carrying 'fun' plus auth
    data ('token', eauth credentials, or nothing for a local 'user'
    call).

    Returns {'tag': ..., 'data': ...} describing the wheel run (the
    'data' dict carries 'return' and 'success'), or a dict with an
    'error' key on authentication/authorization failure.
    '''
    # All wheel ops pass through eauth
    auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)

    # Authenticate
    auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
    error = auth_check.get('error')

    if error:
        # Authentication error occurred: do not continue.
        return {'error': error}

    # Authorize
    username = auth_check.get('username')
    if auth_type != 'user':
        wheel_check = self.ckminions.wheel_check(
            auth_check.get('auth_list', []),
            clear_load['fun'],
            clear_load.get('kwarg', {})
        )
        if not wheel_check:
            return {'error': {'name': err_name,
                              'message': 'Authentication failure of type "{0}" occurred for '
                                         'user {1}.'.format(auth_type, username)}}
        elif isinstance(wheel_check, dict) and 'error' in wheel_check:
            # A dictionary with an error name/message was handled by ckminions.wheel_check
            return wheel_check

        # No error occurred, consume sensitive settings from the clear_load if passed.
        for item in sensitive_load_keys:
            clear_load.pop(item, None)
    else:
        if 'user' in clear_load:
            username = clear_load['user']
            if salt.auth.AuthUser(username).is_sudo():
                # Sudo callers are mapped to the master's configured user.
                username = self.opts.get('user', 'root')
        else:
            username = salt.utils.user.get_user()

    # Authorized. Do the job!
    try:
        jid = salt.utils.jid.gen_jid(self.opts)
        fun = clear_load.pop('fun')
        tag = tagify(jid, prefix='wheel')
        data = {'fun': "wheel.{0}".format(fun),
                'jid': jid,
                'tag': tag,
                'user': username}

        # Announce start and completion of the wheel run on the event bus.
        self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
        ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
        data['return'] = ret['return']
        data['success'] = ret['success']
        self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
        return {'tag': tag,
                'data': data}
    except Exception as exc:
        log.error('Exception occurred while introspecting %s: %s', fun, exc)
        data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
            fun,
            exc.__class__.__name__,
            exc,
        )
        data['success'] = False
        # Still fire the 'ret' event so listeners observe the failure.
        self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
        return {'tag': tag,
                'data': data}
def get_token(self, clear_load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in clear_load:
return False
return self.loadauth.get_tok(clear_load['token'])
def publish_batch(self, clear_load, extra, minions, missing):
    '''
    Kick off an asynchronous batched publication via BatchAsync and
    return the batch jid together with the resolved/missing minions.
    '''
    import salt.cli.batch_async

    # Shallow copy so BatchAsync owns its own load dict.
    batch_load = dict(clear_load)
    async_batch = salt.cli.batch_async.BatchAsync(
        self.local.opts,
        functools.partial(self._prep_jid, clear_load, {}),
        batch_load,
    )

    # Prepare the publication payload, then schedule the batch run on
    # the current IO loop.
    self._prep_pub(minions, async_batch.batch_jid, clear_load, extra, missing)
    tornado.ioloop.IOLoop.current().add_callback(async_batch.start)

    load = {
        'jid': async_batch.batch_jid,
        'minions': minions,
        'missing': missing,
    }
    return {'enc': 'clear', 'load': load}
def publish(self, clear_load):
    '''
    This method sends out publications to the minions, it can only be used
    by the LocalClient.

    clear_load: the job description from the clear channel; must carry
    'user', 'fun', 'arg' and 'tgt', with optional 'kwargs' (auth data,
    delimiter, batch settings, returner config, ...).

    Returns {'enc': 'clear', 'load': {...}} describing the published
    job (jid / minions / missing), or a dict with an 'error' key when
    the caller is blacklisted or fails authentication/authorization.
    '''
    extra = clear_load.get('kwargs', {})

    # Reject users/commands denied by the publisher ACL blacklist
    # before doing any other work.
    publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist'])

    if publisher_acl.user_is_blacklisted(clear_load['user']) or \
            publisher_acl.cmd_is_blacklisted(clear_load['fun']):
        log.error(
            '%s does not have permissions to run %s. Please contact '
            'your local administrator if you believe this is in '
            'error.\n', clear_load['user'], clear_load['fun']
        )
        return {'error': {'name': 'AuthorizationError',
                          'message': 'Authorization error occurred.'}}

    # Retrieve the minions list
    delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)

    _res = self.ckminions.check_minions(
        clear_load['tgt'],
        clear_load.get('tgt_type', 'glob'),
        delimiter
    )
    minions = _res.get('minions', list())
    missing = _res.get('missing', list())
    ssh_minions = _res.get('ssh_minions', False)

    # Check for external auth calls and authenticate
    auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
    if auth_type == 'user':
        auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
    else:
        auth_check = self.loadauth.check_authentication(extra, auth_type)

    # Setup authorization list variable and error information
    auth_list = auth_check.get('auth_list', [])
    err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type)

    if auth_check.get('error'):
        # Authentication error occurred: do not continue.
        log.warning(err_msg)
        return {'error': {'name': 'AuthenticationError',
                          'message': 'Authentication error occurred.'}}

    # All Token, Eauth, and non-root users must pass the authorization check
    if auth_type != 'user' or (auth_type == 'user' and auth_list):
        # Authorize the request
        authorized = self.ckminions.auth_check(
            auth_list,
            clear_load['fun'],
            clear_load['arg'],
            clear_load['tgt'],
            clear_load.get('tgt_type', 'glob'),
            minions=minions,
            # always accept find_job
            whitelist=['saltutil.find_job'],
        )

        if not authorized:
            # Authorization error occurred. Do not continue.
            if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra:
                log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username'])
            log.warning(err_msg)
            return {'error': {'name': 'AuthorizationError',
                              'message': 'Authorization error occurred.'}}

        # Perform some specific auth_type tasks after the authorization check
        if auth_type == 'token':
            username = auth_check.get('username')
            clear_load['user'] = username
            log.debug('Minion tokenized user = "%s"', username)
        elif auth_type == 'eauth':
            # The username we are attempting to auth with
            clear_load['user'] = self.loadauth.load_name(extra)

    # If we order masters (via a syndic), don't short circuit if no minions
    # are found
    if not self.opts.get('order_masters'):
        # Check for no minions
        if not minions:
            return {
                'enc': 'clear',
                'load': {
                    'jid': None,
                    'minions': minions,
                    'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt'])
                }
            }
    if extra.get('batch', None):
        # Batched runs are published asynchronously via BatchAsync.
        return self.publish_batch(clear_load, extra, minions, missing)

    jid = self._prep_jid(clear_load, extra)
    if jid is None:
        return {'enc': 'clear',
                'load': {'error': 'Master failed to assign jid'}}
    payload = self._prep_pub(minions, jid, clear_load, extra, missing)

    # Send it!
    self._send_ssh_pub(payload, ssh_minions=ssh_minions)
    self._send_pub(payload)

    return {
        'enc': 'clear',
        'load': {
            'jid': clear_load['jid'],
            'minions': minions,
            'missing': missing
        }
    }
def _prep_auth_info(self, clear_load):
sensitive_load_keys = []
key = None
if 'token' in clear_load:
auth_type = 'token'
err_name = 'TokenAuthenticationError'
sensitive_load_keys = ['token']
elif 'eauth' in clear_load:
auth_type = 'eauth'
err_name = 'EauthAuthenticationError'
sensitive_load_keys = ['username', 'password']
else:
auth_type = 'user'
err_name = 'UserAuthenticationError'
key = self.key
return auth_type, err_name, key, sensitive_load_keys
def _prep_jid(self, clear_load, extra):
'''
Return a jid for this publication
'''
# the jid in clear_load can be None, '', or something else. this is an
# attempt to clean up the value before passing to plugins
passed_jid = clear_load['jid'] if clear_load.get('jid') else None
nocache = extra.get('nocache', False)
# Retrieve the jid
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
try:
# Retrieve the jid
jid = self.mminion.returners[fstr](nocache=nocache,
passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
'Failed to allocate a jid. The requested returner \'{0}\' '
'could not be loaded.'.format(fstr.split('.')[0])
)
log.error(msg)
return {'error': msg}
return jid
def _send_pub(self, load):
    '''
    Publish a load over every configured transport's pub channel.
    '''
    for _transport, transport_opts in iter_transport_opts(self.opts):
        channel = salt.transport.server.PubServerChannel.factory(transport_opts)
        channel.publish(load)
@property
def ssh_client(self):
if not hasattr(self, '_ssh_client'):
self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
return self._ssh_client
def _send_ssh_pub(self, load, ssh_minions=False):
'''
Take a load and send it across the network to ssh minions
'''
if self.opts['enable_ssh_minions'] is True and ssh_minions is True:
log.debug('Send payload to ssh minions')
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
def _prep_pub(self, minions, jid, clear_load, extra, missing):
    '''
    Take a given load and perform the necessary steps
    to prepare a publication.

    Fires the 'new job' events, records the job in the external and
    master job caches, and builds the trimmed payload dict that is
    actually published to the minions. Returns that publication load.

    TODO: This is really only bound by temporal cohesion
    and thus should be refactored even further.
    '''
    clear_load['jid'] = jid
    delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)

    # TODO Error reporting over the master event bus
    self.event.fire_event({'minions': minions}, clear_load['jid'])
    new_job_load = {
        'jid': clear_load['jid'],
        'tgt_type': clear_load['tgt_type'],
        'tgt': clear_load['tgt'],
        'user': clear_load['user'],
        'fun': clear_load['fun'],
        'arg': clear_load['arg'],
        'minions': minions,
        'missing': missing,
    }

    # Announce the job on the event bus
    self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))

    if self.opts['ext_job_cache']:
        fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
        save_load_func = True

        # Get the returner's save_load arg_spec.
        try:
            arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr])

            # Check if 'minions' is included in returner's save_load arg_spec.
            # This may be missing in custom returners, which we should warn about.
            if 'minions' not in arg_spec.args:
                log.critical(
                    'The specified returner used for the external job cache '
                    '\'%s\' does not have a \'minions\' kwarg in the returner\'s '
                    'save_load function.', self.opts['ext_job_cache']
                )
        except (AttributeError, KeyError):
            save_load_func = False
            log.critical(
                'The specified returner used for the external job cache '
                '"%s" does not have a save_load function!',
                self.opts['ext_job_cache']
            )

        if save_load_func:
            try:
                self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions)
            except Exception:
                # External cache failure is non-fatal; the publish continues.
                log.critical(
                    'The specified returner threw a stack trace:\n',
                    exc_info=True
                )

    # always write out to the master job caches
    try:
        fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
        self.mminion.returners[fstr](clear_load['jid'], clear_load, minions)
    except KeyError:
        log.critical(
            'The specified returner used for the master job cache '
            '"%s" does not have a save_load function!',
            self.opts['master_job_cache']
        )
    except Exception:
        log.critical(
            'The specified returner threw a stack trace:\n',
            exc_info=True
        )
    # Set up the payload
    payload = {'enc': 'aes'}
    # Altering the contents of the publish load is serious!! Changes here
    # break compatibility with minion/master versions and even tiny
    # additions can have serious implications on the performance of the
    # publish commands.
    #
    # In short, check with Thomas Hatch before you even think about
    # touching this stuff, we can probably do what you want to do another
    # way that won't have a negative impact.
    load = {
        'fun': clear_load['fun'],
        'arg': clear_load['arg'],
        'tgt': clear_load['tgt'],
        'jid': clear_load['jid'],
        'ret': clear_load['ret'],
    }
    # if you specified a master id, lets put that in the load
    if 'master_id' in self.opts:
        load['master_id'] = self.opts['master_id']
    # if someone passed us one, use that
    if 'master_id' in extra:
        load['master_id'] = extra['master_id']
    # Only add the delimiter to the pub data if it is non-default
    if delimiter != DEFAULT_TARGET_DELIM:
        load['delimiter'] = delimiter

    if 'id' in extra:
        load['id'] = extra['id']
    if 'tgt_type' in clear_load:
        load['tgt_type'] = clear_load['tgt_type']
    if 'to' in clear_load:
        load['to'] = clear_load['to']

    # Optional per-job settings are forwarded only when present.
    if 'kwargs' in clear_load:
        if 'ret_config' in clear_load['kwargs']:
            load['ret_config'] = clear_load['kwargs'].get('ret_config')

        if 'metadata' in clear_load['kwargs']:
            load['metadata'] = clear_load['kwargs'].get('metadata')

        if 'module_executors' in clear_load['kwargs']:
            load['module_executors'] = clear_load['kwargs'].get('module_executors')

        if 'executor_opts' in clear_load['kwargs']:
            load['executor_opts'] = clear_load['kwargs'].get('executor_opts')

        if 'ret_kwargs' in clear_load['kwargs']:
            load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs')

    if 'user' in clear_load:
        log.info(
            'User %s Published command %s with jid %s',
            clear_load['user'], clear_load['fun'], clear_load['jid']
        )
        load['user'] = clear_load['user']
    else:
        log.info(
            'Published command %s with jid %s',
            clear_load['fun'], clear_load['jid']
        )
    log.debug('Published command details %s', load)
    return load
def ping(self, clear_load):
    '''
    Send the load back to the sender.

    A clear-channel echo: returns the load unchanged, which callers
    can use as a liveness check.
    '''
    return clear_load
|
saltstack/salt
|
salt/master.py
|
ClearFuncs.publish
|
python
|
def publish(self, clear_load):
    '''
    This method sends out publications to the minions, it can only be used
    by the LocalClient.

    clear_load: the job description from the clear channel; must carry
    'user', 'fun', 'arg' and 'tgt', with optional 'kwargs' (auth data,
    delimiter, batch settings, returner config, ...).

    Returns {'enc': 'clear', 'load': {...}} describing the published
    job (jid / minions / missing), or a dict with an 'error' key when
    the caller is blacklisted or fails authentication/authorization.
    '''
    extra = clear_load.get('kwargs', {})

    # Reject users/commands denied by the publisher ACL blacklist
    # before doing any other work.
    publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist'])

    if publisher_acl.user_is_blacklisted(clear_load['user']) or \
            publisher_acl.cmd_is_blacklisted(clear_load['fun']):
        log.error(
            '%s does not have permissions to run %s. Please contact '
            'your local administrator if you believe this is in '
            'error.\n', clear_load['user'], clear_load['fun']
        )
        return {'error': {'name': 'AuthorizationError',
                          'message': 'Authorization error occurred.'}}

    # Retrieve the minions list
    delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)

    _res = self.ckminions.check_minions(
        clear_load['tgt'],
        clear_load.get('tgt_type', 'glob'),
        delimiter
    )
    minions = _res.get('minions', list())
    missing = _res.get('missing', list())
    ssh_minions = _res.get('ssh_minions', False)

    # Check for external auth calls and authenticate
    auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
    if auth_type == 'user':
        auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
    else:
        auth_check = self.loadauth.check_authentication(extra, auth_type)

    # Setup authorization list variable and error information
    auth_list = auth_check.get('auth_list', [])
    err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type)

    if auth_check.get('error'):
        # Authentication error occurred: do not continue.
        log.warning(err_msg)
        return {'error': {'name': 'AuthenticationError',
                          'message': 'Authentication error occurred.'}}

    # All Token, Eauth, and non-root users must pass the authorization check
    if auth_type != 'user' or (auth_type == 'user' and auth_list):
        # Authorize the request
        authorized = self.ckminions.auth_check(
            auth_list,
            clear_load['fun'],
            clear_load['arg'],
            clear_load['tgt'],
            clear_load.get('tgt_type', 'glob'),
            minions=minions,
            # always accept find_job
            whitelist=['saltutil.find_job'],
        )

        if not authorized:
            # Authorization error occurred. Do not continue.
            if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra:
                log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username'])
            log.warning(err_msg)
            return {'error': {'name': 'AuthorizationError',
                              'message': 'Authorization error occurred.'}}

        # Perform some specific auth_type tasks after the authorization check
        if auth_type == 'token':
            username = auth_check.get('username')
            clear_load['user'] = username
            log.debug('Minion tokenized user = "%s"', username)
        elif auth_type == 'eauth':
            # The username we are attempting to auth with
            clear_load['user'] = self.loadauth.load_name(extra)

    # If we order masters (via a syndic), don't short circuit if no minions
    # are found
    if not self.opts.get('order_masters'):
        # Check for no minions
        if not minions:
            return {
                'enc': 'clear',
                'load': {
                    'jid': None,
                    'minions': minions,
                    'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt'])
                }
            }
    if extra.get('batch', None):
        # Batched runs are published asynchronously via BatchAsync.
        return self.publish_batch(clear_load, extra, minions, missing)

    jid = self._prep_jid(clear_load, extra)
    if jid is None:
        return {'enc': 'clear',
                'load': {'error': 'Master failed to assign jid'}}
    payload = self._prep_pub(minions, jid, clear_load, extra, missing)

    # Send it!
    self._send_ssh_pub(payload, ssh_minions=ssh_minions)
    self._send_pub(payload)

    return {
        'enc': 'clear',
        'load': {
            'jid': clear_load['jid'],
            'minions': minions,
            'missing': missing
        }
    }
|
This method sends out publications to the minions, it can only be used
by the LocalClient.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L2063-L2174
|
[
"def user_is_blacklisted(self, user):\n '''\n Takes a username as a string and returns a boolean. True indicates that\n the provided user has been blacklisted\n '''\n return not salt.utils.stringutils.check_whitelist_blacklist(user, blacklist=self.blacklist.get('users', []))\n"
] |
class ClearFuncs(object):
'''
Set up functions that are safe to execute when commands sent to the master
without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.key = key
# Create the event manager
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Make an minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
# Make a masterapi object
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
runner_check = self.ckminions.runner_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not runner_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(runner_check, dict) and 'error' in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(fun,
clear_load.get('kwarg', {}),
username)
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
return {'error': {'name': exc.__class__.__name__,
'args': exc.args,
'message': six.text_type(exc)}}
def wheel(self, clear_load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
wheel_check = self.ckminions.wheel_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not wheel_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(wheel_check, dict) and 'error' in wheel_check:
# A dictionary with an error name/message was handled by ckminions.wheel_check
return wheel_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
jid = salt.utils.jid.gen_jid(self.opts)
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': username}
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
data['return'] = ret['return']
data['success'] = ret['success']
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
data['success'] = False
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
def mk_token(self, clear_load):
'''
Create and return an authentication token, the clear load needs to
contain the eauth key and the needed authentication creds.
'''
token = self.loadauth.mk_token(clear_load)
if not token:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
return token
def get_token(self, clear_load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in clear_load:
return False
return self.loadauth.get_tok(clear_load['token'])
def publish_batch(self, clear_load, extra, minions, missing):
batch_load = {}
batch_load.update(clear_load)
import salt.cli.batch_async
batch = salt.cli.batch_async.BatchAsync(
self.local.opts,
functools.partial(self._prep_jid, clear_load, {}),
batch_load
)
ioloop = tornado.ioloop.IOLoop.current()
self._prep_pub(minions, batch.batch_jid, clear_load, extra, missing)
ioloop.add_callback(batch.start)
return {
'enc': 'clear',
'load': {
'jid': batch.batch_jid,
'minions': minions,
'missing': missing
}
}
def _prep_auth_info(self, clear_load):
sensitive_load_keys = []
key = None
if 'token' in clear_load:
auth_type = 'token'
err_name = 'TokenAuthenticationError'
sensitive_load_keys = ['token']
elif 'eauth' in clear_load:
auth_type = 'eauth'
err_name = 'EauthAuthenticationError'
sensitive_load_keys = ['username', 'password']
else:
auth_type = 'user'
err_name = 'UserAuthenticationError'
key = self.key
return auth_type, err_name, key, sensitive_load_keys
def _prep_jid(self, clear_load, extra):
'''
Return a jid for this publication
'''
# the jid in clear_load can be None, '', or something else. this is an
# attempt to clean up the value before passing to plugins
passed_jid = clear_load['jid'] if clear_load.get('jid') else None
nocache = extra.get('nocache', False)
# Retrieve the jid
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
try:
# Retrieve the jid
jid = self.mminion.returners[fstr](nocache=nocache,
passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
'Failed to allocate a jid. The requested returner \'{0}\' '
'could not be loaded.'.format(fstr.split('.')[0])
)
log.error(msg)
return {'error': msg}
return jid
def _send_pub(self, load):
'''
Take a load and send it across the network to connected minions
'''
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.publish(load)
@property
def ssh_client(self):
if not hasattr(self, '_ssh_client'):
self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
return self._ssh_client
def _send_ssh_pub(self, load, ssh_minions=False):
'''
Take a load and send it across the network to ssh minions
'''
if self.opts['enable_ssh_minions'] is True and ssh_minions is True:
log.debug('Send payload to ssh minions')
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
def _prep_pub(self, minions, jid, clear_load, extra, missing):
'''
Take a given load and perform the necessary steps
to prepare a publication.
TODO: This is really only bound by temporal cohesion
and thus should be refactored even further.
'''
clear_load['jid'] = jid
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
# TODO Error reporting over the master event bus
self.event.fire_event({'minions': minions}, clear_load['jid'])
new_job_load = {
'jid': clear_load['jid'],
'tgt_type': clear_load['tgt_type'],
'tgt': clear_load['tgt'],
'user': clear_load['user'],
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'minions': minions,
'missing': missing,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
if self.opts['ext_job_cache']:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
save_load_func = True
# Get the returner's save_load arg_spec.
try:
arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr])
# Check if 'minions' is included in returner's save_load arg_spec.
# This may be missing in custom returners, which we should warn about.
if 'minions' not in arg_spec.args:
log.critical(
'The specified returner used for the external job cache '
'\'%s\' does not have a \'minions\' kwarg in the returner\'s '
'save_load function.', self.opts['ext_job_cache']
)
except (AttributeError, KeyError):
save_load_func = False
log.critical(
'The specified returner used for the external job cache '
'"%s" does not have a save_load function!',
self.opts['ext_job_cache']
)
if save_load_func:
try:
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# always write out to the master job caches
try:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions)
except KeyError:
log.critical(
'The specified returner used for the master job cache '
'"%s" does not have a save_load function!',
self.opts['master_job_cache']
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
# if you specified a master id, lets put that in the load
if 'master_id' in self.opts:
load['master_id'] = self.opts['master_id']
# if someone passed us one, use that
if 'master_id' in extra:
load['master_id'] = extra['master_id']
# Only add the delimiter to the pub data if it is non-default
if delimiter != DEFAULT_TARGET_DELIM:
load['delimiter'] = delimiter
if 'id' in extra:
load['id'] = extra['id']
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'kwargs' in clear_load:
if 'ret_config' in clear_load['kwargs']:
load['ret_config'] = clear_load['kwargs'].get('ret_config')
if 'metadata' in clear_load['kwargs']:
load['metadata'] = clear_load['kwargs'].get('metadata')
if 'module_executors' in clear_load['kwargs']:
load['module_executors'] = clear_load['kwargs'].get('module_executors')
if 'executor_opts' in clear_load['kwargs']:
load['executor_opts'] = clear_load['kwargs'].get('executor_opts')
if 'ret_kwargs' in clear_load['kwargs']:
load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs')
if 'user' in clear_load:
log.info(
'User %s Published command %s with jid %s',
clear_load['user'], clear_load['fun'], clear_load['jid']
)
load['user'] = clear_load['user']
else:
log.info(
'Published command %s with jid %s',
clear_load['fun'], clear_load['jid']
)
log.debug('Published command details %s', load)
return load
def ping(self, clear_load):
'''
Send the load back to the sender.
'''
return clear_load
|
saltstack/salt
|
salt/master.py
|
ClearFuncs._prep_jid
|
python
|
def _prep_jid(self, clear_load, extra):
'''
Return a jid for this publication
'''
# the jid in clear_load can be None, '', or something else. this is an
# attempt to clean up the value before passing to plugins
passed_jid = clear_load['jid'] if clear_load.get('jid') else None
nocache = extra.get('nocache', False)
# Retrieve the jid
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
try:
# Retrieve the jid
jid = self.mminion.returners[fstr](nocache=nocache,
passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
'Failed to allocate a jid. The requested returner \'{0}\' '
'could not be loaded.'.format(fstr.split('.')[0])
)
log.error(msg)
return {'error': msg}
return jid
|
Return a jid for this publication
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L2194-L2217
| null |
class ClearFuncs(object):
'''
Set up functions that are safe to execute when commands sent to the master
without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.key = key
# Create the event manager
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Make an minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
# Make a masterapi object
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
runner_check = self.ckminions.runner_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not runner_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(runner_check, dict) and 'error' in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(fun,
clear_load.get('kwarg', {}),
username)
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
return {'error': {'name': exc.__class__.__name__,
'args': exc.args,
'message': six.text_type(exc)}}
def wheel(self, clear_load):
    '''
    Send a master control function back to the wheel system.

    Authenticates and authorizes the request, fires ``wheel`` lifecycle
    events (``new`` and ``ret``) on the master event bus, and returns a
    dict with the event ``tag`` and result ``data`` — or a dict with an
    ``error`` key on authentication/authorization failure.

    :param dict clear_load: the unencrypted request payload; must contain
        ``fun`` plus authentication credentials.
    '''
    # All wheel ops pass through eauth
    auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
    # Authenticate
    auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
    error = auth_check.get('error')
    if error:
        # Authentication error occurred: do not continue.
        return {'error': error}
    # Authorize
    username = auth_check.get('username')
    if auth_type != 'user':
        wheel_check = self.ckminions.wheel_check(
            auth_check.get('auth_list', []),
            clear_load['fun'],
            clear_load.get('kwarg', {})
        )
        if not wheel_check:
            return {'error': {'name': err_name,
                              'message': 'Authentication failure of type "{0}" occurred for '
                                         'user {1}.'.format(auth_type, username)}}
        elif isinstance(wheel_check, dict) and 'error' in wheel_check:
            # A dictionary with an error name/message was handled by ckminions.wheel_check
            return wheel_check
        # No error occurred, consume sensitive settings from the clear_load if passed.
        for item in sensitive_load_keys:
            clear_load.pop(item, None)
    else:
        if 'user' in clear_load:
            username = clear_load['user']
            if salt.auth.AuthUser(username).is_sudo():
                # Sudo callers run as the master's configured user
                username = self.opts.get('user', 'root')
        else:
            username = salt.utils.user.get_user()
    # Authorized. Do the job!
    try:
        jid = salt.utils.jid.gen_jid(self.opts)
        fun = clear_load.pop('fun')
        tag = tagify(jid, prefix='wheel')
        data = {'fun': "wheel.{0}".format(fun),
                'jid': jid,
                'tag': tag,
                'user': username}
        # Announce the run, execute the wheel function, then publish the result.
        self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
        ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
        data['return'] = ret['return']
        data['success'] = ret['success']
        self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
        return {'tag': tag,
                'data': data}
    except Exception as exc:
        log.error('Exception occurred while introspecting %s: %s', fun, exc)
        # A failed run still fires a 'ret' event so listeners are not left hanging.
        data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
            fun,
            exc.__class__.__name__,
            exc,
        )
        data['success'] = False
        self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
        return {'tag': tag,
                'data': data}
def mk_token(self, clear_load):
    '''
    Create and return an authentication token.

    The clear load must carry the ``eauth`` key and the required
    authentication credentials; an empty string is returned (and a
    warning logged) when token creation fails.
    '''
    new_token = self.loadauth.mk_token(clear_load)
    if new_token:
        return new_token
    log.warning('Authentication failure of type "eauth" occurred.')
    return ''
def get_token(self, clear_load):
    '''
    Resolve a token to its associated name.

    Returns ``False`` when the load carries no ``token`` key; otherwise
    defers to the auth subsystem's token lookup.
    '''
    try:
        tok = clear_load['token']
    except KeyError:
        return False
    return self.loadauth.get_tok(tok)
def publish_batch(self, clear_load, extra, minions, missing):
    '''
    Kick off an asynchronous batched publication.

    Builds a BatchAsync runner, prepares the publish payload, schedules
    the batch on the current IO loop, and immediately returns the clear
    response carrying the batch jid and target resolution.
    '''
    # Imported lazily, exactly as a normal batch publish would do.
    import salt.cli.batch_async
    batch_load = dict(clear_load)
    batch = salt.cli.batch_async.BatchAsync(
        self.local.opts,
        functools.partial(self._prep_jid, clear_load, {}),
        batch_load
    )
    ioloop = tornado.ioloop.IOLoop.current()
    # Prepare the publication before handing control to the IO loop.
    self._prep_pub(minions, batch.batch_jid, clear_load, extra, missing)
    ioloop.add_callback(batch.start)
    return {
        'enc': 'clear',
        'load': {'jid': batch.batch_jid,
                 'minions': minions,
                 'missing': missing}
    }
def publish(self, clear_load):
    '''
    This method sends out publications to the minions, it can only be used
    by the LocalClient.

    Steps: publisher-ACL blacklist check, target resolution,
    token/eauth/local-key authentication and authorization, then jid
    allocation and the actual publish (regular transports and, when
    enabled, salt-ssh minions).

    :param dict clear_load: the unencrypted publish request; must contain
        ``user``, ``fun``, ``arg`` and ``tgt``.
    :return: a ``{'enc': 'clear', 'load': {...}}`` response with the jid
        and matched/missing minions, or a dict with an ``error`` key.
    '''
    extra = clear_load.get('kwargs', {})
    publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist'])
    if publisher_acl.user_is_blacklisted(clear_load['user']) or \
            publisher_acl.cmd_is_blacklisted(clear_load['fun']):
        log.error(
            '%s does not have permissions to run %s. Please contact '
            'your local administrator if you believe this is in '
            'error.\n', clear_load['user'], clear_load['fun']
        )
        return {'error': {'name': 'AuthorizationError',
                          'message': 'Authorization error occurred.'}}
    # Retrieve the minions list
    delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
    _res = self.ckminions.check_minions(
        clear_load['tgt'],
        clear_load.get('tgt_type', 'glob'),
        delimiter
    )
    minions = _res.get('minions', list())
    missing = _res.get('missing', list())
    ssh_minions = _res.get('ssh_minions', False)
    # Check for external auth calls and authenticate
    auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
    if auth_type == 'user':
        auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
    else:
        auth_check = self.loadauth.check_authentication(extra, auth_type)
    # Setup authorization list variable and error information
    auth_list = auth_check.get('auth_list', [])
    err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type)
    if auth_check.get('error'):
        # Authentication error occurred: do not continue.
        log.warning(err_msg)
        return {'error': {'name': 'AuthenticationError',
                          'message': 'Authentication error occurred.'}}
    # All Token, Eauth, and non-root users must pass the authorization check
    if auth_type != 'user' or (auth_type == 'user' and auth_list):
        # Authorize the request
        authorized = self.ckminions.auth_check(
            auth_list,
            clear_load['fun'],
            clear_load['arg'],
            clear_load['tgt'],
            clear_load.get('tgt_type', 'glob'),
            minions=minions,
            # always accept find_job
            whitelist=['saltutil.find_job'],
        )
        if not authorized:
            # Authorization error occurred. Do not continue.
            if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra:
                log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username'])
            log.warning(err_msg)
            return {'error': {'name': 'AuthorizationError',
                              'message': 'Authorization error occurred.'}}
        # Perform some specific auth_type tasks after the authorization check
        if auth_type == 'token':
            username = auth_check.get('username')
            clear_load['user'] = username
            log.debug('Minion tokenized user = "%s"', username)
        elif auth_type == 'eauth':
            # The username we are attempting to auth with
            clear_load['user'] = self.loadauth.load_name(extra)
    # If we order masters (via a syndic), don't short circuit if no minions
    # are found
    if not self.opts.get('order_masters'):
        # Check for no minions
        if not minions:
            return {
                'enc': 'clear',
                'load': {
                    'jid': None,
                    'minions': minions,
                    'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt'])
                }
            }
    if extra.get('batch', None):
        # Batched execution takes the asynchronous path
        return self.publish_batch(clear_load, extra, minions, missing)
    jid = self._prep_jid(clear_load, extra)
    # NOTE(review): _prep_jid may return a {'error': msg} dict on failure
    # rather than None; only None is handled here — confirm downstream.
    if jid is None:
        return {'enc': 'clear',
                'load': {'error': 'Master failed to assign jid'}}
    payload = self._prep_pub(minions, jid, clear_load, extra, missing)
    # Send it!
    self._send_ssh_pub(payload, ssh_minions=ssh_minions)
    self._send_pub(payload)
    return {
        'enc': 'clear',
        'load': {
            'jid': clear_load['jid'],
            'minions': minions,
            'missing': missing
        }
    }
def _prep_auth_info(self, clear_load):
sensitive_load_keys = []
key = None
if 'token' in clear_load:
auth_type = 'token'
err_name = 'TokenAuthenticationError'
sensitive_load_keys = ['token']
elif 'eauth' in clear_load:
auth_type = 'eauth'
err_name = 'EauthAuthenticationError'
sensitive_load_keys = ['username', 'password']
else:
auth_type = 'user'
err_name = 'UserAuthenticationError'
key = self.key
return auth_type, err_name, key, sensitive_load_keys
def _send_pub(self, load):
    '''
    Publish a prepared load to all connected minions.

    Iterates every configured transport and pushes the load through the
    matching publish-server channel.
    '''
    for _transport, transport_opts in iter_transport_opts(self.opts):
        channel = salt.transport.server.PubServerChannel.factory(transport_opts)
        channel.publish(load)
@property
def ssh_client(self):
    '''
    Lazily construct and cache the salt-ssh client.

    Built on first access and reused for the lifetime of this object.
    '''
    try:
        return self._ssh_client
    except AttributeError:
        self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
    return self._ssh_client
def _send_ssh_pub(self, load, ssh_minions=False):
'''
Take a load and send it across the network to ssh minions
'''
if self.opts['enable_ssh_minions'] is True and ssh_minions is True:
log.debug('Send payload to ssh minions')
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
def _prep_pub(self, minions, jid, clear_load, extra, missing):
    '''
    Take a given load and perform the necessary steps
    to prepare a publication.

    Fires the new-job events, records the job in the external and master
    job caches, and builds the minimal payload dict that is actually
    published to minions.

    TODO: This is really only bound by temporal cohesion
    and thus should be refactored even further.

    :return: the ``load`` dict to publish.
    '''
    clear_load['jid'] = jid
    delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
    # TODO Error reporting over the master event bus
    self.event.fire_event({'minions': minions}, clear_load['jid'])
    new_job_load = {
        'jid': clear_load['jid'],
        'tgt_type': clear_load['tgt_type'],
        'tgt': clear_load['tgt'],
        'user': clear_load['user'],
        'fun': clear_load['fun'],
        'arg': clear_load['arg'],
        'minions': minions,
        'missing': missing,
    }
    # Announce the job on the event bus
    self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
    if self.opts['ext_job_cache']:
        fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
        save_load_func = True
        # Get the returner's save_load arg_spec.
        try:
            arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr])
            # Check if 'minions' is included in returner's save_load arg_spec.
            # This may be missing in custom returners, which we should warn about.
            if 'minions' not in arg_spec.args:
                log.critical(
                    'The specified returner used for the external job cache '
                    '\'%s\' does not have a \'minions\' kwarg in the returner\'s '
                    'save_load function.', self.opts['ext_job_cache']
                )
        except (AttributeError, KeyError):
            save_load_func = False
            log.critical(
                'The specified returner used for the external job cache '
                '"%s" does not have a save_load function!',
                self.opts['ext_job_cache']
            )
        if save_load_func:
            try:
                self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions)
            except Exception:
                log.critical(
                    'The specified returner threw a stack trace:\n',
                    exc_info=True
                )
    # always write out to the master job caches
    try:
        fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
        self.mminion.returners[fstr](clear_load['jid'], clear_load, minions)
    except KeyError:
        log.critical(
            'The specified returner used for the master job cache '
            '"%s" does not have a save_load function!',
            self.opts['master_job_cache']
        )
    except Exception:
        log.critical(
            'The specified returner threw a stack trace:\n',
            exc_info=True
        )
    # Set up the payload
    payload = {'enc': 'aes'}
    # Altering the contents of the publish load is serious!! Changes here
    # break compatibility with minion/master versions and even tiny
    # additions can have serious implications on the performance of the
    # publish commands.
    #
    # In short, check with Thomas Hatch before you even think about
    # touching this stuff, we can probably do what you want to do another
    # way that won't have a negative impact.
    load = {
        'fun': clear_load['fun'],
        'arg': clear_load['arg'],
        'tgt': clear_load['tgt'],
        'jid': clear_load['jid'],
        'ret': clear_load['ret'],
    }
    # if you specified a master id, lets put that in the load
    if 'master_id' in self.opts:
        load['master_id'] = self.opts['master_id']
    # if someone passed us one, use that
    if 'master_id' in extra:
        load['master_id'] = extra['master_id']
    # Only add the delimiter to the pub data if it is non-default
    if delimiter != DEFAULT_TARGET_DELIM:
        load['delimiter'] = delimiter
    if 'id' in extra:
        load['id'] = extra['id']
    if 'tgt_type' in clear_load:
        load['tgt_type'] = clear_load['tgt_type']
    if 'to' in clear_load:
        load['to'] = clear_load['to']
    if 'kwargs' in clear_load:
        # Optional knobs forwarded from the caller's kwargs
        if 'ret_config' in clear_load['kwargs']:
            load['ret_config'] = clear_load['kwargs'].get('ret_config')
        if 'metadata' in clear_load['kwargs']:
            load['metadata'] = clear_load['kwargs'].get('metadata')
        if 'module_executors' in clear_load['kwargs']:
            load['module_executors'] = clear_load['kwargs'].get('module_executors')
        if 'executor_opts' in clear_load['kwargs']:
            load['executor_opts'] = clear_load['kwargs'].get('executor_opts')
        if 'ret_kwargs' in clear_load['kwargs']:
            load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs')
    if 'user' in clear_load:
        log.info(
            'User %s Published command %s with jid %s',
            clear_load['user'], clear_load['fun'], clear_load['jid']
        )
        load['user'] = clear_load['user']
    else:
        log.info(
            'Published command %s with jid %s',
            clear_load['fun'], clear_load['jid']
        )
    log.debug('Published command details %s', load)
    return load
def ping(self, clear_load):
    '''
    Echo the received load back to the sender unchanged.

    Used as a cheap liveness check for the clear interface.
    '''
    # Nothing to validate or transform; simply reflect the payload.
    return clear_load
|
saltstack/salt
|
salt/master.py
|
ClearFuncs._send_pub
|
python
|
def _send_pub(self, load):
'''
Take a load and send it across the network to connected minions
'''
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.publish(load)
|
Take a load and send it across the network to connected minions
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L2219-L2225
| null |
class ClearFuncs(object):
'''
Set up functions that are safe to execute when commands sent to the master
without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
    '''
    Wire up the helpers needed by the clear-interface handlers.

    :param dict opts: master configuration options
    :param key: the master key structure used for local 'user' auth
    '''
    self.opts = opts
    self.key = key
    # Create the event manager
    self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
    # Make a client
    self.local = salt.client.get_local_client(self.opts['conf_file'])
    # Make a minion checker object
    self.ckminions = salt.utils.minions.CkMinions(opts)
    # Make an Auth object
    self.loadauth = salt.auth.LoadAuth(opts)
    # Stand up the master Minion to access returner data
    self.mminion = salt.minion.MasterMinion(
        self.opts,
        states=False,
        rend=False,
        ignore_config_errors=True
    )
    # Make a wheel object
    self.wheel_ = salt.wheel.Wheel(opts)
    # Make a masterapi object
    self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
runner_check = self.ckminions.runner_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not runner_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(runner_check, dict) and 'error' in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(fun,
clear_load.get('kwarg', {}),
username)
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
return {'error': {'name': exc.__class__.__name__,
'args': exc.args,
'message': six.text_type(exc)}}
def wheel(self, clear_load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
wheel_check = self.ckminions.wheel_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not wheel_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(wheel_check, dict) and 'error' in wheel_check:
# A dictionary with an error name/message was handled by ckminions.wheel_check
return wheel_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
jid = salt.utils.jid.gen_jid(self.opts)
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': username}
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
data['return'] = ret['return']
data['success'] = ret['success']
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
data['success'] = False
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
def mk_token(self, clear_load):
'''
Create and return an authentication token, the clear load needs to
contain the eauth key and the needed authentication creds.
'''
token = self.loadauth.mk_token(clear_load)
if not token:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
return token
def get_token(self, clear_load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in clear_load:
return False
return self.loadauth.get_tok(clear_load['token'])
def publish_batch(self, clear_load, extra, minions, missing):
batch_load = {}
batch_load.update(clear_load)
import salt.cli.batch_async
batch = salt.cli.batch_async.BatchAsync(
self.local.opts,
functools.partial(self._prep_jid, clear_load, {}),
batch_load
)
ioloop = tornado.ioloop.IOLoop.current()
self._prep_pub(minions, batch.batch_jid, clear_load, extra, missing)
ioloop.add_callback(batch.start)
return {
'enc': 'clear',
'load': {
'jid': batch.batch_jid,
'minions': minions,
'missing': missing
}
}
def publish(self, clear_load):
'''
This method sends out publications to the minions, it can only be used
by the LocalClient.
'''
extra = clear_load.get('kwargs', {})
publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist'])
if publisher_acl.user_is_blacklisted(clear_load['user']) or \
publisher_acl.cmd_is_blacklisted(clear_load['fun']):
log.error(
'%s does not have permissions to run %s. Please contact '
'your local administrator if you believe this is in '
'error.\n', clear_load['user'], clear_load['fun']
)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Retrieve the minions list
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
_res = self.ckminions.check_minions(
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
delimiter
)
minions = _res.get('minions', list())
missing = _res.get('missing', list())
ssh_minions = _res.get('ssh_minions', False)
# Check for external auth calls and authenticate
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
if auth_type == 'user':
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
else:
auth_check = self.loadauth.check_authentication(extra, auth_type)
# Setup authorization list variable and error information
auth_list = auth_check.get('auth_list', [])
err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type)
if auth_check.get('error'):
# Authentication error occurred: do not continue.
log.warning(err_msg)
return {'error': {'name': 'AuthenticationError',
'message': 'Authentication error occurred.'}}
# All Token, Eauth, and non-root users must pass the authorization check
if auth_type != 'user' or (auth_type == 'user' and auth_list):
# Authorize the request
authorized = self.ckminions.auth_check(
auth_list,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
minions=minions,
# always accept find_job
whitelist=['saltutil.find_job'],
)
if not authorized:
# Authorization error occurred. Do not continue.
if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra:
log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username'])
log.warning(err_msg)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Perform some specific auth_type tasks after the authorization check
if auth_type == 'token':
username = auth_check.get('username')
clear_load['user'] = username
log.debug('Minion tokenized user = "%s"', username)
elif auth_type == 'eauth':
# The username we are attempting to auth with
clear_load['user'] = self.loadauth.load_name(extra)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not minions:
return {
'enc': 'clear',
'load': {
'jid': None,
'minions': minions,
'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt'])
}
}
if extra.get('batch', None):
return self.publish_batch(clear_load, extra, minions, missing)
jid = self._prep_jid(clear_load, extra)
if jid is None:
return {'enc': 'clear',
'load': {'error': 'Master failed to assign jid'}}
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
# Send it!
self._send_ssh_pub(payload, ssh_minions=ssh_minions)
self._send_pub(payload)
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions,
'missing': missing
}
}
def _prep_auth_info(self, clear_load):
sensitive_load_keys = []
key = None
if 'token' in clear_load:
auth_type = 'token'
err_name = 'TokenAuthenticationError'
sensitive_load_keys = ['token']
elif 'eauth' in clear_load:
auth_type = 'eauth'
err_name = 'EauthAuthenticationError'
sensitive_load_keys = ['username', 'password']
else:
auth_type = 'user'
err_name = 'UserAuthenticationError'
key = self.key
return auth_type, err_name, key, sensitive_load_keys
def _prep_jid(self, clear_load, extra):
    '''
    Return a jid for this publication.

    Honors a caller-supplied jid when present and defers allocation to
    the configured master job cache returner.

    NOTE(review): when the returner is missing/broken this returns a
    ``{'error': msg}`` dict rather than ``None``, while callers such as
    ``publish`` only check ``if jid is None`` — confirm whether the error
    dict is ever mistaken for a valid jid downstream.
    '''
    # the jid in clear_load can be None, '', or something else. this is an
    # attempt to clean up the value before passing to plugins
    passed_jid = clear_load['jid'] if clear_load.get('jid') else None
    nocache = extra.get('nocache', False)
    # Retrieve the jid
    fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
    try:
        # Retrieve the jid
        jid = self.mminion.returners[fstr](nocache=nocache,
                                           passed_jid=passed_jid)
    except (KeyError, TypeError):
        # The returner is not present
        msg = (
            'Failed to allocate a jid. The requested returner \'{0}\' '
            'could not be loaded.'.format(fstr.split('.')[0])
        )
        log.error(msg)
        return {'error': msg}
    return jid
@property
def ssh_client(self):
if not hasattr(self, '_ssh_client'):
self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
return self._ssh_client
def _send_ssh_pub(self, load, ssh_minions=False):
'''
Take a load and send it across the network to ssh minions
'''
if self.opts['enable_ssh_minions'] is True and ssh_minions is True:
log.debug('Send payload to ssh minions')
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
def _prep_pub(self, minions, jid, clear_load, extra, missing):
'''
Take a given load and perform the necessary steps
to prepare a publication.
TODO: This is really only bound by temporal cohesion
and thus should be refactored even further.
'''
clear_load['jid'] = jid
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
# TODO Error reporting over the master event bus
self.event.fire_event({'minions': minions}, clear_load['jid'])
new_job_load = {
'jid': clear_load['jid'],
'tgt_type': clear_load['tgt_type'],
'tgt': clear_load['tgt'],
'user': clear_load['user'],
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'minions': minions,
'missing': missing,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
if self.opts['ext_job_cache']:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
save_load_func = True
# Get the returner's save_load arg_spec.
try:
arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr])
# Check if 'minions' is included in returner's save_load arg_spec.
# This may be missing in custom returners, which we should warn about.
if 'minions' not in arg_spec.args:
log.critical(
'The specified returner used for the external job cache '
'\'%s\' does not have a \'minions\' kwarg in the returner\'s '
'save_load function.', self.opts['ext_job_cache']
)
except (AttributeError, KeyError):
save_load_func = False
log.critical(
'The specified returner used for the external job cache '
'"%s" does not have a save_load function!',
self.opts['ext_job_cache']
)
if save_load_func:
try:
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# always write out to the master job caches
try:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions)
except KeyError:
log.critical(
'The specified returner used for the master job cache '
'"%s" does not have a save_load function!',
self.opts['master_job_cache']
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
# if you specified a master id, lets put that in the load
if 'master_id' in self.opts:
load['master_id'] = self.opts['master_id']
# if someone passed us one, use that
if 'master_id' in extra:
load['master_id'] = extra['master_id']
# Only add the delimiter to the pub data if it is non-default
if delimiter != DEFAULT_TARGET_DELIM:
load['delimiter'] = delimiter
if 'id' in extra:
load['id'] = extra['id']
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'kwargs' in clear_load:
if 'ret_config' in clear_load['kwargs']:
load['ret_config'] = clear_load['kwargs'].get('ret_config')
if 'metadata' in clear_load['kwargs']:
load['metadata'] = clear_load['kwargs'].get('metadata')
if 'module_executors' in clear_load['kwargs']:
load['module_executors'] = clear_load['kwargs'].get('module_executors')
if 'executor_opts' in clear_load['kwargs']:
load['executor_opts'] = clear_load['kwargs'].get('executor_opts')
if 'ret_kwargs' in clear_load['kwargs']:
load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs')
if 'user' in clear_load:
log.info(
'User %s Published command %s with jid %s',
clear_load['user'], clear_load['fun'], clear_load['jid']
)
load['user'] = clear_load['user']
else:
log.info(
'Published command %s with jid %s',
clear_load['fun'], clear_load['jid']
)
log.debug('Published command details %s', load)
return load
def ping(self, clear_load):
'''
Send the load back to the sender.
'''
return clear_load
|
saltstack/salt
|
salt/master.py
|
ClearFuncs._send_ssh_pub
|
python
|
def _send_ssh_pub(self, load, ssh_minions=False):
'''
Take a load and send it across the network to ssh minions
'''
if self.opts['enable_ssh_minions'] is True and ssh_minions is True:
log.debug('Send payload to ssh minions')
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
|
Take a load and send it across the network to ssh minions
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L2233-L2239
| null |
class ClearFuncs(object):
'''
Set up functions that are safe to execute when commands sent to the master
without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.key = key
# Create the event manager
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Make an minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
# Make a masterapi object
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
runner_check = self.ckminions.runner_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not runner_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(runner_check, dict) and 'error' in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(fun,
clear_load.get('kwarg', {}),
username)
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
return {'error': {'name': exc.__class__.__name__,
'args': exc.args,
'message': six.text_type(exc)}}
def wheel(self, clear_load):
    '''
    Send a master control function back to the wheel system.

    :param dict clear_load: Decoded request payload. Must contain ``fun``
        (the wheel function to execute); remaining keys are passed through
        as keyword arguments after auth data is consumed.
    :return: ``{'tag': ..., 'data': ...}`` describing the wheel run, or an
        ``{'error': ...}`` dict on auth failure.
    '''
    # All wheel ops pass through eauth
    auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)

    # Authenticate
    auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
    error = auth_check.get('error')

    if error:
        # Authentication error occurred: do not continue.
        return {'error': error}

    # Authorize
    username = auth_check.get('username')
    if auth_type != 'user':
        # Token/eauth requests must additionally pass the wheel ACL check.
        wheel_check = self.ckminions.wheel_check(
            auth_check.get('auth_list', []),
            clear_load['fun'],
            clear_load.get('kwarg', {})
        )
        if not wheel_check:
            return {'error': {'name': err_name,
                              'message': 'Authentication failure of type "{0}" occurred for '
                                         'user {1}.'.format(auth_type, username)}}
        elif isinstance(wheel_check, dict) and 'error' in wheel_check:
            # A dictionary with an error name/message was handled by ckminions.wheel_check
            return wheel_check

        # No error occurred, consume sensitive settings from the clear_load if passed.
        for item in sensitive_load_keys:
            clear_load.pop(item, None)
    else:
        if 'user' in clear_load:
            username = clear_load['user']
            if salt.auth.AuthUser(username).is_sudo():
                # Sudo invocations run as the master's configured user.
                username = self.opts.get('user', 'root')
        else:
            username = salt.utils.user.get_user()

    # Authorized. Do the job!
    try:
        jid = salt.utils.jid.gen_jid(self.opts)
        fun = clear_load.pop('fun')
        tag = tagify(jid, prefix='wheel')
        data = {'fun': "wheel.{0}".format(fun),
                'jid': jid,
                'tag': tag,
                'user': username}

        # Announce start and completion of the wheel run on the event bus.
        self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
        ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
        data['return'] = ret['return']
        data['success'] = ret['success']
        self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
        return {'tag': tag,
                'data': data}
    except Exception as exc:
        # NOTE(review): if the exception fired before ``data``/``fun``/``tag``
        # were bound (e.g. in gen_jid or pop('fun')), this handler itself
        # raises NameError -- confirm before relying on this error path.
        log.error('Exception occurred while introspecting %s: %s', fun, exc)
        data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
            fun,
            exc.__class__.__name__,
            exc,
        )
        data['success'] = False
        self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
        return {'tag': tag,
                'data': data}
def mk_token(self, clear_load):
    '''
    Create and return an authentication token.

    The clear load needs to contain the eauth key and the needed
    authentication creds.  Returns an empty string when authentication
    fails (token creation refused).
    '''
    token = self.loadauth.mk_token(clear_load)
    if token:
        return token
    # Token creation failed: log the eauth failure and signal with ''.
    log.warning('Authentication failure of type "eauth" occurred.')
    return ''
def get_token(self, clear_load):
    '''
    Return the name associated with a token or False if the token is invalid
    '''
    try:
        tok = clear_load['token']
    except KeyError:
        # No token supplied at all; nothing to resolve.
        return False
    return self.loadauth.get_tok(tok)
def publish_batch(self, clear_load, extra, minions, missing):
    '''
    Kick off a batched publish: targeted minions are commanded in batches
    on the master-side IOLoop instead of all at once.

    :param dict clear_load: The original publish payload.
    :param dict extra: Extra kwargs from the request (carries ``batch``).
    :param list minions: Minion ids matched by the target expression.
    :param list missing: Requested minions that could not be matched.
    :return: A clear-protocol reply carrying the batch jid.
    '''
    batch_load = {}
    batch_load.update(clear_load)
    # Imported lazily so the batching machinery is only loaded when needed.
    import salt.cli.batch_async
    batch = salt.cli.batch_async.BatchAsync(
        self.local.opts,
        # jid factory bound to the original load; the batch allocates jids
        # for each sub-publication through it.
        functools.partial(self._prep_jid, clear_load, {}),
        batch_load
    )
    ioloop = tornado.ioloop.IOLoop.current()
    # Fire the "new job" events / job-cache writes for the overall batch jid.
    self._prep_pub(minions, batch.batch_jid, clear_load, extra, missing)
    # Start the batch on the next loop iteration so this call can return now.
    ioloop.add_callback(batch.start)
    return {
        'enc': 'clear',
        'load': {
            'jid': batch.batch_jid,
            'minions': minions,
            'missing': missing
        }
    }
def publish(self, clear_load):
    '''
    This method sends out publications to the minions, it can only be used
    by the LocalClient.

    :param dict clear_load: Decoded publish request. Must contain ``user``,
        ``fun``, ``arg`` and ``tgt``; may carry ``kwargs`` with auth data
        (``token`` / eauth creds), ``batch``, ``delimiter`` and other
        options.
    :return: A clear-protocol reply dict: ``{'error': ...}`` on blacklist
        or auth/authorization failure, otherwise ``{'enc': 'clear',
        'load': {...}}`` carrying the jid and matched/missing minions.
    '''
    extra = clear_load.get('kwargs', {})

    # Reject blacklisted users/commands before doing any other work.
    publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist'])

    if publisher_acl.user_is_blacklisted(clear_load['user']) or \
            publisher_acl.cmd_is_blacklisted(clear_load['fun']):
        log.error(
            '%s does not have permissions to run %s. Please contact '
            'your local administrator if you believe this is in '
            'error.\n', clear_load['user'], clear_load['fun']
        )
        return {'error': {'name': 'AuthorizationError',
                          'message': 'Authorization error occurred.'}}

    # Retrieve the minions list
    delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
    _res = self.ckminions.check_minions(
        clear_load['tgt'],
        clear_load.get('tgt_type', 'glob'),
        delimiter
    )
    minions = _res.get('minions', list())
    missing = _res.get('missing', list())
    ssh_minions = _res.get('ssh_minions', False)

    # Check for external auth calls and authenticate
    auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
    if auth_type == 'user':
        # Plain-user requests authenticate with the master key.
        auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
    else:
        # Token/eauth data travels in the extra kwargs, not the clear load.
        auth_check = self.loadauth.check_authentication(extra, auth_type)

    # Setup authorization list variable and error information
    auth_list = auth_check.get('auth_list', [])
    err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type)

    if auth_check.get('error'):
        # Authentication error occurred: do not continue.
        log.warning(err_msg)
        return {'error': {'name': 'AuthenticationError',
                          'message': 'Authentication error occurred.'}}

    # All Token, Eauth, and non-root users must pass the authorization check
    if auth_type != 'user' or (auth_type == 'user' and auth_list):
        # Authorize the request
        authorized = self.ckminions.auth_check(
            auth_list,
            clear_load['fun'],
            clear_load['arg'],
            clear_load['tgt'],
            clear_load.get('tgt_type', 'glob'),
            minions=minions,
            # always accept find_job
            whitelist=['saltutil.find_job'],
        )

        if not authorized:
            # Authorization error occurred. Do not continue.
            if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra:
                log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username'])
            log.warning(err_msg)
            return {'error': {'name': 'AuthorizationError',
                              'message': 'Authorization error occurred.'}}

        # Perform some specific auth_type tasks after the authorization check
        if auth_type == 'token':
            username = auth_check.get('username')
            clear_load['user'] = username
            log.debug('Minion tokenized user = "%s"', username)
        elif auth_type == 'eauth':
            # The username we are attempting to auth with
            clear_load['user'] = self.loadauth.load_name(extra)

    # If we order masters (via a syndic), don't short circuit if no minions
    # are found
    if not self.opts.get('order_masters'):
        # Check for no minions
        if not minions:
            return {
                'enc': 'clear',
                'load': {
                    'jid': None,
                    'minions': minions,
                    'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt'])
                }
            }
    if extra.get('batch', None):
        # Batched execution takes its own publish path.
        return self.publish_batch(clear_load, extra, minions, missing)

    jid = self._prep_jid(clear_load, extra)
    if jid is None:
        return {'enc': 'clear',
                'load': {'error': 'Master failed to assign jid'}}
    payload = self._prep_pub(minions, jid, clear_load, extra, missing)

    # Send it!
    self._send_ssh_pub(payload, ssh_minions=ssh_minions)
    self._send_pub(payload)

    return {
        'enc': 'clear',
        'load': {
            'jid': clear_load['jid'],
            'minions': minions,
            'missing': missing
        }
    }
def _prep_auth_info(self, clear_load):
sensitive_load_keys = []
key = None
if 'token' in clear_load:
auth_type = 'token'
err_name = 'TokenAuthenticationError'
sensitive_load_keys = ['token']
elif 'eauth' in clear_load:
auth_type = 'eauth'
err_name = 'EauthAuthenticationError'
sensitive_load_keys = ['username', 'password']
else:
auth_type = 'user'
err_name = 'UserAuthenticationError'
key = self.key
return auth_type, err_name, key, sensitive_load_keys
def _prep_jid(self, clear_load, extra):
    '''
    Return a jid for this publication, allocated by the configured
    master_job_cache returner.

    :param dict clear_load: Decoded request; an existing ``jid`` value, if
        truthy, is passed through to the returner.
    :param dict extra: Extra kwargs; honors ``nocache``.
    '''
    # the jid in clear_load can be None, '', or something else. this is an
    # attempt to clean up the value before passing to plugins
    passed_jid = clear_load['jid'] if clear_load.get('jid') else None
    nocache = extra.get('nocache', False)

    # Retrieve the jid
    fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
    try:
        # Retrieve the jid
        jid = self.mminion.returners[fstr](nocache=nocache,
                                           passed_jid=passed_jid)
    except (KeyError, TypeError):
        # The returner is not present
        msg = (
            'Failed to allocate a jid. The requested returner \'{0}\' '
            'could not be loaded.'.format(fstr.split('.')[0])
        )
        log.error(msg)
        # NOTE(review): this returns an error *dict* while the caller in
        # publish() only checks ``if jid is None`` -- so on failure the
        # error dict flows onward as the jid.  Looks like a latent bug;
        # confirm with callers before changing the return contract.
        return {'error': msg}
    return jid
def _send_pub(self, load):
    '''
    Take a load and send it across the network to connected minions.

    Publishes once per configured transport (e.g. zeromq, tcp).
    '''
    for _transport, transport_opts in iter_transport_opts(self.opts):
        channel = salt.transport.server.PubServerChannel.factory(transport_opts)
        channel.publish(load)
@property
def ssh_client(self):
    '''
    Lazily constructed SSHClient, cached on the instance after first use.
    '''
    try:
        return self._ssh_client
    except AttributeError:
        # First access: build the client from the master opts and cache it.
        self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
        return self._ssh_client
def _prep_pub(self, minions, jid, clear_load, extra, missing):
    '''
    Take a given load and perform the necessary steps
    to prepare a publication.

    Fires the "new job" events, persists the job into the external and
    master job caches, and builds the minimal load dict that is published
    to the minions.

    :param list minions: Minion ids matched by the target expression.
    :param jid: The job id allocated for this publication.
    :param dict clear_load: Decoded publish request; mutated in place
        (the jid is written into it).
    :param dict extra: Extra kwargs from the request (``master_id``,
        ``id``, ...).
    :param list missing: Requested minions that could not be matched.
    :return: The publish load dict to hand to the transport layer.

    TODO: This is really only bound by temporal cohesion
    and thus should be refactored even further.
    '''
    clear_load['jid'] = jid
    delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)

    # TODO Error reporting over the master event bus
    self.event.fire_event({'minions': minions}, clear_load['jid'])
    new_job_load = {
        'jid': clear_load['jid'],
        'tgt_type': clear_load['tgt_type'],
        'tgt': clear_load['tgt'],
        'user': clear_load['user'],
        'fun': clear_load['fun'],
        'arg': clear_load['arg'],
        'minions': minions,
        'missing': missing,
    }

    # Announce the job on the event bus
    self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))

    if self.opts['ext_job_cache']:
        fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
        save_load_func = True

        # Get the returner's save_load arg_spec.
        try:
            arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr])

            # Check if 'minions' is included in returner's save_load arg_spec.
            # This may be missing in custom returners, which we should warn about.
            if 'minions' not in arg_spec.args:
                log.critical(
                    'The specified returner used for the external job cache '
                    '\'%s\' does not have a \'minions\' kwarg in the returner\'s '
                    'save_load function.', self.opts['ext_job_cache']
                )
        except (AttributeError, KeyError):
            save_load_func = False
            log.critical(
                'The specified returner used for the external job cache '
                '"%s" does not have a save_load function!',
                self.opts['ext_job_cache']
            )

        if save_load_func:
            try:
                self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions)
            except Exception:
                # Best-effort: a broken external job cache must not abort
                # the publish, so log and continue.
                log.critical(
                    'The specified returner threw a stack trace:\n',
                    exc_info=True
                )

    # always write out to the master job caches
    try:
        fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
        self.mminion.returners[fstr](clear_load['jid'], clear_load, minions)
    except KeyError:
        log.critical(
            'The specified returner used for the master job cache '
            '"%s" does not have a save_load function!',
            self.opts['master_job_cache']
        )
    except Exception:
        # Best-effort: cache failures are logged but do not abort the publish.
        log.critical(
            'The specified returner threw a stack trace:\n',
            exc_info=True
        )

    # NOTE: the unused local ``payload = {'enc': 'aes'}`` that used to sit
    # here was dead code (this function returns ``load``; the encrypted
    # envelope is assembled by the transport layer) and has been removed.

    # Altering the contents of the publish load is serious!! Changes here
    # break compatibility with minion/master versions and even tiny
    # additions can have serious implications on the performance of the
    # publish commands.
    #
    # In short, check with Thomas Hatch before you even think about
    # touching this stuff, we can probably do what you want to do another
    # way that won't have a negative impact.
    load = {
        'fun': clear_load['fun'],
        'arg': clear_load['arg'],
        'tgt': clear_load['tgt'],
        'jid': clear_load['jid'],
        'ret': clear_load['ret'],
    }
    # if you specified a master id, lets put that in the load
    if 'master_id' in self.opts:
        load['master_id'] = self.opts['master_id']
    # if someone passed us one, use that
    if 'master_id' in extra:
        load['master_id'] = extra['master_id']
    # Only add the delimiter to the pub data if it is non-default
    if delimiter != DEFAULT_TARGET_DELIM:
        load['delimiter'] = delimiter

    if 'id' in extra:
        load['id'] = extra['id']
    if 'tgt_type' in clear_load:
        load['tgt_type'] = clear_load['tgt_type']
    if 'to' in clear_load:
        load['to'] = clear_load['to']

    if 'kwargs' in clear_load:
        if 'ret_config' in clear_load['kwargs']:
            load['ret_config'] = clear_load['kwargs'].get('ret_config')

        if 'metadata' in clear_load['kwargs']:
            load['metadata'] = clear_load['kwargs'].get('metadata')

        if 'module_executors' in clear_load['kwargs']:
            load['module_executors'] = clear_load['kwargs'].get('module_executors')

        if 'executor_opts' in clear_load['kwargs']:
            load['executor_opts'] = clear_load['kwargs'].get('executor_opts')

        if 'ret_kwargs' in clear_load['kwargs']:
            load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs')

    if 'user' in clear_load:
        log.info(
            'User %s Published command %s with jid %s',
            clear_load['user'], clear_load['fun'], clear_load['jid']
        )
        load['user'] = clear_load['user']
    else:
        log.info(
            'Published command %s with jid %s',
            clear_load['fun'], clear_load['jid']
        )
    log.debug('Published command details %s', load)
    return load
def ping(self, clear_load):
    '''
    Echo the received load straight back to the sender unchanged.
    '''
    return clear_load
|
saltstack/salt
|
salt/master.py
|
ClearFuncs._prep_pub
|
python
|
def _prep_pub(self, minions, jid, clear_load, extra, missing):
'''
Take a given load and perform the necessary steps
to prepare a publication.
TODO: This is really only bound by temporal cohesion
and thus should be refactored even further.
'''
clear_load['jid'] = jid
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
# TODO Error reporting over the master event bus
self.event.fire_event({'minions': minions}, clear_load['jid'])
new_job_load = {
'jid': clear_load['jid'],
'tgt_type': clear_load['tgt_type'],
'tgt': clear_load['tgt'],
'user': clear_load['user'],
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'minions': minions,
'missing': missing,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
if self.opts['ext_job_cache']:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
save_load_func = True
# Get the returner's save_load arg_spec.
try:
arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr])
# Check if 'minions' is included in returner's save_load arg_spec.
# This may be missing in custom returners, which we should warn about.
if 'minions' not in arg_spec.args:
log.critical(
'The specified returner used for the external job cache '
'\'%s\' does not have a \'minions\' kwarg in the returner\'s '
'save_load function.', self.opts['ext_job_cache']
)
except (AttributeError, KeyError):
save_load_func = False
log.critical(
'The specified returner used for the external job cache '
'"%s" does not have a save_load function!',
self.opts['ext_job_cache']
)
if save_load_func:
try:
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# always write out to the master job caches
try:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions)
except KeyError:
log.critical(
'The specified returner used for the master job cache '
'"%s" does not have a save_load function!',
self.opts['master_job_cache']
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
# if you specified a master id, lets put that in the load
if 'master_id' in self.opts:
load['master_id'] = self.opts['master_id']
# if someone passed us one, use that
if 'master_id' in extra:
load['master_id'] = extra['master_id']
# Only add the delimiter to the pub data if it is non-default
if delimiter != DEFAULT_TARGET_DELIM:
load['delimiter'] = delimiter
if 'id' in extra:
load['id'] = extra['id']
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'kwargs' in clear_load:
if 'ret_config' in clear_load['kwargs']:
load['ret_config'] = clear_load['kwargs'].get('ret_config')
if 'metadata' in clear_load['kwargs']:
load['metadata'] = clear_load['kwargs'].get('metadata')
if 'module_executors' in clear_load['kwargs']:
load['module_executors'] = clear_load['kwargs'].get('module_executors')
if 'executor_opts' in clear_load['kwargs']:
load['executor_opts'] = clear_load['kwargs'].get('executor_opts')
if 'ret_kwargs' in clear_load['kwargs']:
load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs')
if 'user' in clear_load:
log.info(
'User %s Published command %s with jid %s',
clear_load['user'], clear_load['fun'], clear_load['jid']
)
load['user'] = clear_load['user']
else:
log.info(
'Published command %s with jid %s',
clear_load['fun'], clear_load['jid']
)
log.debug('Published command details %s', load)
return load
|
Take a given load and perform the necessary steps
to prepare a publication.
TODO: This is really only bound by temporal cohesion
and thus should be refactored even further.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L2241-L2378
|
[
"def get_function_argspec(func, is_class_method=None):\n '''\n A small wrapper around getargspec that also supports callable classes\n :param is_class_method: Pass True if you are sure that the function being passed\n is a class method. The reason for this is that on Python 3\n ``inspect.ismethod`` only returns ``True`` for bound methods,\n while on Python 2, it returns ``True`` for bound and unbound\n methods. So, on Python 3, in case of a class method, you'd\n need the class to which the function belongs to be instantiated\n and this is not always wanted.\n '''\n if not callable(func):\n raise TypeError('{0} is not a callable'.format(func))\n\n if six.PY2:\n if is_class_method is True:\n aspec = inspect.getargspec(func)\n del aspec.args[0] # self\n elif inspect.isfunction(func):\n aspec = inspect.getargspec(func)\n elif inspect.ismethod(func):\n aspec = inspect.getargspec(func)\n del aspec.args[0] # self\n elif isinstance(func, object):\n aspec = inspect.getargspec(func.__call__)\n del aspec.args[0] # self\n else:\n raise TypeError(\n 'Cannot inspect argument list for \\'{0}\\''.format(func)\n )\n else:\n if is_class_method is True:\n aspec = _getargspec(func)\n del aspec.args[0] # self\n elif inspect.isfunction(func):\n aspec = _getargspec(func) # pylint: disable=redefined-variable-type\n elif inspect.ismethod(func):\n aspec = _getargspec(func)\n del aspec.args[0] # self\n elif isinstance(func, object):\n aspec = _getargspec(func.__call__)\n del aspec.args[0] # self\n else:\n raise TypeError(\n 'Cannot inspect argument list for \\'{0}\\''.format(func)\n )\n return aspec\n",
"def tagify(suffix='', prefix='', base=SALT):\n '''\n convenience function to build a namespaced event tag string\n from joining with the TABPART character the base, prefix and suffix\n\n If string prefix is a valid key in TAGS Then use the value of key prefix\n Else use prefix string\n\n If suffix is a list Then join all string elements of suffix individually\n Else use string suffix\n\n '''\n parts = [base, TAGS.get(prefix, prefix)]\n if hasattr(suffix, 'append'): # list so extend parts\n parts.extend(suffix)\n else: # string so append\n parts.append(suffix)\n\n for index, _ in enumerate(parts):\n try:\n parts[index] = salt.utils.stringutils.to_str(parts[index])\n except TypeError:\n parts[index] = str(parts[index])\n return TAGPARTER.join([part for part in parts if part])\n"
] |
class ClearFuncs(object):
'''
Set up functions that are safe to execute when commands sent to the master
without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.key = key
# Create the event manager
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Make an minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
# Make a masterapi object
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
runner_check = self.ckminions.runner_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not runner_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(runner_check, dict) and 'error' in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(fun,
clear_load.get('kwarg', {}),
username)
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
return {'error': {'name': exc.__class__.__name__,
'args': exc.args,
'message': six.text_type(exc)}}
def wheel(self, clear_load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
wheel_check = self.ckminions.wheel_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not wheel_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(wheel_check, dict) and 'error' in wheel_check:
# A dictionary with an error name/message was handled by ckminions.wheel_check
return wheel_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
jid = salt.utils.jid.gen_jid(self.opts)
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': username}
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
data['return'] = ret['return']
data['success'] = ret['success']
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
data['success'] = False
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
def mk_token(self, clear_load):
'''
Create and return an authentication token, the clear load needs to
contain the eauth key and the needed authentication creds.
'''
token = self.loadauth.mk_token(clear_load)
if not token:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
return token
def get_token(self, clear_load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in clear_load:
return False
return self.loadauth.get_tok(clear_load['token'])
def publish_batch(self, clear_load, extra, minions, missing):
batch_load = {}
batch_load.update(clear_load)
import salt.cli.batch_async
batch = salt.cli.batch_async.BatchAsync(
self.local.opts,
functools.partial(self._prep_jid, clear_load, {}),
batch_load
)
ioloop = tornado.ioloop.IOLoop.current()
self._prep_pub(minions, batch.batch_jid, clear_load, extra, missing)
ioloop.add_callback(batch.start)
return {
'enc': 'clear',
'load': {
'jid': batch.batch_jid,
'minions': minions,
'missing': missing
}
}
def publish(self, clear_load):
'''
This method sends out publications to the minions, it can only be used
by the LocalClient.
'''
extra = clear_load.get('kwargs', {})
publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist'])
if publisher_acl.user_is_blacklisted(clear_load['user']) or \
publisher_acl.cmd_is_blacklisted(clear_load['fun']):
log.error(
'%s does not have permissions to run %s. Please contact '
'your local administrator if you believe this is in '
'error.\n', clear_load['user'], clear_load['fun']
)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Retrieve the minions list
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
_res = self.ckminions.check_minions(
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
delimiter
)
minions = _res.get('minions', list())
missing = _res.get('missing', list())
ssh_minions = _res.get('ssh_minions', False)
# Check for external auth calls and authenticate
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
if auth_type == 'user':
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
else:
auth_check = self.loadauth.check_authentication(extra, auth_type)
# Setup authorization list variable and error information
auth_list = auth_check.get('auth_list', [])
err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type)
if auth_check.get('error'):
# Authentication error occurred: do not continue.
log.warning(err_msg)
return {'error': {'name': 'AuthenticationError',
'message': 'Authentication error occurred.'}}
# All Token, Eauth, and non-root users must pass the authorization check
if auth_type != 'user' or (auth_type == 'user' and auth_list):
# Authorize the request
authorized = self.ckminions.auth_check(
auth_list,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
minions=minions,
# always accept find_job
whitelist=['saltutil.find_job'],
)
if not authorized:
# Authorization error occurred. Do not continue.
if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra:
log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username'])
log.warning(err_msg)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Perform some specific auth_type tasks after the authorization check
if auth_type == 'token':
username = auth_check.get('username')
clear_load['user'] = username
log.debug('Minion tokenized user = "%s"', username)
elif auth_type == 'eauth':
# The username we are attempting to auth with
clear_load['user'] = self.loadauth.load_name(extra)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not minions:
return {
'enc': 'clear',
'load': {
'jid': None,
'minions': minions,
'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt'])
}
}
if extra.get('batch', None):
return self.publish_batch(clear_load, extra, minions, missing)
jid = self._prep_jid(clear_load, extra)
if jid is None:
return {'enc': 'clear',
'load': {'error': 'Master failed to assign jid'}}
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
# Send it!
self._send_ssh_pub(payload, ssh_minions=ssh_minions)
self._send_pub(payload)
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions,
'missing': missing
}
}
def _prep_auth_info(self, clear_load):
sensitive_load_keys = []
key = None
if 'token' in clear_load:
auth_type = 'token'
err_name = 'TokenAuthenticationError'
sensitive_load_keys = ['token']
elif 'eauth' in clear_load:
auth_type = 'eauth'
err_name = 'EauthAuthenticationError'
sensitive_load_keys = ['username', 'password']
else:
auth_type = 'user'
err_name = 'UserAuthenticationError'
key = self.key
return auth_type, err_name, key, sensitive_load_keys
def _prep_jid(self, clear_load, extra):
'''
Return a jid for this publication
'''
# the jid in clear_load can be None, '', or something else. this is an
# attempt to clean up the value before passing to plugins
passed_jid = clear_load['jid'] if clear_load.get('jid') else None
nocache = extra.get('nocache', False)
# Retrieve the jid
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
try:
# Retrieve the jid
jid = self.mminion.returners[fstr](nocache=nocache,
passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
'Failed to allocate a jid. The requested returner \'{0}\' '
'could not be loaded.'.format(fstr.split('.')[0])
)
log.error(msg)
return {'error': msg}
return jid
def _send_pub(self, load):
'''
Take a load and send it across the network to connected minions
'''
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.publish(load)
@property
def ssh_client(self):
if not hasattr(self, '_ssh_client'):
self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
return self._ssh_client
def _send_ssh_pub(self, load, ssh_minions=False):
'''
Take a load and send it across the network to ssh minions
'''
if self.opts['enable_ssh_minions'] is True and ssh_minions is True:
log.debug('Send payload to ssh minions')
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
def ping(self, clear_load):
'''
Send the load back to the sender.
'''
return clear_load
|
saltstack/salt
|
salt/states/sqlite3.py
|
row_absent
|
python
|
def row_absent(name, db, table, where_sql, where_args=None):
    '''
    Makes sure the specified row is absent in db. If multiple rows
    match where_sql, then the state will fail.

    name
        Only used as the unique ID
    db
        The database file name
    table
        The table name to check
    where_sql
        The sql to select the row to check
    where_args
        The list parameters to substitute in where_sql
    '''
    # Standard Salt state return structure.
    changes = {'name': name,
               'changes': {},
               'result': None,
               'comment': ''}
    conn = None
    try:
        conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
        # Dict rows let the matched row be reported verbatim in 'changes'.
        conn.row_factory = _dict_factory
        rows = None
        # NOTE(review): table and where_sql are interpolated directly into
        # the SQL text; they must come from trusted state data. Only
        # where_args is parameterized.
        if where_args is None:
            rows = _query(conn,
                          "SELECT * FROM `" + table + "` WHERE " + where_sql)
        else:
            rows = _query(conn,
                          "SELECT * FROM `" + table + "` WHERE " + where_sql,
                          where_args)
        if len(rows) > 1:
            # Ambiguous match: refuse to guess which row was meant.
            changes['result'] = False
            changes['comment'] = "More than one row matched the specified query"
        elif len(rows) == 1:
            if __opts__['test']:
                # Dry run: report what would be deleted without touching db.
                changes['result'] = True
                changes['comment'] = "Row will be removed in " + table
                changes['changes']['old'] = rows[0]
            else:
                if where_args is None:
                    cursor = conn.execute("DELETE FROM `" +
                                          table + "` WHERE " + where_sql)
                else:
                    cursor = conn.execute("DELETE FROM `" +
                                          table + "` WHERE " + where_sql,
                                          where_args)
                conn.commit()
                # Exactly one row was selected above, so exactly one
                # deletion is expected.
                if cursor.rowcount == 1:
                    changes['result'] = True
                    changes['comment'] = "Row removed"
                    changes['changes']['old'] = rows[0]
                else:
                    changes['result'] = False
                    changes['comment'] = "Unable to remove row"
        else:
            # No match: the desired state already holds.
            changes['result'] = True
            changes['comment'] = 'Row is absent'
    except Exception as e:
        # State modules report failures instead of raising; the error
        # message becomes the state comment.
        changes['result'] = False
        changes['comment'] = six.text_type(e)
    finally:
        if conn:
            conn.close()
    return changes
|
Makes sure the specified row is absent in db. If multiple rows
match where_sql, then the state will fail.
name
Only used as the unique ID
db
The database file name
table
The table name to check
where_sql
The sql to select the row to check
where_args
The list parameters to substitute in where_sql
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/sqlite3.py#L117-L191
|
[
"def _query(conn, sql, parameters=None):\n cursor = None\n if parameters is None:\n cursor = conn.execute(sql)\n else:\n cursor = conn.execute(sql, parameters)\n return cursor.fetchall()\n"
] |
# -*- coding: utf-8 -*-
'''
Management of SQLite3 databases
===============================
.. versionadded:: 2016.3.0
:depends: - SQLite3 Python Module
:configuration: See :py:mod:`salt.modules.sqlite3` for setup instructions
The sqlite3 module is used to create and manage sqlite3 databases
and execute queries
Here is an example of creating a table using sql statements:
.. code-block:: yaml
users:
sqlite3.table_present:
- db: /var/www/data/app.sqlite
- schema: CREATE TABLE `users` (`username` TEXT COLLATE NOCASE UNIQUE NOT NULL, `password` BLOB NOT NULL, `salt` BLOB NOT NULL, `last_login` INT)
Here is an example of creating a table using yaml/jinja instead of sql:
.. code-block:: yaml
users:
sqlite3.table_present:
- db: /var/www/app.sqlite
- schema:
- email TEXT COLLATE NOCASE UNIQUE NOT NULL
- firstname TEXT NOT NULL
- lastname TEXT NOT NULL
- company TEXT NOT NULL
- password BLOB NOT NULL
- salt BLOB NOT NULL
Here is an example of making sure a table is absent:
.. code-block:: yaml
badservers:
sqlite3.table_absent:
- db: /var/www/data/users.sqlite
Sometimes you would like to have specific data in tables to be used by other services
Here is an example of making sure rows with specific data exist:
.. code-block:: yaml
user_john_doe_xyz:
sqlite3.row_present:
- db: /var/www/app.sqlite
- table: users
- where_sql: email='john.doe@companyxyz.com'
- data:
email: john.doe@companyxyz.com
lastname: doe
firstname: john
company: companyxyz.com
password: abcdef012934125
salt: abcdef012934125
- require:
- sqlite3: users
Here is an example of removing a row from a table:
.. code-block:: yaml
user_john_doe_abc:
sqlite3.row_absent:
- db: /var/www/app.sqlite
- table: users
- where_sql: email="john.doe@companyabc.com"
- require:
- sqlite3: users
Note that there is no explicit state to perform random queries, however, this
can be approximated with sqlite3's module functions and module.run:
.. code-block:: yaml
zone-delete:
module.run:
- name: sqlite3.modify
- db: {{ db }}
- sql: "DELETE FROM records WHERE id > {{ count[0] }} AND domain_id = {{ domain_id }}"
- watch:
- sqlite3: zone-insert-12
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt libs
from salt.ext import six
try:
import sqlite3
HAS_SQLITE3 = True
except ImportError:
HAS_SQLITE3 = False
def __virtual__():
    '''
    Only load if the sqlite3 module is available
    '''
    # HAS_SQLITE3 was determined by the guarded import at module load
    # time; returning False keeps Salt from loading this state module.
    return HAS_SQLITE3
def row_present(name,
                db,
                table,
                data,
                where_sql,
                where_args=None,
                update=False):
    '''
    Checks to make sure the given row exists. If row exists and update is True
    then row will be updated with data. Otherwise it will leave existing
    row unmodified and check it against data. If the existing data
    doesn't match data_check the state will fail. If the row doesn't
    exist then it will insert data into the table. If more than one
    row matches, then the state will fail.

    name
        Only used as the unique ID
    db
        The database file name
    table
        The table name to check the data
    data
        The dictionary of key/value pairs to check against if
        row exists, insert into the table if it doesn't
    where_sql
        The sql to select the row to check
    where_args
        The list parameters to substitute in where_sql
    update
        True will replace the existing row with data
        When False and the row exists and data does not equal
        the row data then the state will fail
    '''
    # Standard Salt state return structure.
    changes = {'name': name,
               'changes': {},
               'result': None,
               'comment': ''}
    conn = None
    try:
        conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
        # Dict rows let us compare column values by name below.
        conn.row_factory = _dict_factory
        rows = None
        # NOTE(review): table and where_sql are interpolated directly into
        # the SQL text and must come from trusted state data; only
        # where_args and the data values are parameterized.
        if where_args is None:
            rows = _query(conn, "SELECT * FROM `" +
                          table + "` WHERE " + where_sql)
        else:
            rows = _query(conn, "SELECT * FROM `" +
                          table + "` WHERE " + where_sql,
                          where_args)
        if len(rows) > 1:
            # Ambiguous match: refuse to guess which row was meant.
            changes['result'] = False
            changes['comment'] = 'More than one row matched the specified query'
        elif len(rows) == 1:
            # A single row matched: verify each desired column, updating
            # the row first if the caller allows it.
            for key, value in six.iteritems(data):
                if key in rows[0] and rows[0][key] != value:
                    if update:
                        if __opts__['test']:
                            changes['result'] = True
                            # BUG FIX: message previously read
                            # "Row will be update in".
                            changes['comment'] = "Row will be updated in " + table
                        else:
                            # Renamed inner loop vars; the original shadowed
                            # the outer (key, value) pair.
                            columns = []
                            params = []
                            for col_name, col_value in six.iteritems(data):
                                columns.append("`" + col_name + "`=?")
                                params.append(col_value)
                            if where_args is not None:
                                params += where_args
                            sql = "UPDATE `" + table + "` SET "
                            sql += ",".join(columns)
                            sql += " WHERE "
                            sql += where_sql
                            cursor = conn.execute(sql, params)
                            conn.commit()
                            if cursor.rowcount == 1:
                                changes['result'] = True
                                changes['comment'] = "Row updated"
                                changes['changes']['old'] = rows[0]
                                changes['changes']['new'] = data
                            else:
                                changes['result'] = False
                                changes['comment'] = "Row update failed"
                    else:
                        changes['result'] = False
                        # BUG FIX: the two string halves were concatenated
                        # without a space, producing "doesnot".
                        changes['comment'] = ("Existing data does "
                                              "not match desired state")
                    # A single mismatching column decides the outcome.
                    break
            if changes['result'] is None:
                # No column differed: the row already satisfies 'data'.
                changes['result'] = True
                changes['comment'] = "Row exists"
        else:
            # No row matched: insert one built from 'data'.
            if __opts__['test']:
                changes['result'] = True
                changes['changes']['new'] = data
                changes['comment'] = "Row will be inserted into " + table
            else:
                columns = []
                value_stmt = []
                values = []
                for col_name, col_value in six.iteritems(data):
                    value_stmt.append('?')
                    values.append(col_value)
                    columns.append("`" + col_name + "`")
                sql = "INSERT INTO `" + table + "` ("
                sql += ",".join(columns)
                sql += ") VALUES ("
                sql += ",".join(value_stmt)
                sql += ")"
                cursor = conn.execute(sql, values)
                conn.commit()
                if cursor.rowcount == 1:
                    changes['result'] = True
                    changes['changes']['new'] = data
                    changes['comment'] = 'Inserted row'
                else:
                    changes['result'] = False
                    changes['comment'] = "Unable to insert data"
    except Exception as e:
        # State modules report failures instead of raising.
        changes['result'] = False
        changes['comment'] = six.text_type(e)
    finally:
        if conn:
            conn.close()
    return changes
def table_absent(name, db):
    '''
    Make sure the specified table does not exist

    name
        The name of the table
    db
        The name of the database file
    '''
    # Standard Salt state return structure.
    changes = {'name': name,
               'changes': {},
               'result': None,
               'comment': ''}
    conn = None
    try:
        conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
        # The stored CREATE statement doubles as the 'old' value in the
        # reported changes.
        tables = _query(conn, "SELECT sql FROM sqlite_master " +
                        " WHERE type='table' AND name=?", [name])
        if len(tables) == 1:
            if __opts__['test']:
                # Dry run: report the intended drop only.
                changes['result'] = True
                changes['comment'] = "'" + name + "' will be dropped"
            else:
                # CONSISTENCY FIX: backtick-quote the identifier like every
                # other statement in this module (was unquoted before).
                conn.execute("DROP TABLE `" + name + "`")
                conn.commit()
                changes['changes']['old'] = tables[0][0]
                changes['result'] = True
                changes['comment'] = "'" + name + "' was dropped"
        elif not tables:
            changes['result'] = True
            changes['comment'] = "'" + name + "' is already absent"
        else:
            # sqlite_master should never hold duplicate table names; treat
            # that as a hard failure if it somehow happens.
            changes['result'] = False
            changes['comment'] = "Multiple tables with the same name='" + \
                                 name + "'"
    except Exception as e:
        # State modules report failures instead of raising.
        changes['result'] = False
        changes['comment'] = six.text_type(e)
    finally:
        if conn:
            conn.close()
    return changes
def table_present(name, db, schema, force=False):
    '''
    Make sure the specified table exists with the specified schema

    name
        The name of the table
    db
        The name of the database file
    schema
        The dictionary containing the schema information
    force
        If the name of the table exists and force is set to False,
        the state will fail. If force is set to True, the existing
        table will be replaced with the new table
    '''
    # Standard Salt state return structure.
    changes = {'name': name,
               'changes': {},
               'result': None,
               'comment': ''}
    conn = None
    try:
        conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
        tables = _query(conn,
                        "SELECT sql FROM sqlite_master " +
                        "WHERE type='table' AND name=?", [name])
        if len(tables) == 1:
            # Table exists: compare the desired schema text against the
            # stored CREATE statement. This is an exact string comparison,
            # so formatting differences count as a mismatch.
            sql = None
            if isinstance(schema, six.string_types):
                sql = schema.strip()
            else:
                sql = _get_sql_from_schema(name, schema)
            if sql != tables[0][0]:
                if force:
                    if __opts__['test']:
                        changes['result'] = True
                        changes['changes']['old'] = tables[0][0]
                        changes['changes']['new'] = sql
                        changes['comment'] = "'" + name + "' will be replaced"
                    else:
                        # Replacing drops all existing data in the table.
                        conn.execute("DROP TABLE `" + name + "`")
                        conn.execute(sql)
                        conn.commit()
                        changes['result'] = True
                        changes['changes']['old'] = tables[0][0]
                        changes['changes']['new'] = sql
                        changes['comment'] = "Replaced '" + name + "'"
                else:
                    changes['result'] = False
                    changes['comment'] = "Expected schema=" + sql + \
                                         "\nactual schema=" + tables[0][0]
            else:
                changes['result'] = True
                changes['comment'] = "'" + name + \
                                     "' exists with matching schema"
        elif not tables:
            # Create the table
            sql = None
            if isinstance(schema, six.string_types):
                sql = schema
            else:
                sql = _get_sql_from_schema(name, schema)
            if __opts__['test']:
                changes['result'] = True
                changes['changes']['new'] = sql
                changes['comment'] = "'" + name + "' will be created"
            else:
                conn.execute(sql)
                conn.commit()
                changes['result'] = True
                changes['changes']['new'] = sql
                changes['comment'] = "Created table '" + name + "'"
        else:
            changes['result'] = False
            changes['comment'] = 'Multiple tables with the same name=' + name
    except Exception as e:
        changes['result'] = False
        # CONSISTENCY FIX: use six.text_type like the other states in this
        # module (was str(e), which can raise on py2 unicode messages).
        changes['comment'] = six.text_type(e)
    finally:
        if conn:
            conn.close()
    return changes
def _query(conn, sql, parameters=None):
cursor = None
if parameters is None:
cursor = conn.execute(sql)
else:
cursor = conn.execute(sql, parameters)
return cursor.fetchall()
def _get_sql_from_schema(name, schema):
return "CREATE TABLE `" + name + "` (" + ",".join(schema) + ")"
def _dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
|
saltstack/salt
|
salt/states/sqlite3.py
|
row_present
|
python
|
def row_present(name,
db,
table,
data,
where_sql,
where_args=None,
update=False):
'''
Checks to make sure the given row exists. If row exists and update is True
then row will be updated with data. Otherwise it will leave existing
row unmodified and check it against data. If the existing data
doesn't match data_check the state will fail. If the row doesn't
exist then it will insert data into the table. If more than one
row matches, then the state will fail.
name
Only used as the unique ID
db
The database file name
table
The table name to check the data
data
The dictionary of key/value pairs to check against if
row exists, insert into the table if it doesn't
where_sql
The sql to select the row to check
where_args
The list parameters to substitute in where_sql
update
True will replace the existing row with data
When False and the row exists and data does not equal
the row data then the state will fail
'''
changes = {'name': name,
'changes': {},
'result': None,
'comment': ''}
conn = None
try:
conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = _dict_factory
rows = None
if where_args is None:
rows = _query(conn, "SELECT * FROM `" +
table + "` WHERE " + where_sql)
else:
rows = _query(conn, "SELECT * FROM `" +
table + "` WHERE " + where_sql,
where_args)
if len(rows) > 1:
changes['result'] = False
changes['comment'] = 'More than one row matched the specified query'
elif len(rows) == 1:
for key, value in six.iteritems(data):
if key in rows[0] and rows[0][key] != value:
if update:
if __opts__['test']:
changes['result'] = True
changes['comment'] = "Row will be update in " + table
else:
columns = []
params = []
for key, value in six.iteritems(data):
columns.append("`" + key + "`=?")
params.append(value)
if where_args is not None:
params += where_args
sql = "UPDATE `" + table + "` SET "
sql += ",".join(columns)
sql += " WHERE "
sql += where_sql
cursor = conn.execute(sql, params)
conn.commit()
if cursor.rowcount == 1:
changes['result'] = True
changes['comment'] = "Row updated"
changes['changes']['old'] = rows[0]
changes['changes']['new'] = data
else:
changes['result'] = False
changes['comment'] = "Row update failed"
else:
changes['result'] = False
changes['comment'] = "Existing data does" + \
"not match desired state"
break
if changes['result'] is None:
changes['result'] = True
changes['comment'] = "Row exists"
else:
if __opts__['test']:
changes['result'] = True
changes['changes']['new'] = data
changes['comment'] = "Row will be inserted into " + table
else:
columns = []
value_stmt = []
values = []
for key, value in six.iteritems(data):
value_stmt.append('?')
values.append(value)
columns.append("`" + key + "`")
sql = "INSERT INTO `" + table + "` ("
sql += ",".join(columns)
sql += ") VALUES ("
sql += ",".join(value_stmt)
sql += ")"
cursor = conn.execute(sql, values)
conn.commit()
if cursor.rowcount == 1:
changes['result'] = True
changes['changes']['new'] = data
changes['comment'] = 'Inserted row'
else:
changes['result'] = False
changes['comment'] = "Unable to insert data"
except Exception as e:
changes['result'] = False
changes['comment'] = six.text_type(e)
finally:
if conn:
conn.close()
return changes
|
Checks to make sure the given row exists. If row exists and update is True
then row will be updated with data. Otherwise it will leave existing
row unmodified and check it against data. If the existing data
doesn't match data_check the state will fail. If the row doesn't
exist then it will insert data into the table. If more than one
row matches, then the state will fail.
name
Only used as the unique ID
db
The database file name
table
The table name to check the data
data
The dictionary of key/value pairs to check against if
row exists, insert into the table if it doesn't
where_sql
The sql to select the row to check
where_args
The list parameters to substitute in where_sql
update
True will replace the existing row with data
When False and the row exists and data does not equal
the row data then the state will fail
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/sqlite3.py#L194-L331
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def _query(conn, sql, parameters=None):\n cursor = None\n if parameters is None:\n cursor = conn.execute(sql)\n else:\n cursor = conn.execute(sql, parameters)\n return cursor.fetchall()\n"
] |
# -*- coding: utf-8 -*-
'''
Management of SQLite3 databases
===============================
.. versionadded:: 2016.3.0
:depends: - SQLite3 Python Module
:configuration: See :py:mod:`salt.modules.sqlite3` for setup instructions
The sqlite3 module is used to create and manage sqlite3 databases
and execute queries
Here is an example of creating a table using sql statements:
.. code-block:: yaml
users:
sqlite3.table_present:
- db: /var/www/data/app.sqlite
- schema: CREATE TABLE `users` (`username` TEXT COLLATE NOCASE UNIQUE NOT NULL, `password` BLOB NOT NULL, `salt` BLOB NOT NULL, `last_login` INT)
Here is an example of creating a table using yaml/jinja instead of sql:
.. code-block:: yaml
users:
sqlite3.table_present:
- db: /var/www/app.sqlite
- schema:
- email TEXT COLLATE NOCASE UNIQUE NOT NULL
- firstname TEXT NOT NULL
- lastname TEXT NOT NULL
- company TEXT NOT NULL
- password BLOB NOT NULL
- salt BLOB NOT NULL
Here is an example of making sure a table is absent:
.. code-block:: yaml
badservers:
sqlite3.table_absent:
- db: /var/www/data/users.sqlite
Sometimes you would like to have specific data in tables to be used by other services
Here is an example of making sure rows with specific data exist:
.. code-block:: yaml
user_john_doe_xyz:
sqlite3.row_present:
- db: /var/www/app.sqlite
- table: users
- where_sql: email='john.doe@companyxyz.com'
- data:
email: john.doe@companyxyz.com
lastname: doe
firstname: john
company: companyxyz.com
password: abcdef012934125
salt: abcdef012934125
- require:
- sqlite3: users
Here is an example of removing a row from a table:
.. code-block:: yaml
user_john_doe_abc:
sqlite3.row_absent:
- db: /var/www/app.sqlite
- table: users
- where_sql: email="john.doe@companyabc.com"
- require:
- sqlite3: users
Note that there is no explicit state to perform random queries, however, this
can be approximated with sqlite3's module functions and module.run:
.. code-block:: yaml
zone-delete:
module.run:
- name: sqlite3.modify
- db: {{ db }}
- sql: "DELETE FROM records WHERE id > {{ count[0] }} AND domain_id = {{ domain_id }}"
- watch:
- sqlite3: zone-insert-12
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt libs
from salt.ext import six
try:
import sqlite3
HAS_SQLITE3 = True
except ImportError:
HAS_SQLITE3 = False
def __virtual__():
'''
Only load if the sqlite3 module is available
'''
return HAS_SQLITE3
def row_absent(name, db, table, where_sql, where_args=None):
'''
Makes sure the specified row is absent in db. If multiple rows
match where_sql, then the state will fail.
name
Only used as the unique ID
db
The database file name
table
The table name to check
where_sql
The sql to select the row to check
where_args
The list parameters to substitute in where_sql
'''
changes = {'name': name,
'changes': {},
'result': None,
'comment': ''}
conn = None
try:
conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = _dict_factory
rows = None
if where_args is None:
rows = _query(conn,
"SELECT * FROM `" + table + "` WHERE " + where_sql)
else:
rows = _query(conn,
"SELECT * FROM `" + table + "` WHERE " + where_sql,
where_args)
if len(rows) > 1:
changes['result'] = False
changes['comment'] = "More than one row matched the specified query"
elif len(rows) == 1:
if __opts__['test']:
changes['result'] = True
changes['comment'] = "Row will be removed in " + table
changes['changes']['old'] = rows[0]
else:
if where_args is None:
cursor = conn.execute("DELETE FROM `" +
table + "` WHERE " + where_sql)
else:
cursor = conn.execute("DELETE FROM `" +
table + "` WHERE " + where_sql,
where_args)
conn.commit()
if cursor.rowcount == 1:
changes['result'] = True
changes['comment'] = "Row removed"
changes['changes']['old'] = rows[0]
else:
changes['result'] = False
changes['comment'] = "Unable to remove row"
else:
changes['result'] = True
changes['comment'] = 'Row is absent'
except Exception as e:
changes['result'] = False
changes['comment'] = six.text_type(e)
finally:
if conn:
conn.close()
return changes
def table_absent(name, db):
'''
Make sure the specified table does not exist
name
The name of the table
db
The name of the database file
'''
changes = {'name': name,
'changes': {},
'result': None,
'comment': ''}
conn = None
try:
conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
tables = _query(conn, "SELECT sql FROM sqlite_master " +
" WHERE type='table' AND name=?", [name])
if len(tables) == 1:
if __opts__['test']:
changes['result'] = True
changes['comment'] = "'" + name + "' will be dropped"
else:
conn.execute("DROP TABLE " + name)
conn.commit()
changes['changes']['old'] = tables[0][0]
changes['result'] = True
changes['comment'] = "'" + name + "' was dropped"
elif not tables:
changes['result'] = True
changes['comment'] = "'" + name + "' is already absent"
else:
changes['result'] = False
changes['comment'] = "Multiple tables with the same name='" + \
name + "'"
except Exception as e:
changes['result'] = False
changes['comment'] = six.text_type(e)
finally:
if conn:
conn.close()
return changes
def table_present(name, db, schema, force=False):
'''
Make sure the specified table exists with the specified schema
name
The name of the table
db
The name of the database file
schema
The dictionary containing the schema information
force
If the name of the table exists and force is set to False,
the state will fail. If force is set to True, the existing
table will be replaced with the new table
'''
changes = {'name': name,
'changes': {},
'result': None,
'comment': ''}
conn = None
try:
conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
tables = _query(conn,
"SELECT sql FROM sqlite_master " +
"WHERE type='table' AND name=?", [name])
if len(tables) == 1:
sql = None
if isinstance(schema, six.string_types):
sql = schema.strip()
else:
sql = _get_sql_from_schema(name, schema)
if sql != tables[0][0]:
if force:
if __opts__['test']:
changes['result'] = True
changes['changes']['old'] = tables[0][0]
changes['changes']['new'] = sql
changes['comment'] = "'" + name + "' will be replaced"
else:
conn.execute("DROP TABLE `" + name + "`")
conn.execute(sql)
conn.commit()
changes['result'] = True
changes['changes']['old'] = tables[0][0]
changes['changes']['new'] = sql
changes['comment'] = "Replaced '" + name + "'"
else:
changes['result'] = False
changes['comment'] = "Expected schema=" + sql + \
"\nactual schema=" + tables[0][0]
else:
changes['result'] = True
changes['comment'] = "'" + name + \
"' exists with matching schema"
elif not tables:
# Create the table
sql = None
if isinstance(schema, six.string_types):
sql = schema
else:
sql = _get_sql_from_schema(name, schema)
if __opts__['test']:
changes['result'] = True
changes['changes']['new'] = sql
changes['comment'] = "'" + name + "' will be created"
else:
conn.execute(sql)
conn.commit()
changes['result'] = True
changes['changes']['new'] = sql
changes['comment'] = "Created table '" + name + "'"
else:
changes['result'] = False
changes['comment'] = 'Multiple tables with the same name=' + name
except Exception as e:
changes['result'] = False
changes['comment'] = str(e)
finally:
if conn:
conn.close()
return changes
def _query(conn, sql, parameters=None):
cursor = None
if parameters is None:
cursor = conn.execute(sql)
else:
cursor = conn.execute(sql, parameters)
return cursor.fetchall()
def _get_sql_from_schema(name, schema):
return "CREATE TABLE `" + name + "` (" + ",".join(schema) + ")"
def _dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
|
saltstack/salt
|
salt/states/sqlite3.py
|
table_absent
|
python
|
def table_absent(name, db):
'''
Make sure the specified table does not exist
name
The name of the table
db
The name of the database file
'''
changes = {'name': name,
'changes': {},
'result': None,
'comment': ''}
conn = None
try:
conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
tables = _query(conn, "SELECT sql FROM sqlite_master " +
" WHERE type='table' AND name=?", [name])
if len(tables) == 1:
if __opts__['test']:
changes['result'] = True
changes['comment'] = "'" + name + "' will be dropped"
else:
conn.execute("DROP TABLE " + name)
conn.commit()
changes['changes']['old'] = tables[0][0]
changes['result'] = True
changes['comment'] = "'" + name + "' was dropped"
elif not tables:
changes['result'] = True
changes['comment'] = "'" + name + "' is already absent"
else:
changes['result'] = False
changes['comment'] = "Multiple tables with the same name='" + \
name + "'"
except Exception as e:
changes['result'] = False
changes['comment'] = six.text_type(e)
finally:
if conn:
conn.close()
return changes
|
Make sure the specified table does not exist
name
The name of the table
db
The name of the database file
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/sqlite3.py#L334-L380
|
[
"def _query(conn, sql, parameters=None):\n cursor = None\n if parameters is None:\n cursor = conn.execute(sql)\n else:\n cursor = conn.execute(sql, parameters)\n return cursor.fetchall()\n"
] |
# -*- coding: utf-8 -*-
'''
Management of SQLite3 databases
===============================
.. versionadded:: 2016.3.0
:depends: - SQLite3 Python Module
:configuration: See :py:mod:`salt.modules.sqlite3` for setup instructions
The sqlite3 module is used to create and manage sqlite3 databases
and execute queries
Here is an example of creating a table using sql statements:
.. code-block:: yaml
users:
sqlite3.table_present:
- db: /var/www/data/app.sqlite
- schema: CREATE TABLE `users` (`username` TEXT COLLATE NOCASE UNIQUE NOT NULL, `password` BLOB NOT NULL, `salt` BLOB NOT NULL, `last_login` INT)
Here is an example of creating a table using yaml/jinja instead of sql:
.. code-block:: yaml
users:
sqlite3.table_present:
- db: /var/www/app.sqlite
- schema:
- email TEXT COLLATE NOCASE UNIQUE NOT NULL
- firstname TEXT NOT NULL
- lastname TEXT NOT NULL
- company TEXT NOT NULL
- password BLOB NOT NULL
- salt BLOB NOT NULL
Here is an example of making sure a table is absent:
.. code-block:: yaml
badservers:
sqlite3.table_absent:
- db: /var/www/data/users.sqlite
Sometimes you would like to have specific data in tables to be used by other services
Here is an example of making sure rows with specific data exist:
.. code-block:: yaml
user_john_doe_xyz:
sqlite3.row_present:
- db: /var/www/app.sqlite
- table: users
- where_sql: email='john.doe@companyxyz.com'
- data:
email: john.doe@companyxyz.com
lastname: doe
firstname: john
company: companyxyz.com
password: abcdef012934125
salt: abcdef012934125
- require:
- sqlite3: users
Here is an example of removing a row from a table:
.. code-block:: yaml
user_john_doe_abc:
sqlite3.row_absent:
- db: /var/www/app.sqlite
- table: users
- where_sql: email="john.doe@companyabc.com"
- require:
- sqlite3: users
Note that there is no explicit state to perform random queries, however, this
can be approximated with sqlite3's module functions and module.run:
.. code-block:: yaml
zone-delete:
module.run:
- name: sqlite3.modify
- db: {{ db }}
- sql: "DELETE FROM records WHERE id > {{ count[0] }} AND domain_id = {{ domain_id }}"
- watch:
- sqlite3: zone-insert-12
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt libs
from salt.ext import six
try:
import sqlite3
HAS_SQLITE3 = True
except ImportError:
HAS_SQLITE3 = False
def __virtual__():
'''
Only load if the sqlite3 module is available
'''
return HAS_SQLITE3
def row_absent(name, db, table, where_sql, where_args=None):
'''
Makes sure the specified row is absent in db. If multiple rows
match where_sql, then the state will fail.
name
Only used as the unique ID
db
The database file name
table
The table name to check
where_sql
The sql to select the row to check
where_args
The list parameters to substitute in where_sql
'''
changes = {'name': name,
'changes': {},
'result': None,
'comment': ''}
conn = None
try:
conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = _dict_factory
rows = None
if where_args is None:
rows = _query(conn,
"SELECT * FROM `" + table + "` WHERE " + where_sql)
else:
rows = _query(conn,
"SELECT * FROM `" + table + "` WHERE " + where_sql,
where_args)
if len(rows) > 1:
changes['result'] = False
changes['comment'] = "More than one row matched the specified query"
elif len(rows) == 1:
if __opts__['test']:
changes['result'] = True
changes['comment'] = "Row will be removed in " + table
changes['changes']['old'] = rows[0]
else:
if where_args is None:
cursor = conn.execute("DELETE FROM `" +
table + "` WHERE " + where_sql)
else:
cursor = conn.execute("DELETE FROM `" +
table + "` WHERE " + where_sql,
where_args)
conn.commit()
if cursor.rowcount == 1:
changes['result'] = True
changes['comment'] = "Row removed"
changes['changes']['old'] = rows[0]
else:
changes['result'] = False
changes['comment'] = "Unable to remove row"
else:
changes['result'] = True
changes['comment'] = 'Row is absent'
except Exception as e:
changes['result'] = False
changes['comment'] = six.text_type(e)
finally:
if conn:
conn.close()
return changes
def row_present(name,
db,
table,
data,
where_sql,
where_args=None,
update=False):
'''
Checks to make sure the given row exists. If row exists and update is True
then row will be updated with data. Otherwise it will leave existing
row unmodified and check it against data. If the existing data
doesn't match data_check the state will fail. If the row doesn't
exist then it will insert data into the table. If more than one
row matches, then the state will fail.
name
Only used as the unique ID
db
The database file name
table
The table name to check the data
data
The dictionary of key/value pairs to check against if
row exists, insert into the table if it doesn't
where_sql
The sql to select the row to check
where_args
The list parameters to substitute in where_sql
update
True will replace the existing row with data
When False and the row exists and data does not equal
the row data then the state will fail
'''
changes = {'name': name,
'changes': {},
'result': None,
'comment': ''}
conn = None
try:
conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = _dict_factory
rows = None
if where_args is None:
rows = _query(conn, "SELECT * FROM `" +
table + "` WHERE " + where_sql)
else:
rows = _query(conn, "SELECT * FROM `" +
table + "` WHERE " + where_sql,
where_args)
if len(rows) > 1:
changes['result'] = False
changes['comment'] = 'More than one row matched the specified query'
elif len(rows) == 1:
for key, value in six.iteritems(data):
if key in rows[0] and rows[0][key] != value:
if update:
if __opts__['test']:
changes['result'] = True
changes['comment'] = "Row will be update in " + table
else:
columns = []
params = []
for key, value in six.iteritems(data):
columns.append("`" + key + "`=?")
params.append(value)
if where_args is not None:
params += where_args
sql = "UPDATE `" + table + "` SET "
sql += ",".join(columns)
sql += " WHERE "
sql += where_sql
cursor = conn.execute(sql, params)
conn.commit()
if cursor.rowcount == 1:
changes['result'] = True
changes['comment'] = "Row updated"
changes['changes']['old'] = rows[0]
changes['changes']['new'] = data
else:
changes['result'] = False
changes['comment'] = "Row update failed"
else:
changes['result'] = False
changes['comment'] = "Existing data does" + \
"not match desired state"
break
if changes['result'] is None:
changes['result'] = True
changes['comment'] = "Row exists"
else:
if __opts__['test']:
changes['result'] = True
changes['changes']['new'] = data
changes['comment'] = "Row will be inserted into " + table
else:
columns = []
value_stmt = []
values = []
for key, value in six.iteritems(data):
value_stmt.append('?')
values.append(value)
columns.append("`" + key + "`")
sql = "INSERT INTO `" + table + "` ("
sql += ",".join(columns)
sql += ") VALUES ("
sql += ",".join(value_stmt)
sql += ")"
cursor = conn.execute(sql, values)
conn.commit()
if cursor.rowcount == 1:
changes['result'] = True
changes['changes']['new'] = data
changes['comment'] = 'Inserted row'
else:
changes['result'] = False
changes['comment'] = "Unable to insert data"
except Exception as e:
changes['result'] = False
changes['comment'] = six.text_type(e)
finally:
if conn:
conn.close()
return changes
def table_present(name, db, schema, force=False):
    '''
    Make sure the specified table exists with the specified schema.

    name
        The name of the table
    db
        The name of the database file
    schema
        Either a complete ``CREATE TABLE`` SQL string or an iterable of
        column definitions (joined by ``_get_sql_from_schema``)
    force
        If the table exists with a different schema and force is False,
        the state will fail. If force is True, the existing table is
        dropped (destroying its data) and replaced with the new table
    '''
    changes = {'name': name,
               'changes': {},
               'result': None,
               'comment': ''}
    conn = None
    try:
        conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
        # sqlite_master stores the literal CREATE TABLE statement, which
        # is what the desired schema is compared against below.
        tables = _query(conn,
                        "SELECT sql FROM sqlite_master " +
                        "WHERE type='table' AND name=?", [name])
        if len(tables) == 1:
            if isinstance(schema, six.string_types):
                sql = schema.strip()
            else:
                sql = _get_sql_from_schema(name, schema)
            if sql != tables[0][0]:
                if force:
                    if __opts__['test']:
                        changes['result'] = True
                        changes['changes']['old'] = tables[0][0]
                        changes['changes']['new'] = sql
                        changes['comment'] = "'" + name + "' will be replaced"
                    else:
                        # Destructive path: drop and recreate the table
                        conn.execute("DROP TABLE `" + name + "`")
                        conn.execute(sql)
                        conn.commit()
                        changes['result'] = True
                        changes['changes']['old'] = tables[0][0]
                        changes['changes']['new'] = sql
                        changes['comment'] = "Replaced '" + name + "'"
                else:
                    changes['result'] = False
                    changes['comment'] = "Expected schema=" + sql + \
                                         "\nactual schema=" + tables[0][0]
            else:
                changes['result'] = True
                changes['comment'] = "'" + name + \
                                     "' exists with matching schema"
        elif not tables:
            # Table is absent: create it from the requested schema
            if isinstance(schema, six.string_types):
                sql = schema
            else:
                sql = _get_sql_from_schema(name, schema)
            if __opts__['test']:
                changes['result'] = True
                changes['changes']['new'] = sql
                changes['comment'] = "'" + name + "' will be created"
            else:
                conn.execute(sql)
                conn.commit()
                changes['result'] = True
                changes['changes']['new'] = sql
                changes['comment'] = "Created table '" + name + "'"
        else:
            changes['result'] = False
            changes['comment'] = 'Multiple tables with the same name=' + name
    except Exception as e:
        changes['result'] = False
        # six.text_type for consistency with the other states in this
        # module; str(e) here was the odd one out and is not
        # unicode-safe on Python 2.
        changes['comment'] = six.text_type(e)
    finally:
        if conn:
            conn.close()
    return changes
def _query(conn, sql, parameters=None):
    """Execute *sql* on *conn*, optionally bound to *parameters*, and
    return every resulting row at once."""
    if parameters is None:
        cur = conn.execute(sql)
    else:
        cur = conn.execute(sql, parameters)
    return cur.fetchall()
def _get_sql_from_schema(name, schema):
    """Build a ``CREATE TABLE`` statement for *name* from the iterable of
    column definitions in *schema*."""
    columns = ",".join(schema)
    return "CREATE TABLE `{0}` ({1})".format(name, columns)
def _dict_factory(cursor, row):
    """sqlite3 row factory that maps each column name to its row value."""
    return {col[0]: row[idx] for idx, col in enumerate(cursor.description)}
|
saltstack/salt
|
salt/states/sqlite3.py
|
table_present
|
python
|
def table_present(name, db, schema, force=False):
    '''
    Make sure the specified table exists with the specified schema

    name
        The name of the table
    db
        The name of the database file
    schema
        The dictionary containing the schema information
    force
        If the name of the table exists and force is set to False,
        the state will fail. If force is set to True, the existing
        table will be replaced with the new table
    '''
    changes = {'name': name,
               'changes': {},
               'result': None,
               'comment': ''}
    conn = None
    try:
        conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
        # sqlite_master holds the literal CREATE TABLE text, compared
        # against the desired schema below
        tables = _query(conn,
                        "SELECT sql FROM sqlite_master " +
                        "WHERE type='table' AND name=?", [name])
        if len(tables) == 1:
            sql = None
            if isinstance(schema, six.string_types):
                sql = schema.strip()
            else:
                sql = _get_sql_from_schema(name, schema)
            if sql != tables[0][0]:
                if force:
                    if __opts__['test']:
                        changes['result'] = True
                        changes['changes']['old'] = tables[0][0]
                        changes['changes']['new'] = sql
                        changes['comment'] = "'" + name + "' will be replaced"
                    else:
                        # Destructive: existing table (and its data) is dropped
                        conn.execute("DROP TABLE `" + name + "`")
                        conn.execute(sql)
                        conn.commit()
                        changes['result'] = True
                        changes['changes']['old'] = tables[0][0]
                        changes['changes']['new'] = sql
                        changes['comment'] = "Replaced '" + name + "'"
                else:
                    changes['result'] = False
                    changes['comment'] = "Expected schema=" + sql + \
                                         "\nactual schema=" + tables[0][0]
            else:
                changes['result'] = True
                changes['comment'] = "'" + name + \
                                     "' exists with matching schema"
        elif not tables:
            # Create the table
            sql = None
            if isinstance(schema, six.string_types):
                sql = schema
            else:
                sql = _get_sql_from_schema(name, schema)
            if __opts__['test']:
                changes['result'] = True
                changes['changes']['new'] = sql
                changes['comment'] = "'" + name + "' will be created"
            else:
                conn.execute(sql)
                conn.commit()
                changes['result'] = True
                changes['changes']['new'] = sql
                changes['comment'] = "Created table '" + name + "'"
        else:
            changes['result'] = False
            changes['comment'] = 'Multiple tables with the same name=' + name
    except Exception as e:
        changes['result'] = False
        changes['comment'] = str(e)
    finally:
        if conn:
            conn.close()
    return changes
|
Make sure the specified table exists with the specified schema
name
The name of the table
db
The name of the database file
schema
The dictionary containing the schema information
force
If the name of the table exists and force is set to False,
the state will fail. If force is set to True, the existing
table will be replaced with the new table
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/sqlite3.py#L383-L471
|
[
"def _query(conn, sql, parameters=None):\n cursor = None\n if parameters is None:\n cursor = conn.execute(sql)\n else:\n cursor = conn.execute(sql, parameters)\n return cursor.fetchall()\n",
"def _get_sql_from_schema(name, schema):\n return \"CREATE TABLE `\" + name + \"` (\" + \",\".join(schema) + \")\"\n"
] |
# -*- coding: utf-8 -*-
'''
Management of SQLite3 databases
===============================
.. versionadded:: 2016.3.0
:depends: - SQLite3 Python Module
:configuration: See :py:mod:`salt.modules.sqlite3` for setup instructions
The sqlite3 module is used to create and manage sqlite3 databases
and execute queries
Here is an example of creating a table using sql statements:
.. code-block:: yaml
users:
sqlite3.table_present:
- db: /var/www/data/app.sqlite
- schema: CREATE TABLE `users` (`username` TEXT COLLATE NOCASE UNIQUE NOT NULL, `password` BLOB NOT NULL, `salt` BLOB NOT NULL, `last_login` INT)
Here is an example of creating a table using yaml/jinja instead of sql:
.. code-block:: yaml
users:
sqlite3.table_present:
- db: /var/www/app.sqlite
- schema:
- email TEXT COLLATE NOCASE UNIQUE NOT NULL
- firstname TEXT NOT NULL
- lastname TEXT NOT NULL
- company TEXT NOT NULL
- password BLOB NOT NULL
- salt BLOB NOT NULL
Here is an example of making sure a table is absent:
.. code-block:: yaml
badservers:
sqlite3.table_absent:
- db: /var/www/data/users.sqlite
Sometimes you would to have specific data in tables to be used by other services
Here is an example of making sure rows with specific data exist:
.. code-block:: yaml
user_john_doe_xyz:
sqlite3.row_present:
- db: /var/www/app.sqlite
- table: users
- where_sql: email='john.doe@companyxyz.com'
- data:
email: john.doe@companyxyz.com
lastname: doe
firstname: john
company: companyxyz.com
password: abcdef012934125
salt: abcdef012934125
- require:
- sqlite3: users
Here is an example of removing a row from a table:
.. code-block:: yaml
user_john_doe_abc:
sqlite3.row_absent:
- db: /var/www/app.sqlite
- table: users
- where_sql: email="john.doe@companyabc.com"
- require:
- sqlite3: users
Note that there is no explicit state to perform random queries, however, this
can be approximated with sqlite3's module functions and module.run:
.. code-block:: yaml
zone-delete:
module.run:
- name: sqlite3.modify
- db: {{ db }}
- sql: "DELETE FROM records WHERE id > {{ count[0] }} AND domain_id = {{ domain_id }}"
- watch:
- sqlite3: zone-insert-12
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt libs
from salt.ext import six
try:
import sqlite3
HAS_SQLITE3 = True
except ImportError:
HAS_SQLITE3 = False
def __virtual__():
    '''
    Only load if the sqlite3 module is available
    '''
    # sqlite3 ships with CPython but may be missing from trimmed builds;
    # HAS_SQLITE3 is set at import time above
    return HAS_SQLITE3
def row_absent(name, db, table, where_sql, where_args=None):
    '''
    Makes sure the specified row is absent in db. If multiple rows
    match where_sql, then the state will fail.

    name
        Only used as the unique ID
    db
        The database file name
    table
        The table name to check
    where_sql
        The sql to select the row to check
    where_args
        The list parameters to substitute in where_sql
    '''
    changes = {'name': name,
               'changes': {},
               'result': None,
               'comment': ''}
    conn = None
    try:
        conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
        # Return rows as dicts so the removed value can be reported
        conn.row_factory = _dict_factory
        rows = None
        if where_args is None:
            rows = _query(conn,
                          "SELECT * FROM `" + table + "` WHERE " + where_sql)
        else:
            rows = _query(conn,
                          "SELECT * FROM `" + table + "` WHERE " + where_sql,
                          where_args)
        if len(rows) > 1:
            # Refuse to guess which row the caller meant
            changes['result'] = False
            changes['comment'] = "More than one row matched the specified query"
        elif len(rows) == 1:
            if __opts__['test']:
                changes['result'] = True
                changes['comment'] = "Row will be removed in " + table
                changes['changes']['old'] = rows[0]
            else:
                if where_args is None:
                    cursor = conn.execute("DELETE FROM `" +
                                          table + "` WHERE " + where_sql)
                else:
                    cursor = conn.execute("DELETE FROM `" +
                                          table + "` WHERE " + where_sql,
                                          where_args)
                conn.commit()
                # rowcount confirms exactly one row was deleted
                if cursor.rowcount == 1:
                    changes['result'] = True
                    changes['comment'] = "Row removed"
                    changes['changes']['old'] = rows[0]
                else:
                    changes['result'] = False
                    changes['comment'] = "Unable to remove row"
        else:
            # Nothing matched: desired state already holds
            changes['result'] = True
            changes['comment'] = 'Row is absent'
    except Exception as e:
        changes['result'] = False
        changes['comment'] = six.text_type(e)
    finally:
        if conn:
            conn.close()
    return changes
def row_present(name,
                db,
                table,
                data,
                where_sql,
                where_args=None,
                update=False):
    '''
    Checks to make sure the given row exists. If row exists and update is True
    then row will be updated with data. Otherwise it will leave existing
    row unmodified and check it against data. If the existing data
    doesn't match data the state will fail. If the row doesn't
    exist then it will insert data into the table. If more than one
    row matches, then the state will fail.

    name
        Only used as the unique ID
    db
        The database file name
    table
        The table name to check the data
    data
        The dictionary of key/value pairs to check against if
        row exists, insert into the table if it doesn't
    where_sql
        The sql to select the row to check
    where_args
        The list parameters to substitute in where_sql
    update
        True will replace the existing row with data
        When False and the row exists and data does not equal
        the row data then the state will fail
    '''
    changes = {'name': name,
               'changes': {},
               'result': None,
               'comment': ''}
    conn = None
    try:
        conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
        # Rows come back as dicts so existing values can be compared to
        # *data* and reported in changes
        conn.row_factory = _dict_factory
        if where_args is None:
            rows = _query(conn, "SELECT * FROM `" +
                          table + "` WHERE " + where_sql)
        else:
            rows = _query(conn, "SELECT * FROM `" +
                          table + "` WHERE " + where_sql,
                          where_args)
        if len(rows) > 1:
            changes['result'] = False
            changes['comment'] = 'More than one row matched the specified query'
        elif len(rows) == 1:
            # Compare the existing row against the desired data; act on
            # the first mismatching column, then break
            for key, value in six.iteritems(data):
                if key in rows[0] and rows[0][key] != value:
                    if update:
                        if __opts__['test']:
                            changes['result'] = True
                            # Fixed grammar: was "Row will be update in"
                            changes['comment'] = "Row will be updated in " + table
                        else:
                            # Build "`col`=?" assignments for every column
                            # in *data*; loop vars renamed so they no
                            # longer shadow the outer key/value pair
                            columns = []
                            params = []
                            for col, col_value in six.iteritems(data):
                                columns.append("`" + col + "`=?")
                                params.append(col_value)
                            if where_args is not None:
                                params += where_args
                            sql = "UPDATE `" + table + "` SET "
                            sql += ",".join(columns)
                            sql += " WHERE "
                            sql += where_sql
                            cursor = conn.execute(sql, params)
                            conn.commit()
                            if cursor.rowcount == 1:
                                changes['result'] = True
                                changes['comment'] = "Row updated"
                                changes['changes']['old'] = rows[0]
                                changes['changes']['new'] = data
                            else:
                                changes['result'] = False
                                changes['comment'] = "Row update failed"
                    else:
                        changes['result'] = False
                        # Fixed missing space: previously rendered
                        # "Existing data doesnot match desired state"
                        changes['comment'] = "Existing data does " + \
                                             "not match desired state"
                    break
            if changes['result'] is None:
                # No mismatching column found: row already satisfies *data*
                changes['result'] = True
                changes['comment'] = "Row exists"
        else:
            # No matching row: insert *data* as a new row
            if __opts__['test']:
                changes['result'] = True
                changes['changes']['new'] = data
                changes['comment'] = "Row will be inserted into " + table
            else:
                columns = []
                value_stmt = []
                values = []
                for key, value in six.iteritems(data):
                    value_stmt.append('?')
                    values.append(value)
                    columns.append("`" + key + "`")
                sql = "INSERT INTO `" + table + "` ("
                sql += ",".join(columns)
                sql += ") VALUES ("
                sql += ",".join(value_stmt)
                sql += ")"
                cursor = conn.execute(sql, values)
                conn.commit()
                if cursor.rowcount == 1:
                    changes['result'] = True
                    changes['changes']['new'] = data
                    changes['comment'] = 'Inserted row'
                else:
                    changes['result'] = False
                    changes['comment'] = "Unable to insert data"
    except Exception as e:
        changes['result'] = False
        changes['comment'] = six.text_type(e)
    finally:
        if conn:
            conn.close()
    return changes
def table_absent(name, db):
    '''
    Make sure the specified table does not exist

    name
        The name of the table
    db
        The name of the database file
    '''
    changes = {'name': name,
               'changes': {},
               'result': None,
               'comment': ''}
    conn = None
    try:
        conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
        tables = _query(conn, "SELECT sql FROM sqlite_master " +
                              " WHERE type='table' AND name=?", [name])
        if len(tables) == 1:
            if __opts__['test']:
                changes['result'] = True
                changes['comment'] = "'" + name + "' will be dropped"
            else:
                # Quote the identifier with backticks, consistent with the
                # other states in this module (handles names that collide
                # with SQL keywords or contain unusual characters)
                conn.execute("DROP TABLE `" + name + "`")
                conn.commit()
                changes['changes']['old'] = tables[0][0]
                changes['result'] = True
                changes['comment'] = "'" + name + "' was dropped"
        elif not tables:
            changes['result'] = True
            changes['comment'] = "'" + name + "' is already absent"
        else:
            changes['result'] = False
            changes['comment'] = "Multiple tables with the same name='" + \
                                 name + "'"
    except Exception as e:
        changes['result'] = False
        changes['comment'] = six.text_type(e)
    finally:
        if conn:
            conn.close()
    return changes
def _query(conn, sql, parameters=None):
    """Execute *sql* on *conn*, optionally bound to *parameters*, and
    return every resulting row at once."""
    if parameters is None:
        cur = conn.execute(sql)
    else:
        cur = conn.execute(sql, parameters)
    return cur.fetchall()
def _get_sql_from_schema(name, schema):
    """Build a ``CREATE TABLE`` statement for *name* from the iterable of
    column definitions in *schema*."""
    columns = ",".join(schema)
    return "CREATE TABLE `{0}` ({1})".format(name, columns)
def _dict_factory(cursor, row):
    """sqlite3 row factory that maps each column name to its row value."""
    return {col[0]: row[idx] for idx, col in enumerate(cursor.description)}
|
saltstack/salt
|
salt/matchers/range_match.py
|
match
|
python
|
def match(tgt, opts=None):
    '''
    Matches based on range cluster

    Returns True when this minion's ``fqdn`` grain is a member of the
    expanded range expression *tgt*; False otherwise, including when the
    seco.range library is unavailable or the range server raises.
    '''
    if not opts:
        opts = __opts__
    if HAS_RANGE:
        range_ = seco.range.Range(opts['range_server'])
        try:
            return opts['grains']['fqdn'] in range_.expand(tgt)
        except seco.range.RangeException as exc:
            # A bad expression or unreachable server is a non-match,
            # not an error
            log.debug('Range exception in compound match: %s', exc)
            return False
    # seco.range not importable -> range matching unsupported
    return False
|
Matches based on range cluster
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/matchers/range_match.py#L19-L32
| null |
# -*- coding: utf-8 -*-
'''
This is the default range matcher.
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
log = logging.getLogger(__name__)
|
saltstack/salt
|
salt/returners/cassandra_return.py
|
returner
|
python
|
def returner(ret):
    '''
    Return data to a Cassandra ColumnFamily

    *ret* is the standard Salt return dict; the job id becomes the row
    key and fun/id/return become columns.
    '''
    # Map the configured consistency level name (e.g. 'ONE') onto the
    # pycassa constant of the same name
    consistency_level = getattr(pycassa.ConsistencyLevel,
                                __opts__['cassandra.consistency_level'])
    pool = pycassa.ConnectionPool(__opts__['cassandra.keyspace'],
                                  __opts__['cassandra.servers'])
    ccf = pycassa.ColumnFamily(pool, __opts__['cassandra.column_family'],
                               write_consistency_level=consistency_level)
    columns = {'fun': ret['fun'],
               'id': ret['id']}
    if isinstance(ret['return'], dict):
        # Flatten a dict return into one 'return.<key>' column per key
        for key, value in six.iteritems(ret['return']):
            columns['return.{0}'.format(key)] = six.text_type(value)
    else:
        columns['return'] = six.text_type(ret['return'])
    log.debug(columns)
    # Job ID is the row key
    ccf.insert(ret['jid'], columns)
|
Return data to a Cassandra ColumnFamily
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/cassandra_return.py#L54-L76
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n"
] |
# -*- coding: utf-8 -*-
'''
Return data to a Cassandra ColumnFamily
Here's an example Keyspace / ColumnFamily setup that works with this
returner::
create keyspace salt;
use salt;
create column family returns
with key_validation_class='UTF8Type'
and comparator='UTF8Type'
and default_validation_class='UTF8Type';
Required python modules: pycassa
To use the cassandra returner, append '--return cassandra' to the salt command. ex:
salt '*' test.ping --return cassandra
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs
import salt.utils.jid
# Import third party libs
from salt.ext import six
try:
import pycassa # pylint: disable=import-error
HAS_PYCASSA = True
except ImportError:
HAS_PYCASSA = False
log = logging.getLogger(__name__)
__opts__ = {'cassandra.servers': ['localhost:9160'],
'cassandra.keyspace': 'salt',
'cassandra.column_family': 'returns',
'cassandra.consistency_level': 'ONE'}
# Define the module's virtual name
__virtualname__ = 'cassandra'
def __virtual__():
    """Load this returner only when pycassa is importable."""
    if HAS_PYCASSA:
        return __virtualname__
    return False, 'Could not import cassandra returner; pycassa is not installed.'
def prep_jid(nocache=False, passed_jid=None):  # pylint: disable=unused-argument
    '''
    Do any work necessary to prepare a JID, including sending a custom id
    '''
    # A caller-supplied jid wins; otherwise generate a fresh one
    if passed_jid is not None:
        return passed_jid
    return salt.utils.jid.gen_jid(__opts__)
|
saltstack/salt
|
salt/modules/seed.py
|
prep_bootstrap
|
python
|
def prep_bootstrap(mpt):
    '''
    Update and get the random script to a random place

    CLI Example:

    .. code-block:: bash

        salt '*' seed.prep_bootstrap /tmp
    '''
    # Verify that the bootstrap script is downloaded
    bs_ = __salt__['config.gather_bootstrap_script']()
    # Unique, private staging directory under <mpt>/tmp
    fpd_ = os.path.join(mpt, 'tmp', "{0}".format(
        uuid.uuid4()))
    if not os.path.exists(fpd_):
        os.makedirs(fpd_)
    os.chmod(fpd_, 0o700)
    fp_ = os.path.join(fpd_, os.path.basename(bs_))
    # Copy script into tmp
    shutil.copy(bs_, fp_)
    # Path of the script as seen from inside the mounted image
    tmppath = fpd_.replace(mpt, '')
    return fp_, tmppath
|
Update and get the random script to a random place
CLI Example:
.. code-block:: bash
salt '*' seed.prep_bootstrap /tmp
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/seed.py#L39-L61
| null |
# -*- coding: utf-8 -*-
'''
Virtual machine image management tools
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import os
import shutil
import logging
import tempfile
# Import salt libs
import salt.crypt
import salt.utils.cloud
import salt.utils.files
import salt.config
import salt.syspaths
import uuid
# Set up logging
log = logging.getLogger(__name__)
# Don't shadow built-in's.
__func_alias__ = {
'apply_': 'apply'
}
def _file_or_content(file_):
    # Accept either a filesystem path or literal content: when *file_*
    # names an existing file, return its decoded contents; otherwise
    # treat the argument itself as the content.
    if not os.path.exists(file_):
        return file_
    with salt.utils.files.fopen(file_) as handle:
        return salt.utils.stringutils.to_unicode(handle.read())
def _mount(path, ftype, root=None):
    # Mount *path* according to its type and return the usable mount
    # point, or None if it could not be mounted.
    mpt = None
    if ftype == 'block':
        # Block device: mount onto a fresh temporary directory
        mpt = tempfile.mkdtemp()
        if not __salt__['mount.mount'](mpt, path):
            os.rmdir(mpt)
            return None
    elif ftype == 'dir':
        # Already a directory tree; nothing to mount
        return path
    elif ftype == 'file':
        # Disk image file: prefer guestfs, fall back to qemu-nbd
        if 'guestfs.mount' in __salt__:
            util = 'guestfs'
        elif 'qemu_nbd.init' in __salt__:
            util = 'qemu_nbd'
        else:
            return None
        mpt = __salt__['mount.mount'](path, device=root, util=util)
        if not mpt:
            return None
    return mpt
def _umount(mpt, ftype):
    # Undo whatever _mount() did for this source type; 'dir' needs no
    # cleanup and is intentionally absent here.
    if ftype == 'block':
        __salt__['mount.umount'](mpt)
        os.rmdir(mpt)
    elif ftype == 'file':
        __salt__['mount.umount'](mpt, util='qemu_nbd')
def apply_(path, id_=None, config=None, approve_key=True, install=True,
           prep_install=False, pub_key=None, priv_key=None, mount_point=None):
    '''
    Seed a location (disk image, directory, or block device) with the
    minion config, approve the minion's key, and/or install salt-minion.

    CLI Example:

    .. code-block:: bash

        salt 'minion' seed.apply path id [config=config_data] \\
                [gen_key=(true|false)] [approve_key=(true|false)] \\
                [install=(true|false)]

    path
        Full path to the directory, device, or disk image on the target
        minion's file system.
    id
        Minion id with which to seed the path.
    config
        Minion configuration options. By default, the 'master' option is set to
        the target host's 'master'.
    approve_key
        Request a pre-approval of the generated minion key. Requires
        that the salt-master be configured to either auto-accept all keys or
        expect a signing request from the target host. Default: true.
    install
        Install salt-minion, if absent. Default: true.
    prep_install
        Prepare the bootstrap script, but don't run it. Default: false
    '''
    # Resolve symlinks and discover what kind of target we are seeding
    stats = __salt__['file.stats'](path, follow_symlinks=True)
    if not stats:
        return '{0} does not exist'.format(path)
    ftype = stats['type']
    path = stats['target']
    log.debug('Mounting %s at %s', ftype, path)
    try:
        os.makedirs(path)
    except OSError:
        # The directory already exists
        pass
    mpt = _mount(path, ftype, mount_point)
    if not mpt:
        return '{0} could not be mounted'.format(path)
    tmp = os.path.join(mpt, 'tmp')
    log.debug('Attempting to create directory %s', tmp)
    try:
        os.makedirs(tmp)
    except OSError:
        if not os.path.isdir(tmp):
            raise
    # Generate the minion config and keypair inside the image's /tmp
    cfg_files = mkconfig(config, tmp=tmp, id_=id_, approve_key=approve_key,
                         pub_key=pub_key, priv_key=priv_key)
    if _check_install(mpt):
        # salt-minion is already installed, just move the config and keys
        # into place
        log.info('salt-minion pre-installed on image, '
                 'configuring as %s', id_)
        minion_config = salt.config.minion_config(cfg_files['config'])
        pki_dir = minion_config['pki_dir']
        if not os.path.isdir(os.path.join(mpt, pki_dir.lstrip('/'))):
            __salt__['file.makedirs'](
                os.path.join(mpt, pki_dir.lstrip('/'), '')
            )
        os.rename(cfg_files['privkey'], os.path.join(
            mpt, pki_dir.lstrip('/'), 'minion.pem'))
        os.rename(cfg_files['pubkey'], os.path.join(
            mpt, pki_dir.lstrip('/'), 'minion.pub'))
        os.rename(cfg_files['config'], os.path.join(mpt, 'etc/salt/minion'))
        res = True
    elif install:
        log.info('Attempting to install salt-minion to %s', mpt)
        res = _install(mpt)
    elif prep_install:
        log.error('The prep_install option is no longer supported. Please use '
                  'the bootstrap script installed with Salt, located at %s.',
                  salt.syspaths.BOOTSTRAP)
        res = False
    else:
        log.warning('No useful action performed on %s', mpt)
        res = False
    # Always unmount, whatever the outcome
    _umount(mpt, ftype)
    return res
def mkconfig(config=None,
             tmp=None,
             id_=None,
             approve_key=True,
             pub_key=None,
             priv_key=None):
    '''
    Generate keys and config and put them in a tmp directory.

    pub_key
        absolute path or file content of an optional preseeded salt key

    priv_key
        absolute path or file content of an optional preseeded salt key

    CLI Example:

    .. code-block:: bash

        salt 'minion' seed.mkconfig [config=config_data] [tmp=tmp_dir] \\
                [id_=minion_id] [approve_key=(true|false)]
    '''
    if tmp is None:
        tmp = tempfile.mkdtemp()
    if config is None:
        config = {}
    # Point the seeded minion at this host's master unless overridden
    if 'master' not in config and __opts__['master'] != 'salt':
        config['master'] = __opts__['master']
    if id_:
        config['id'] = id_
    # Write the new minion's config to a tmp file
    tmp_config = os.path.join(tmp, 'minion')
    with salt.utils.files.fopen(tmp_config, 'w+') as fp_:
        fp_.write(salt.utils.cloud.salt_config_to_yaml(config))
    # Generate keys for the minion
    pubkeyfn = os.path.join(tmp, 'minion.pub')
    privkeyfn = os.path.join(tmp, 'minion.pem')
    preseeded = pub_key and priv_key
    if preseeded:
        # Caller supplied a keypair (as paths or literal content)
        log.debug('Writing minion.pub to %s', pubkeyfn)
        log.debug('Writing minion.pem to %s', privkeyfn)
        with salt.utils.files.fopen(pubkeyfn, 'w') as fic:
            fic.write(salt.utils.stringutils.to_str(_file_or_content(pub_key)))
        with salt.utils.files.fopen(privkeyfn, 'w') as fic:
            fic.write(salt.utils.stringutils.to_str(_file_or_content(priv_key)))
        os.chmod(pubkeyfn, 0o600)
        os.chmod(privkeyfn, 0o600)
    else:
        salt.crypt.gen_keys(tmp, 'minion', 2048)
    if approve_key and not preseeded:
        # Pre-approve the freshly generated key on the master
        with salt.utils.files.fopen(pubkeyfn) as fp_:
            pubkey = salt.utils.stringutils.to_unicode(fp_.read())
        __salt__['pillar.ext']({'virtkey': [id_, pubkey]})
    return {'config': tmp_config, 'pubkey': pubkeyfn, 'privkey': privkeyfn}
def _install(mpt):
    '''
    Determine whether salt-minion is installed and, if not,
    install it.
    Return True if install is successful or already installed.
    '''
    _check_resolv(mpt)
    # NOTE(review): if prep_bootstrap() ever returned a falsy value, this
    # would try to unpack salt.syspaths.BOOTSTRAP (a single path string)
    # into two names; in practice prep_bootstrap always returns a 2-tuple.
    boot_, tmppath = (prep_bootstrap(mpt)
                      or salt.syspaths.BOOTSTRAP)
    # Exec the chroot command
    cmd = 'if type salt-minion; then exit 0; '
    cmd += 'else sh {0} -c /tmp; fi'.format(os.path.join(tmppath, 'bootstrap-salt.sh'))
    # cmd.run_chroot retcode 0 means success -> truthy return
    return not __salt__['cmd.run_chroot'](mpt, cmd, python_shell=True)['retcode']
def _check_resolv(mpt):
    '''
    Check that the resolv.conf is present and populated
    '''
    resolv = os.path.join(mpt, 'etc/resolv.conf')
    replace = False
    if os.path.islink(resolv):
        resolv = os.path.realpath(resolv)
    if not os.path.isdir(os.path.dirname(resolv)):
        os.makedirs(os.path.dirname(resolv))
    if not os.path.isfile(resolv):
        replace = True
    if not replace:
        with salt.utils.files.fopen(resolv, 'rb') as fp_:
            conts = salt.utils.stringutils.to_unicode(fp_.read())
            # Replace when no nameserver is configured, or when it only
            # points at localhost (useless inside the seeded image)
            if 'nameserver' not in conts:
                replace = True
            if 'nameserver 127.0.0.1' in conts:
                replace = True
    if replace:
        # Seed the image with the host's resolv.conf
        shutil.copy('/etc/resolv.conf', resolv)
def _check_install(root):
    # Return True when salt-minion is already present inside the chroot.
    # Prefer bash if the image ships it, otherwise fall back to sh.
    if os.path.isfile(os.path.join(root, 'bin/bash')):
        shell = '/bin/bash'
    else:
        shell = '/bin/sh'
    probe = 'if ! type salt-minion; then exit 1; fi'
    cmd = 'chroot \'{0}\' {1} -c \'{2}\''.format(root, shell, probe)
    return not __salt__['cmd.retcode'](cmd,
                                       output_loglevel='quiet',
                                       python_shell=True)
|
saltstack/salt
|
salt/modules/seed.py
|
apply_
|
python
|
def apply_(path, id_=None, config=None, approve_key=True, install=True,
           prep_install=False, pub_key=None, priv_key=None, mount_point=None):
    '''
    Seed a location (disk image, directory, or block device) with the
    minion config, approve the minion's key, and/or install salt-minion.

    CLI Example:

    .. code-block:: bash

        salt 'minion' seed.apply path id [config=config_data] \\
                [gen_key=(true|false)] [approve_key=(true|false)] \\
                [install=(true|false)]

    path
        Full path to the directory, device, or disk image on the target
        minion's file system.
    id
        Minion id with which to seed the path.
    config
        Minion configuration options. By default, the 'master' option is set to
        the target host's 'master'.
    approve_key
        Request a pre-approval of the generated minion key. Requires
        that the salt-master be configured to either auto-accept all keys or
        expect a signing request from the target host. Default: true.
    install
        Install salt-minion, if absent. Default: true.
    prep_install
        Prepare the bootstrap script, but don't run it. Default: false
    '''
    # Resolve symlinks and discover what kind of target we are seeding
    stats = __salt__['file.stats'](path, follow_symlinks=True)
    if not stats:
        return '{0} does not exist'.format(path)
    ftype = stats['type']
    path = stats['target']
    log.debug('Mounting %s at %s', ftype, path)
    try:
        os.makedirs(path)
    except OSError:
        # The directory already exists
        pass
    mpt = _mount(path, ftype, mount_point)
    if not mpt:
        return '{0} could not be mounted'.format(path)
    tmp = os.path.join(mpt, 'tmp')
    log.debug('Attempting to create directory %s', tmp)
    try:
        os.makedirs(tmp)
    except OSError:
        if not os.path.isdir(tmp):
            raise
    # Generate the minion config and keypair inside the image's /tmp
    cfg_files = mkconfig(config, tmp=tmp, id_=id_, approve_key=approve_key,
                         pub_key=pub_key, priv_key=priv_key)
    if _check_install(mpt):
        # salt-minion is already installed, just move the config and keys
        # into place
        log.info('salt-minion pre-installed on image, '
                 'configuring as %s', id_)
        minion_config = salt.config.minion_config(cfg_files['config'])
        pki_dir = minion_config['pki_dir']
        if not os.path.isdir(os.path.join(mpt, pki_dir.lstrip('/'))):
            __salt__['file.makedirs'](
                os.path.join(mpt, pki_dir.lstrip('/'), '')
            )
        os.rename(cfg_files['privkey'], os.path.join(
            mpt, pki_dir.lstrip('/'), 'minion.pem'))
        os.rename(cfg_files['pubkey'], os.path.join(
            mpt, pki_dir.lstrip('/'), 'minion.pub'))
        os.rename(cfg_files['config'], os.path.join(mpt, 'etc/salt/minion'))
        res = True
    elif install:
        log.info('Attempting to install salt-minion to %s', mpt)
        res = _install(mpt)
    elif prep_install:
        log.error('The prep_install option is no longer supported. Please use '
                  'the bootstrap script installed with Salt, located at %s.',
                  salt.syspaths.BOOTSTRAP)
        res = False
    else:
        log.warning('No useful action performed on %s', mpt)
        res = False
    # Always unmount, whatever the outcome
    _umount(mpt, ftype)
    return res
|
Seed a location (disk image, directory, or block device) with the
minion config, approve the minion's key, and/or install salt-minion.
CLI Example:
.. code-block:: bash
salt 'minion' seed.apply path id [config=config_data] \\
[gen_key=(true|false)] [approve_key=(true|false)] \\
[install=(true|false)]
path
Full path to the directory, device, or disk image on the target
minion's file system.
id
Minion id with which to seed the path.
config
Minion configuration options. By default, the 'master' option is set to
the target host's 'master'.
approve_key
Request a pre-approval of the generated minion key. Requires
that the salt-master be configured to either auto-accept all keys or
expect a signing request from the target host. Default: true.
install
Install salt-minion, if absent. Default: true.
prep_install
Prepare the bootstrap script, but don't run it. Default: false
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/seed.py#L94-L187
|
[
"def minion_config(path,\n env_var='SALT_MINION_CONFIG',\n defaults=None,\n cache_minion_id=False,\n ignore_config_errors=True,\n minion_id=None,\n role='minion'):\n '''\n Reads in the minion configuration file and sets up special options\n\n This is useful for Minion-side operations, such as the\n :py:class:`~salt.client.Caller` class, and manually running the loader\n interface.\n\n .. code-block:: python\n\n import salt.config\n minion_opts = salt.config.minion_config('/etc/salt/minion')\n '''\n if defaults is None:\n defaults = DEFAULT_MINION_OPTS.copy()\n\n if not os.environ.get(env_var, None):\n # No valid setting was given using the configuration variable.\n # Lets see is SALT_CONFIG_DIR is of any use\n salt_config_dir = os.environ.get('SALT_CONFIG_DIR', None)\n if salt_config_dir:\n env_config_file_path = os.path.join(salt_config_dir, 'minion')\n if salt_config_dir and os.path.isfile(env_config_file_path):\n # We can get a configuration file using SALT_CONFIG_DIR, let's\n # update the environment with this information\n os.environ[env_var] = env_config_file_path\n\n overrides = load_config(path, env_var, DEFAULT_MINION_OPTS['conf_file'])\n default_include = overrides.get('default_include',\n defaults['default_include'])\n include = overrides.get('include', [])\n\n overrides.update(include_config(default_include, path, verbose=False,\n exit_on_config_errors=not ignore_config_errors))\n overrides.update(include_config(include, path, verbose=True,\n exit_on_config_errors=not ignore_config_errors))\n\n opts = apply_minion_config(overrides, defaults,\n cache_minion_id=cache_minion_id,\n minion_id=minion_id)\n opts['__role'] = role\n apply_sdb(opts)\n _validate_opts(opts)\n return opts\n",
"def _install(mpt):\n '''\n Determine whether salt-minion is installed and, if not,\n install it.\n Return True if install is successful or already installed.\n '''\n _check_resolv(mpt)\n boot_, tmppath = (prep_bootstrap(mpt)\n or salt.syspaths.BOOTSTRAP)\n # Exec the chroot command\n cmd = 'if type salt-minion; then exit 0; '\n cmd += 'else sh {0} -c /tmp; fi'.format(os.path.join(tmppath, 'bootstrap-salt.sh'))\n return not __salt__['cmd.run_chroot'](mpt, cmd, python_shell=True)['retcode']\n",
"def _mount(path, ftype, root=None):\n mpt = None\n if ftype == 'block':\n mpt = tempfile.mkdtemp()\n if not __salt__['mount.mount'](mpt, path):\n os.rmdir(mpt)\n return None\n elif ftype == 'dir':\n return path\n elif ftype == 'file':\n if 'guestfs.mount' in __salt__:\n util = 'guestfs'\n elif 'qemu_nbd.init' in __salt__:\n util = 'qemu_nbd'\n else:\n return None\n mpt = __salt__['mount.mount'](path, device=root, util=util)\n if not mpt:\n return None\n return mpt\n",
"def _umount(mpt, ftype):\n if ftype == 'block':\n __salt__['mount.umount'](mpt)\n os.rmdir(mpt)\n elif ftype == 'file':\n __salt__['mount.umount'](mpt, util='qemu_nbd')\n",
"def mkconfig(config=None,\n tmp=None,\n id_=None,\n approve_key=True,\n pub_key=None,\n priv_key=None):\n '''\n Generate keys and config and put them in a tmp directory.\n\n pub_key\n absolute path or file content of an optional preseeded salt key\n\n priv_key\n absolute path or file content of an optional preseeded salt key\n\n CLI Example:\n\n .. code-block:: bash\n\n salt 'minion' seed.mkconfig [config=config_data] [tmp=tmp_dir] \\\\\n [id_=minion_id] [approve_key=(true|false)]\n '''\n if tmp is None:\n tmp = tempfile.mkdtemp()\n if config is None:\n config = {}\n if 'master' not in config and __opts__['master'] != 'salt':\n config['master'] = __opts__['master']\n if id_:\n config['id'] = id_\n\n # Write the new minion's config to a tmp file\n tmp_config = os.path.join(tmp, 'minion')\n with salt.utils.files.fopen(tmp_config, 'w+') as fp_:\n fp_.write(salt.utils.cloud.salt_config_to_yaml(config))\n\n # Generate keys for the minion\n pubkeyfn = os.path.join(tmp, 'minion.pub')\n privkeyfn = os.path.join(tmp, 'minion.pem')\n preseeded = pub_key and priv_key\n if preseeded:\n log.debug('Writing minion.pub to %s', pubkeyfn)\n log.debug('Writing minion.pem to %s', privkeyfn)\n with salt.utils.files.fopen(pubkeyfn, 'w') as fic:\n fic.write(salt.utils.stringutils.to_str(_file_or_content(pub_key)))\n with salt.utils.files.fopen(privkeyfn, 'w') as fic:\n fic.write(salt.utils.stringutils.to_str(_file_or_content(priv_key)))\n os.chmod(pubkeyfn, 0o600)\n os.chmod(privkeyfn, 0o600)\n else:\n salt.crypt.gen_keys(tmp, 'minion', 2048)\n if approve_key and not preseeded:\n with salt.utils.files.fopen(pubkeyfn) as fp_:\n pubkey = salt.utils.stringutils.to_unicode(fp_.read())\n __salt__['pillar.ext']({'virtkey': [id_, pubkey]})\n\n return {'config': tmp_config, 'pubkey': pubkeyfn, 'privkey': privkeyfn}\n",
"def _check_install(root):\n sh_ = '/bin/sh'\n if os.path.isfile(os.path.join(root, 'bin/bash')):\n sh_ = '/bin/bash'\n\n cmd = ('if ! type salt-minion; then exit 1; fi')\n cmd = 'chroot \\'{0}\\' {1} -c \\'{2}\\''.format(\n root,\n sh_,\n cmd)\n\n return not __salt__['cmd.retcode'](cmd,\n output_loglevel='quiet',\n python_shell=True)\n"
] |
# -*- coding: utf-8 -*-
'''
Virtual machine image management tools
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import os
import shutil
import logging
import tempfile
# Import salt libs
import salt.crypt
import salt.utils.cloud
import salt.utils.files
import salt.config
import salt.syspaths
import uuid
# Set up logging
log = logging.getLogger(__name__)
# Don't shadow built-in's.
__func_alias__ = {
'apply_': 'apply'
}
def _file_or_content(file_):
if os.path.exists(file_):
with salt.utils.files.fopen(file_) as fic:
return salt.utils.stringutils.to_unicode(fic.read())
return file_
def prep_bootstrap(mpt):
    '''
    Update and get the random script to a random place

    CLI Example:

    .. code-block:: bash

        salt '*' seed.prep_bootstrap /tmp
    '''
    # Make sure the bootstrap script has been fetched/cached
    script = __salt__['config.gather_bootstrap_script']()
    # Stage it under a random, owner-only subdirectory of <mpt>/tmp
    stage_dir = os.path.join(mpt, 'tmp', '{0}'.format(uuid.uuid4()))
    if not os.path.exists(stage_dir):
        os.makedirs(stage_dir)
        os.chmod(stage_dir, 0o700)
    dest = os.path.join(stage_dir, os.path.basename(script))
    # Copy the script into the staging directory
    shutil.copy(script, dest)
    # Path of the staging directory relative to the mount point
    rel_dir = stage_dir.replace(mpt, '')
    return dest, rel_dir
def _mount(path, ftype, root=None):
mpt = None
if ftype == 'block':
mpt = tempfile.mkdtemp()
if not __salt__['mount.mount'](mpt, path):
os.rmdir(mpt)
return None
elif ftype == 'dir':
return path
elif ftype == 'file':
if 'guestfs.mount' in __salt__:
util = 'guestfs'
elif 'qemu_nbd.init' in __salt__:
util = 'qemu_nbd'
else:
return None
mpt = __salt__['mount.mount'](path, device=root, util=util)
if not mpt:
return None
return mpt
def _umount(mpt, ftype):
if ftype == 'block':
__salt__['mount.umount'](mpt)
os.rmdir(mpt)
elif ftype == 'file':
__salt__['mount.umount'](mpt, util='qemu_nbd')
def mkconfig(config=None,
             tmp=None,
             id_=None,
             approve_key=True,
             pub_key=None,
             priv_key=None):
    '''
    Generate keys and config and put them in a tmp directory.

    config
        mapping of minion configuration options; unless already present, a
        ``master`` entry is copied in from this minion's own configuration

    tmp
        directory to write into; a fresh temporary directory is created
        when omitted

    id_
        minion id to write into the generated config

    approve_key
        push the generated public key to the master for pre-approval via
        the ``virtkey`` external pillar (skipped for preseeded keys)

    pub_key
        absolute path or file content of an optional preseeded salt key

    priv_key
        absolute path or file content of an optional preseeded salt key

    Returns a dict with the paths of the written ``config``, ``pubkey``
    and ``privkey`` files.

    CLI Example:

    .. code-block:: bash

        salt 'minion' seed.mkconfig [config=config_data] [tmp=tmp_dir] \\
            [id_=minion_id] [approve_key=(true|false)]
    '''
    if tmp is None:
        tmp = tempfile.mkdtemp()
    if config is None:
        config = {}
    # Default the new minion's master to this minion's own master
    if 'master' not in config and __opts__['master'] != 'salt':
        config['master'] = __opts__['master']
    if id_:
        config['id'] = id_
    # Write the new minion's config to a tmp file
    tmp_config = os.path.join(tmp, 'minion')
    with salt.utils.files.fopen(tmp_config, 'w+') as fp_:
        fp_.write(salt.utils.cloud.salt_config_to_yaml(config))
    # Generate keys for the minion
    pubkeyfn = os.path.join(tmp, 'minion.pub')
    privkeyfn = os.path.join(tmp, 'minion.pem')
    # Both halves must be supplied for the keypair to count as preseeded
    preseeded = pub_key and priv_key
    if preseeded:
        log.debug('Writing minion.pub to %s', pubkeyfn)
        log.debug('Writing minion.pem to %s', privkeyfn)
        with salt.utils.files.fopen(pubkeyfn, 'w') as fic:
            fic.write(salt.utils.stringutils.to_str(_file_or_content(pub_key)))
        with salt.utils.files.fopen(privkeyfn, 'w') as fic:
            fic.write(salt.utils.stringutils.to_str(_file_or_content(priv_key)))
        # Keep key material readable by the owner only
        os.chmod(pubkeyfn, 0o600)
        os.chmod(privkeyfn, 0o600)
    else:
        salt.crypt.gen_keys(tmp, 'minion', 2048)
    if approve_key and not preseeded:
        # Send the freshly generated public key to the master so the new
        # minion's key is already accepted when it first connects
        with salt.utils.files.fopen(pubkeyfn) as fp_:
            pubkey = salt.utils.stringutils.to_unicode(fp_.read())
        __salt__['pillar.ext']({'virtkey': [id_, pubkey]})
    return {'config': tmp_config, 'pubkey': pubkeyfn, 'privkey': privkeyfn}
def _install(mpt):
    '''
    Determine whether salt-minion is installed and, if not,
    install it.

    mpt
        mount point / root of the image being seeded

    Return True if install is successful or already installed.
    '''
    _check_resolv(mpt)
    # Stage the bootstrap script inside the image.  Fall back to the
    # bootstrap script shipped with Salt when staging yields nothing.
    # NOTE: the previous form
    #     boot_, tmppath = (prep_bootstrap(mpt) or salt.syspaths.BOOTSTRAP)
    # could never take the fallback: a single path string does not unpack
    # into a (script, dir) pair and would raise ValueError.
    staged = prep_bootstrap(mpt)
    if staged:
        boot_, tmppath = staged
    else:
        boot_ = salt.syspaths.BOOTSTRAP
        tmppath = os.path.dirname(boot_)
    # Exec the chroot command: install only when salt-minion is absent
    cmd = 'if type salt-minion; then exit 0; '
    cmd += 'else sh {0} -c /tmp; fi'.format(os.path.join(tmppath, 'bootstrap-salt.sh'))
    return not __salt__['cmd.run_chroot'](mpt, cmd, python_shell=True)['retcode']
def _check_resolv(mpt):
    '''
    Check that the resolv.conf is present and populated
    '''
    resolv = os.path.join(mpt, 'etc/resolv.conf')
    # Follow a symlinked resolv.conf to its real target
    if os.path.islink(resolv):
        resolv = os.path.realpath(resolv)
    parent = os.path.dirname(resolv)
    if not os.path.isdir(parent):
        os.makedirs(parent)
    # Decide whether the image's resolv.conf must be replaced by the
    # host's: missing file, no nameserver at all, or a localhost-only one
    needs_copy = not os.path.isfile(resolv)
    if not needs_copy:
        with salt.utils.files.fopen(resolv, 'rb') as fp_:
            body = salt.utils.stringutils.to_unicode(fp_.read())
        if 'nameserver' not in body:
            needs_copy = True
        if 'nameserver 127.0.0.1' in body:
            needs_copy = True
    if needs_copy:
        shutil.copy('/etc/resolv.conf', resolv)
def _check_install(root):
    '''
    Return True when a salt-minion executable is already present inside
    the chroot at ``root``.
    '''
    # Use bash when the image provides it, otherwise plain sh
    shell = '/bin/bash' if os.path.isfile(os.path.join(root, 'bin/bash')) else '/bin/sh'
    probe = 'if ! type salt-minion; then exit 1; fi'
    cmd = 'chroot \'{0}\' {1} -c \'{2}\''.format(
        root,
        shell,
        probe)
    return not __salt__['cmd.retcode'](cmd,
                                       output_loglevel='quiet',
                                       python_shell=True)
|
saltstack/salt
|
salt/modules/seed.py
|
mkconfig
|
python
|
def mkconfig(config=None,
tmp=None,
id_=None,
approve_key=True,
pub_key=None,
priv_key=None):
'''
Generate keys and config and put them in a tmp directory.
pub_key
absolute path or file content of an optional preseeded salt key
priv_key
absolute path or file content of an optional preseeded salt key
CLI Example:
.. code-block:: bash
salt 'minion' seed.mkconfig [config=config_data] [tmp=tmp_dir] \\
[id_=minion_id] [approve_key=(true|false)]
'''
if tmp is None:
tmp = tempfile.mkdtemp()
if config is None:
config = {}
if 'master' not in config and __opts__['master'] != 'salt':
config['master'] = __opts__['master']
if id_:
config['id'] = id_
# Write the new minion's config to a tmp file
tmp_config = os.path.join(tmp, 'minion')
with salt.utils.files.fopen(tmp_config, 'w+') as fp_:
fp_.write(salt.utils.cloud.salt_config_to_yaml(config))
# Generate keys for the minion
pubkeyfn = os.path.join(tmp, 'minion.pub')
privkeyfn = os.path.join(tmp, 'minion.pem')
preseeded = pub_key and priv_key
if preseeded:
log.debug('Writing minion.pub to %s', pubkeyfn)
log.debug('Writing minion.pem to %s', privkeyfn)
with salt.utils.files.fopen(pubkeyfn, 'w') as fic:
fic.write(salt.utils.stringutils.to_str(_file_or_content(pub_key)))
with salt.utils.files.fopen(privkeyfn, 'w') as fic:
fic.write(salt.utils.stringutils.to_str(_file_or_content(priv_key)))
os.chmod(pubkeyfn, 0o600)
os.chmod(privkeyfn, 0o600)
else:
salt.crypt.gen_keys(tmp, 'minion', 2048)
if approve_key and not preseeded:
with salt.utils.files.fopen(pubkeyfn) as fp_:
pubkey = salt.utils.stringutils.to_unicode(fp_.read())
__salt__['pillar.ext']({'virtkey': [id_, pubkey]})
return {'config': tmp_config, 'pubkey': pubkeyfn, 'privkey': privkeyfn}
|
Generate keys and config and put them in a tmp directory.
pub_key
absolute path or file content of an optional preseeded salt key
priv_key
absolute path or file content of an optional preseeded salt key
CLI Example:
.. code-block:: bash
salt 'minion' seed.mkconfig [config=config_data] [tmp=tmp_dir] \\
[id_=minion_id] [approve_key=(true|false)]
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/seed.py#L190-L246
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def salt_config_to_yaml(configuration, line_break='\\n'):\n '''\n Return a salt configuration dictionary, master or minion, as a yaml dump\n '''\n return salt.utils.yaml.safe_dump(\n configuration,\n line_break=line_break,\n default_flow_style=False)\n"
] |
# -*- coding: utf-8 -*-
'''
Virtual machine image management tools
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import os
import shutil
import logging
import tempfile
# Import salt libs
import salt.crypt
import salt.utils.cloud
import salt.utils.files
import salt.config
import salt.syspaths
import uuid
# Set up logging
log = logging.getLogger(__name__)
# Don't shadow built-in's.
__func_alias__ = {
'apply_': 'apply'
}
def _file_or_content(file_):
if os.path.exists(file_):
with salt.utils.files.fopen(file_) as fic:
return salt.utils.stringutils.to_unicode(fic.read())
return file_
def prep_bootstrap(mpt):
'''
Update and get the random script to a random place
CLI Example:
.. code-block:: bash
salt '*' seed.prep_bootstrap /tmp
'''
# Verify that the boostrap script is downloaded
bs_ = __salt__['config.gather_bootstrap_script']()
fpd_ = os.path.join(mpt, 'tmp', "{0}".format(
uuid.uuid4()))
if not os.path.exists(fpd_):
os.makedirs(fpd_)
os.chmod(fpd_, 0o700)
fp_ = os.path.join(fpd_, os.path.basename(bs_))
# Copy script into tmp
shutil.copy(bs_, fp_)
tmppath = fpd_.replace(mpt, '')
return fp_, tmppath
def _mount(path, ftype, root=None):
mpt = None
if ftype == 'block':
mpt = tempfile.mkdtemp()
if not __salt__['mount.mount'](mpt, path):
os.rmdir(mpt)
return None
elif ftype == 'dir':
return path
elif ftype == 'file':
if 'guestfs.mount' in __salt__:
util = 'guestfs'
elif 'qemu_nbd.init' in __salt__:
util = 'qemu_nbd'
else:
return None
mpt = __salt__['mount.mount'](path, device=root, util=util)
if not mpt:
return None
return mpt
def _umount(mpt, ftype):
if ftype == 'block':
__salt__['mount.umount'](mpt)
os.rmdir(mpt)
elif ftype == 'file':
__salt__['mount.umount'](mpt, util='qemu_nbd')
def apply_(path, id_=None, config=None, approve_key=True, install=True,
           prep_install=False, pub_key=None, priv_key=None, mount_point=None):
    '''
    Seed a location (disk image, directory, or block device) with the
    minion config, approve the minion's key, and/or install salt-minion.

    CLI Example:

    .. code-block:: bash

        salt 'minion' seed.apply path id [config=config_data] \\
                [gen_key=(true|false)] [approve_key=(true|false)] \\
                [install=(true|false)]

    path
        Full path to the directory, device, or disk image on the target
        minion's file system.

    id
        Minion id with which to seed the path.

    config
        Minion configuration options. By default, the 'master' option is set to
        the target host's 'master'.

    approve_key
        Request a pre-approval of the generated minion key. Requires
        that the salt-master be configured to either auto-accept all keys or
        expect a signing request from the target host. Default: true.

    install
        Install salt-minion, if absent. Default: true.

    prep_install
        Prepare the bootstrap script, but don't run it. Default: false

    pub_key
        Optional preseeded public key (path or content), passed through
        to mkconfig.

    priv_key
        Optional preseeded private key (path or content), passed through
        to mkconfig.

    mount_point
        Passed through to the mount helper as the device root when
        mounting a disk image.

    Returns True/False on success/failure, or an error string when the
    path does not exist or could not be mounted.
    '''
    # Resolve the path's type (dir, block device, or image file)
    stats = __salt__['file.stats'](path, follow_symlinks=True)
    if not stats:
        return '{0} does not exist'.format(path)
    ftype = stats['type']
    path = stats['target']
    log.debug('Mounting %s at %s', ftype, path)
    try:
        os.makedirs(path)
    except OSError:
        # The directory already exists
        pass
    mpt = _mount(path, ftype, mount_point)
    if not mpt:
        return '{0} could not be mounted'.format(path)
    # Staging area inside the mounted tree for the generated config/keys
    tmp = os.path.join(mpt, 'tmp')
    log.debug('Attempting to create directory %s', tmp)
    try:
        os.makedirs(tmp)
    except OSError:
        if not os.path.isdir(tmp):
            raise
    cfg_files = mkconfig(config, tmp=tmp, id_=id_, approve_key=approve_key,
                         pub_key=pub_key, priv_key=priv_key)
    if _check_install(mpt):
        # salt-minion is already installed, just move the config and keys
        # into place
        log.info('salt-minion pre-installed on image, '
                 'configuring as %s', id_)
        minion_config = salt.config.minion_config(cfg_files['config'])
        pki_dir = minion_config['pki_dir']
        # pki_dir is an absolute path; strip the leading '/' so the join
        # lands inside the mounted image rather than on the host
        if not os.path.isdir(os.path.join(mpt, pki_dir.lstrip('/'))):
            __salt__['file.makedirs'](
                os.path.join(mpt, pki_dir.lstrip('/'), '')
            )
        os.rename(cfg_files['privkey'], os.path.join(
            mpt, pki_dir.lstrip('/'), 'minion.pem'))
        os.rename(cfg_files['pubkey'], os.path.join(
            mpt, pki_dir.lstrip('/'), 'minion.pub'))
        os.rename(cfg_files['config'], os.path.join(mpt, 'etc/salt/minion'))
        res = True
    elif install:
        log.info('Attempting to install salt-minion to %s', mpt)
        res = _install(mpt)
    elif prep_install:
        log.error('The prep_install option is no longer supported. Please use '
                  'the bootstrap script installed with Salt, located at %s.',
                  salt.syspaths.BOOTSTRAP)
        res = False
    else:
        log.warning('No useful action performed on %s', mpt)
        res = False
    # Always unmount, whatever branch was taken above
    _umount(mpt, ftype)
    return res
def _install(mpt):
'''
Determine whether salt-minion is installed and, if not,
install it.
Return True if install is successful or already installed.
'''
_check_resolv(mpt)
boot_, tmppath = (prep_bootstrap(mpt)
or salt.syspaths.BOOTSTRAP)
# Exec the chroot command
cmd = 'if type salt-minion; then exit 0; '
cmd += 'else sh {0} -c /tmp; fi'.format(os.path.join(tmppath, 'bootstrap-salt.sh'))
return not __salt__['cmd.run_chroot'](mpt, cmd, python_shell=True)['retcode']
def _check_resolv(mpt):
'''
Check that the resolv.conf is present and populated
'''
resolv = os.path.join(mpt, 'etc/resolv.conf')
replace = False
if os.path.islink(resolv):
resolv = os.path.realpath(resolv)
if not os.path.isdir(os.path.dirname(resolv)):
os.makedirs(os.path.dirname(resolv))
if not os.path.isfile(resolv):
replace = True
if not replace:
with salt.utils.files.fopen(resolv, 'rb') as fp_:
conts = salt.utils.stringutils.to_unicode(fp_.read())
if 'nameserver' not in conts:
replace = True
if 'nameserver 127.0.0.1' in conts:
replace = True
if replace:
shutil.copy('/etc/resolv.conf', resolv)
def _check_install(root):
sh_ = '/bin/sh'
if os.path.isfile(os.path.join(root, 'bin/bash')):
sh_ = '/bin/bash'
cmd = ('if ! type salt-minion; then exit 1; fi')
cmd = 'chroot \'{0}\' {1} -c \'{2}\''.format(
root,
sh_,
cmd)
return not __salt__['cmd.retcode'](cmd,
output_loglevel='quiet',
python_shell=True)
|
saltstack/salt
|
salt/modules/seed.py
|
_install
|
python
|
def _install(mpt):
'''
Determine whether salt-minion is installed and, if not,
install it.
Return True if install is successful or already installed.
'''
_check_resolv(mpt)
boot_, tmppath = (prep_bootstrap(mpt)
or salt.syspaths.BOOTSTRAP)
# Exec the chroot command
cmd = 'if type salt-minion; then exit 0; '
cmd += 'else sh {0} -c /tmp; fi'.format(os.path.join(tmppath, 'bootstrap-salt.sh'))
return not __salt__['cmd.run_chroot'](mpt, cmd, python_shell=True)['retcode']
|
Determine whether salt-minion is installed and, if not,
install it.
Return True if install is successful or already installed.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/seed.py#L249-L261
| null |
# -*- coding: utf-8 -*-
'''
Virtual machine image management tools
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import os
import shutil
import logging
import tempfile
# Import salt libs
import salt.crypt
import salt.utils.cloud
import salt.utils.files
import salt.config
import salt.syspaths
import uuid
# Set up logging
log = logging.getLogger(__name__)
# Don't shadow built-in's.
__func_alias__ = {
'apply_': 'apply'
}
def _file_or_content(file_):
if os.path.exists(file_):
with salt.utils.files.fopen(file_) as fic:
return salt.utils.stringutils.to_unicode(fic.read())
return file_
def prep_bootstrap(mpt):
'''
Update and get the random script to a random place
CLI Example:
.. code-block:: bash
salt '*' seed.prep_bootstrap /tmp
'''
# Verify that the boostrap script is downloaded
bs_ = __salt__['config.gather_bootstrap_script']()
fpd_ = os.path.join(mpt, 'tmp', "{0}".format(
uuid.uuid4()))
if not os.path.exists(fpd_):
os.makedirs(fpd_)
os.chmod(fpd_, 0o700)
fp_ = os.path.join(fpd_, os.path.basename(bs_))
# Copy script into tmp
shutil.copy(bs_, fp_)
tmppath = fpd_.replace(mpt, '')
return fp_, tmppath
def _mount(path, ftype, root=None):
mpt = None
if ftype == 'block':
mpt = tempfile.mkdtemp()
if not __salt__['mount.mount'](mpt, path):
os.rmdir(mpt)
return None
elif ftype == 'dir':
return path
elif ftype == 'file':
if 'guestfs.mount' in __salt__:
util = 'guestfs'
elif 'qemu_nbd.init' in __salt__:
util = 'qemu_nbd'
else:
return None
mpt = __salt__['mount.mount'](path, device=root, util=util)
if not mpt:
return None
return mpt
def _umount(mpt, ftype):
if ftype == 'block':
__salt__['mount.umount'](mpt)
os.rmdir(mpt)
elif ftype == 'file':
__salt__['mount.umount'](mpt, util='qemu_nbd')
def apply_(path, id_=None, config=None, approve_key=True, install=True,
prep_install=False, pub_key=None, priv_key=None, mount_point=None):
'''
Seed a location (disk image, directory, or block device) with the
minion config, approve the minion's key, and/or install salt-minion.
CLI Example:
.. code-block:: bash
salt 'minion' seed.apply path id [config=config_data] \\
[gen_key=(true|false)] [approve_key=(true|false)] \\
[install=(true|false)]
path
Full path to the directory, device, or disk image on the target
minion's file system.
id
Minion id with which to seed the path.
config
Minion configuration options. By default, the 'master' option is set to
the target host's 'master'.
approve_key
Request a pre-approval of the generated minion key. Requires
that the salt-master be configured to either auto-accept all keys or
expect a signing request from the target host. Default: true.
install
Install salt-minion, if absent. Default: true.
prep_install
Prepare the bootstrap script, but don't run it. Default: false
'''
stats = __salt__['file.stats'](path, follow_symlinks=True)
if not stats:
return '{0} does not exist'.format(path)
ftype = stats['type']
path = stats['target']
log.debug('Mounting %s at %s', ftype, path)
try:
os.makedirs(path)
except OSError:
# The directory already exists
pass
mpt = _mount(path, ftype, mount_point)
if not mpt:
return '{0} could not be mounted'.format(path)
tmp = os.path.join(mpt, 'tmp')
log.debug('Attempting to create directory %s', tmp)
try:
os.makedirs(tmp)
except OSError:
if not os.path.isdir(tmp):
raise
cfg_files = mkconfig(config, tmp=tmp, id_=id_, approve_key=approve_key,
pub_key=pub_key, priv_key=priv_key)
if _check_install(mpt):
# salt-minion is already installed, just move the config and keys
# into place
log.info('salt-minion pre-installed on image, '
'configuring as %s', id_)
minion_config = salt.config.minion_config(cfg_files['config'])
pki_dir = minion_config['pki_dir']
if not os.path.isdir(os.path.join(mpt, pki_dir.lstrip('/'))):
__salt__['file.makedirs'](
os.path.join(mpt, pki_dir.lstrip('/'), '')
)
os.rename(cfg_files['privkey'], os.path.join(
mpt, pki_dir.lstrip('/'), 'minion.pem'))
os.rename(cfg_files['pubkey'], os.path.join(
mpt, pki_dir.lstrip('/'), 'minion.pub'))
os.rename(cfg_files['config'], os.path.join(mpt, 'etc/salt/minion'))
res = True
elif install:
log.info('Attempting to install salt-minion to %s', mpt)
res = _install(mpt)
elif prep_install:
log.error('The prep_install option is no longer supported. Please use '
'the bootstrap script installed with Salt, located at %s.',
salt.syspaths.BOOTSTRAP)
res = False
else:
log.warning('No useful action performed on %s', mpt)
res = False
_umount(mpt, ftype)
return res
def mkconfig(config=None,
tmp=None,
id_=None,
approve_key=True,
pub_key=None,
priv_key=None):
'''
Generate keys and config and put them in a tmp directory.
pub_key
absolute path or file content of an optional preseeded salt key
priv_key
absolute path or file content of an optional preseeded salt key
CLI Example:
.. code-block:: bash
salt 'minion' seed.mkconfig [config=config_data] [tmp=tmp_dir] \\
[id_=minion_id] [approve_key=(true|false)]
'''
if tmp is None:
tmp = tempfile.mkdtemp()
if config is None:
config = {}
if 'master' not in config and __opts__['master'] != 'salt':
config['master'] = __opts__['master']
if id_:
config['id'] = id_
# Write the new minion's config to a tmp file
tmp_config = os.path.join(tmp, 'minion')
with salt.utils.files.fopen(tmp_config, 'w+') as fp_:
fp_.write(salt.utils.cloud.salt_config_to_yaml(config))
# Generate keys for the minion
pubkeyfn = os.path.join(tmp, 'minion.pub')
privkeyfn = os.path.join(tmp, 'minion.pem')
preseeded = pub_key and priv_key
if preseeded:
log.debug('Writing minion.pub to %s', pubkeyfn)
log.debug('Writing minion.pem to %s', privkeyfn)
with salt.utils.files.fopen(pubkeyfn, 'w') as fic:
fic.write(salt.utils.stringutils.to_str(_file_or_content(pub_key)))
with salt.utils.files.fopen(privkeyfn, 'w') as fic:
fic.write(salt.utils.stringutils.to_str(_file_or_content(priv_key)))
os.chmod(pubkeyfn, 0o600)
os.chmod(privkeyfn, 0o600)
else:
salt.crypt.gen_keys(tmp, 'minion', 2048)
if approve_key and not preseeded:
with salt.utils.files.fopen(pubkeyfn) as fp_:
pubkey = salt.utils.stringutils.to_unicode(fp_.read())
__salt__['pillar.ext']({'virtkey': [id_, pubkey]})
return {'config': tmp_config, 'pubkey': pubkeyfn, 'privkey': privkeyfn}
def _check_resolv(mpt):
'''
Check that the resolv.conf is present and populated
'''
resolv = os.path.join(mpt, 'etc/resolv.conf')
replace = False
if os.path.islink(resolv):
resolv = os.path.realpath(resolv)
if not os.path.isdir(os.path.dirname(resolv)):
os.makedirs(os.path.dirname(resolv))
if not os.path.isfile(resolv):
replace = True
if not replace:
with salt.utils.files.fopen(resolv, 'rb') as fp_:
conts = salt.utils.stringutils.to_unicode(fp_.read())
if 'nameserver' not in conts:
replace = True
if 'nameserver 127.0.0.1' in conts:
replace = True
if replace:
shutil.copy('/etc/resolv.conf', resolv)
def _check_install(root):
sh_ = '/bin/sh'
if os.path.isfile(os.path.join(root, 'bin/bash')):
sh_ = '/bin/bash'
cmd = ('if ! type salt-minion; then exit 1; fi')
cmd = 'chroot \'{0}\' {1} -c \'{2}\''.format(
root,
sh_,
cmd)
return not __salt__['cmd.retcode'](cmd,
output_loglevel='quiet',
python_shell=True)
|
saltstack/salt
|
salt/modules/seed.py
|
_check_resolv
|
python
|
def _check_resolv(mpt):
'''
Check that the resolv.conf is present and populated
'''
resolv = os.path.join(mpt, 'etc/resolv.conf')
replace = False
if os.path.islink(resolv):
resolv = os.path.realpath(resolv)
if not os.path.isdir(os.path.dirname(resolv)):
os.makedirs(os.path.dirname(resolv))
if not os.path.isfile(resolv):
replace = True
if not replace:
with salt.utils.files.fopen(resolv, 'rb') as fp_:
conts = salt.utils.stringutils.to_unicode(fp_.read())
if 'nameserver' not in conts:
replace = True
if 'nameserver 127.0.0.1' in conts:
replace = True
if replace:
shutil.copy('/etc/resolv.conf', resolv)
|
Check that the resolv.conf is present and populated
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/seed.py#L264-L284
| null |
# -*- coding: utf-8 -*-
'''
Virtual machine image management tools
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import os
import shutil
import logging
import tempfile
# Import salt libs
import salt.crypt
import salt.utils.cloud
import salt.utils.files
import salt.config
import salt.syspaths
import uuid
# Set up logging
log = logging.getLogger(__name__)
# Don't shadow built-in's.
__func_alias__ = {
'apply_': 'apply'
}
def _file_or_content(file_):
if os.path.exists(file_):
with salt.utils.files.fopen(file_) as fic:
return salt.utils.stringutils.to_unicode(fic.read())
return file_
def prep_bootstrap(mpt):
'''
Update and get the random script to a random place
CLI Example:
.. code-block:: bash
salt '*' seed.prep_bootstrap /tmp
'''
# Verify that the boostrap script is downloaded
bs_ = __salt__['config.gather_bootstrap_script']()
fpd_ = os.path.join(mpt, 'tmp', "{0}".format(
uuid.uuid4()))
if not os.path.exists(fpd_):
os.makedirs(fpd_)
os.chmod(fpd_, 0o700)
fp_ = os.path.join(fpd_, os.path.basename(bs_))
# Copy script into tmp
shutil.copy(bs_, fp_)
tmppath = fpd_.replace(mpt, '')
return fp_, tmppath
def _mount(path, ftype, root=None):
mpt = None
if ftype == 'block':
mpt = tempfile.mkdtemp()
if not __salt__['mount.mount'](mpt, path):
os.rmdir(mpt)
return None
elif ftype == 'dir':
return path
elif ftype == 'file':
if 'guestfs.mount' in __salt__:
util = 'guestfs'
elif 'qemu_nbd.init' in __salt__:
util = 'qemu_nbd'
else:
return None
mpt = __salt__['mount.mount'](path, device=root, util=util)
if not mpt:
return None
return mpt
def _umount(mpt, ftype):
if ftype == 'block':
__salt__['mount.umount'](mpt)
os.rmdir(mpt)
elif ftype == 'file':
__salt__['mount.umount'](mpt, util='qemu_nbd')
def apply_(path, id_=None, config=None, approve_key=True, install=True,
prep_install=False, pub_key=None, priv_key=None, mount_point=None):
'''
Seed a location (disk image, directory, or block device) with the
minion config, approve the minion's key, and/or install salt-minion.
CLI Example:
.. code-block:: bash
salt 'minion' seed.apply path id [config=config_data] \\
[gen_key=(true|false)] [approve_key=(true|false)] \\
[install=(true|false)]
path
Full path to the directory, device, or disk image on the target
minion's file system.
id
Minion id with which to seed the path.
config
Minion configuration options. By default, the 'master' option is set to
the target host's 'master'.
approve_key
Request a pre-approval of the generated minion key. Requires
that the salt-master be configured to either auto-accept all keys or
expect a signing request from the target host. Default: true.
install
Install salt-minion, if absent. Default: true.
prep_install
Prepare the bootstrap script, but don't run it. Default: false
'''
stats = __salt__['file.stats'](path, follow_symlinks=True)
if not stats:
return '{0} does not exist'.format(path)
ftype = stats['type']
path = stats['target']
log.debug('Mounting %s at %s', ftype, path)
try:
os.makedirs(path)
except OSError:
# The directory already exists
pass
mpt = _mount(path, ftype, mount_point)
if not mpt:
return '{0} could not be mounted'.format(path)
tmp = os.path.join(mpt, 'tmp')
log.debug('Attempting to create directory %s', tmp)
try:
os.makedirs(tmp)
except OSError:
if not os.path.isdir(tmp):
raise
cfg_files = mkconfig(config, tmp=tmp, id_=id_, approve_key=approve_key,
pub_key=pub_key, priv_key=priv_key)
if _check_install(mpt):
# salt-minion is already installed, just move the config and keys
# into place
log.info('salt-minion pre-installed on image, '
'configuring as %s', id_)
minion_config = salt.config.minion_config(cfg_files['config'])
pki_dir = minion_config['pki_dir']
if not os.path.isdir(os.path.join(mpt, pki_dir.lstrip('/'))):
__salt__['file.makedirs'](
os.path.join(mpt, pki_dir.lstrip('/'), '')
)
os.rename(cfg_files['privkey'], os.path.join(
mpt, pki_dir.lstrip('/'), 'minion.pem'))
os.rename(cfg_files['pubkey'], os.path.join(
mpt, pki_dir.lstrip('/'), 'minion.pub'))
os.rename(cfg_files['config'], os.path.join(mpt, 'etc/salt/minion'))
res = True
elif install:
log.info('Attempting to install salt-minion to %s', mpt)
res = _install(mpt)
elif prep_install:
log.error('The prep_install option is no longer supported. Please use '
'the bootstrap script installed with Salt, located at %s.',
salt.syspaths.BOOTSTRAP)
res = False
else:
log.warning('No useful action performed on %s', mpt)
res = False
_umount(mpt, ftype)
return res
def mkconfig(config=None,
tmp=None,
id_=None,
approve_key=True,
pub_key=None,
priv_key=None):
'''
Generate keys and config and put them in a tmp directory.
pub_key
absolute path or file content of an optional preseeded salt key
priv_key
absolute path or file content of an optional preseeded salt key
CLI Example:
.. code-block:: bash
salt 'minion' seed.mkconfig [config=config_data] [tmp=tmp_dir] \\
[id_=minion_id] [approve_key=(true|false)]
'''
if tmp is None:
tmp = tempfile.mkdtemp()
if config is None:
config = {}
if 'master' not in config and __opts__['master'] != 'salt':
config['master'] = __opts__['master']
if id_:
config['id'] = id_
# Write the new minion's config to a tmp file
tmp_config = os.path.join(tmp, 'minion')
with salt.utils.files.fopen(tmp_config, 'w+') as fp_:
fp_.write(salt.utils.cloud.salt_config_to_yaml(config))
# Generate keys for the minion
pubkeyfn = os.path.join(tmp, 'minion.pub')
privkeyfn = os.path.join(tmp, 'minion.pem')
preseeded = pub_key and priv_key
if preseeded:
log.debug('Writing minion.pub to %s', pubkeyfn)
log.debug('Writing minion.pem to %s', privkeyfn)
with salt.utils.files.fopen(pubkeyfn, 'w') as fic:
fic.write(salt.utils.stringutils.to_str(_file_or_content(pub_key)))
with salt.utils.files.fopen(privkeyfn, 'w') as fic:
fic.write(salt.utils.stringutils.to_str(_file_or_content(priv_key)))
os.chmod(pubkeyfn, 0o600)
os.chmod(privkeyfn, 0o600)
else:
salt.crypt.gen_keys(tmp, 'minion', 2048)
if approve_key and not preseeded:
with salt.utils.files.fopen(pubkeyfn) as fp_:
pubkey = salt.utils.stringutils.to_unicode(fp_.read())
__salt__['pillar.ext']({'virtkey': [id_, pubkey]})
return {'config': tmp_config, 'pubkey': pubkeyfn, 'privkey': privkeyfn}
def _install(mpt):
'''
Determine whether salt-minion is installed and, if not,
install it.
Return True if install is successful or already installed.
'''
_check_resolv(mpt)
boot_, tmppath = (prep_bootstrap(mpt)
or salt.syspaths.BOOTSTRAP)
# Exec the chroot command
cmd = 'if type salt-minion; then exit 0; '
cmd += 'else sh {0} -c /tmp; fi'.format(os.path.join(tmppath, 'bootstrap-salt.sh'))
return not __salt__['cmd.run_chroot'](mpt, cmd, python_shell=True)['retcode']
def _check_install(root):
sh_ = '/bin/sh'
if os.path.isfile(os.path.join(root, 'bin/bash')):
sh_ = '/bin/bash'
cmd = ('if ! type salt-minion; then exit 1; fi')
cmd = 'chroot \'{0}\' {1} -c \'{2}\''.format(
root,
sh_,
cmd)
return not __salt__['cmd.retcode'](cmd,
output_loglevel='quiet',
python_shell=True)
|
saltstack/salt
|
salt/utils/win_lgpo_netsh.py
|
_netsh_file
|
python
|
def _netsh_file(content):
'''
helper function to get the results of ``netsh -f content.txt``
Running ``netsh`` will drop you into a ``netsh`` prompt where you can issue
``netsh`` commands. You can put a series of commands in an external file and
run them as if from a ``netsh`` prompt using the ``-f`` switch. That's what
this function does.
Args:
content (str):
The contents of the file that will be run by the ``netsh -f``
command
Returns:
str: The text returned by the netsh command
'''
with tempfile.NamedTemporaryFile(mode='w',
prefix='salt-',
suffix='.netsh',
delete=False) as fp:
fp.write(content)
try:
log.debug('%s:\n%s', fp.name, content)
return salt.modules.cmdmod.run('netsh -f {0}'.format(fp.name), python_shell=True)
finally:
os.remove(fp.name)
|
helper function to get the results of ``netsh -f content.txt``
Running ``netsh`` will drop you into a ``netsh`` prompt where you can issue
``netsh`` commands. You can put a series of commands in an external file and
run them as if from a ``netsh`` prompt using the ``-f`` switch. That's what
this function does.
Args:
content (str):
The contents of the file that will be run by the ``netsh -f``
command
Returns:
str: The text returned by the netsh command
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_lgpo_netsh.py#L99-L126
| null |
# -*- coding: utf-8 -*-
r'''
A salt util for modifying firewall settings.
.. versionadded:: 2018.3.4
.. versionadded:: 2019.2.0
This util allows you to modify firewall settings in the local group policy in
addition to the normal firewall settings. Parameters are taken from the
netsh advfirewall prompt.
.. note::
More information can be found in the advfirewall context in netsh. This can
be access by opening a netsh prompt. At a command prompt type the following:
c:\>netsh
netsh>advfirewall
netsh advfirewall>set help
netsh advfirewall>set domain help
Usage:
.. code-block:: python
import salt.utils.win_lgpo_netsh
# Get the inbound/outbound firewall settings for connections on the
# local domain profile
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy')
# Get the inbound/outbound firewall settings for connections on the
# domain profile as defined by local group policy
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy',
store='lgpo')
# Get all firewall settings for connections on the domain profile
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain')
# Get all firewall settings for connections on the domain profile as
# defined by local group policy
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain', store='lgpo')
# Get all firewall settings for all profiles
salt.utils.win_lgpo_netsh.get_all_settings()
# Get all firewall settings for all profiles as defined by local group
# policy
salt.utils.win_lgpo_netsh.get_all_settings(store='lgpo')
# Set the inbound setting for the domain profile to block inbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound')
# Set the outbound setting for the domain profile to allow outbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
outbound='allowoutbound')
# Set inbound/outbound settings for the domain profile in the group
# policy to block inbound and allow outbound
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound',
outbound='allowoutbound',
store='lgpo')
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
from textwrap import dedent
import logging
import os
import re
import socket
import tempfile
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
from salt.ext.six.moves import zip
log = logging.getLogger(__name__)
__hostname__ = socket.gethostname()
__virtualname__ = 'netsh'
# Although utils are often directly imported, it is also possible to use the
# loader.
def __virtual__():
'''
Only load if on a Windows system
'''
if not salt.utils.platform.is_windows():
return False, 'This utility only available on Windows'
return __virtualname__
def _netsh_command(command, store):
if store.lower() not in ('local', 'lgpo'):
raise ValueError('Incorrect store: {0}'.format(store))
# set the store for local or lgpo
if store.lower() == 'local':
netsh_script = dedent('''\
advfirewall
set store local
{0}
'''.format(command))
else:
netsh_script = dedent('''\
advfirewall
set store gpo = {0}
{1}
'''.format(__hostname__, command))
return _netsh_file(content=netsh_script).splitlines()
def get_settings(profile, section, store='local'):
'''
Get the firewall property from the specified profile in the specified store
as returned by ``netsh advfirewall``.
Args:
profile (str):
The firewall profile to query. Valid options are:
- domain
- public
- private
section (str):
The property to query within the selected profile. Valid options
are:
- firewallpolicy : inbound/outbound behavior
- logging : firewall logging settings
- settings : firewall properties
- state : firewalls state (on | off)
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the properties for the specified profile
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# validate input
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if section.lower() not in ('state', 'firewallpolicy', 'settings', 'logging'):
raise ValueError('Incorrect section: {0}'.format(section))
if store.lower() not in ('local', 'lgpo'):
raise ValueError('Incorrect store: {0}'.format(store))
command = 'show {0}profile {1}'.format(profile, section)
# run it
results = _netsh_command(command=command, store=store)
# sample output:
# Domain Profile Settings:
# ----------------------------------------------------------------------
# LocalFirewallRules N/A (GPO-store only)
# LocalConSecRules N/A (GPO-store only)
# InboundUserNotification Disable
# RemoteManagement Disable
# UnicastResponseToMulticast Enable
# if it's less than 3 lines it failed
if len(results) < 3:
raise CommandExecutionError('Invalid results: {0}'.format(results))
ret = {}
# Skip the first 2 lines. Add everything else to a dictionary
for line in results[3:]:
ret.update(dict(list(zip(*[iter(re.split(r"\s{2,}", line))]*2))))
# Remove spaces from the values so that `Not Configured` is detected
# correctly
for item in ret:
ret[item] = ret[item].replace(' ', '')
# special handling for firewallpolicy
if section == 'firewallpolicy':
inbound, outbound = ret['Firewall Policy'].split(',')
return {'Inbound': inbound, 'Outbound': outbound}
return ret
def get_all_settings(profile, store='local'):
'''
Gets all the properties for the specified profile in the specified store
Args:
profile (str):
The firewall profile to query. Valid options are:
- domain
- public
- private
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the specified settings
'''
ret = dict()
ret.update(get_settings(profile=profile, section='state', store=store))
ret.update(get_settings(profile=profile, section='firewallpolicy', store=store))
ret.update(get_settings(profile=profile, section='settings', store=store))
ret.update(get_settings(profile=profile, section='logging', store=store))
return ret
def get_all_profiles(store='local'):
'''
Gets all properties for all profiles in the specified store
Args:
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the specified settings for each profile
'''
return {
'Domain Profile': get_all_settings(profile='domain', store=store),
'Private Profile': get_all_settings(profile='private', store=store),
'Public Profile': get_all_settings(profile='public', store=store)
}
def set_firewall_settings(profile,
inbound=None,
outbound=None,
store='local'):
'''
Set the firewall inbound/outbound settings for the specified profile and
store
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
inbound (str):
The inbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- blockinbound
- blockinboundalways
- allowinbound
- notconfigured
Default is ``None``
outbound (str):
The outbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- allowoutbound
- blockoutbound
- notconfigured
Default is ``None``
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if inbound and inbound.lower() not in ('blockinbound',
'blockinboundalways',
'allowinbound',
'notconfigured'):
raise ValueError('Incorrect inbound value: {0}'.format(inbound))
if outbound and outbound.lower() not in ('allowoutbound',
'blockoutbound',
'notconfigured'):
raise ValueError('Incorrect outbound value: {0}'.format(outbound))
if not inbound and not outbound:
raise ValueError('Must set inbound or outbound')
# You have to specify inbound and outbound setting at the same time
# If you're only specifying one, you have to get the current setting for the
# other
if not inbound or not outbound:
ret = get_settings(profile=profile,
section='firewallpolicy',
store=store)
if not inbound:
inbound = ret['Inbound']
if not outbound:
outbound = ret['Outbound']
command = 'set {0}profile firewallpolicy {1},{2}' \
''.format(profile, inbound, outbound)
results = _netsh_command(command=command, store=store)
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
def set_logging_settings(profile, setting, value, store='local'):
'''
Configure logging settings for the Windows firewall.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
setting (str):
The logging setting to configure. Valid options are:
- allowedconnections
- droppedconnections
- filename
- maxfilesize
value (str):
The value to apply to the setting. Valid values are dependent upon
the setting being configured. Valid options are:
allowedconnections:
- enable
- disable
- notconfigured
droppedconnections:
- enable
- disable
- notconfigured
filename:
- Full path and name of the firewall log file
- notconfigured
maxfilesize:
- 1 - 32767 (Kb)
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if setting.lower() not in ('allowedconnections',
'droppedconnections',
'filename',
'maxfilesize'):
raise ValueError('Incorrect setting: {0}'.format(setting))
if setting.lower() in ('allowedconnections', 'droppedconnections'):
if value.lower() not in ('enable', 'disable', 'notconfigured'):
raise ValueError('Incorrect value: {0}'.format(value))
# TODO: Consider adding something like the following to validate filename
# https://stackoverflow.com/questions/9532499/check-whether-a-path-is-valid-in-python-without-creating-a-file-at-the-paths-ta
if setting.lower() == 'maxfilesize':
if value.lower() != 'notconfigured':
# Must be a number between 1 and 32767
try:
int(value)
except ValueError:
raise ValueError('Incorrect value: {0}'.format(value))
if not 1 <= int(value) <= 32767:
raise ValueError('Incorrect value: {0}'.format(value))
# Run the command
command = 'set {0}profile logging {1} {2}'.format(profile, setting, value)
results = _netsh_command(command=command, store=store)
# A successful run should return an empty list
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
def set_settings(profile, setting, value, store='local'):
'''
Configure firewall settings.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
setting (str):
The firewall setting to configure. Valid options are:
- localfirewallrules
- localconsecrules
- inboundusernotification
- remotemanagement
- unicastresponsetomulticast
value (str):
The value to apply to the setting. Valid options are
- enable
- disable
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if setting.lower() not in ('localfirewallrules',
'localconsecrules',
'inboundusernotification',
'remotemanagement',
'unicastresponsetomulticast'):
raise ValueError('Incorrect setting: {0}'.format(setting))
if value.lower() not in ('enable', 'disable', 'notconfigured'):
raise ValueError('Incorrect value: {0}'.format(value))
# Run the command
command = 'set {0}profile settings {1} {2}'.format(profile, setting, value)
results = _netsh_command(command=command, store=store)
# A successful run should return an empty list
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
def set_state(profile, state, store='local'):
'''
Configure the firewall state.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
state (str):
The firewall state. Valid options are:
- on
- off
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if state.lower() not in ('on', 'off', 'notconfigured'):
raise ValueError('Incorrect state: {0}'.format(state))
# Run the command
command = 'set {0}profile state {1}'.format(profile, state)
results = _netsh_command(command=command, store=store)
# A successful run should return an empty list
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
|
saltstack/salt
|
salt/utils/win_lgpo_netsh.py
|
get_settings
|
python
|
def get_settings(profile, section, store='local'):
'''
Get the firewall property from the specified profile in the specified store
as returned by ``netsh advfirewall``.
Args:
profile (str):
The firewall profile to query. Valid options are:
- domain
- public
- private
section (str):
The property to query within the selected profile. Valid options
are:
- firewallpolicy : inbound/outbound behavior
- logging : firewall logging settings
- settings : firewall properties
- state : firewalls state (on | off)
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the properties for the specified profile
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# validate input
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if section.lower() not in ('state', 'firewallpolicy', 'settings', 'logging'):
raise ValueError('Incorrect section: {0}'.format(section))
if store.lower() not in ('local', 'lgpo'):
raise ValueError('Incorrect store: {0}'.format(store))
command = 'show {0}profile {1}'.format(profile, section)
# run it
results = _netsh_command(command=command, store=store)
# sample output:
# Domain Profile Settings:
# ----------------------------------------------------------------------
# LocalFirewallRules N/A (GPO-store only)
# LocalConSecRules N/A (GPO-store only)
# InboundUserNotification Disable
# RemoteManagement Disable
# UnicastResponseToMulticast Enable
# if it's less than 3 lines it failed
if len(results) < 3:
raise CommandExecutionError('Invalid results: {0}'.format(results))
ret = {}
# Skip the first 2 lines. Add everything else to a dictionary
for line in results[3:]:
ret.update(dict(list(zip(*[iter(re.split(r"\s{2,}", line))]*2))))
# Remove spaces from the values so that `Not Configured` is detected
# correctly
for item in ret:
ret[item] = ret[item].replace(' ', '')
# special handling for firewallpolicy
if section == 'firewallpolicy':
inbound, outbound = ret['Firewall Policy'].split(',')
return {'Inbound': inbound, 'Outbound': outbound}
return ret
|
Get the firewall property from the specified profile in the specified store
as returned by ``netsh advfirewall``.
Args:
profile (str):
The firewall profile to query. Valid options are:
- domain
- public
- private
section (str):
The property to query within the selected profile. Valid options
are:
- firewallpolicy : inbound/outbound behavior
- logging : firewall logging settings
- settings : firewall properties
- state : firewalls state (on | off)
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the properties for the specified profile
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_lgpo_netsh.py#L148-L224
|
[
"def _netsh_command(command, store):\n if store.lower() not in ('local', 'lgpo'):\n raise ValueError('Incorrect store: {0}'.format(store))\n # set the store for local or lgpo\n if store.lower() == 'local':\n netsh_script = dedent('''\\\n advfirewall\n set store local\n {0}\n '''.format(command))\n else:\n netsh_script = dedent('''\\\n advfirewall\n set store gpo = {0}\n {1}\n '''.format(__hostname__, command))\n return _netsh_file(content=netsh_script).splitlines()\n"
] |
# -*- coding: utf-8 -*-
r'''
A salt util for modifying firewall settings.
.. versionadded:: 2018.3.4
.. versionadded:: 2019.2.0
This util allows you to modify firewall settings in the local group policy in
addition to the normal firewall settings. Parameters are taken from the
netsh advfirewall prompt.
.. note::
More information can be found in the advfirewall context in netsh. This can
be access by opening a netsh prompt. At a command prompt type the following:
c:\>netsh
netsh>advfirewall
netsh advfirewall>set help
netsh advfirewall>set domain help
Usage:
.. code-block:: python
import salt.utils.win_lgpo_netsh
# Get the inbound/outbound firewall settings for connections on the
# local domain profile
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy')
# Get the inbound/outbound firewall settings for connections on the
# domain profile as defined by local group policy
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy',
store='lgpo')
# Get all firewall settings for connections on the domain profile
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain')
# Get all firewall settings for connections on the domain profile as
# defined by local group policy
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain', store='lgpo')
# Get all firewall settings for all profiles
salt.utils.win_lgpo_netsh.get_all_settings()
# Get all firewall settings for all profiles as defined by local group
# policy
salt.utils.win_lgpo_netsh.get_all_settings(store='lgpo')
# Set the inbound setting for the domain profile to block inbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound')
# Set the outbound setting for the domain profile to allow outbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
outbound='allowoutbound')
# Set inbound/outbound settings for the domain profile in the group
# policy to block inbound and allow outbound
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound',
outbound='allowoutbound',
store='lgpo')
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
from textwrap import dedent
import logging
import os
import re
import socket
import tempfile
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
from salt.ext.six.moves import zip
log = logging.getLogger(__name__)
__hostname__ = socket.gethostname()
__virtualname__ = 'netsh'
# Although utils are often directly imported, it is also possible to use the
# loader.
def __virtual__():
'''
Only load if on a Windows system
'''
if not salt.utils.platform.is_windows():
return False, 'This utility only available on Windows'
return __virtualname__
def _netsh_file(content):
'''
helper function to get the results of ``netsh -f content.txt``
Running ``netsh`` will drop you into a ``netsh`` prompt where you can issue
``netsh`` commands. You can put a series of commands in an external file and
run them as if from a ``netsh`` prompt using the ``-f`` switch. That's what
this function does.
Args:
content (str):
The contents of the file that will be run by the ``netsh -f``
command
Returns:
str: The text returned by the netsh command
'''
with tempfile.NamedTemporaryFile(mode='w',
prefix='salt-',
suffix='.netsh',
delete=False) as fp:
fp.write(content)
try:
log.debug('%s:\n%s', fp.name, content)
return salt.modules.cmdmod.run('netsh -f {0}'.format(fp.name), python_shell=True)
finally:
os.remove(fp.name)
def _netsh_command(command, store):
if store.lower() not in ('local', 'lgpo'):
raise ValueError('Incorrect store: {0}'.format(store))
# set the store for local or lgpo
if store.lower() == 'local':
netsh_script = dedent('''\
advfirewall
set store local
{0}
'''.format(command))
else:
netsh_script = dedent('''\
advfirewall
set store gpo = {0}
{1}
'''.format(__hostname__, command))
return _netsh_file(content=netsh_script).splitlines()
def get_all_settings(profile, store='local'):
'''
Gets all the properties for the specified profile in the specified store
Args:
profile (str):
The firewall profile to query. Valid options are:
- domain
- public
- private
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the specified settings
'''
ret = dict()
ret.update(get_settings(profile=profile, section='state', store=store))
ret.update(get_settings(profile=profile, section='firewallpolicy', store=store))
ret.update(get_settings(profile=profile, section='settings', store=store))
ret.update(get_settings(profile=profile, section='logging', store=store))
return ret
def get_all_profiles(store='local'):
'''
Gets all properties for all profiles in the specified store
Args:
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the specified settings for each profile
'''
return {
'Domain Profile': get_all_settings(profile='domain', store=store),
'Private Profile': get_all_settings(profile='private', store=store),
'Public Profile': get_all_settings(profile='public', store=store)
}
def set_firewall_settings(profile,
inbound=None,
outbound=None,
store='local'):
'''
Set the firewall inbound/outbound settings for the specified profile and
store
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
inbound (str):
The inbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- blockinbound
- blockinboundalways
- allowinbound
- notconfigured
Default is ``None``
outbound (str):
The outbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- allowoutbound
- blockoutbound
- notconfigured
Default is ``None``
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if inbound and inbound.lower() not in ('blockinbound',
'blockinboundalways',
'allowinbound',
'notconfigured'):
raise ValueError('Incorrect inbound value: {0}'.format(inbound))
if outbound and outbound.lower() not in ('allowoutbound',
'blockoutbound',
'notconfigured'):
raise ValueError('Incorrect outbound value: {0}'.format(outbound))
if not inbound and not outbound:
raise ValueError('Must set inbound or outbound')
# You have to specify inbound and outbound setting at the same time
# If you're only specifying one, you have to get the current setting for the
# other
if not inbound or not outbound:
ret = get_settings(profile=profile,
section='firewallpolicy',
store=store)
if not inbound:
inbound = ret['Inbound']
if not outbound:
outbound = ret['Outbound']
command = 'set {0}profile firewallpolicy {1},{2}' \
''.format(profile, inbound, outbound)
results = _netsh_command(command=command, store=store)
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
def set_logging_settings(profile, setting, value, store='local'):
'''
Configure logging settings for the Windows firewall.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
setting (str):
The logging setting to configure. Valid options are:
- allowedconnections
- droppedconnections
- filename
- maxfilesize
value (str):
The value to apply to the setting. Valid values are dependent upon
the setting being configured. Valid options are:
allowedconnections:
- enable
- disable
- notconfigured
droppedconnections:
- enable
- disable
- notconfigured
filename:
- Full path and name of the firewall log file
- notconfigured
maxfilesize:
- 1 - 32767 (Kb)
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if setting.lower() not in ('allowedconnections',
'droppedconnections',
'filename',
'maxfilesize'):
raise ValueError('Incorrect setting: {0}'.format(setting))
if setting.lower() in ('allowedconnections', 'droppedconnections'):
if value.lower() not in ('enable', 'disable', 'notconfigured'):
raise ValueError('Incorrect value: {0}'.format(value))
# TODO: Consider adding something like the following to validate filename
# https://stackoverflow.com/questions/9532499/check-whether-a-path-is-valid-in-python-without-creating-a-file-at-the-paths-ta
if setting.lower() == 'maxfilesize':
if value.lower() != 'notconfigured':
# Must be a number between 1 and 32767
try:
int(value)
except ValueError:
raise ValueError('Incorrect value: {0}'.format(value))
if not 1 <= int(value) <= 32767:
raise ValueError('Incorrect value: {0}'.format(value))
# Run the command
command = 'set {0}profile logging {1} {2}'.format(profile, setting, value)
results = _netsh_command(command=command, store=store)
# A successful run should return an empty list
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
def set_settings(profile, setting, value, store='local'):
'''
Configure firewall settings.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
setting (str):
The firewall setting to configure. Valid options are:
- localfirewallrules
- localconsecrules
- inboundusernotification
- remotemanagement
- unicastresponsetomulticast
value (str):
The value to apply to the setting. Valid options are
- enable
- disable
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if setting.lower() not in ('localfirewallrules',
'localconsecrules',
'inboundusernotification',
'remotemanagement',
'unicastresponsetomulticast'):
raise ValueError('Incorrect setting: {0}'.format(setting))
if value.lower() not in ('enable', 'disable', 'notconfigured'):
raise ValueError('Incorrect value: {0}'.format(value))
# Run the command
command = 'set {0}profile settings {1} {2}'.format(profile, setting, value)
results = _netsh_command(command=command, store=store)
# A successful run should return an empty list
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
def set_state(profile, state, store='local'):
'''
Configure the firewall state.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
state (str):
The firewall state. Valid options are:
- on
- off
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if state.lower() not in ('on', 'off', 'notconfigured'):
raise ValueError('Incorrect state: {0}'.format(state))
# Run the command
command = 'set {0}profile state {1}'.format(profile, state)
results = _netsh_command(command=command, store=store)
# A successful run should return an empty list
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
|
saltstack/salt
|
salt/utils/win_lgpo_netsh.py
|
get_all_settings
|
python
|
def get_all_settings(profile, store='local'):
    '''
    Collect every property section for one firewall profile.

    Args:
        profile (str): Profile to query: ``domain``, ``public`` or
            ``private``.
        store (str): ``local`` for the local firewall policy or ``lgpo``
            for the policy defined by local group policy. Default: ``local``.

    Returns:
        dict: Merged settings from the state, firewallpolicy, settings
        and logging sections.
    '''
    ret = dict()
    # Merge all four property sections into one flat dict
    for section in ('state', 'firewallpolicy', 'settings', 'logging'):
        ret.update(get_settings(profile=profile,
                                section=section,
                                store=store))
    return ret
|
Gets all the properties for the specified profile in the specified store
Args:
profile (str):
The firewall profile to query. Valid options are:
- domain
- public
- private
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the specified settings
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_lgpo_netsh.py#L227-L257
|
[
"def get_settings(profile, section, store='local'):\n '''\n Get the firewall property from the specified profile in the specified store\n as returned by ``netsh advfirewall``.\n\n Args:\n\n profile (str):\n The firewall profile to query. Valid options are:\n\n - domain\n - public\n - private\n\n section (str):\n The property to query within the selected profile. Valid options\n are:\n\n - firewallpolicy : inbound/outbound behavior\n - logging : firewall logging settings\n - settings : firewall properties\n - state : firewalls state (on | off)\n\n store (str):\n The store to use. This is either the local firewall policy or the\n policy defined by local group policy. Valid options are:\n\n - lgpo\n - local\n\n Default is ``local``\n\n Returns:\n dict: A dictionary containing the properties for the specified profile\n\n Raises:\n CommandExecutionError: If an error occurs\n ValueError: If the parameters are incorrect\n '''\n # validate input\n if profile.lower() not in ('domain', 'public', 'private'):\n raise ValueError('Incorrect profile: {0}'.format(profile))\n if section.lower() not in ('state', 'firewallpolicy', 'settings', 'logging'):\n raise ValueError('Incorrect section: {0}'.format(section))\n if store.lower() not in ('local', 'lgpo'):\n raise ValueError('Incorrect store: {0}'.format(store))\n command = 'show {0}profile {1}'.format(profile, section)\n # run it\n results = _netsh_command(command=command, store=store)\n # sample output:\n # Domain Profile Settings:\n # ----------------------------------------------------------------------\n # LocalFirewallRules N/A (GPO-store only)\n # LocalConSecRules N/A (GPO-store only)\n # InboundUserNotification Disable\n # RemoteManagement Disable\n # UnicastResponseToMulticast Enable\n\n # if it's less than 3 lines it failed\n if len(results) < 3:\n raise CommandExecutionError('Invalid results: {0}'.format(results))\n ret = {}\n # Skip the first 2 lines. 
Add everything else to a dictionary\n for line in results[3:]:\n ret.update(dict(list(zip(*[iter(re.split(r\"\\s{2,}\", line))]*2))))\n\n # Remove spaces from the values so that `Not Configured` is detected\n # correctly\n for item in ret:\n ret[item] = ret[item].replace(' ', '')\n\n # special handling for firewallpolicy\n if section == 'firewallpolicy':\n inbound, outbound = ret['Firewall Policy'].split(',')\n return {'Inbound': inbound, 'Outbound': outbound}\n\n return ret\n"
] |
# -*- coding: utf-8 -*-
r'''
A salt util for modifying firewall settings.
.. versionadded:: 2018.3.4
.. versionadded:: 2019.2.0
This util allows you to modify firewall settings in the local group policy in
addition to the normal firewall settings. Parameters are taken from the
netsh advfirewall prompt.
.. note::
More information can be found in the advfirewall context in netsh. This can
be access by opening a netsh prompt. At a command prompt type the following:
c:\>netsh
netsh>advfirewall
netsh advfirewall>set help
netsh advfirewall>set domain help
Usage:
.. code-block:: python
import salt.utils.win_lgpo_netsh
# Get the inbound/outbound firewall settings for connections on the
# local domain profile
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy')
# Get the inbound/outbound firewall settings for connections on the
# domain profile as defined by local group policy
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy',
store='lgpo')
# Get all firewall settings for connections on the domain profile
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain')
# Get all firewall settings for connections on the domain profile as
# defined by local group policy
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain', store='lgpo')
# Get all firewall settings for all profiles
salt.utils.win_lgpo_netsh.get_all_settings()
# Get all firewall settings for all profiles as defined by local group
# policy
salt.utils.win_lgpo_netsh.get_all_settings(store='lgpo')
# Set the inbound setting for the domain profile to block inbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound')
# Set the outbound setting for the domain profile to allow outbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
outbound='allowoutbound')
# Set inbound/outbound settings for the domain profile in the group
# policy to block inbound and allow outbound
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound',
outbound='allowoutbound',
store='lgpo')
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
from textwrap import dedent
import logging
import os
import re
import socket
import tempfile
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
from salt.ext.six.moves import zip
log = logging.getLogger(__name__)
__hostname__ = socket.gethostname()
__virtualname__ = 'netsh'
# Although utils are often directly imported, it is also possible to use the
# loader.
def __virtual__():
    '''
    Only load if on a Windows system.

    Returns:
        str: The virtual name of this util on Windows
        tuple: ``(False, reason)`` on any other platform
    '''
    # FIX: salt.utils.platform is never imported at module level in this
    # file; the original relied on it being pulled in transitively (likely
    # via salt.modules.cmdmod). Import it explicitly so this check cannot
    # fail with AttributeError.
    import salt.utils.platform
    if not salt.utils.platform.is_windows():
        return False, 'This utility only available on Windows'
    return __virtualname__
def _netsh_file(content):
    '''
    Run a series of ``netsh`` commands from a temporary script file.

    ``netsh -f <file>`` executes the commands in ``<file>`` exactly as if
    they had been typed at an interactive ``netsh`` prompt.

    Args:
        content (str): The netsh commands to execute, one per line.

    Returns:
        str: The combined output of the netsh run.
    '''
    tmp = tempfile.NamedTemporaryFile(mode='w',
                                      prefix='salt-',
                                      suffix='.netsh',
                                      delete=False)
    with tmp as script:
        script.write(content)
    try:
        log.debug('%s:\n%s', tmp.name, content)
        return salt.modules.cmdmod.run(
            'netsh -f {0}'.format(tmp.name), python_shell=True)
    finally:
        # Always clean up the temp script, even if netsh fails
        os.remove(tmp.name)
def _netsh_command(command, store):
if store.lower() not in ('local', 'lgpo'):
raise ValueError('Incorrect store: {0}'.format(store))
# set the store for local or lgpo
if store.lower() == 'local':
netsh_script = dedent('''\
advfirewall
set store local
{0}
'''.format(command))
else:
netsh_script = dedent('''\
advfirewall
set store gpo = {0}
{1}
'''.format(__hostname__, command))
return _netsh_file(content=netsh_script).splitlines()
def get_settings(profile, section, store='local'):
    '''
    Query one section of firewall properties for a profile via
    ``netsh advfirewall``.

    Args:
        profile (str): Profile to query: ``domain``, ``public`` or
            ``private``.
        section (str): Property group to query:
            - firewallpolicy : inbound/outbound behavior
            - logging : firewall logging settings
            - settings : firewall properties
            - state : firewall state (on | off)
        store (str): ``local`` or ``lgpo``. Default: ``local``.

    Returns:
        dict: The requested properties. For ``firewallpolicy`` the dict
        has exactly the keys ``Inbound`` and ``Outbound``.

    Raises:
        CommandExecutionError: If the netsh output cannot be parsed
        ValueError: If a parameter is invalid
    '''
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if section.lower() not in ('state', 'firewallpolicy', 'settings', 'logging'):
        raise ValueError('Incorrect section: {0}'.format(section))
    if store.lower() not in ('local', 'lgpo'):
        raise ValueError('Incorrect store: {0}'.format(store))
    results = _netsh_command(
        command='show {0}profile {1}'.format(profile, section),
        store=store)
    # Expected output shape:
    #   Domain Profile Settings:
    #   ------------------------------------------------------------
    #   InboundUserNotification               Disable
    #   ...
    # Fewer than 3 lines means netsh failed outright.
    if len(results) < 3:
        raise CommandExecutionError('Invalid results: {0}'.format(results))
    ret = {}
    # Skip the header lines (title, separator, blank); each remaining
    # line is a key/value pair separated by a run of 2+ spaces.
    for row in results[3:]:
        fields = re.split(r"\s{2,}", row)
        ret.update(dict(list(zip(*[iter(fields)] * 2))))
    # Collapse internal spaces so e.g. 'Not Configured' becomes
    # 'NotConfigured' and is detected consistently downstream.
    for key in ret:
        ret[key] = ret[key].replace(' ', '')
    if section == 'firewallpolicy':
        # netsh reports the policy as 'Inbound,Outbound' on one line
        inbound, outbound = ret['Firewall Policy'].split(',')
        return {'Inbound': inbound, 'Outbound': outbound}
    return ret
def get_all_profiles(store='local'):
    '''
    Return all firewall settings for every profile in the given store.

    Args:
        store (str): ``local`` for the local firewall policy or ``lgpo``
            for the policy defined by local group policy. Default: ``local``.

    Returns:
        dict: Profile display name mapped to that profile's settings dict.
    '''
    return {
        '{0} Profile'.format(name): get_all_settings(profile=name.lower(),
                                                     store=store)
        for name in ('Domain', 'Private', 'Public')
    }
def set_firewall_settings(profile,
                          inbound=None,
                          outbound=None,
                          store='local'):
    '''
    Set the inbound and/or outbound firewall policy for a profile.

    Args:
        profile (str): Profile to configure: ``domain``, ``public`` or
            ``private``.
        inbound (str): New inbound policy, or ``None`` to leave it
            unchanged. Valid: ``blockinbound``, ``blockinboundalways``,
            ``allowinbound``, ``notconfigured``. Default: ``None``.
        outbound (str): New outbound policy, or ``None`` to leave it
            unchanged. Valid: ``allowoutbound``, ``blockoutbound``,
            ``notconfigured``. Default: ``None``.
        store (str): ``local`` or ``lgpo``. Default: ``local``.

    Returns:
        bool: ``True`` on success

    Raises:
        CommandExecutionError: If netsh reports an error
        ValueError: If a parameter is invalid
    '''
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    valid_inbound = ('blockinbound', 'blockinboundalways',
                     'allowinbound', 'notconfigured')
    if inbound and inbound.lower() not in valid_inbound:
        raise ValueError('Incorrect inbound value: {0}'.format(inbound))
    valid_outbound = ('allowoutbound', 'blockoutbound', 'notconfigured')
    if outbound and outbound.lower() not in valid_outbound:
        raise ValueError('Incorrect outbound value: {0}'.format(outbound))
    if not inbound and not outbound:
        raise ValueError('Must set inbound or outbound')
    # netsh requires both directions on the command line, so fill in any
    # missing one from the current configuration.
    if not (inbound and outbound):
        current = get_settings(profile=profile,
                               section='firewallpolicy',
                               store=store)
        inbound = inbound or current['Inbound']
        outbound = outbound or current['Outbound']
    results = _netsh_command(
        command='set {0}profile firewallpolicy {1},{2}'.format(
            profile, inbound, outbound),
        store=store)
    # netsh is silent on success; any output indicates failure
    if results:
        raise CommandExecutionError('An error occurred: {0}'.format(results))
    return True
def set_logging_settings(profile, setting, value, store='local'):
    '''
    Configure firewall logging for the given profile.

    Args:
        profile (str): Profile to configure: ``domain``, ``public`` or
            ``private``.
        setting (str): The logging setting to configure:
            ``allowedconnections``, ``droppedconnections``, ``filename``
            or ``maxfilesize``.
        value (str): Value for the setting. Valid values depend on the
            setting:
            - allowedconnections / droppedconnections: ``enable``,
              ``disable`` or ``notconfigured``
            - filename: full path and name of the log file, or
              ``notconfigured``
            - maxfilesize: 1 - 32767 (Kb), or ``notconfigured``
        store (str): ``local`` or ``lgpo``. Default: ``local``.

    Returns:
        bool: ``True`` on success

    Raises:
        CommandExecutionError: If netsh reports an error
        ValueError: If a parameter is invalid
    '''
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if setting.lower() not in ('allowedconnections',
                               'droppedconnections',
                               'filename',
                               'maxfilesize'):
        raise ValueError('Incorrect setting: {0}'.format(setting))
    if setting.lower() in ('allowedconnections', 'droppedconnections'):
        if value.lower() not in ('enable', 'disable', 'notconfigured'):
            raise ValueError('Incorrect value: {0}'.format(value))
    # TODO: Consider validating 'filename' as a path, e.g.
    # https://stackoverflow.com/questions/9532499/check-whether-a-path-is-valid-in-python-without-creating-a-file-at-the-paths-ta
    if setting.lower() == 'maxfilesize' and value.lower() != 'notconfigured':
        # Must be a number between 1 and 32767 (Kb). Convert once and
        # reuse the result (the original parsed the string twice).
        try:
            max_size = int(value)
        except ValueError:
            raise ValueError('Incorrect value: {0}'.format(value))
        if not 1 <= max_size <= 32767:
            raise ValueError('Incorrect value: {0}'.format(value))
    # Run the command; netsh produces no output on success
    command = 'set {0}profile logging {1} {2}'.format(profile, setting, value)
    results = _netsh_command(command=command, store=store)
    if results:
        raise CommandExecutionError('An error occurred: {0}'.format(results))
    return True
def set_settings(profile, setting, value, store='local'):
    '''
    Configure a firewall behavior setting for the given profile.

    Args:
        profile (str): Profile to configure: ``domain``, ``public`` or
            ``private``.
        setting (str): One of ``localfirewallrules``,
            ``localconsecrules``, ``inboundusernotification``,
            ``remotemanagement`` or ``unicastresponsetomulticast``.
        value (str): ``enable``, ``disable`` or ``notconfigured``.
        store (str): ``local`` or ``lgpo``. Default: ``local``.

    Returns:
        bool: ``True`` on success

    Raises:
        CommandExecutionError: If netsh reports an error
        ValueError: If a parameter is invalid
    '''
    valid_settings = ('localfirewallrules',
                      'localconsecrules',
                      'inboundusernotification',
                      'remotemanagement',
                      'unicastresponsetomulticast')
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if setting.lower() not in valid_settings:
        raise ValueError('Incorrect setting: {0}'.format(setting))
    if value.lower() not in ('enable', 'disable', 'notconfigured'):
        raise ValueError('Incorrect value: {0}'.format(value))
    # netsh emits nothing on success; any output means failure
    results = _netsh_command(
        command='set {0}profile settings {1} {2}'.format(
            profile, setting, value),
        store=store)
    if results:
        raise CommandExecutionError('An error occurred: {0}'.format(results))
    return True
def set_state(profile, state, store='local'):
    '''
    Set the on/off state of the firewall for the given profile.

    Args:
        profile (str): Profile to modify: ``domain``, ``public`` or
            ``private``.
        state (str): Desired state: ``on``, ``off`` or ``notconfigured``.
        store (str): ``local`` for the local firewall policy or ``lgpo``
            for the policy defined by local group policy. Default: ``local``.

    Returns:
        bool: ``True`` on success

    Raises:
        CommandExecutionError: If netsh reports an error
        ValueError: If a parameter is invalid
    '''
    # Validate input before shelling out to netsh
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if state.lower() not in ('on', 'off', 'notconfigured'):
        raise ValueError('Incorrect state: {0}'.format(state))
    # netsh emits nothing on success; any output means failure
    results = _netsh_command(
        command='set {0}profile state {1}'.format(profile, state),
        store=store)
    if results:
        raise CommandExecutionError('An error occurred: {0}'.format(results))
    return True
|
saltstack/salt
|
salt/utils/win_lgpo_netsh.py
|
get_all_profiles
|
python
|
def get_all_profiles(store='local'):
    '''
    Return all firewall settings for every profile in the given store.

    Args:
        store (str): ``local`` for the local firewall policy or ``lgpo``
            for the policy defined by local group policy. Default: ``local``.

    Returns:
        dict: Profile display name mapped to that profile's settings dict.
    '''
    return {
        '{0} Profile'.format(name): get_all_settings(profile=name.lower(),
                                                     store=store)
        for name in ('Domain', 'Private', 'Public')
    }
|
Gets all properties for all profiles in the specified store
Args:
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the specified settings for each profile
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_lgpo_netsh.py#L260-L282
|
[
"def get_all_settings(profile, store='local'):\n '''\n Gets all the properties for the specified profile in the specified store\n\n Args:\n\n profile (str):\n The firewall profile to query. Valid options are:\n\n - domain\n - public\n - private\n\n store (str):\n The store to use. This is either the local firewall policy or the\n policy defined by local group policy. Valid options are:\n\n - lgpo\n - local\n\n Default is ``local``\n\n Returns:\n dict: A dictionary containing the specified settings\n '''\n ret = dict()\n ret.update(get_settings(profile=profile, section='state', store=store))\n ret.update(get_settings(profile=profile, section='firewallpolicy', store=store))\n ret.update(get_settings(profile=profile, section='settings', store=store))\n ret.update(get_settings(profile=profile, section='logging', store=store))\n return ret\n"
] |
# -*- coding: utf-8 -*-
r'''
A salt util for modifying firewall settings.
.. versionadded:: 2018.3.4
.. versionadded:: 2019.2.0
This util allows you to modify firewall settings in the local group policy in
addition to the normal firewall settings. Parameters are taken from the
netsh advfirewall prompt.
.. note::
More information can be found in the advfirewall context in netsh. This can
be access by opening a netsh prompt. At a command prompt type the following:
c:\>netsh
netsh>advfirewall
netsh advfirewall>set help
netsh advfirewall>set domain help
Usage:
.. code-block:: python
import salt.utils.win_lgpo_netsh
# Get the inbound/outbound firewall settings for connections on the
# local domain profile
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy')
# Get the inbound/outbound firewall settings for connections on the
# domain profile as defined by local group policy
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy',
store='lgpo')
# Get all firewall settings for connections on the domain profile
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain')
# Get all firewall settings for connections on the domain profile as
# defined by local group policy
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain', store='lgpo')
# Get all firewall settings for all profiles
salt.utils.win_lgpo_netsh.get_all_settings()
# Get all firewall settings for all profiles as defined by local group
# policy
salt.utils.win_lgpo_netsh.get_all_settings(store='lgpo')
# Set the inbound setting for the domain profile to block inbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound')
# Set the outbound setting for the domain profile to allow outbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
outbound='allowoutbound')
# Set inbound/outbound settings for the domain profile in the group
# policy to block inbound and allow outbound
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound',
outbound='allowoutbound',
store='lgpo')
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
from textwrap import dedent
import logging
import os
import re
import socket
import tempfile
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
from salt.ext.six.moves import zip
log = logging.getLogger(__name__)
__hostname__ = socket.gethostname()
__virtualname__ = 'netsh'
# Although utils are often directly imported, it is also possible to use the
# loader.
def __virtual__():
    '''
    Only load if on a Windows system.

    Returns:
        str: The virtual name of this util on Windows
        tuple: ``(False, reason)`` on any other platform
    '''
    # FIX: salt.utils.platform is never imported at module level in this
    # file; the original relied on it being pulled in transitively (likely
    # via salt.modules.cmdmod). Import it explicitly so this check cannot
    # fail with AttributeError.
    import salt.utils.platform
    if not salt.utils.platform.is_windows():
        return False, 'This utility only available on Windows'
    return __virtualname__
def _netsh_file(content):
    '''
    Run a series of ``netsh`` commands from a temporary script file.

    ``netsh -f <file>`` executes the commands in ``<file>`` exactly as if
    they had been typed at an interactive ``netsh`` prompt.

    Args:
        content (str): The netsh commands to execute, one per line.

    Returns:
        str: The combined output of the netsh run.
    '''
    tmp = tempfile.NamedTemporaryFile(mode='w',
                                      prefix='salt-',
                                      suffix='.netsh',
                                      delete=False)
    with tmp as script:
        script.write(content)
    try:
        log.debug('%s:\n%s', tmp.name, content)
        return salt.modules.cmdmod.run(
            'netsh -f {0}'.format(tmp.name), python_shell=True)
    finally:
        # Always clean up the temp script, even if netsh fails
        os.remove(tmp.name)
def _netsh_command(command, store):
if store.lower() not in ('local', 'lgpo'):
raise ValueError('Incorrect store: {0}'.format(store))
# set the store for local or lgpo
if store.lower() == 'local':
netsh_script = dedent('''\
advfirewall
set store local
{0}
'''.format(command))
else:
netsh_script = dedent('''\
advfirewall
set store gpo = {0}
{1}
'''.format(__hostname__, command))
return _netsh_file(content=netsh_script).splitlines()
def get_settings(profile, section, store='local'):
    '''
    Query one section of firewall properties for a profile via
    ``netsh advfirewall``.

    Args:
        profile (str): Profile to query: ``domain``, ``public`` or
            ``private``.
        section (str): Property group to query:
            - firewallpolicy : inbound/outbound behavior
            - logging : firewall logging settings
            - settings : firewall properties
            - state : firewall state (on | off)
        store (str): ``local`` or ``lgpo``. Default: ``local``.

    Returns:
        dict: The requested properties. For ``firewallpolicy`` the dict
        has exactly the keys ``Inbound`` and ``Outbound``.

    Raises:
        CommandExecutionError: If the netsh output cannot be parsed
        ValueError: If a parameter is invalid
    '''
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if section.lower() not in ('state', 'firewallpolicy', 'settings', 'logging'):
        raise ValueError('Incorrect section: {0}'.format(section))
    if store.lower() not in ('local', 'lgpo'):
        raise ValueError('Incorrect store: {0}'.format(store))
    results = _netsh_command(
        command='show {0}profile {1}'.format(profile, section),
        store=store)
    # Expected output shape:
    #   Domain Profile Settings:
    #   ------------------------------------------------------------
    #   InboundUserNotification               Disable
    #   ...
    # Fewer than 3 lines means netsh failed outright.
    if len(results) < 3:
        raise CommandExecutionError('Invalid results: {0}'.format(results))
    ret = {}
    # Skip the header lines (title, separator, blank); each remaining
    # line is a key/value pair separated by a run of 2+ spaces.
    for row in results[3:]:
        fields = re.split(r"\s{2,}", row)
        ret.update(dict(list(zip(*[iter(fields)] * 2))))
    # Collapse internal spaces so e.g. 'Not Configured' becomes
    # 'NotConfigured' and is detected consistently downstream.
    for key in ret:
        ret[key] = ret[key].replace(' ', '')
    if section == 'firewallpolicy':
        # netsh reports the policy as 'Inbound,Outbound' on one line
        inbound, outbound = ret['Firewall Policy'].split(',')
        return {'Inbound': inbound, 'Outbound': outbound}
    return ret
def get_all_settings(profile, store='local'):
    '''
    Collect every property section for one firewall profile.

    Args:
        profile (str): Profile to query: ``domain``, ``public`` or
            ``private``.
        store (str): ``local`` for the local firewall policy or ``lgpo``
            for the policy defined by local group policy. Default: ``local``.

    Returns:
        dict: Merged settings from the state, firewallpolicy, settings
        and logging sections.
    '''
    ret = dict()
    # Merge all four property sections into one flat dict
    for section in ('state', 'firewallpolicy', 'settings', 'logging'):
        ret.update(get_settings(profile=profile,
                                section=section,
                                store=store))
    return ret
def set_firewall_settings(profile,
                          inbound=None,
                          outbound=None,
                          store='local'):
    '''
    Set the inbound and/or outbound firewall policy for a profile.

    Args:
        profile (str): Profile to configure: ``domain``, ``public`` or
            ``private``.
        inbound (str): New inbound policy, or ``None`` to leave it
            unchanged. Valid: ``blockinbound``, ``blockinboundalways``,
            ``allowinbound``, ``notconfigured``. Default: ``None``.
        outbound (str): New outbound policy, or ``None`` to leave it
            unchanged. Valid: ``allowoutbound``, ``blockoutbound``,
            ``notconfigured``. Default: ``None``.
        store (str): ``local`` or ``lgpo``. Default: ``local``.

    Returns:
        bool: ``True`` on success

    Raises:
        CommandExecutionError: If netsh reports an error
        ValueError: If a parameter is invalid
    '''
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    valid_inbound = ('blockinbound', 'blockinboundalways',
                     'allowinbound', 'notconfigured')
    if inbound and inbound.lower() not in valid_inbound:
        raise ValueError('Incorrect inbound value: {0}'.format(inbound))
    valid_outbound = ('allowoutbound', 'blockoutbound', 'notconfigured')
    if outbound and outbound.lower() not in valid_outbound:
        raise ValueError('Incorrect outbound value: {0}'.format(outbound))
    if not inbound and not outbound:
        raise ValueError('Must set inbound or outbound')
    # netsh requires both directions on the command line, so fill in any
    # missing one from the current configuration.
    if not (inbound and outbound):
        current = get_settings(profile=profile,
                               section='firewallpolicy',
                               store=store)
        inbound = inbound or current['Inbound']
        outbound = outbound or current['Outbound']
    results = _netsh_command(
        command='set {0}profile firewallpolicy {1},{2}'.format(
            profile, inbound, outbound),
        store=store)
    # netsh is silent on success; any output indicates failure
    if results:
        raise CommandExecutionError('An error occurred: {0}'.format(results))
    return True
def set_logging_settings(profile, setting, value, store='local'):
    '''
    Configure firewall logging for the given profile.

    Args:
        profile (str): Profile to configure: ``domain``, ``public`` or
            ``private``.
        setting (str): The logging setting to configure:
            ``allowedconnections``, ``droppedconnections``, ``filename``
            or ``maxfilesize``.
        value (str): Value for the setting. Valid values depend on the
            setting:
            - allowedconnections / droppedconnections: ``enable``,
              ``disable`` or ``notconfigured``
            - filename: full path and name of the log file, or
              ``notconfigured``
            - maxfilesize: 1 - 32767 (Kb), or ``notconfigured``
        store (str): ``local`` or ``lgpo``. Default: ``local``.

    Returns:
        bool: ``True`` on success

    Raises:
        CommandExecutionError: If netsh reports an error
        ValueError: If a parameter is invalid
    '''
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if setting.lower() not in ('allowedconnections',
                               'droppedconnections',
                               'filename',
                               'maxfilesize'):
        raise ValueError('Incorrect setting: {0}'.format(setting))
    if setting.lower() in ('allowedconnections', 'droppedconnections'):
        if value.lower() not in ('enable', 'disable', 'notconfigured'):
            raise ValueError('Incorrect value: {0}'.format(value))
    # TODO: Consider validating 'filename' as a path, e.g.
    # https://stackoverflow.com/questions/9532499/check-whether-a-path-is-valid-in-python-without-creating-a-file-at-the-paths-ta
    if setting.lower() == 'maxfilesize' and value.lower() != 'notconfigured':
        # Must be a number between 1 and 32767 (Kb). Convert once and
        # reuse the result (the original parsed the string twice).
        try:
            max_size = int(value)
        except ValueError:
            raise ValueError('Incorrect value: {0}'.format(value))
        if not 1 <= max_size <= 32767:
            raise ValueError('Incorrect value: {0}'.format(value))
    # Run the command; netsh produces no output on success
    command = 'set {0}profile logging {1} {2}'.format(profile, setting, value)
    results = _netsh_command(command=command, store=store)
    if results:
        raise CommandExecutionError('An error occurred: {0}'.format(results))
    return True
def set_settings(profile, setting, value, store='local'):
    '''
    Configure a firewall behavior setting for the given profile.

    Args:
        profile (str): Profile to configure: ``domain``, ``public`` or
            ``private``.
        setting (str): One of ``localfirewallrules``,
            ``localconsecrules``, ``inboundusernotification``,
            ``remotemanagement`` or ``unicastresponsetomulticast``.
        value (str): ``enable``, ``disable`` or ``notconfigured``.
        store (str): ``local`` or ``lgpo``. Default: ``local``.

    Returns:
        bool: ``True`` on success

    Raises:
        CommandExecutionError: If netsh reports an error
        ValueError: If a parameter is invalid
    '''
    valid_settings = ('localfirewallrules',
                      'localconsecrules',
                      'inboundusernotification',
                      'remotemanagement',
                      'unicastresponsetomulticast')
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if setting.lower() not in valid_settings:
        raise ValueError('Incorrect setting: {0}'.format(setting))
    if value.lower() not in ('enable', 'disable', 'notconfigured'):
        raise ValueError('Incorrect value: {0}'.format(value))
    # netsh emits nothing on success; any output means failure
    results = _netsh_command(
        command='set {0}profile settings {1} {2}'.format(
            profile, setting, value),
        store=store)
    if results:
        raise CommandExecutionError('An error occurred: {0}'.format(results))
    return True
def set_state(profile, state, store='local'):
    '''
    Turn the firewall on or off (or unset it) for the given profile.

    Args:
        profile (str): Firewall profile to modify. One of ``domain``,
            ``public`` or ``private``.
        state (str): Desired state. One of ``on``, ``off`` or
            ``notconfigured``.
        store (str): ``local`` for the local firewall policy (default) or
            ``lgpo`` for the local group policy store.

    Returns:
        bool: ``True`` if successful

    Raises:
        CommandExecutionError: If netsh reports an error
        ValueError: If any parameter is invalid
    '''
    # Validate the caller's input; the store value is checked later by
    # _netsh_command
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if state.lower() not in ('on', 'off', 'notconfigured'):
        raise ValueError('Incorrect state: {0}'.format(state))
    # netsh prints nothing on success, so any output means failure
    output = _netsh_command(
        command='set {0}profile state {1}'.format(profile, state),
        store=store)
    if output:
        raise CommandExecutionError('An error occurred: {0}'.format(output))
    return True
|
saltstack/salt
|
salt/utils/win_lgpo_netsh.py
|
set_firewall_settings
|
python
|
def set_firewall_settings(profile,
inbound=None,
outbound=None,
store='local'):
'''
Set the firewall inbound/outbound settings for the specified profile and
store
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
inbound (str):
The inbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- blockinbound
- blockinboundalways
- allowinbound
- notconfigured
Default is ``None``
outbound (str):
The outbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- allowoutbound
- blockoutbound
- notconfigured
Default is ``None``
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if inbound and inbound.lower() not in ('blockinbound',
'blockinboundalways',
'allowinbound',
'notconfigured'):
raise ValueError('Incorrect inbound value: {0}'.format(inbound))
if outbound and outbound.lower() not in ('allowoutbound',
'blockoutbound',
'notconfigured'):
raise ValueError('Incorrect outbound value: {0}'.format(outbound))
if not inbound and not outbound:
raise ValueError('Must set inbound or outbound')
# You have to specify inbound and outbound setting at the same time
# If you're only specifying one, you have to get the current setting for the
# other
if not inbound or not outbound:
ret = get_settings(profile=profile,
section='firewallpolicy',
store=store)
if not inbound:
inbound = ret['Inbound']
if not outbound:
outbound = ret['Outbound']
command = 'set {0}profile firewallpolicy {1},{2}' \
''.format(profile, inbound, outbound)
results = _netsh_command(command=command, store=store)
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
|
Set the firewall inbound/outbound settings for the specified profile and
store
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
inbound (str):
The inbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- blockinbound
- blockinboundalways
- allowinbound
- notconfigured
Default is ``None``
outbound (str):
The outbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- allowoutbound
- blockoutbound
- notconfigured
Default is ``None``
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_lgpo_netsh.py#L285-L374
|
[
"def get_settings(profile, section, store='local'):\n '''\n Get the firewall property from the specified profile in the specified store\n as returned by ``netsh advfirewall``.\n\n Args:\n\n profile (str):\n The firewall profile to query. Valid options are:\n\n - domain\n - public\n - private\n\n section (str):\n The property to query within the selected profile. Valid options\n are:\n\n - firewallpolicy : inbound/outbound behavior\n - logging : firewall logging settings\n - settings : firewall properties\n - state : firewalls state (on | off)\n\n store (str):\n The store to use. This is either the local firewall policy or the\n policy defined by local group policy. Valid options are:\n\n - lgpo\n - local\n\n Default is ``local``\n\n Returns:\n dict: A dictionary containing the properties for the specified profile\n\n Raises:\n CommandExecutionError: If an error occurs\n ValueError: If the parameters are incorrect\n '''\n # validate input\n if profile.lower() not in ('domain', 'public', 'private'):\n raise ValueError('Incorrect profile: {0}'.format(profile))\n if section.lower() not in ('state', 'firewallpolicy', 'settings', 'logging'):\n raise ValueError('Incorrect section: {0}'.format(section))\n if store.lower() not in ('local', 'lgpo'):\n raise ValueError('Incorrect store: {0}'.format(store))\n command = 'show {0}profile {1}'.format(profile, section)\n # run it\n results = _netsh_command(command=command, store=store)\n # sample output:\n # Domain Profile Settings:\n # ----------------------------------------------------------------------\n # LocalFirewallRules N/A (GPO-store only)\n # LocalConSecRules N/A (GPO-store only)\n # InboundUserNotification Disable\n # RemoteManagement Disable\n # UnicastResponseToMulticast Enable\n\n # if it's less than 3 lines it failed\n if len(results) < 3:\n raise CommandExecutionError('Invalid results: {0}'.format(results))\n ret = {}\n # Skip the first 2 lines. 
Add everything else to a dictionary\n for line in results[3:]:\n ret.update(dict(list(zip(*[iter(re.split(r\"\\s{2,}\", line))]*2))))\n\n # Remove spaces from the values so that `Not Configured` is detected\n # correctly\n for item in ret:\n ret[item] = ret[item].replace(' ', '')\n\n # special handling for firewallpolicy\n if section == 'firewallpolicy':\n inbound, outbound = ret['Firewall Policy'].split(',')\n return {'Inbound': inbound, 'Outbound': outbound}\n\n return ret\n",
"def _netsh_command(command, store):\n if store.lower() not in ('local', 'lgpo'):\n raise ValueError('Incorrect store: {0}'.format(store))\n # set the store for local or lgpo\n if store.lower() == 'local':\n netsh_script = dedent('''\\\n advfirewall\n set store local\n {0}\n '''.format(command))\n else:\n netsh_script = dedent('''\\\n advfirewall\n set store gpo = {0}\n {1}\n '''.format(__hostname__, command))\n return _netsh_file(content=netsh_script).splitlines()\n"
] |
# -*- coding: utf-8 -*-
r'''
A salt util for modifying firewall settings.
.. versionadded:: 2018.3.4
.. versionadded:: 2019.2.0
This util allows you to modify firewall settings in the local group policy in
addition to the normal firewall settings. Parameters are taken from the
netsh advfirewall prompt.
.. note::
More information can be found in the advfirewall context in netsh. This can
be access by opening a netsh prompt. At a command prompt type the following:
c:\>netsh
netsh>advfirewall
netsh advfirewall>set help
netsh advfirewall>set domain help
Usage:
.. code-block:: python
import salt.utils.win_lgpo_netsh
# Get the inbound/outbound firewall settings for connections on the
# local domain profile
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy')
# Get the inbound/outbound firewall settings for connections on the
# domain profile as defined by local group policy
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy',
store='lgpo')
# Get all firewall settings for connections on the domain profile
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain')
# Get all firewall settings for connections on the domain profile as
# defined by local group policy
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain', store='lgpo')
# Get all firewall settings for all profiles
salt.utils.win_lgpo_netsh.get_all_settings()
# Get all firewall settings for all profiles as defined by local group
# policy
salt.utils.win_lgpo_netsh.get_all_settings(store='lgpo')
# Set the inbound setting for the domain profile to block inbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound')
# Set the outbound setting for the domain profile to allow outbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
outbound='allowoutbound')
# Set inbound/outbound settings for the domain profile in the group
# policy to block inbound and allow outbound
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound',
outbound='allowoutbound',
store='lgpo')
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
from textwrap import dedent
import logging
import os
import re
import socket
import tempfile
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
from salt.ext.six.moves import zip
log = logging.getLogger(__name__)
__hostname__ = socket.gethostname()
__virtualname__ = 'netsh'
# Although utils are often directly imported, it is also possible to use the
# loader.
def __virtual__():
    '''
    Only load if on a Windows system
    '''
    # ``salt.utils.platform`` is never imported at the top of this file
    # (only ``salt.modules.cmdmod`` is), so referencing it here relied on a
    # transitive import having already loaded the submodule. Import it
    # explicitly so the check cannot raise AttributeError.
    import salt.utils.platform
    if not salt.utils.platform.is_windows():
        return False, 'This utility only available on Windows'
    return __virtualname__
def _netsh_file(content):
    '''
    helper function to get the results of ``netsh -f content.txt``

    Running ``netsh`` will drop you into a ``netsh`` prompt where you can issue
    ``netsh`` commands. You can put a series of commands in an external file and
    run them as if from a ``netsh`` prompt using the ``-f`` switch. That's what
    this function does.

    Args:

        content (str):
            The contents of the file that will be run by the ``netsh -f``
            command

    Returns:
        str: The text returned by the netsh command
    '''
    # delete=False keeps the file around after the ``with`` block closes it,
    # so netsh can open it; on Windows a still-open NamedTemporaryFile
    # cannot be re-opened by another process.
    with tempfile.NamedTemporaryFile(mode='w',
                                     prefix='salt-',
                                     suffix='.netsh',
                                     delete=False) as fp:
        fp.write(content)
    try:
        log.debug('%s:\n%s', fp.name, content)
        return salt.modules.cmdmod.run('netsh -f {0}'.format(fp.name), python_shell=True)
    finally:
        # Always remove the temporary script, even if the command failed
        os.remove(fp.name)
def _netsh_command(command, store):
if store.lower() not in ('local', 'lgpo'):
raise ValueError('Incorrect store: {0}'.format(store))
# set the store for local or lgpo
if store.lower() == 'local':
netsh_script = dedent('''\
advfirewall
set store local
{0}
'''.format(command))
else:
netsh_script = dedent('''\
advfirewall
set store gpo = {0}
{1}
'''.format(__hostname__, command))
return _netsh_file(content=netsh_script).splitlines()
def get_settings(profile, section, store='local'):
    '''
    Get the firewall property from the specified profile in the specified store
    as returned by ``netsh advfirewall``.

    Args:

        profile (str):
            The firewall profile to query. Valid options are:

            - domain
            - public
            - private

        section (str):
            The property to query within the selected profile. Valid options
            are:

            - firewallpolicy : inbound/outbound behavior
            - logging : firewall logging settings
            - settings : firewall properties
            - state : firewalls state (on | off)

        store (str):
            The store to use. This is either the local firewall policy or the
            policy defined by local group policy. Valid options are:

            - lgpo
            - local

            Default is ``local``

    Returns:
        dict: A dictionary containing the properties for the specified profile

    Raises:
        CommandExecutionError: If an error occurs
        ValueError: If the parameters are incorrect
    '''
    # validate input
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if section.lower() not in ('state', 'firewallpolicy', 'settings', 'logging'):
        raise ValueError('Incorrect section: {0}'.format(section))
    if store.lower() not in ('local', 'lgpo'):
        raise ValueError('Incorrect store: {0}'.format(store))
    command = 'show {0}profile {1}'.format(profile, section)
    # run it
    results = _netsh_command(command=command, store=store)
    # sample output:
    # Domain Profile Settings:
    # ----------------------------------------------------------------------
    # LocalFirewallRules                    N/A (GPO-store only)
    # LocalConSecRules                      N/A (GPO-store only)
    # InboundUserNotification               Disable
    # RemoteManagement                      Disable
    # UnicastResponseToMulticast            Enable

    # if it's less than 3 lines it failed
    if len(results) < 3:
        raise CommandExecutionError('Invalid results: {0}'.format(results))
    ret = {}
    # Skip the first three entries of the output (header lines). Each
    # remaining line is split on runs of 2+ spaces and the resulting fields
    # are paired up as key/value entries.
    for line in results[3:]:
        ret.update(dict(list(zip(*[iter(re.split(r"\s{2,}", line))]*2))))

    # Remove spaces from the values so that `Not Configured` is detected
    # correctly
    for item in ret:
        ret[item] = ret[item].replace(' ', '')

    # special handling for firewallpolicy: netsh reports a single
    # 'Firewall Policy' value of the form 'Inbound,Outbound'
    if section == 'firewallpolicy':
        inbound, outbound = ret['Firewall Policy'].split(',')
        return {'Inbound': inbound, 'Outbound': outbound}

    return ret
def get_all_settings(profile, store='local'):
    '''
    Gather every property of the given profile from the given store into a
    single flat dictionary.

    Args:
        profile (str): Firewall profile to query. One of ``domain``,
            ``public`` or ``private``.
        store (str): ``local`` for the local firewall policy (default) or
            ``lgpo`` for the local group policy store.

    Returns:
        dict: All settings for the profile, merged across sections.
    '''
    combined = {}
    # Query each section in turn and merge the results
    for section in ('state', 'firewallpolicy', 'settings', 'logging'):
        combined.update(get_settings(profile=profile,
                                     section=section,
                                     store=store))
    return combined
def get_all_profiles(store='local'):
    '''
    Gather every setting for every firewall profile from the given store.

    Args:
        store (str): ``local`` for the local firewall policy (default) or
            ``lgpo`` for the local group policy store.

    Returns:
        dict: Settings keyed by profile display name, e.g.
            ``'Domain Profile'``.
    '''
    profiles = {}
    for name in ('domain', 'private', 'public'):
        # e.g. 'domain' -> 'Domain Profile'
        profiles['{0} Profile'.format(name.capitalize())] = get_all_settings(
            profile=name, store=store)
    return profiles
def set_logging_settings(profile, setting, value, store='local'):
    '''
    Configure one firewall logging setting for a profile.

    Args:
        profile (str): Firewall profile to modify. One of ``domain``,
            ``public`` or ``private``.
        setting (str): Logging setting to modify. One of
            ``allowedconnections``, ``droppedconnections``, ``filename``
            or ``maxfilesize``.
        value (str): New value. For ``allowedconnections`` and
            ``droppedconnections``: ``enable``, ``disable`` or
            ``notconfigured``. For ``filename``: a full path to the log
            file, or ``notconfigured``. For ``maxfilesize``: 1-32767 (Kb),
            or ``notconfigured``.
        store (str): ``local`` for the local firewall policy (default) or
            ``lgpo`` for the local group policy store.

    Returns:
        bool: ``True`` if successful

    Raises:
        CommandExecutionError: If netsh reports an error
        ValueError: If any parameter is invalid
    '''
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if setting.lower() not in ('allowedconnections',
                               'droppedconnections',
                               'filename',
                               'maxfilesize'):
        raise ValueError('Incorrect setting: {0}'.format(setting))
    # The enable/disable style settings only take a fixed set of values
    if setting.lower() in ('allowedconnections', 'droppedconnections'):
        if value.lower() not in ('enable', 'disable', 'notconfigured'):
            raise ValueError('Incorrect value: {0}'.format(value))
    # TODO: Consider adding something like the following to validate filename
    # https://stackoverflow.com/questions/9532499/check-whether-a-path-is-valid-in-python-without-creating-a-file-at-the-paths-ta
    if setting.lower() == 'maxfilesize':
        if value.lower() != 'notconfigured':
            # Must be a number between 1 and 32767
            try:
                size = int(value)
            except ValueError:
                raise ValueError('Incorrect value: {0}'.format(value))
            if not 1 <= size <= 32767:
                raise ValueError('Incorrect value: {0}'.format(value))
    # netsh prints nothing on success, so any output means failure
    output = _netsh_command(
        command='set {0}profile logging {1} {2}'.format(profile, setting, value),
        store=store)
    if output:
        raise CommandExecutionError('An error occurred: {0}'.format(output))
    return True
def set_settings(profile, setting, value, store='local'):
    '''
    Configure firewall settings.

    Args:
        profile (str): The firewall profile to configure. Valid options
            are: ``domain``, ``public``, ``private``
        setting (str): The firewall setting to configure. Valid options
            are: ``localfirewallrules``, ``localconsecrules``,
            ``inboundusernotification``, ``remotemanagement``,
            ``unicastresponsetomulticast``
        value (str): The value to apply to the setting. Valid options are:
            ``enable``, ``disable``, ``notconfigured``
        store (str): The store to use. Either the local firewall policy
            (``local``, the default) or the policy defined by local group
            policy (``lgpo``)

    Returns:
        bool: ``True`` if successful

    Raises:
        CommandExecutionError: If an error occurs
        ValueError: If the parameters are incorrect
    '''
    # Input validation (the store value is validated later by _netsh_command)
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if setting.lower() not in ('localfirewallrules',
                               'localconsecrules',
                               'inboundusernotification',
                               'remotemanagement',
                               'unicastresponsetomulticast'):
        raise ValueError('Incorrect setting: {0}'.format(setting))
    if value.lower() not in ('enable', 'disable', 'notconfigured'):
        raise ValueError('Incorrect value: {0}'.format(value))
    # Run the command
    command = 'set {0}profile settings {1} {2}'.format(profile, setting, value)
    results = _netsh_command(command=command, store=store)
    # A successful run should return an empty list; any output is an error
    if results:
        raise CommandExecutionError('An error occurred: {0}'.format(results))
    return True
def set_state(profile, state, store='local'):
    '''
    Configure the firewall state (turn the firewall on or off).

    Args:
        profile (str): The firewall profile to configure. Valid options
            are: ``domain``, ``public``, ``private``
        state (str): The firewall state. Valid options are: ``on``,
            ``off``, ``notconfigured``
        store (str): The store to use. Either the local firewall policy
            (``local``, the default) or the policy defined by local group
            policy (``lgpo``)

    Returns:
        bool: ``True`` if successful

    Raises:
        CommandExecutionError: If an error occurs
        ValueError: If the parameters are incorrect
    '''
    # Input validation (the store value is validated later by _netsh_command)
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if state.lower() not in ('on', 'off', 'notconfigured'):
        raise ValueError('Incorrect state: {0}'.format(state))
    # Run the command
    command = 'set {0}profile state {1}'.format(profile, state)
    results = _netsh_command(command=command, store=store)
    # A successful run should return an empty list; any output is an error
    if results:
        raise CommandExecutionError('An error occurred: {0}'.format(results))
    return True
|
saltstack/salt
|
salt/utils/win_lgpo_netsh.py
|
set_logging_settings
|
python
|
def set_logging_settings(profile, setting, value, store='local'):
'''
Configure logging settings for the Windows firewall.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
setting (str):
The logging setting to configure. Valid options are:
- allowedconnections
- droppedconnections
- filename
- maxfilesize
value (str):
The value to apply to the setting. Valid values are dependent upon
the setting being configured. Valid options are:
allowedconnections:
- enable
- disable
- notconfigured
droppedconnections:
- enable
- disable
- notconfigured
filename:
- Full path and name of the firewall log file
- notconfigured
maxfilesize:
- 1 - 32767 (Kb)
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if setting.lower() not in ('allowedconnections',
'droppedconnections',
'filename',
'maxfilesize'):
raise ValueError('Incorrect setting: {0}'.format(setting))
if setting.lower() in ('allowedconnections', 'droppedconnections'):
if value.lower() not in ('enable', 'disable', 'notconfigured'):
raise ValueError('Incorrect value: {0}'.format(value))
# TODO: Consider adding something like the following to validate filename
# https://stackoverflow.com/questions/9532499/check-whether-a-path-is-valid-in-python-without-creating-a-file-at-the-paths-ta
if setting.lower() == 'maxfilesize':
if value.lower() != 'notconfigured':
# Must be a number between 1 and 32767
try:
int(value)
except ValueError:
raise ValueError('Incorrect value: {0}'.format(value))
if not 1 <= int(value) <= 32767:
raise ValueError('Incorrect value: {0}'.format(value))
# Run the command
command = 'set {0}profile logging {1} {2}'.format(profile, setting, value)
results = _netsh_command(command=command, store=store)
# A successful run should return an empty list
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
|
Configure logging settings for the Windows firewall.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
setting (str):
The logging setting to configure. Valid options are:
- allowedconnections
- droppedconnections
- filename
- maxfilesize
value (str):
The value to apply to the setting. Valid values are dependent upon
the setting being configured. Valid options are:
allowedconnections:
- enable
- disable
- notconfigured
droppedconnections:
- enable
- disable
- notconfigured
filename:
- Full path and name of the firewall log file
- notconfigured
maxfilesize:
- 1 - 32767 (Kb)
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_lgpo_netsh.py#L377-L470
| null |
# -*- coding: utf-8 -*-
r'''
A salt util for modifying firewall settings.
.. versionadded:: 2018.3.4
.. versionadded:: 2019.2.0
This util allows you to modify firewall settings in the local group policy in
addition to the normal firewall settings. Parameters are taken from the
netsh advfirewall prompt.
.. note::
More information can be found in the advfirewall context in netsh. This can
be access by opening a netsh prompt. At a command prompt type the following:
c:\>netsh
netsh>advfirewall
netsh advfirewall>set help
netsh advfirewall>set domain help
Usage:
.. code-block:: python
import salt.utils.win_lgpo_netsh
# Get the inbound/outbound firewall settings for connections on the
# local domain profile
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy')
# Get the inbound/outbound firewall settings for connections on the
# domain profile as defined by local group policy
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy',
store='lgpo')
# Get all firewall settings for connections on the domain profile
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain')
# Get all firewall settings for connections on the domain profile as
# defined by local group policy
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain', store='lgpo')
# Get all firewall settings for all profiles
salt.utils.win_lgpo_netsh.get_all_settings()
# Get all firewall settings for all profiles as defined by local group
# policy
salt.utils.win_lgpo_netsh.get_all_settings(store='lgpo')
# Set the inbound setting for the domain profile to block inbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound')
# Set the outbound setting for the domain profile to allow outbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
outbound='allowoutbound')
# Set inbound/outbound settings for the domain profile in the group
# policy to block inbound and allow outbound
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound',
outbound='allowoutbound',
store='lgpo')
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
from textwrap import dedent
import logging
import os
import re
import socket
import tempfile
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
from salt.ext.six.moves import zip
log = logging.getLogger(__name__)
__hostname__ = socket.gethostname()
__virtualname__ = 'netsh'
# Although utils are often directly imported, it is also possible to use the
# loader.
def __virtual__():
    '''
    Only load if on a Windows system
    '''
    # NOTE(review): ``salt.utils.platform`` is not imported at the top of
    # this file (only ``salt.modules.cmdmod`` is); this relies on a
    # transitive import having loaded the submodule -- confirm and add an
    # explicit import.
    if not salt.utils.platform.is_windows():
        return False, 'This utility only available on Windows'
    return __virtualname__
def _netsh_file(content):
    '''
    helper function to get the results of ``netsh -f content.txt``

    Running ``netsh`` will drop you into a ``netsh`` prompt where you can issue
    ``netsh`` commands. You can put a series of commands in an external file and
    run them as if from a ``netsh`` prompt using the ``-f`` switch. That's what
    this function does.

    Args:

        content (str):
            The contents of the file that will be run by the ``netsh -f``
            command

    Returns:
        str: The text returned by the netsh command
    '''
    # delete=False keeps the file around after the ``with`` block closes it,
    # so netsh can open it; on Windows a still-open NamedTemporaryFile
    # cannot be re-opened by another process.
    with tempfile.NamedTemporaryFile(mode='w',
                                     prefix='salt-',
                                     suffix='.netsh',
                                     delete=False) as fp:
        fp.write(content)
    try:
        log.debug('%s:\n%s', fp.name, content)
        return salt.modules.cmdmod.run('netsh -f {0}'.format(fp.name), python_shell=True)
    finally:
        # Always remove the temporary script, even if the command failed
        os.remove(fp.name)
def _netsh_command(command, store):
    '''
    Run a single ``netsh advfirewall`` command against the given policy
    store and return its output as a list of lines.

    Args:
        command (str): The advfirewall command to run
        store (str): ``local`` or ``lgpo``

    Returns:
        list: The lines of output from netsh

    Raises:
        ValueError: If the store is invalid
    '''
    if store.lower() not in ('local', 'lgpo'):
        raise ValueError('Incorrect store: {0}'.format(store))
    # set the store for local or lgpo
    if store.lower() == 'local':
        netsh_script = dedent('''\
            advfirewall
            set store local
            {0}
        '''.format(command))
    else:
        # the group-policy store is addressed by computer name
        netsh_script = dedent('''\
            advfirewall
            set store gpo = {0}
            {1}
        '''.format(__hostname__, command))
    return _netsh_file(content=netsh_script).splitlines()
def get_settings(profile, section, store='local'):
    '''
    Get the firewall property from the specified profile in the specified store
    as returned by ``netsh advfirewall``.

    Args:
        profile (str): The firewall profile to query. Valid options are:
            ``domain``, ``public``, ``private``
        section (str): The property to query within the selected profile.
            Valid options are: ``firewallpolicy`` (inbound/outbound
            behavior), ``logging``, ``settings``, ``state`` (on | off)
        store (str): The store to use. Either the local firewall policy
            (``local``, the default) or the policy defined by local group
            policy (``lgpo``)

    Returns:
        dict: A dictionary containing the properties for the specified profile

    Raises:
        CommandExecutionError: If an error occurs
        ValueError: If the parameters are incorrect
    '''
    # validate input
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if section.lower() not in ('state', 'firewallpolicy', 'settings', 'logging'):
        raise ValueError('Incorrect section: {0}'.format(section))
    if store.lower() not in ('local', 'lgpo'):
        raise ValueError('Incorrect store: {0}'.format(store))
    command = 'show {0}profile {1}'.format(profile, section)
    # run it
    results = _netsh_command(command=command, store=store)
    # sample output:
    # Domain Profile Settings:
    # ----------------------------------------------------------------------
    # LocalFirewallRules                    N/A (GPO-store only)
    # LocalConSecRules                      N/A (GPO-store only)
    # InboundUserNotification               Disable
    # RemoteManagement                      Disable
    # UnicastResponseToMulticast            Enable

    # if it's less than 3 lines it failed
    if len(results) < 3:
        raise CommandExecutionError('Invalid results: {0}'.format(results))
    ret = {}
    # Skip the first three entries of the output (header lines). Each
    # remaining line is split on runs of 2+ spaces and the resulting fields
    # are paired up as key/value entries.
    for line in results[3:]:
        ret.update(dict(list(zip(*[iter(re.split(r"\s{2,}", line))]*2))))

    # Remove spaces from the values so that `Not Configured` is detected
    # correctly
    for item in ret:
        ret[item] = ret[item].replace(' ', '')

    # special handling for firewallpolicy: netsh reports a single
    # 'Firewall Policy' value of the form 'Inbound,Outbound'
    if section == 'firewallpolicy':
        inbound, outbound = ret['Firewall Policy'].split(',')
        return {'Inbound': inbound, 'Outbound': outbound}

    return ret
def get_all_settings(profile, store='local'):
    '''
    Gets all the properties for the specified profile in the specified store

    Args:
        profile (str): The firewall profile to query. Valid options are:
            ``domain``, ``public``, ``private``
        store (str): The store to use. Either the local firewall policy
            (``local``, the default) or the policy defined by local group
            policy (``lgpo``)

    Returns:
        dict: A dictionary containing the specified settings
    '''
    # Merge every section of the profile into one flat dictionary
    ret = dict()
    ret.update(get_settings(profile=profile, section='state', store=store))
    ret.update(get_settings(profile=profile, section='firewallpolicy', store=store))
    ret.update(get_settings(profile=profile, section='settings', store=store))
    ret.update(get_settings(profile=profile, section='logging', store=store))
    return ret
def get_all_profiles(store='local'):
    '''
    Gets all properties for all profiles in the specified store

    Args:
        store (str): The store to use. Either the local firewall policy
            (``local``, the default) or the policy defined by local group
            policy (``lgpo``)

    Returns:
        dict: A dictionary containing the specified settings for each profile,
            keyed by display name (e.g. ``'Domain Profile'``)
    '''
    return {
        'Domain Profile': get_all_settings(profile='domain', store=store),
        'Private Profile': get_all_settings(profile='private', store=store),
        'Public Profile': get_all_settings(profile='public', store=store)
    }
def set_firewall_settings(profile,
                          inbound=None,
                          outbound=None,
                          store='local'):
    '''
    Set the inbound and/or outbound firewall policy for a profile.

    Args:
        profile (str): Firewall profile to modify. One of ``domain``,
            ``public`` or ``private``.
        inbound (str): Inbound policy, or ``None`` to leave unchanged.
            One of ``blockinbound``, ``blockinboundalways``,
            ``allowinbound`` or ``notconfigured``. Default is ``None``.
        outbound (str): Outbound policy, or ``None`` to leave unchanged.
            One of ``allowoutbound``, ``blockoutbound`` or
            ``notconfigured``. Default is ``None``.
        store (str): ``local`` for the local firewall policy (default) or
            ``lgpo`` for the local group policy store.

    Returns:
        bool: ``True`` if successful

    Raises:
        CommandExecutionError: If netsh reports an error
        ValueError: If the parameters are incorrect
    '''
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if inbound and inbound.lower() not in ('blockinbound',
                                           'blockinboundalways',
                                           'allowinbound',
                                           'notconfigured'):
        raise ValueError('Incorrect inbound value: {0}'.format(inbound))
    if outbound and outbound.lower() not in ('allowoutbound',
                                             'blockoutbound',
                                             'notconfigured'):
        raise ValueError('Incorrect outbound value: {0}'.format(outbound))
    if not inbound and not outbound:
        raise ValueError('Must set inbound or outbound')
    # netsh requires both directions in a single command, so fetch the
    # current value for whichever direction the caller did not supply
    if not (inbound and outbound):
        current = get_settings(profile=profile,
                               section='firewallpolicy',
                               store=store)
        inbound = inbound or current['Inbound']
        outbound = outbound or current['Outbound']
    # netsh prints nothing on success, so any output means failure
    results = _netsh_command(
        command='set {0}profile firewallpolicy {1},{2}'.format(
            profile, inbound, outbound),
        store=store)
    if results:
        raise CommandExecutionError('An error occurred: {0}'.format(results))
    return True
def set_settings(profile, setting, value, store='local'):
    '''
    Configure a firewall property in the ``settings`` section of a profile.

    Args:

        profile (str):
            The firewall profile to configure. Valid options are:

            - domain
            - public
            - private

        setting (str):
            The firewall setting to configure. Valid options are:

            - localfirewallrules
            - localconsecrules
            - inboundusernotification
            - remotemanagement
            - unicastresponsetomulticast

        value (str):
            The value to apply to the setting. Valid options are

            - enable
            - disable
            - notconfigured

        store (str):
            The store to use. This is either the local firewall policy
            (``local``) or the policy defined by local group policy
            (``lgpo``). Default is ``local``

    Returns:
        bool: ``True`` if successful

    Raises:
        CommandExecutionError: If an error occurs
        ValueError: If the parameters are incorrect
    '''
    valid_profiles = ('domain', 'public', 'private')
    valid_settings = ('localfirewallrules',
                      'localconsecrules',
                      'inboundusernotification',
                      'remotemanagement',
                      'unicastresponsetomulticast')
    valid_values = ('enable', 'disable', 'notconfigured')

    # Validate everything before touching netsh
    if profile.lower() not in valid_profiles:
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if setting.lower() not in valid_settings:
        raise ValueError('Incorrect setting: {0}'.format(setting))
    if value.lower() not in valid_values:
        raise ValueError('Incorrect value: {0}'.format(value))

    # _netsh_command returns the command output as a list of lines;
    # an empty list means the command succeeded.
    results = _netsh_command(
        command='set {0}profile settings {1} {2}'.format(
            profile, setting, value),
        store=store)

    if results:
        raise CommandExecutionError('An error occurred: {0}'.format(results))

    return True
def set_state(profile, state, store='local'):
    '''
    Turn the firewall on or off for the given profile.

    Args:

        profile (str):
            The firewall profile to configure. Valid options are:

            - domain
            - public
            - private

        state (str):
            The firewall state. Valid options are:

            - on
            - off
            - notconfigured

        store (str):
            The store to use. This is either the local firewall policy
            (``local``) or the policy defined by local group policy
            (``lgpo``). Default is ``local``

    Returns:
        bool: ``True`` if successful

    Raises:
        CommandExecutionError: If an error occurs
        ValueError: If the parameters are incorrect
    '''
    # Validate everything before touching netsh
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if state.lower() not in ('on', 'off', 'notconfigured'):
        raise ValueError('Incorrect state: {0}'.format(state))

    # _netsh_command returns the command output as a list of lines;
    # an empty list means the command succeeded.
    results = _netsh_command(
        command='set {0}profile state {1}'.format(profile, state),
        store=store)

    if results:
        raise CommandExecutionError('An error occurred: {0}'.format(results))

    return True
|
saltstack/salt
|
salt/utils/win_lgpo_netsh.py
|
set_settings
|
python
|
def set_settings(profile, setting, value, store='local'):
'''
Configure firewall settings.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
setting (str):
The firewall setting to configure. Valid options are:
- localfirewallrules
- localconsecrules
- inboundusernotification
- remotemanagement
- unicastresponsetomulticast
value (str):
The value to apply to the setting. Valid options are
- enable
- disable
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if setting.lower() not in ('localfirewallrules',
'localconsecrules',
'inboundusernotification',
'remotemanagement',
'unicastresponsetomulticast'):
raise ValueError('Incorrect setting: {0}'.format(setting))
if value.lower() not in ('enable', 'disable', 'notconfigured'):
raise ValueError('Incorrect value: {0}'.format(value))
# Run the command
command = 'set {0}profile settings {1} {2}'.format(profile, setting, value)
results = _netsh_command(command=command, store=store)
# A successful run should return an empty list
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
|
Configure firewall settings.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
setting (str):
The firewall setting to configure. Valid options are:
- localfirewallrules
- localconsecrules
- inboundusernotification
- remotemanagement
- unicastresponsetomulticast
value (str):
The value to apply to the setting. Valid options are
- enable
- disable
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_lgpo_netsh.py#L473-L538
|
[
"def _netsh_command(command, store):\n if store.lower() not in ('local', 'lgpo'):\n raise ValueError('Incorrect store: {0}'.format(store))\n # set the store for local or lgpo\n if store.lower() == 'local':\n netsh_script = dedent('''\\\n advfirewall\n set store local\n {0}\n '''.format(command))\n else:\n netsh_script = dedent('''\\\n advfirewall\n set store gpo = {0}\n {1}\n '''.format(__hostname__, command))\n return _netsh_file(content=netsh_script).splitlines()\n"
] |
# -*- coding: utf-8 -*-
r'''
A salt util for modifying firewall settings.
.. versionadded:: 2018.3.4
.. versionadded:: 2019.2.0
This util allows you to modify firewall settings in the local group policy in
addition to the normal firewall settings. Parameters are taken from the
netsh advfirewall prompt.
.. note::
More information can be found in the advfirewall context in netsh. This can
be access by opening a netsh prompt. At a command prompt type the following:
c:\>netsh
netsh>advfirewall
netsh advfirewall>set help
netsh advfirewall>set domain help
Usage:
.. code-block:: python
import salt.utils.win_lgpo_netsh
# Get the inbound/outbound firewall settings for connections on the
# local domain profile
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy')
# Get the inbound/outbound firewall settings for connections on the
# domain profile as defined by local group policy
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy',
store='lgpo')
# Get all firewall settings for connections on the domain profile
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain')
# Get all firewall settings for connections on the domain profile as
# defined by local group policy
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain', store='lgpo')
# Get all firewall settings for all profiles
salt.utils.win_lgpo_netsh.get_all_settings()
# Get all firewall settings for all profiles as defined by local group
# policy
salt.utils.win_lgpo_netsh.get_all_settings(store='lgpo')
# Set the inbound setting for the domain profile to block inbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound')
# Set the outbound setting for the domain profile to allow outbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
outbound='allowoutbound')
# Set inbound/outbound settings for the domain profile in the group
# policy to block inbound and allow outbound
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound',
outbound='allowoutbound',
store='lgpo')
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
from textwrap import dedent
import logging
import os
import re
import socket
import tempfile
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
from salt.ext.six.moves import zip
log = logging.getLogger(__name__)
__hostname__ = socket.gethostname()
__virtualname__ = 'netsh'
# Although utils are often directly imported, it is also possible to use the
# loader.
def __virtual__():
    '''
    Only load if on a Windows system.

    Returns:
        str: The virtual module name on Windows
        tuple: ``(False, reason)`` on any other platform
    '''
    # The file-level imports bring in ``salt.modules.cmdmod`` but never
    # ``salt.utils.platform`` explicitly, so the attribute lookup below can
    # fail with an AttributeError depending on what else has been imported.
    # Import it locally to guarantee the name is bound.
    import salt.utils.platform
    if not salt.utils.platform.is_windows():
        return False, 'This utility only available on Windows'
    return __virtualname__
def _netsh_file(content):
    '''
    Helper to run a batch of ``netsh`` commands via ``netsh -f <file>``.

    Running ``netsh`` interactively drops you into a ``netsh`` prompt. The
    ``-f`` switch instead executes a series of commands from an external
    file as if they had been typed at that prompt.

    Args:
        content (str):
            The commands to be executed by ``netsh -f``, one per line

    Returns:
        str: The text output of the netsh command
    '''
    # delete=False so the file survives the ``with`` block and netsh can
    # open it by name (Windows disallows a second open of a live
    # NamedTemporaryFile); we remove it ourselves in the finally clause.
    temp = tempfile.NamedTemporaryFile(mode='w',
                                       prefix='salt-',
                                       suffix='.netsh',
                                       delete=False)
    with temp:
        temp.write(content)
    try:
        log.debug('%s:\n%s', temp.name, content)
        return salt.modules.cmdmod.run('netsh -f {0}'.format(temp.name), python_shell=True)
    finally:
        os.remove(temp.name)
def _netsh_command(command, store):
if store.lower() not in ('local', 'lgpo'):
raise ValueError('Incorrect store: {0}'.format(store))
# set the store for local or lgpo
if store.lower() == 'local':
netsh_script = dedent('''\
advfirewall
set store local
{0}
'''.format(command))
else:
netsh_script = dedent('''\
advfirewall
set store gpo = {0}
{1}
'''.format(__hostname__, command))
return _netsh_file(content=netsh_script).splitlines()
def get_settings(profile, section, store='local'):
'''
Get the firewall property from the specified profile in the specified store
as returned by ``netsh advfirewall``.
Args:
profile (str):
The firewall profile to query. Valid options are:
- domain
- public
- private
section (str):
The property to query within the selected profile. Valid options
are:
- firewallpolicy : inbound/outbound behavior
- logging : firewall logging settings
- settings : firewall properties
- state : firewalls state (on | off)
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the properties for the specified profile
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# validate input
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if section.lower() not in ('state', 'firewallpolicy', 'settings', 'logging'):
raise ValueError('Incorrect section: {0}'.format(section))
if store.lower() not in ('local', 'lgpo'):
raise ValueError('Incorrect store: {0}'.format(store))
command = 'show {0}profile {1}'.format(profile, section)
# run it
results = _netsh_command(command=command, store=store)
# sample output:
# Domain Profile Settings:
# ----------------------------------------------------------------------
# LocalFirewallRules N/A (GPO-store only)
# LocalConSecRules N/A (GPO-store only)
# InboundUserNotification Disable
# RemoteManagement Disable
# UnicastResponseToMulticast Enable
# if it's less than 3 lines it failed
if len(results) < 3:
raise CommandExecutionError('Invalid results: {0}'.format(results))
ret = {}
# Skip the first 2 lines. Add everything else to a dictionary
for line in results[3:]:
ret.update(dict(list(zip(*[iter(re.split(r"\s{2,}", line))]*2))))
# Remove spaces from the values so that `Not Configured` is detected
# correctly
for item in ret:
ret[item] = ret[item].replace(' ', '')
# special handling for firewallpolicy
if section == 'firewallpolicy':
inbound, outbound = ret['Firewall Policy'].split(',')
return {'Inbound': inbound, 'Outbound': outbound}
return ret
def get_all_settings(profile, store='local'):
    '''
    Gets all the properties for the specified profile in the specified store.

    Args:

        profile (str):
            The firewall profile to query. Valid options are:

            - domain
            - public
            - private

        store (str):
            The store to use. This is either the local firewall policy
            (``local``) or the policy defined by local group policy
            (``lgpo``). Default is ``local``

    Returns:
        dict: A dictionary containing the specified settings
    '''
    # Merge every section's settings into a single flat dictionary
    settings = {}
    for section in ('state', 'firewallpolicy', 'settings', 'logging'):
        settings.update(
            get_settings(profile=profile, section=section, store=store))
    return settings
def get_all_profiles(store='local'):
    '''
    Gets all properties for all profiles in the specified store.

    Args:

        store (str):
            The store to use. This is either the local firewall policy
            (``local``) or the policy defined by local group policy
            (``lgpo``). Default is ``local``

    Returns:
        dict: A dictionary containing the specified settings for each profile
    '''
    profiles = {}
    for label, profile in (('Domain Profile', 'domain'),
                           ('Private Profile', 'private'),
                           ('Public Profile', 'public')):
        profiles[label] = get_all_settings(profile=profile, store=store)
    return profiles
def set_firewall_settings(profile,
                          inbound=None,
                          outbound=None,
                          store='local'):
    '''
    Set the firewall inbound/outbound settings for the specified profile and
    store.

    Args:

        profile (str):
            The firewall profile to configure. Valid options are:

            - domain
            - public
            - private

        inbound (str):
            The inbound setting. If ``None`` is passed, the setting will
            remain unchanged. Valid values are:

            - blockinbound
            - blockinboundalways
            - allowinbound
            - notconfigured

            Default is ``None``

        outbound (str):
            The outbound setting. If ``None`` is passed, the setting will
            remain unchanged. Valid values are:

            - allowoutbound
            - blockoutbound
            - notconfigured

            Default is ``None``

        store (str):
            The store to use. This is either the local firewall policy
            (``local``) or the policy defined by local group policy
            (``lgpo``). Default is ``local``

    Returns:
        bool: ``True`` if successful

    Raises:
        CommandExecutionError: If an error occurs
        ValueError: If the parameters are incorrect
    '''
    valid_inbound = ('blockinbound',
                     'blockinboundalways',
                     'allowinbound',
                     'notconfigured')
    valid_outbound = ('allowoutbound',
                      'blockoutbound',
                      'notconfigured')

    # Validate everything before touching netsh
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if inbound and inbound.lower() not in valid_inbound:
        raise ValueError('Incorrect inbound value: {0}'.format(inbound))
    if outbound and outbound.lower() not in valid_outbound:
        raise ValueError('Incorrect outbound value: {0}'.format(outbound))
    if not inbound and not outbound:
        raise ValueError('Must set inbound or outbound')

    # netsh requires both directions in a single command, so when only one
    # was requested, read the current value of the other from the store and
    # re-apply it unchanged.
    if not (inbound and outbound):
        current = get_settings(profile=profile,
                               section='firewallpolicy',
                               store=store)
        inbound = inbound or current['Inbound']
        outbound = outbound or current['Outbound']

    results = _netsh_command(
        command='set {0}profile firewallpolicy {1},{2}'.format(
            profile, inbound, outbound),
        store=store)

    if results:
        raise CommandExecutionError('An error occurred: {0}'.format(results))

    return True
def set_logging_settings(profile, setting, value, store='local'):
    '''
    Configure logging settings for the Windows firewall.

    Args:

        profile (str):
            The firewall profile to configure. Valid options are:

            - domain
            - public
            - private

        setting (str):
            The logging setting to configure. Valid options are:

            - allowedconnections
            - droppedconnections
            - filename
            - maxfilesize

        value (str):
            The value to apply to the setting. Valid values are dependent
            upon the setting being configured. Valid options are:

            allowedconnections:

                - enable
                - disable
                - notconfigured

            droppedconnections:

                - enable
                - disable
                - notconfigured

            filename:

                - Full path and name of the firewall log file
                - notconfigured

            maxfilesize:

                - 1 - 32767 (Kb)
                - notconfigured

        store (str):
            The store to use. This is either the local firewall policy
            (``local``) or the policy defined by local group policy
            (``lgpo``). Default is ``local``

    Returns:
        bool: ``True`` if successful

    Raises:
        CommandExecutionError: If an error occurs
        ValueError: If the parameters are incorrect
    '''
    # Validate everything before touching netsh
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if setting.lower() not in ('allowedconnections',
                               'droppedconnections',
                               'filename',
                               'maxfilesize'):
        raise ValueError('Incorrect setting: {0}'.format(setting))

    # The allowed values depend on which setting is being changed
    if setting.lower() in ('allowedconnections', 'droppedconnections'):
        if value.lower() not in ('enable', 'disable', 'notconfigured'):
            raise ValueError('Incorrect value: {0}'.format(value))
    # TODO: Consider adding something like the following to validate filename
    # https://stackoverflow.com/questions/9532499/check-whether-a-path-is-valid-in-python-without-creating-a-file-at-the-paths-ta
    if setting.lower() == 'maxfilesize':
        if value.lower() != 'notconfigured':
            # Must be a number between 1 and 32767
            try:
                size = int(value)
            except ValueError:
                raise ValueError('Incorrect value: {0}'.format(value))
            if not 1 <= size <= 32767:
                raise ValueError('Incorrect value: {0}'.format(value))

    # _netsh_command returns the command output as a list of lines;
    # an empty list means the command succeeded.
    results = _netsh_command(
        command='set {0}profile logging {1} {2}'.format(
            profile, setting, value),
        store=store)

    if results:
        raise CommandExecutionError('An error occurred: {0}'.format(results))

    return True
def set_state(profile, state, store='local'):
'''
Configure the firewall state.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
state (str):
The firewall state. Valid options are:
- on
- off
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if state.lower() not in ('on', 'off', 'notconfigured'):
raise ValueError('Incorrect state: {0}'.format(state))
# Run the command
command = 'set {0}profile state {1}'.format(profile, state)
results = _netsh_command(command=command, store=store)
# A successful run should return an empty list
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
|
saltstack/salt
|
salt/utils/win_lgpo_netsh.py
|
set_state
|
python
|
def set_state(profile, state, store='local'):
'''
Configure the firewall state.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
state (str):
The firewall state. Valid options are:
- on
- off
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if state.lower() not in ('on', 'off', 'notconfigured'):
raise ValueError('Incorrect state: {0}'.format(state))
# Run the command
command = 'set {0}profile state {1}'.format(profile, state)
results = _netsh_command(command=command, store=store)
# A successful run should return an empty list
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
|
Configure the firewall state.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
state (str):
The firewall state. Valid options are:
- on
- off
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_lgpo_netsh.py#L541-L591
|
[
"def _netsh_command(command, store):\n if store.lower() not in ('local', 'lgpo'):\n raise ValueError('Incorrect store: {0}'.format(store))\n # set the store for local or lgpo\n if store.lower() == 'local':\n netsh_script = dedent('''\\\n advfirewall\n set store local\n {0}\n '''.format(command))\n else:\n netsh_script = dedent('''\\\n advfirewall\n set store gpo = {0}\n {1}\n '''.format(__hostname__, command))\n return _netsh_file(content=netsh_script).splitlines()\n"
] |
# -*- coding: utf-8 -*-
r'''
A salt util for modifying firewall settings.
.. versionadded:: 2018.3.4
.. versionadded:: 2019.2.0
This util allows you to modify firewall settings in the local group policy in
addition to the normal firewall settings. Parameters are taken from the
netsh advfirewall prompt.
.. note::
More information can be found in the advfirewall context in netsh. This can
be access by opening a netsh prompt. At a command prompt type the following:
c:\>netsh
netsh>advfirewall
netsh advfirewall>set help
netsh advfirewall>set domain help
Usage:
.. code-block:: python
import salt.utils.win_lgpo_netsh
# Get the inbound/outbound firewall settings for connections on the
# local domain profile
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy')
# Get the inbound/outbound firewall settings for connections on the
# domain profile as defined by local group policy
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy',
store='lgpo')
# Get all firewall settings for connections on the domain profile
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain')
# Get all firewall settings for connections on the domain profile as
# defined by local group policy
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain', store='lgpo')
# Get all firewall settings for all profiles
salt.utils.win_lgpo_netsh.get_all_settings()
# Get all firewall settings for all profiles as defined by local group
# policy
salt.utils.win_lgpo_netsh.get_all_settings(store='lgpo')
# Set the inbound setting for the domain profile to block inbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound')
# Set the outbound setting for the domain profile to allow outbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
outbound='allowoutbound')
# Set inbound/outbound settings for the domain profile in the group
# policy to block inbound and allow outbound
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound',
outbound='allowoutbound',
store='lgpo')
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
from textwrap import dedent
import logging
import os
import re
import socket
import tempfile
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
from salt.ext.six.moves import zip
log = logging.getLogger(__name__)
__hostname__ = socket.gethostname()
__virtualname__ = 'netsh'
# Although utils are often directly imported, it is also possible to use the
# loader.
def __virtual__():
'''
Only load if on a Windows system
'''
if not salt.utils.platform.is_windows():
return False, 'This utility only available on Windows'
return __virtualname__
def _netsh_file(content):
'''
helper function to get the results of ``netsh -f content.txt``
Running ``netsh`` will drop you into a ``netsh`` prompt where you can issue
``netsh`` commands. You can put a series of commands in an external file and
run them as if from a ``netsh`` prompt using the ``-f`` switch. That's what
this function does.
Args:
content (str):
The contents of the file that will be run by the ``netsh -f``
command
Returns:
str: The text returned by the netsh command
'''
with tempfile.NamedTemporaryFile(mode='w',
prefix='salt-',
suffix='.netsh',
delete=False) as fp:
fp.write(content)
try:
log.debug('%s:\n%s', fp.name, content)
return salt.modules.cmdmod.run('netsh -f {0}'.format(fp.name), python_shell=True)
finally:
os.remove(fp.name)
def _netsh_command(command, store):
if store.lower() not in ('local', 'lgpo'):
raise ValueError('Incorrect store: {0}'.format(store))
# set the store for local or lgpo
if store.lower() == 'local':
netsh_script = dedent('''\
advfirewall
set store local
{0}
'''.format(command))
else:
netsh_script = dedent('''\
advfirewall
set store gpo = {0}
{1}
'''.format(__hostname__, command))
return _netsh_file(content=netsh_script).splitlines()
def get_settings(profile, section, store='local'):
'''
Get the firewall property from the specified profile in the specified store
as returned by ``netsh advfirewall``.
Args:
profile (str):
The firewall profile to query. Valid options are:
- domain
- public
- private
section (str):
The property to query within the selected profile. Valid options
are:
- firewallpolicy : inbound/outbound behavior
- logging : firewall logging settings
- settings : firewall properties
- state : firewalls state (on | off)
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the properties for the specified profile
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# validate input
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if section.lower() not in ('state', 'firewallpolicy', 'settings', 'logging'):
raise ValueError('Incorrect section: {0}'.format(section))
if store.lower() not in ('local', 'lgpo'):
raise ValueError('Incorrect store: {0}'.format(store))
command = 'show {0}profile {1}'.format(profile, section)
# run it
results = _netsh_command(command=command, store=store)
# sample output:
# Domain Profile Settings:
# ----------------------------------------------------------------------
# LocalFirewallRules N/A (GPO-store only)
# LocalConSecRules N/A (GPO-store only)
# InboundUserNotification Disable
# RemoteManagement Disable
# UnicastResponseToMulticast Enable
# if it's less than 3 lines it failed
if len(results) < 3:
raise CommandExecutionError('Invalid results: {0}'.format(results))
ret = {}
# Skip the first 2 lines. Add everything else to a dictionary
for line in results[3:]:
ret.update(dict(list(zip(*[iter(re.split(r"\s{2,}", line))]*2))))
# Remove spaces from the values so that `Not Configured` is detected
# correctly
for item in ret:
ret[item] = ret[item].replace(' ', '')
# special handling for firewallpolicy
if section == 'firewallpolicy':
inbound, outbound = ret['Firewall Policy'].split(',')
return {'Inbound': inbound, 'Outbound': outbound}
return ret
def get_all_settings(profile, store='local'):
'''
Gets all the properties for the specified profile in the specified store
Args:
profile (str):
The firewall profile to query. Valid options are:
- domain
- public
- private
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the specified settings
'''
ret = dict()
ret.update(get_settings(profile=profile, section='state', store=store))
ret.update(get_settings(profile=profile, section='firewallpolicy', store=store))
ret.update(get_settings(profile=profile, section='settings', store=store))
ret.update(get_settings(profile=profile, section='logging', store=store))
return ret
def get_all_profiles(store='local'):
'''
Gets all properties for all profiles in the specified store
Args:
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the specified settings for each profile
'''
return {
'Domain Profile': get_all_settings(profile='domain', store=store),
'Private Profile': get_all_settings(profile='private', store=store),
'Public Profile': get_all_settings(profile='public', store=store)
}
def set_firewall_settings(profile,
inbound=None,
outbound=None,
store='local'):
'''
Set the firewall inbound/outbound settings for the specified profile and
store
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
inbound (str):
The inbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- blockinbound
- blockinboundalways
- allowinbound
- notconfigured
Default is ``None``
outbound (str):
The outbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- allowoutbound
- blockoutbound
- notconfigured
Default is ``None``
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if inbound and inbound.lower() not in ('blockinbound',
'blockinboundalways',
'allowinbound',
'notconfigured'):
raise ValueError('Incorrect inbound value: {0}'.format(inbound))
if outbound and outbound.lower() not in ('allowoutbound',
'blockoutbound',
'notconfigured'):
raise ValueError('Incorrect outbound value: {0}'.format(outbound))
if not inbound and not outbound:
raise ValueError('Must set inbound or outbound')
# You have to specify inbound and outbound setting at the same time
# If you're only specifying one, you have to get the current setting for the
# other
if not inbound or not outbound:
ret = get_settings(profile=profile,
section='firewallpolicy',
store=store)
if not inbound:
inbound = ret['Inbound']
if not outbound:
outbound = ret['Outbound']
command = 'set {0}profile firewallpolicy {1},{2}' \
''.format(profile, inbound, outbound)
results = _netsh_command(command=command, store=store)
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
def set_logging_settings(profile, setting, value, store='local'):
'''
Configure logging settings for the Windows firewall.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
setting (str):
The logging setting to configure. Valid options are:
- allowedconnections
- droppedconnections
- filename
- maxfilesize
value (str):
The value to apply to the setting. Valid values are dependent upon
the setting being configured. Valid options are:
allowedconnections:
- enable
- disable
- notconfigured
droppedconnections:
- enable
- disable
- notconfigured
filename:
- Full path and name of the firewall log file
- notconfigured
maxfilesize:
- 1 - 32767 (Kb)
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if setting.lower() not in ('allowedconnections',
'droppedconnections',
'filename',
'maxfilesize'):
raise ValueError('Incorrect setting: {0}'.format(setting))
if setting.lower() in ('allowedconnections', 'droppedconnections'):
if value.lower() not in ('enable', 'disable', 'notconfigured'):
raise ValueError('Incorrect value: {0}'.format(value))
# TODO: Consider adding something like the following to validate filename
# https://stackoverflow.com/questions/9532499/check-whether-a-path-is-valid-in-python-without-creating-a-file-at-the-paths-ta
if setting.lower() == 'maxfilesize':
if value.lower() != 'notconfigured':
# Must be a number between 1 and 32767
try:
int(value)
except ValueError:
raise ValueError('Incorrect value: {0}'.format(value))
if not 1 <= int(value) <= 32767:
raise ValueError('Incorrect value: {0}'.format(value))
# Run the command
command = 'set {0}profile logging {1} {2}'.format(profile, setting, value)
results = _netsh_command(command=command, store=store)
# A successful run should return an empty list
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
def set_settings(profile, setting, value, store='local'):
'''
Configure firewall settings.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
setting (str):
The firewall setting to configure. Valid options are:
- localfirewallrules
- localconsecrules
- inboundusernotification
- remotemanagement
- unicastresponsetomulticast
value (str):
The value to apply to the setting. Valid options are
- enable
- disable
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if setting.lower() not in ('localfirewallrules',
'localconsecrules',
'inboundusernotification',
'remotemanagement',
'unicastresponsetomulticast'):
raise ValueError('Incorrect setting: {0}'.format(setting))
if value.lower() not in ('enable', 'disable', 'notconfigured'):
raise ValueError('Incorrect value: {0}'.format(value))
# Run the command
command = 'set {0}profile settings {1} {2}'.format(profile, setting, value)
results = _netsh_command(command=command, store=store)
# A successful run should return an empty list
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
|
saltstack/salt
|
salt/modules/aliases.py
|
__parse_aliases
|
python
|
def __parse_aliases():
'''
Parse the aliases file, and return a list of line components:
[
(alias1, target1, comment1),
(alias2, target2, comment2),
]
'''
afn = __get_aliases_filename()
ret = []
if not os.path.isfile(afn):
return ret
with salt.utils.files.fopen(afn, 'r') as ifile:
for line in ifile:
line = salt.utils.stringutils.to_unicode(line)
match = __ALIAS_RE.match(line)
if match:
ret.append(match.groups())
else:
ret.append((None, None, line.strip()))
return ret
|
Parse the aliases file, and return a list of line components:
[
(alias1, target1, comment1),
(alias2, target2, comment2),
]
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aliases.py#L40-L61
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def to_unicode(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str or unicode, return unicode (str for python 3)\n '''\n def _normalize(s):\n return unicodedata.normalize('NFC', s) if normalize else s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n exc = None\n if six.PY3:\n if isinstance(s, str):\n return _normalize(s)\n elif isinstance(s, (bytes, bytearray)):\n return _normalize(to_str(s, encoding, errors))\n raise TypeError('expected str, bytes, or bytearray')\n else:\n # This needs to be str and not six.string_types, since if the string is\n # already a unicode type, it does not need to be decoded (and doing so\n # will raise an exception).\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n return _normalize(s)\n elif isinstance(s, (str, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str or bytearray')\n",
"def __get_aliases_filename():\n '''\n Return the path to the appropriate aliases file\n '''\n return os.path.realpath(__salt__['config.option']('aliases.file'))\n"
] |
# -*- coding: utf-8 -*-
'''
Manage the information in the aliases file
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os
import re
import stat
import tempfile
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError
# Import third party libs
from salt.ext import six
__outputter__ = {
'rm_alias': 'txt',
'has_target': 'txt',
'get_target': 'txt',
'set_target': 'txt',
'list_aliases': 'yaml',
}
__ALIAS_RE = re.compile(r'([^:#]*)\s*:?\s*([^#]*?)(\s+#.*|$)')
def __get_aliases_filename():
'''
Return the path to the appropriate aliases file
'''
return os.path.realpath(__salt__['config.option']('aliases.file'))
def __write_aliases_file(lines):
'''
Write a new copy of the aliases file. Lines is a list of lines
as returned by __parse_aliases.
'''
afn = __get_aliases_filename()
adir = os.path.dirname(afn)
out = tempfile.NamedTemporaryFile(dir=adir, delete=False)
if not __opts__.get('integration.test', False):
if os.path.isfile(afn):
afn_st = os.stat(afn)
os.chmod(out.name, stat.S_IMODE(afn_st.st_mode))
os.chown(out.name, afn_st.st_uid, afn_st.st_gid)
else:
os.chmod(out.name, 0o644)
os.chown(out.name, 0, 0)
for (line_alias, line_target, line_comment) in lines:
if isinstance(line_target, list):
line_target = ', '.join(line_target)
if not line_comment:
line_comment = ''
if line_alias and line_target:
write_line = '{0}: {1}{2}\n'.format(
line_alias, line_target, line_comment
)
else:
write_line = '{0}\n'.format(line_comment)
if six.PY3:
write_line = write_line.encode(__salt_system_encoding__)
out.write(write_line)
out.close()
os.rename(out.name, afn)
# Search $PATH for the newalises command
newaliases = salt.utils.path.which('newaliases')
if newaliases is not None:
__salt__['cmd.run'](newaliases)
return True
def list_aliases():
'''
Return the aliases found in the aliases file in this format::
{'alias': 'target'}
CLI Example:
.. code-block:: bash
salt '*' aliases.list_aliases
'''
ret = dict((alias, target) for alias, target, comment in __parse_aliases() if alias)
return ret
def get_target(alias):
'''
Return the target associated with an alias
CLI Example:
.. code-block:: bash
salt '*' aliases.get_target alias
'''
aliases = list_aliases()
if alias in aliases:
return aliases[alias]
return ''
def has_target(alias, target):
'''
Return true if the alias/target is set
CLI Example:
.. code-block:: bash
salt '*' aliases.has_target alias target
'''
if target == '':
raise SaltInvocationError('target can not be an empty string')
aliases = list_aliases()
if alias not in aliases:
return False
if isinstance(target, list):
target = ', '.join(target)
return target == aliases[alias]
def set_target(alias, target):
'''
Set the entry in the aliases file for the given alias, this will overwrite
any previous entry for the given alias or create a new one if it does not
exist.
CLI Example:
.. code-block:: bash
salt '*' aliases.set_target alias target
'''
if alias == '':
raise SaltInvocationError('alias can not be an empty string')
if target == '':
raise SaltInvocationError('target can not be an empty string')
if get_target(alias) == target:
return True
lines = __parse_aliases()
out = []
ovr = False
for (line_alias, line_target, line_comment) in lines:
if line_alias == alias:
if not ovr:
out.append((alias, target, line_comment))
ovr = True
else:
out.append((line_alias, line_target, line_comment))
if not ovr:
out.append((alias, target, ''))
__write_aliases_file(out)
return True
def rm_alias(alias):
'''
Remove an entry from the aliases file
CLI Example:
.. code-block:: bash
salt '*' aliases.rm_alias alias
'''
if not get_target(alias):
return True
lines = __parse_aliases()
out = []
for (line_alias, line_target, line_comment) in lines:
if line_alias != alias:
out.append((line_alias, line_target, line_comment))
__write_aliases_file(out)
return True
|
saltstack/salt
|
salt/modules/aliases.py
|
__write_aliases_file
|
python
|
def __write_aliases_file(lines):
'''
Write a new copy of the aliases file. Lines is a list of lines
as returned by __parse_aliases.
'''
afn = __get_aliases_filename()
adir = os.path.dirname(afn)
out = tempfile.NamedTemporaryFile(dir=adir, delete=False)
if not __opts__.get('integration.test', False):
if os.path.isfile(afn):
afn_st = os.stat(afn)
os.chmod(out.name, stat.S_IMODE(afn_st.st_mode))
os.chown(out.name, afn_st.st_uid, afn_st.st_gid)
else:
os.chmod(out.name, 0o644)
os.chown(out.name, 0, 0)
for (line_alias, line_target, line_comment) in lines:
if isinstance(line_target, list):
line_target = ', '.join(line_target)
if not line_comment:
line_comment = ''
if line_alias and line_target:
write_line = '{0}: {1}{2}\n'.format(
line_alias, line_target, line_comment
)
else:
write_line = '{0}\n'.format(line_comment)
if six.PY3:
write_line = write_line.encode(__salt_system_encoding__)
out.write(write_line)
out.close()
os.rename(out.name, afn)
# Search $PATH for the newalises command
newaliases = salt.utils.path.which('newaliases')
if newaliases is not None:
__salt__['cmd.run'](newaliases)
return True
|
Write a new copy of the aliases file. Lines is a list of lines
as returned by __parse_aliases.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aliases.py#L64-L106
|
[
"def __get_aliases_filename():\n '''\n Return the path to the appropriate aliases file\n '''\n return os.path.realpath(__salt__['config.option']('aliases.file'))\n"
] |
# -*- coding: utf-8 -*-
'''
Manage the information in the aliases file
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os
import re
import stat
import tempfile
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError
# Import third party libs
from salt.ext import six
__outputter__ = {
'rm_alias': 'txt',
'has_target': 'txt',
'get_target': 'txt',
'set_target': 'txt',
'list_aliases': 'yaml',
}
__ALIAS_RE = re.compile(r'([^:#]*)\s*:?\s*([^#]*?)(\s+#.*|$)')
def __get_aliases_filename():
'''
Return the path to the appropriate aliases file
'''
return os.path.realpath(__salt__['config.option']('aliases.file'))
def __parse_aliases():
'''
Parse the aliases file, and return a list of line components:
[
(alias1, target1, comment1),
(alias2, target2, comment2),
]
'''
afn = __get_aliases_filename()
ret = []
if not os.path.isfile(afn):
return ret
with salt.utils.files.fopen(afn, 'r') as ifile:
for line in ifile:
line = salt.utils.stringutils.to_unicode(line)
match = __ALIAS_RE.match(line)
if match:
ret.append(match.groups())
else:
ret.append((None, None, line.strip()))
return ret
def list_aliases():
'''
Return the aliases found in the aliases file in this format::
{'alias': 'target'}
CLI Example:
.. code-block:: bash
salt '*' aliases.list_aliases
'''
ret = dict((alias, target) for alias, target, comment in __parse_aliases() if alias)
return ret
def get_target(alias):
'''
Return the target associated with an alias
CLI Example:
.. code-block:: bash
salt '*' aliases.get_target alias
'''
aliases = list_aliases()
if alias in aliases:
return aliases[alias]
return ''
def has_target(alias, target):
'''
Return true if the alias/target is set
CLI Example:
.. code-block:: bash
salt '*' aliases.has_target alias target
'''
if target == '':
raise SaltInvocationError('target can not be an empty string')
aliases = list_aliases()
if alias not in aliases:
return False
if isinstance(target, list):
target = ', '.join(target)
return target == aliases[alias]
def set_target(alias, target):
'''
Set the entry in the aliases file for the given alias, this will overwrite
any previous entry for the given alias or create a new one if it does not
exist.
CLI Example:
.. code-block:: bash
salt '*' aliases.set_target alias target
'''
if alias == '':
raise SaltInvocationError('alias can not be an empty string')
if target == '':
raise SaltInvocationError('target can not be an empty string')
if get_target(alias) == target:
return True
lines = __parse_aliases()
out = []
ovr = False
for (line_alias, line_target, line_comment) in lines:
if line_alias == alias:
if not ovr:
out.append((alias, target, line_comment))
ovr = True
else:
out.append((line_alias, line_target, line_comment))
if not ovr:
out.append((alias, target, ''))
__write_aliases_file(out)
return True
def rm_alias(alias):
'''
Remove an entry from the aliases file
CLI Example:
.. code-block:: bash
salt '*' aliases.rm_alias alias
'''
if not get_target(alias):
return True
lines = __parse_aliases()
out = []
for (line_alias, line_target, line_comment) in lines:
if line_alias != alias:
out.append((line_alias, line_target, line_comment))
__write_aliases_file(out)
return True
|
saltstack/salt
|
salt/modules/aliases.py
|
list_aliases
|
python
|
def list_aliases():
'''
Return the aliases found in the aliases file in this format::
{'alias': 'target'}
CLI Example:
.. code-block:: bash
salt '*' aliases.list_aliases
'''
ret = dict((alias, target) for alias, target, comment in __parse_aliases() if alias)
return ret
|
Return the aliases found in the aliases file in this format::
{'alias': 'target'}
CLI Example:
.. code-block:: bash
salt '*' aliases.list_aliases
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aliases.py#L109-L122
|
[
"def __parse_aliases():\n '''\n Parse the aliases file, and return a list of line components:\n\n [\n (alias1, target1, comment1),\n (alias2, target2, comment2),\n ]\n '''\n afn = __get_aliases_filename()\n ret = []\n if not os.path.isfile(afn):\n return ret\n with salt.utils.files.fopen(afn, 'r') as ifile:\n for line in ifile:\n line = salt.utils.stringutils.to_unicode(line)\n match = __ALIAS_RE.match(line)\n if match:\n ret.append(match.groups())\n else:\n ret.append((None, None, line.strip()))\n return ret\n"
] |
# -*- coding: utf-8 -*-
'''
Manage the information in the aliases file
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os
import re
import stat
import tempfile
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError
# Import third party libs
from salt.ext import six
__outputter__ = {
'rm_alias': 'txt',
'has_target': 'txt',
'get_target': 'txt',
'set_target': 'txt',
'list_aliases': 'yaml',
}
__ALIAS_RE = re.compile(r'([^:#]*)\s*:?\s*([^#]*?)(\s+#.*|$)')
def __get_aliases_filename():
'''
Return the path to the appropriate aliases file
'''
return os.path.realpath(__salt__['config.option']('aliases.file'))
def __parse_aliases():
'''
Parse the aliases file, and return a list of line components:
[
(alias1, target1, comment1),
(alias2, target2, comment2),
]
'''
afn = __get_aliases_filename()
ret = []
if not os.path.isfile(afn):
return ret
with salt.utils.files.fopen(afn, 'r') as ifile:
for line in ifile:
line = salt.utils.stringutils.to_unicode(line)
match = __ALIAS_RE.match(line)
if match:
ret.append(match.groups())
else:
ret.append((None, None, line.strip()))
return ret
def __write_aliases_file(lines):
'''
Write a new copy of the aliases file. Lines is a list of lines
as returned by __parse_aliases.
'''
afn = __get_aliases_filename()
adir = os.path.dirname(afn)
out = tempfile.NamedTemporaryFile(dir=adir, delete=False)
if not __opts__.get('integration.test', False):
if os.path.isfile(afn):
afn_st = os.stat(afn)
os.chmod(out.name, stat.S_IMODE(afn_st.st_mode))
os.chown(out.name, afn_st.st_uid, afn_st.st_gid)
else:
os.chmod(out.name, 0o644)
os.chown(out.name, 0, 0)
for (line_alias, line_target, line_comment) in lines:
if isinstance(line_target, list):
line_target = ', '.join(line_target)
if not line_comment:
line_comment = ''
if line_alias and line_target:
write_line = '{0}: {1}{2}\n'.format(
line_alias, line_target, line_comment
)
else:
write_line = '{0}\n'.format(line_comment)
if six.PY3:
write_line = write_line.encode(__salt_system_encoding__)
out.write(write_line)
out.close()
os.rename(out.name, afn)
# Search $PATH for the newalises command
newaliases = salt.utils.path.which('newaliases')
if newaliases is not None:
__salt__['cmd.run'](newaliases)
return True
def get_target(alias):
'''
Return the target associated with an alias
CLI Example:
.. code-block:: bash
salt '*' aliases.get_target alias
'''
aliases = list_aliases()
if alias in aliases:
return aliases[alias]
return ''
def has_target(alias, target):
'''
Return true if the alias/target is set
CLI Example:
.. code-block:: bash
salt '*' aliases.has_target alias target
'''
if target == '':
raise SaltInvocationError('target can not be an empty string')
aliases = list_aliases()
if alias not in aliases:
return False
if isinstance(target, list):
target = ', '.join(target)
return target == aliases[alias]
def set_target(alias, target):
'''
Set the entry in the aliases file for the given alias, this will overwrite
any previous entry for the given alias or create a new one if it does not
exist.
CLI Example:
.. code-block:: bash
salt '*' aliases.set_target alias target
'''
if alias == '':
raise SaltInvocationError('alias can not be an empty string')
if target == '':
raise SaltInvocationError('target can not be an empty string')
if get_target(alias) == target:
return True
lines = __parse_aliases()
out = []
ovr = False
for (line_alias, line_target, line_comment) in lines:
if line_alias == alias:
if not ovr:
out.append((alias, target, line_comment))
ovr = True
else:
out.append((line_alias, line_target, line_comment))
if not ovr:
out.append((alias, target, ''))
__write_aliases_file(out)
return True
def rm_alias(alias):
'''
Remove an entry from the aliases file
CLI Example:
.. code-block:: bash
salt '*' aliases.rm_alias alias
'''
if not get_target(alias):
return True
lines = __parse_aliases()
out = []
for (line_alias, line_target, line_comment) in lines:
if line_alias != alias:
out.append((line_alias, line_target, line_comment))
__write_aliases_file(out)
return True
|
saltstack/salt
|
salt/modules/aliases.py
|
has_target
|
python
|
def has_target(alias, target):
'''
Return true if the alias/target is set
CLI Example:
.. code-block:: bash
salt '*' aliases.has_target alias target
'''
if target == '':
raise SaltInvocationError('target can not be an empty string')
aliases = list_aliases()
if alias not in aliases:
return False
if isinstance(target, list):
target = ', '.join(target)
return target == aliases[alias]
|
Return true if the alias/target is set
CLI Example:
.. code-block:: bash
salt '*' aliases.has_target alias target
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aliases.py#L141-L158
|
[
"def list_aliases():\n '''\n Return the aliases found in the aliases file in this format::\n\n {'alias': 'target'}\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' aliases.list_aliases\n '''\n ret = dict((alias, target) for alias, target, comment in __parse_aliases() if alias)\n return ret\n"
] |
# -*- coding: utf-8 -*-
'''
Manage the information in the aliases file
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os
import re
import stat
import tempfile
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError
# Import third party libs
from salt.ext import six
__outputter__ = {
'rm_alias': 'txt',
'has_target': 'txt',
'get_target': 'txt',
'set_target': 'txt',
'list_aliases': 'yaml',
}
__ALIAS_RE = re.compile(r'([^:#]*)\s*:?\s*([^#]*?)(\s+#.*|$)')
def __get_aliases_filename():
'''
Return the path to the appropriate aliases file
'''
return os.path.realpath(__salt__['config.option']('aliases.file'))
def __parse_aliases():
'''
Parse the aliases file, and return a list of line components:
[
(alias1, target1, comment1),
(alias2, target2, comment2),
]
'''
afn = __get_aliases_filename()
ret = []
if not os.path.isfile(afn):
return ret
with salt.utils.files.fopen(afn, 'r') as ifile:
for line in ifile:
line = salt.utils.stringutils.to_unicode(line)
match = __ALIAS_RE.match(line)
if match:
ret.append(match.groups())
else:
ret.append((None, None, line.strip()))
return ret
def __write_aliases_file(lines):
'''
Write a new copy of the aliases file. Lines is a list of lines
as returned by __parse_aliases.
'''
afn = __get_aliases_filename()
adir = os.path.dirname(afn)
out = tempfile.NamedTemporaryFile(dir=adir, delete=False)
if not __opts__.get('integration.test', False):
if os.path.isfile(afn):
afn_st = os.stat(afn)
os.chmod(out.name, stat.S_IMODE(afn_st.st_mode))
os.chown(out.name, afn_st.st_uid, afn_st.st_gid)
else:
os.chmod(out.name, 0o644)
os.chown(out.name, 0, 0)
for (line_alias, line_target, line_comment) in lines:
if isinstance(line_target, list):
line_target = ', '.join(line_target)
if not line_comment:
line_comment = ''
if line_alias and line_target:
write_line = '{0}: {1}{2}\n'.format(
line_alias, line_target, line_comment
)
else:
write_line = '{0}\n'.format(line_comment)
if six.PY3:
write_line = write_line.encode(__salt_system_encoding__)
out.write(write_line)
out.close()
os.rename(out.name, afn)
# Search $PATH for the newalises command
newaliases = salt.utils.path.which('newaliases')
if newaliases is not None:
__salt__['cmd.run'](newaliases)
return True
def list_aliases():
'''
Return the aliases found in the aliases file in this format::
{'alias': 'target'}
CLI Example:
.. code-block:: bash
salt '*' aliases.list_aliases
'''
ret = dict((alias, target) for alias, target, comment in __parse_aliases() if alias)
return ret
def get_target(alias):
'''
Return the target associated with an alias
CLI Example:
.. code-block:: bash
salt '*' aliases.get_target alias
'''
aliases = list_aliases()
if alias in aliases:
return aliases[alias]
return ''
def set_target(alias, target):
'''
Set the entry in the aliases file for the given alias, this will overwrite
any previous entry for the given alias or create a new one if it does not
exist.
CLI Example:
.. code-block:: bash
salt '*' aliases.set_target alias target
'''
if alias == '':
raise SaltInvocationError('alias can not be an empty string')
if target == '':
raise SaltInvocationError('target can not be an empty string')
if get_target(alias) == target:
return True
lines = __parse_aliases()
out = []
ovr = False
for (line_alias, line_target, line_comment) in lines:
if line_alias == alias:
if not ovr:
out.append((alias, target, line_comment))
ovr = True
else:
out.append((line_alias, line_target, line_comment))
if not ovr:
out.append((alias, target, ''))
__write_aliases_file(out)
return True
def rm_alias(alias):
'''
Remove an entry from the aliases file
CLI Example:
.. code-block:: bash
salt '*' aliases.rm_alias alias
'''
if not get_target(alias):
return True
lines = __parse_aliases()
out = []
for (line_alias, line_target, line_comment) in lines:
if line_alias != alias:
out.append((line_alias, line_target, line_comment))
__write_aliases_file(out)
return True
|
saltstack/salt
|
salt/modules/aliases.py
|
set_target
|
python
|
def set_target(alias, target):
'''
Set the entry in the aliases file for the given alias, this will overwrite
any previous entry for the given alias or create a new one if it does not
exist.
CLI Example:
.. code-block:: bash
salt '*' aliases.set_target alias target
'''
if alias == '':
raise SaltInvocationError('alias can not be an empty string')
if target == '':
raise SaltInvocationError('target can not be an empty string')
if get_target(alias) == target:
return True
lines = __parse_aliases()
out = []
ovr = False
for (line_alias, line_target, line_comment) in lines:
if line_alias == alias:
if not ovr:
out.append((alias, target, line_comment))
ovr = True
else:
out.append((line_alias, line_target, line_comment))
if not ovr:
out.append((alias, target, ''))
__write_aliases_file(out)
return True
|
Set the entry in the aliases file for the given alias, this will overwrite
any previous entry for the given alias or create a new one if it does not
exist.
CLI Example:
.. code-block:: bash
salt '*' aliases.set_target alias target
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aliases.py#L161-L197
|
[
"def get_target(alias):\n '''\n Return the target associated with an alias\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' aliases.get_target alias\n '''\n aliases = list_aliases()\n if alias in aliases:\n return aliases[alias]\n return ''\n",
"def __parse_aliases():\n '''\n Parse the aliases file, and return a list of line components:\n\n [\n (alias1, target1, comment1),\n (alias2, target2, comment2),\n ]\n '''\n afn = __get_aliases_filename()\n ret = []\n if not os.path.isfile(afn):\n return ret\n with salt.utils.files.fopen(afn, 'r') as ifile:\n for line in ifile:\n line = salt.utils.stringutils.to_unicode(line)\n match = __ALIAS_RE.match(line)\n if match:\n ret.append(match.groups())\n else:\n ret.append((None, None, line.strip()))\n return ret\n",
"def __write_aliases_file(lines):\n '''\n Write a new copy of the aliases file. Lines is a list of lines\n as returned by __parse_aliases.\n '''\n afn = __get_aliases_filename()\n adir = os.path.dirname(afn)\n\n out = tempfile.NamedTemporaryFile(dir=adir, delete=False)\n\n if not __opts__.get('integration.test', False):\n if os.path.isfile(afn):\n afn_st = os.stat(afn)\n os.chmod(out.name, stat.S_IMODE(afn_st.st_mode))\n os.chown(out.name, afn_st.st_uid, afn_st.st_gid)\n else:\n os.chmod(out.name, 0o644)\n os.chown(out.name, 0, 0)\n\n for (line_alias, line_target, line_comment) in lines:\n if isinstance(line_target, list):\n line_target = ', '.join(line_target)\n if not line_comment:\n line_comment = ''\n if line_alias and line_target:\n write_line = '{0}: {1}{2}\\n'.format(\n line_alias, line_target, line_comment\n )\n else:\n write_line = '{0}\\n'.format(line_comment)\n if six.PY3:\n write_line = write_line.encode(__salt_system_encoding__)\n out.write(write_line)\n\n out.close()\n os.rename(out.name, afn)\n\n # Search $PATH for the newalises command\n newaliases = salt.utils.path.which('newaliases')\n if newaliases is not None:\n __salt__['cmd.run'](newaliases)\n\n return True\n"
] |
# -*- coding: utf-8 -*-
'''
Manage the information in the aliases file
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os
import re
import stat
import tempfile
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError
# Import third party libs
from salt.ext import six
__outputter__ = {
'rm_alias': 'txt',
'has_target': 'txt',
'get_target': 'txt',
'set_target': 'txt',
'list_aliases': 'yaml',
}
__ALIAS_RE = re.compile(r'([^:#]*)\s*:?\s*([^#]*?)(\s+#.*|$)')
def __get_aliases_filename():
'''
Return the path to the appropriate aliases file
'''
return os.path.realpath(__salt__['config.option']('aliases.file'))
def __parse_aliases():
'''
Parse the aliases file, and return a list of line components:
[
(alias1, target1, comment1),
(alias2, target2, comment2),
]
'''
afn = __get_aliases_filename()
ret = []
if not os.path.isfile(afn):
return ret
with salt.utils.files.fopen(afn, 'r') as ifile:
for line in ifile:
line = salt.utils.stringutils.to_unicode(line)
match = __ALIAS_RE.match(line)
if match:
ret.append(match.groups())
else:
ret.append((None, None, line.strip()))
return ret
def __write_aliases_file(lines):
'''
Write a new copy of the aliases file. Lines is a list of lines
as returned by __parse_aliases.
'''
afn = __get_aliases_filename()
adir = os.path.dirname(afn)
out = tempfile.NamedTemporaryFile(dir=adir, delete=False)
if not __opts__.get('integration.test', False):
if os.path.isfile(afn):
afn_st = os.stat(afn)
os.chmod(out.name, stat.S_IMODE(afn_st.st_mode))
os.chown(out.name, afn_st.st_uid, afn_st.st_gid)
else:
os.chmod(out.name, 0o644)
os.chown(out.name, 0, 0)
for (line_alias, line_target, line_comment) in lines:
if isinstance(line_target, list):
line_target = ', '.join(line_target)
if not line_comment:
line_comment = ''
if line_alias and line_target:
write_line = '{0}: {1}{2}\n'.format(
line_alias, line_target, line_comment
)
else:
write_line = '{0}\n'.format(line_comment)
if six.PY3:
write_line = write_line.encode(__salt_system_encoding__)
out.write(write_line)
out.close()
os.rename(out.name, afn)
# Search $PATH for the newalises command
newaliases = salt.utils.path.which('newaliases')
if newaliases is not None:
__salt__['cmd.run'](newaliases)
return True
def list_aliases():
'''
Return the aliases found in the aliases file in this format::
{'alias': 'target'}
CLI Example:
.. code-block:: bash
salt '*' aliases.list_aliases
'''
ret = dict((alias, target) for alias, target, comment in __parse_aliases() if alias)
return ret
def get_target(alias):
'''
Return the target associated with an alias
CLI Example:
.. code-block:: bash
salt '*' aliases.get_target alias
'''
aliases = list_aliases()
if alias in aliases:
return aliases[alias]
return ''
def has_target(alias, target):
'''
Return true if the alias/target is set
CLI Example:
.. code-block:: bash
salt '*' aliases.has_target alias target
'''
if target == '':
raise SaltInvocationError('target can not be an empty string')
aliases = list_aliases()
if alias not in aliases:
return False
if isinstance(target, list):
target = ', '.join(target)
return target == aliases[alias]
def rm_alias(alias):
'''
Remove an entry from the aliases file
CLI Example:
.. code-block:: bash
salt '*' aliases.rm_alias alias
'''
if not get_target(alias):
return True
lines = __parse_aliases()
out = []
for (line_alias, line_target, line_comment) in lines:
if line_alias != alias:
out.append((line_alias, line_target, line_comment))
__write_aliases_file(out)
return True
|
saltstack/salt
|
salt/modules/aliases.py
|
rm_alias
|
python
|
def rm_alias(alias):
'''
Remove an entry from the aliases file
CLI Example:
.. code-block:: bash
salt '*' aliases.rm_alias alias
'''
if not get_target(alias):
return True
lines = __parse_aliases()
out = []
for (line_alias, line_target, line_comment) in lines:
if line_alias != alias:
out.append((line_alias, line_target, line_comment))
__write_aliases_file(out)
return True
|
Remove an entry from the aliases file
CLI Example:
.. code-block:: bash
salt '*' aliases.rm_alias alias
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aliases.py#L200-L220
|
[
"def get_target(alias):\n '''\n Return the target associated with an alias\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' aliases.get_target alias\n '''\n aliases = list_aliases()\n if alias in aliases:\n return aliases[alias]\n return ''\n",
"def __parse_aliases():\n '''\n Parse the aliases file, and return a list of line components:\n\n [\n (alias1, target1, comment1),\n (alias2, target2, comment2),\n ]\n '''\n afn = __get_aliases_filename()\n ret = []\n if not os.path.isfile(afn):\n return ret\n with salt.utils.files.fopen(afn, 'r') as ifile:\n for line in ifile:\n line = salt.utils.stringutils.to_unicode(line)\n match = __ALIAS_RE.match(line)\n if match:\n ret.append(match.groups())\n else:\n ret.append((None, None, line.strip()))\n return ret\n",
"def __write_aliases_file(lines):\n '''\n Write a new copy of the aliases file. Lines is a list of lines\n as returned by __parse_aliases.\n '''\n afn = __get_aliases_filename()\n adir = os.path.dirname(afn)\n\n out = tempfile.NamedTemporaryFile(dir=adir, delete=False)\n\n if not __opts__.get('integration.test', False):\n if os.path.isfile(afn):\n afn_st = os.stat(afn)\n os.chmod(out.name, stat.S_IMODE(afn_st.st_mode))\n os.chown(out.name, afn_st.st_uid, afn_st.st_gid)\n else:\n os.chmod(out.name, 0o644)\n os.chown(out.name, 0, 0)\n\n for (line_alias, line_target, line_comment) in lines:\n if isinstance(line_target, list):\n line_target = ', '.join(line_target)\n if not line_comment:\n line_comment = ''\n if line_alias and line_target:\n write_line = '{0}: {1}{2}\\n'.format(\n line_alias, line_target, line_comment\n )\n else:\n write_line = '{0}\\n'.format(line_comment)\n if six.PY3:\n write_line = write_line.encode(__salt_system_encoding__)\n out.write(write_line)\n\n out.close()\n os.rename(out.name, afn)\n\n # Search $PATH for the newalises command\n newaliases = salt.utils.path.which('newaliases')\n if newaliases is not None:\n __salt__['cmd.run'](newaliases)\n\n return True\n"
] |
# -*- coding: utf-8 -*-
'''
Manage the information in the aliases file
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os
import re
import stat
import tempfile
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError
# Import third party libs
from salt.ext import six
__outputter__ = {
'rm_alias': 'txt',
'has_target': 'txt',
'get_target': 'txt',
'set_target': 'txt',
'list_aliases': 'yaml',
}
__ALIAS_RE = re.compile(r'([^:#]*)\s*:?\s*([^#]*?)(\s+#.*|$)')
def __get_aliases_filename():
'''
Return the path to the appropriate aliases file
'''
return os.path.realpath(__salt__['config.option']('aliases.file'))
def __parse_aliases():
'''
Parse the aliases file, and return a list of line components:
[
(alias1, target1, comment1),
(alias2, target2, comment2),
]
'''
afn = __get_aliases_filename()
ret = []
if not os.path.isfile(afn):
return ret
with salt.utils.files.fopen(afn, 'r') as ifile:
for line in ifile:
line = salt.utils.stringutils.to_unicode(line)
match = __ALIAS_RE.match(line)
if match:
ret.append(match.groups())
else:
ret.append((None, None, line.strip()))
return ret
def __write_aliases_file(lines):
'''
Write a new copy of the aliases file. Lines is a list of lines
as returned by __parse_aliases.
'''
afn = __get_aliases_filename()
adir = os.path.dirname(afn)
out = tempfile.NamedTemporaryFile(dir=adir, delete=False)
if not __opts__.get('integration.test', False):
if os.path.isfile(afn):
afn_st = os.stat(afn)
os.chmod(out.name, stat.S_IMODE(afn_st.st_mode))
os.chown(out.name, afn_st.st_uid, afn_st.st_gid)
else:
os.chmod(out.name, 0o644)
os.chown(out.name, 0, 0)
for (line_alias, line_target, line_comment) in lines:
if isinstance(line_target, list):
line_target = ', '.join(line_target)
if not line_comment:
line_comment = ''
if line_alias and line_target:
write_line = '{0}: {1}{2}\n'.format(
line_alias, line_target, line_comment
)
else:
write_line = '{0}\n'.format(line_comment)
if six.PY3:
write_line = write_line.encode(__salt_system_encoding__)
out.write(write_line)
out.close()
os.rename(out.name, afn)
# Search $PATH for the newalises command
newaliases = salt.utils.path.which('newaliases')
if newaliases is not None:
__salt__['cmd.run'](newaliases)
return True
def list_aliases():
'''
Return the aliases found in the aliases file in this format::
{'alias': 'target'}
CLI Example:
.. code-block:: bash
salt '*' aliases.list_aliases
'''
ret = dict((alias, target) for alias, target, comment in __parse_aliases() if alias)
return ret
def get_target(alias):
'''
Return the target associated with an alias
CLI Example:
.. code-block:: bash
salt '*' aliases.get_target alias
'''
aliases = list_aliases()
if alias in aliases:
return aliases[alias]
return ''
def has_target(alias, target):
'''
Return true if the alias/target is set
CLI Example:
.. code-block:: bash
salt '*' aliases.has_target alias target
'''
if target == '':
raise SaltInvocationError('target can not be an empty string')
aliases = list_aliases()
if alias not in aliases:
return False
if isinstance(target, list):
target = ', '.join(target)
return target == aliases[alias]
def set_target(alias, target):
'''
Set the entry in the aliases file for the given alias, this will overwrite
any previous entry for the given alias or create a new one if it does not
exist.
CLI Example:
.. code-block:: bash
salt '*' aliases.set_target alias target
'''
if alias == '':
raise SaltInvocationError('alias can not be an empty string')
if target == '':
raise SaltInvocationError('target can not be an empty string')
if get_target(alias) == target:
return True
lines = __parse_aliases()
out = []
ovr = False
for (line_alias, line_target, line_comment) in lines:
if line_alias == alias:
if not ovr:
out.append((alias, target, line_comment))
ovr = True
else:
out.append((line_alias, line_target, line_comment))
if not ovr:
out.append((alias, target, ''))
__write_aliases_file(out)
return True
|
saltstack/salt
|
salt/thorium/__init__.py
|
ThorState.gather_cache
|
python
|
def gather_cache(self):
'''
Gather the specified data from the minion data cache
'''
cache = {'grains': {}, 'pillar': {}}
if self.grains or self.pillar:
if self.opts.get('minion_data_cache'):
minions = self.cache.list('minions')
if not minions:
return cache
for minion in minions:
total = self.cache.fetch('minions/{0}'.format(minion), 'data')
if 'pillar' in total:
if self.pillar_keys:
for key in self.pillar_keys:
if key in total['pillar']:
cache['pillar'][minion][key] = total['pillar'][key]
else:
cache['pillar'][minion] = total['pillar']
else:
cache['pillar'][minion] = {}
if 'grains' in total:
if self.grain_keys:
for key in self.grain_keys:
if key in total['grains']:
cache['grains'][minion][key] = total['grains'][key]
else:
cache['grains'][minion] = total['grains']
else:
cache['grains'][minion] = {}
return cache
|
Gather the specified data from the minion data cache
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/thorium/__init__.py#L70-L102
| null |
class ThorState(salt.state.HighState):
'''
Compile the thorium state and manage it in the thorium runtime
'''
def __init__(
self,
opts,
grains=False,
grain_keys=None,
pillar=False,
pillar_keys=None):
self.grains = grains
self.grain_keys = grain_keys
self.pillar = pillar
self.pillar_keys = pillar_keys
opts['file_roots'] = opts['thorium_roots']
opts['saltenv'] = opts['thoriumenv']
opts['state_top'] = opts['thorium_top']
opts['file_client'] = 'local'
self.opts = opts
if opts.get('minion_data_cache'):
self.cache = salt.cache.factory(opts)
salt.state.HighState.__init__(self, self.opts, loader='thorium')
self.returners = salt.loader.returners(self.opts, {})
self.reg_ret = self.opts.get('register_returner', None)
regdata = {}
if self.reg_ret is not None:
try:
regdata = self.returners['{0}.load_reg'.format(self.reg_ret)]()
except Exception as exc:
log.error(exc)
self.state.inject_globals = {'__reg__': regdata}
self.event = salt.utils.event.get_master_event(
self.opts,
self.opts['sock_dir'])
def start_runtime(self):
'''
Start the system!
'''
while True:
try:
self.call_runtime()
except Exception:
log.error('Exception in Thorium: ', exc_info=True)
time.sleep(self.opts['thorium_interval'])
def get_chunks(self, exclude=None, whitelist=None):
'''
Compile the top file and return the lowstate for the thorium runtime
to iterate over
'''
ret = {}
err = []
try:
top = self.get_top()
except SaltRenderError as err:
return ret
except Exception:
trb = traceback.format_exc()
err.append(trb)
return err
err += self.verify_tops(top)
matches = self.top_matches(top)
if not matches:
msg = 'No Top file found!'
raise SaltRenderError(msg)
matches = self.matches_whitelist(matches, whitelist)
high, errors = self.render_highstate(matches)
if exclude:
if isinstance(exclude, six.string_types):
exclude = exclude.split(',')
if '__exclude__' in high:
high['__exclude__'].extend(exclude)
else:
high['__exclude__'] = exclude
err += errors
high, ext_errors = self.state.reconcile_extend(high)
err += ext_errors
err += self.state.verify_high(high)
if err:
raise SaltRenderError(err)
return self.state.compile_high_data(high)
def get_events(self):
'''
iterate over the available events and return a list of events
'''
ret = []
while True:
event = self.event.get_event(wait=1, full=True)
if event is None:
return ret
ret.append(event)
def call_runtime(self):
'''
Execute the runtime
'''
cache = self.gather_cache()
chunks = self.get_chunks()
interval = self.opts['thorium_interval']
recompile = self.opts.get('thorium_recompile', 300)
r_start = time.time()
while True:
events = self.get_events()
if not events:
time.sleep(interval)
continue
start = time.time()
self.state.inject_globals['__events__'] = events
self.state.call_chunks(chunks)
elapsed = time.time() - start
left = interval - elapsed
if left > 0:
time.sleep(left)
self.state.reset_run_num()
if (start - r_start) > recompile:
cache = self.gather_cache()
chunks = self.get_chunks()
if self.reg_ret is not None:
self.returners['{0}.save_reg'.format(self.reg_ret)](chunks)
r_start = time.time()
|
saltstack/salt
|
salt/thorium/__init__.py
|
ThorState.start_runtime
|
python
|
def start_runtime(self):
'''
Start the system!
'''
while True:
try:
self.call_runtime()
except Exception:
log.error('Exception in Thorium: ', exc_info=True)
time.sleep(self.opts['thorium_interval'])
|
Start the system!
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/thorium/__init__.py#L104-L113
|
[
"def call_runtime(self):\n '''\n Execute the runtime\n '''\n cache = self.gather_cache()\n chunks = self.get_chunks()\n interval = self.opts['thorium_interval']\n recompile = self.opts.get('thorium_recompile', 300)\n r_start = time.time()\n while True:\n events = self.get_events()\n if not events:\n time.sleep(interval)\n continue\n start = time.time()\n self.state.inject_globals['__events__'] = events\n self.state.call_chunks(chunks)\n elapsed = time.time() - start\n left = interval - elapsed\n if left > 0:\n time.sleep(left)\n self.state.reset_run_num()\n if (start - r_start) > recompile:\n cache = self.gather_cache()\n chunks = self.get_chunks()\n if self.reg_ret is not None:\n self.returners['{0}.save_reg'.format(self.reg_ret)](chunks)\n r_start = time.time()\n"
] |
class ThorState(salt.state.HighState):
'''
Compile the thorium state and manage it in the thorium runtime
'''
def __init__(
self,
opts,
grains=False,
grain_keys=None,
pillar=False,
pillar_keys=None):
self.grains = grains
self.grain_keys = grain_keys
self.pillar = pillar
self.pillar_keys = pillar_keys
opts['file_roots'] = opts['thorium_roots']
opts['saltenv'] = opts['thoriumenv']
opts['state_top'] = opts['thorium_top']
opts['file_client'] = 'local'
self.opts = opts
if opts.get('minion_data_cache'):
self.cache = salt.cache.factory(opts)
salt.state.HighState.__init__(self, self.opts, loader='thorium')
self.returners = salt.loader.returners(self.opts, {})
self.reg_ret = self.opts.get('register_returner', None)
regdata = {}
if self.reg_ret is not None:
try:
regdata = self.returners['{0}.load_reg'.format(self.reg_ret)]()
except Exception as exc:
log.error(exc)
self.state.inject_globals = {'__reg__': regdata}
self.event = salt.utils.event.get_master_event(
self.opts,
self.opts['sock_dir'])
def gather_cache(self):
'''
Gather the specified data from the minion data cache
'''
cache = {'grains': {}, 'pillar': {}}
if self.grains or self.pillar:
if self.opts.get('minion_data_cache'):
minions = self.cache.list('minions')
if not minions:
return cache
for minion in minions:
total = self.cache.fetch('minions/{0}'.format(minion), 'data')
if 'pillar' in total:
if self.pillar_keys:
for key in self.pillar_keys:
if key in total['pillar']:
cache['pillar'][minion][key] = total['pillar'][key]
else:
cache['pillar'][minion] = total['pillar']
else:
cache['pillar'][minion] = {}
if 'grains' in total:
if self.grain_keys:
for key in self.grain_keys:
if key in total['grains']:
cache['grains'][minion][key] = total['grains'][key]
else:
cache['grains'][minion] = total['grains']
else:
cache['grains'][minion] = {}
return cache
def get_chunks(self, exclude=None, whitelist=None):
'''
Compile the top file and return the lowstate for the thorium runtime
to iterate over
'''
ret = {}
err = []
try:
top = self.get_top()
except SaltRenderError as err:
return ret
except Exception:
trb = traceback.format_exc()
err.append(trb)
return err
err += self.verify_tops(top)
matches = self.top_matches(top)
if not matches:
msg = 'No Top file found!'
raise SaltRenderError(msg)
matches = self.matches_whitelist(matches, whitelist)
high, errors = self.render_highstate(matches)
if exclude:
if isinstance(exclude, six.string_types):
exclude = exclude.split(',')
if '__exclude__' in high:
high['__exclude__'].extend(exclude)
else:
high['__exclude__'] = exclude
err += errors
high, ext_errors = self.state.reconcile_extend(high)
err += ext_errors
err += self.state.verify_high(high)
if err:
raise SaltRenderError(err)
return self.state.compile_high_data(high)
def get_events(self):
'''
iterate over the available events and return a list of events
'''
ret = []
while True:
event = self.event.get_event(wait=1, full=True)
if event is None:
return ret
ret.append(event)
def call_runtime(self):
'''
Execute the runtime
'''
cache = self.gather_cache()
chunks = self.get_chunks()
interval = self.opts['thorium_interval']
recompile = self.opts.get('thorium_recompile', 300)
r_start = time.time()
while True:
events = self.get_events()
if not events:
time.sleep(interval)
continue
start = time.time()
self.state.inject_globals['__events__'] = events
self.state.call_chunks(chunks)
elapsed = time.time() - start
left = interval - elapsed
if left > 0:
time.sleep(left)
self.state.reset_run_num()
if (start - r_start) > recompile:
cache = self.gather_cache()
chunks = self.get_chunks()
if self.reg_ret is not None:
self.returners['{0}.save_reg'.format(self.reg_ret)](chunks)
r_start = time.time()
|
saltstack/salt
|
salt/thorium/__init__.py
|
ThorState.get_chunks
|
python
|
def get_chunks(self, exclude=None, whitelist=None):
'''
Compile the top file and return the lowstate for the thorium runtime
to iterate over
'''
ret = {}
err = []
try:
top = self.get_top()
except SaltRenderError as err:
return ret
except Exception:
trb = traceback.format_exc()
err.append(trb)
return err
err += self.verify_tops(top)
matches = self.top_matches(top)
if not matches:
msg = 'No Top file found!'
raise SaltRenderError(msg)
matches = self.matches_whitelist(matches, whitelist)
high, errors = self.render_highstate(matches)
if exclude:
if isinstance(exclude, six.string_types):
exclude = exclude.split(',')
if '__exclude__' in high:
high['__exclude__'].extend(exclude)
else:
high['__exclude__'] = exclude
err += errors
high, ext_errors = self.state.reconcile_extend(high)
err += ext_errors
err += self.state.verify_high(high)
if err:
raise SaltRenderError(err)
return self.state.compile_high_data(high)
|
Compile the top file and return the lowstate for the thorium runtime
to iterate over
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/thorium/__init__.py#L115-L150
|
[
"def verify_tops(self, tops):\n '''\n Verify the contents of the top file data\n '''\n errors = []\n if not isinstance(tops, dict):\n errors.append('Top data was not formed as a dict')\n # No further checks will work, bail out\n return errors\n for saltenv, matches in six.iteritems(tops):\n if saltenv == 'include':\n continue\n if not isinstance(saltenv, six.string_types):\n errors.append(\n 'Environment {0} in top file is not formed as a '\n 'string'.format(saltenv)\n )\n if saltenv == '':\n errors.append('Empty saltenv statement in top file')\n if not isinstance(matches, dict):\n errors.append(\n 'The top file matches for saltenv {0} are not '\n 'formatted as a dict'.format(saltenv)\n )\n for slsmods in six.itervalues(matches):\n if not isinstance(slsmods, list):\n errors.append('Malformed topfile (state declarations not '\n 'formed as a list)')\n continue\n for slsmod in slsmods:\n if isinstance(slsmod, dict):\n # This value is a match option\n for val in six.itervalues(slsmod):\n if not val:\n errors.append(\n 'Improperly formatted top file matcher '\n 'in saltenv {0}: {1} file'.format(\n slsmod,\n val\n )\n )\n elif isinstance(slsmod, six.string_types):\n # This is a sls module\n if not slsmod:\n errors.append(\n 'Environment {0} contains an empty sls '\n 'index'.format(saltenv)\n )\n\n return errors\n",
"def get_top(self):\n '''\n Returns the high data derived from the top file\n '''\n try:\n tops = self.get_tops()\n except SaltRenderError as err:\n log.error('Unable to render top file: %s', err.error)\n return {}\n return self.merge_tops(tops)\n",
"def top_matches(self, top):\n '''\n Search through the top high data for matches and return the states\n that this minion needs to execute.\n\n Returns:\n {'saltenv': ['state1', 'state2', ...]}\n '''\n matches = DefaultOrderedDict(OrderedDict)\n # pylint: disable=cell-var-from-loop\n for saltenv, body in six.iteritems(top):\n if self.opts['saltenv']:\n if saltenv != self.opts['saltenv']:\n continue\n for match, data in six.iteritems(body):\n def _filter_matches(_match, _data, _opts):\n if isinstance(_data, six.string_types):\n _data = [_data]\n if self.matchers['confirm_top.confirm_top'](\n _match,\n _data,\n _opts\n ):\n if saltenv not in matches:\n matches[saltenv] = []\n for item in _data:\n if 'subfilter' in item:\n _tmpdata = item.pop('subfilter')\n for match, data in six.iteritems(_tmpdata):\n _filter_matches(match, data, _opts)\n if isinstance(item, six.string_types):\n matches[saltenv].append(item)\n elif isinstance(item, dict):\n env_key, inc_sls = item.popitem()\n if env_key not in self.avail:\n continue\n if env_key not in matches:\n matches[env_key] = []\n matches[env_key].append(inc_sls)\n _filter_matches(match, data, self.opts['nodegroups'])\n ext_matches = self._master_tops()\n for saltenv in ext_matches:\n top_file_matches = matches.get(saltenv, [])\n if self.opts.get('master_tops_first'):\n first = ext_matches[saltenv]\n second = top_file_matches\n else:\n first = top_file_matches\n second = ext_matches[saltenv]\n matches[saltenv] = first + [x for x in second if x not in first]\n\n # pylint: enable=cell-var-from-loop\n return matches\n",
"def render_highstate(self, matches):\n '''\n Gather the state files and render them into a single unified salt\n high data structure.\n '''\n highstate = self.building_highstate\n all_errors = []\n mods = set()\n statefiles = []\n for saltenv, states in six.iteritems(matches):\n for sls_match in states:\n if saltenv in self.avail:\n statefiles = fnmatch.filter(self.avail[saltenv], sls_match)\n elif '__env__' in self.avail:\n statefiles = fnmatch.filter(self.avail['__env__'], sls_match)\n else:\n all_errors.append(\n 'No matching salt environment for environment '\n '\\'{0}\\' found'.format(saltenv)\n )\n # if we did not found any sls in the fileserver listing, this\n # may be because the sls was generated or added later, we can\n # try to directly execute it, and if it fails, anyway it will\n # return the former error\n if not statefiles:\n statefiles = [sls_match]\n\n for sls in statefiles:\n r_env = '{0}:{1}'.format(saltenv, sls)\n if r_env in mods:\n continue\n state, errors = self.render_state(\n sls, saltenv, mods, matches)\n if state:\n self.merge_included_states(highstate, state, errors)\n for i, error in enumerate(errors[:]):\n if 'is not available' in error:\n # match SLS foobar in environment\n this_sls = 'SLS {0} in saltenv'.format(\n sls_match)\n if this_sls in error:\n errors[i] = (\n 'No matching sls found for \\'{0}\\' '\n 'in env \\'{1}\\''.format(sls_match, saltenv))\n all_errors.extend(errors)\n\n self.clean_duplicate_extends(highstate)\n return highstate, all_errors\n",
"def matches_whitelist(self, matches, whitelist):\n '''\n Reads over the matches and returns a matches dict with just the ones\n that are in the whitelist\n '''\n if not whitelist:\n return matches\n ret_matches = {}\n if not isinstance(whitelist, list):\n whitelist = whitelist.split(',')\n for env in matches:\n for sls in matches[env]:\n if sls in whitelist:\n ret_matches[env] = ret_matches[env] if env in ret_matches else []\n ret_matches[env].append(sls)\n return ret_matches\n"
] |
class ThorState(salt.state.HighState):
'''
Compile the thorium state and manage it in the thorium runtime
'''
def __init__(
self,
opts,
grains=False,
grain_keys=None,
pillar=False,
pillar_keys=None):
self.grains = grains
self.grain_keys = grain_keys
self.pillar = pillar
self.pillar_keys = pillar_keys
opts['file_roots'] = opts['thorium_roots']
opts['saltenv'] = opts['thoriumenv']
opts['state_top'] = opts['thorium_top']
opts['file_client'] = 'local'
self.opts = opts
if opts.get('minion_data_cache'):
self.cache = salt.cache.factory(opts)
salt.state.HighState.__init__(self, self.opts, loader='thorium')
self.returners = salt.loader.returners(self.opts, {})
self.reg_ret = self.opts.get('register_returner', None)
regdata = {}
if self.reg_ret is not None:
try:
regdata = self.returners['{0}.load_reg'.format(self.reg_ret)]()
except Exception as exc:
log.error(exc)
self.state.inject_globals = {'__reg__': regdata}
self.event = salt.utils.event.get_master_event(
self.opts,
self.opts['sock_dir'])
def gather_cache(self):
'''
Gather the specified data from the minion data cache
'''
cache = {'grains': {}, 'pillar': {}}
if self.grains or self.pillar:
if self.opts.get('minion_data_cache'):
minions = self.cache.list('minions')
if not minions:
return cache
for minion in minions:
total = self.cache.fetch('minions/{0}'.format(minion), 'data')
if 'pillar' in total:
if self.pillar_keys:
for key in self.pillar_keys:
if key in total['pillar']:
cache['pillar'][minion][key] = total['pillar'][key]
else:
cache['pillar'][minion] = total['pillar']
else:
cache['pillar'][minion] = {}
if 'grains' in total:
if self.grain_keys:
for key in self.grain_keys:
if key in total['grains']:
cache['grains'][minion][key] = total['grains'][key]
else:
cache['grains'][minion] = total['grains']
else:
cache['grains'][minion] = {}
return cache
def start_runtime(self):
'''
Start the system!
'''
while True:
try:
self.call_runtime()
except Exception:
log.error('Exception in Thorium: ', exc_info=True)
time.sleep(self.opts['thorium_interval'])
def get_events(self):
'''
iterate over the available events and return a list of events
'''
ret = []
while True:
event = self.event.get_event(wait=1, full=True)
if event is None:
return ret
ret.append(event)
def call_runtime(self):
'''
Execute the runtime
'''
cache = self.gather_cache()
chunks = self.get_chunks()
interval = self.opts['thorium_interval']
recompile = self.opts.get('thorium_recompile', 300)
r_start = time.time()
while True:
events = self.get_events()
if not events:
time.sleep(interval)
continue
start = time.time()
self.state.inject_globals['__events__'] = events
self.state.call_chunks(chunks)
elapsed = time.time() - start
left = interval - elapsed
if left > 0:
time.sleep(left)
self.state.reset_run_num()
if (start - r_start) > recompile:
cache = self.gather_cache()
chunks = self.get_chunks()
if self.reg_ret is not None:
self.returners['{0}.save_reg'.format(self.reg_ret)](chunks)
r_start = time.time()
|
saltstack/salt
|
salt/thorium/__init__.py
|
ThorState.get_events
|
python
|
def get_events(self):
'''
iterate over the available events and return a list of events
'''
ret = []
while True:
event = self.event.get_event(wait=1, full=True)
if event is None:
return ret
ret.append(event)
|
iterate over the available events and return a list of events
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/thorium/__init__.py#L152-L161
| null |
class ThorState(salt.state.HighState):
'''
Compile the thorium state and manage it in the thorium runtime
'''
def __init__(
self,
opts,
grains=False,
grain_keys=None,
pillar=False,
pillar_keys=None):
self.grains = grains
self.grain_keys = grain_keys
self.pillar = pillar
self.pillar_keys = pillar_keys
opts['file_roots'] = opts['thorium_roots']
opts['saltenv'] = opts['thoriumenv']
opts['state_top'] = opts['thorium_top']
opts['file_client'] = 'local'
self.opts = opts
if opts.get('minion_data_cache'):
self.cache = salt.cache.factory(opts)
salt.state.HighState.__init__(self, self.opts, loader='thorium')
self.returners = salt.loader.returners(self.opts, {})
self.reg_ret = self.opts.get('register_returner', None)
regdata = {}
if self.reg_ret is not None:
try:
regdata = self.returners['{0}.load_reg'.format(self.reg_ret)]()
except Exception as exc:
log.error(exc)
self.state.inject_globals = {'__reg__': regdata}
self.event = salt.utils.event.get_master_event(
self.opts,
self.opts['sock_dir'])
def gather_cache(self):
'''
Gather the specified data from the minion data cache
'''
cache = {'grains': {}, 'pillar': {}}
if self.grains or self.pillar:
if self.opts.get('minion_data_cache'):
minions = self.cache.list('minions')
if not minions:
return cache
for minion in minions:
total = self.cache.fetch('minions/{0}'.format(minion), 'data')
if 'pillar' in total:
if self.pillar_keys:
for key in self.pillar_keys:
if key in total['pillar']:
cache['pillar'][minion][key] = total['pillar'][key]
else:
cache['pillar'][minion] = total['pillar']
else:
cache['pillar'][minion] = {}
if 'grains' in total:
if self.grain_keys:
for key in self.grain_keys:
if key in total['grains']:
cache['grains'][minion][key] = total['grains'][key]
else:
cache['grains'][minion] = total['grains']
else:
cache['grains'][minion] = {}
return cache
def start_runtime(self):
'''
Start the system!
'''
while True:
try:
self.call_runtime()
except Exception:
log.error('Exception in Thorium: ', exc_info=True)
time.sleep(self.opts['thorium_interval'])
def get_chunks(self, exclude=None, whitelist=None):
'''
Compile the top file and return the lowstate for the thorium runtime
to iterate over
'''
ret = {}
err = []
try:
top = self.get_top()
except SaltRenderError as err:
return ret
except Exception:
trb = traceback.format_exc()
err.append(trb)
return err
err += self.verify_tops(top)
matches = self.top_matches(top)
if not matches:
msg = 'No Top file found!'
raise SaltRenderError(msg)
matches = self.matches_whitelist(matches, whitelist)
high, errors = self.render_highstate(matches)
if exclude:
if isinstance(exclude, six.string_types):
exclude = exclude.split(',')
if '__exclude__' in high:
high['__exclude__'].extend(exclude)
else:
high['__exclude__'] = exclude
err += errors
high, ext_errors = self.state.reconcile_extend(high)
err += ext_errors
err += self.state.verify_high(high)
if err:
raise SaltRenderError(err)
return self.state.compile_high_data(high)
def call_runtime(self):
'''
Execute the runtime
'''
cache = self.gather_cache()
chunks = self.get_chunks()
interval = self.opts['thorium_interval']
recompile = self.opts.get('thorium_recompile', 300)
r_start = time.time()
while True:
events = self.get_events()
if not events:
time.sleep(interval)
continue
start = time.time()
self.state.inject_globals['__events__'] = events
self.state.call_chunks(chunks)
elapsed = time.time() - start
left = interval - elapsed
if left > 0:
time.sleep(left)
self.state.reset_run_num()
if (start - r_start) > recompile:
cache = self.gather_cache()
chunks = self.get_chunks()
if self.reg_ret is not None:
self.returners['{0}.save_reg'.format(self.reg_ret)](chunks)
r_start = time.time()
|
saltstack/salt
|
salt/thorium/__init__.py
|
ThorState.call_runtime
|
python
|
def call_runtime(self):
'''
Execute the runtime
'''
cache = self.gather_cache()
chunks = self.get_chunks()
interval = self.opts['thorium_interval']
recompile = self.opts.get('thorium_recompile', 300)
r_start = time.time()
while True:
events = self.get_events()
if not events:
time.sleep(interval)
continue
start = time.time()
self.state.inject_globals['__events__'] = events
self.state.call_chunks(chunks)
elapsed = time.time() - start
left = interval - elapsed
if left > 0:
time.sleep(left)
self.state.reset_run_num()
if (start - r_start) > recompile:
cache = self.gather_cache()
chunks = self.get_chunks()
if self.reg_ret is not None:
self.returners['{0}.save_reg'.format(self.reg_ret)](chunks)
r_start = time.time()
|
Execute the runtime
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/thorium/__init__.py#L163-L190
|
[
"def gather_cache(self):\n '''\n Gather the specified data from the minion data cache\n '''\n cache = {'grains': {}, 'pillar': {}}\n if self.grains or self.pillar:\n if self.opts.get('minion_data_cache'):\n minions = self.cache.list('minions')\n if not minions:\n return cache\n for minion in minions:\n total = self.cache.fetch('minions/{0}'.format(minion), 'data')\n\n if 'pillar' in total:\n if self.pillar_keys:\n for key in self.pillar_keys:\n if key in total['pillar']:\n cache['pillar'][minion][key] = total['pillar'][key]\n else:\n cache['pillar'][minion] = total['pillar']\n else:\n cache['pillar'][minion] = {}\n\n if 'grains' in total:\n if self.grain_keys:\n for key in self.grain_keys:\n if key in total['grains']:\n cache['grains'][minion][key] = total['grains'][key]\n else:\n cache['grains'][minion] = total['grains']\n else:\n cache['grains'][minion] = {}\n return cache\n",
"def get_chunks(self, exclude=None, whitelist=None):\n '''\n Compile the top file and return the lowstate for the thorium runtime\n to iterate over\n '''\n ret = {}\n err = []\n try:\n top = self.get_top()\n except SaltRenderError as err:\n return ret\n except Exception:\n trb = traceback.format_exc()\n err.append(trb)\n return err\n err += self.verify_tops(top)\n matches = self.top_matches(top)\n if not matches:\n msg = 'No Top file found!'\n raise SaltRenderError(msg)\n matches = self.matches_whitelist(matches, whitelist)\n high, errors = self.render_highstate(matches)\n if exclude:\n if isinstance(exclude, six.string_types):\n exclude = exclude.split(',')\n if '__exclude__' in high:\n high['__exclude__'].extend(exclude)\n else:\n high['__exclude__'] = exclude\n err += errors\n high, ext_errors = self.state.reconcile_extend(high)\n err += ext_errors\n err += self.state.verify_high(high)\n if err:\n raise SaltRenderError(err)\n return self.state.compile_high_data(high)\n",
"def get_events(self):\n '''\n iterate over the available events and return a list of events\n '''\n ret = []\n while True:\n event = self.event.get_event(wait=1, full=True)\n if event is None:\n return ret\n ret.append(event)\n"
] |
class ThorState(salt.state.HighState):
    '''
    Compile the thorium state and manage it in the thorium runtime
    '''
    def __init__(
            self,
            opts,
            grains=False,
            grain_keys=None,
            pillar=False,
            pillar_keys=None):
        '''
        :param opts: master opts dict; mutated so thorium uses its own
            file roots, saltenv and top file via the local file client.
        :param grains: if truthy, gather_cache collects cached grains.
        :param grain_keys: optional list limiting which grain keys to keep.
        :param pillar: if truthy, gather_cache collects cached pillar.
        :param pillar_keys: optional list limiting which pillar keys to keep.
        '''
        self.grains = grains
        self.grain_keys = grain_keys
        self.pillar = pillar
        self.pillar_keys = pillar_keys
        # Point the state system at the thorium-specific roots/top/env.
        opts['file_roots'] = opts['thorium_roots']
        opts['saltenv'] = opts['thoriumenv']
        opts['state_top'] = opts['thorium_top']
        opts['file_client'] = 'local'
        self.opts = opts
        if opts.get('minion_data_cache'):
            self.cache = salt.cache.factory(opts)
        salt.state.HighState.__init__(self, self.opts, loader='thorium')
        self.returners = salt.loader.returners(self.opts, {})
        self.reg_ret = self.opts.get('register_returner', None)
        regdata = {}
        if self.reg_ret is not None:
            # Best-effort load of the persisted register; a broken returner
            # must not prevent the runtime from starting.
            try:
                regdata = self.returners['{0}.load_reg'.format(self.reg_ret)]()
            except Exception as exc:
                log.error(exc)
        self.state.inject_globals = {'__reg__': regdata}
        self.event = salt.utils.event.get_master_event(
            self.opts,
            self.opts['sock_dir'])

    def gather_cache(self):
        '''
        Gather the specified data from the minion data cache

        Returns a dict of the form
        ``{'grains': {minion: {...}}, 'pillar': {minion: {...}}}``; both
        maps are empty unless grains/pillar collection was requested and
        the minion data cache is enabled.
        '''
        cache = {'grains': {}, 'pillar': {}}
        if self.grains or self.pillar:
            if self.opts.get('minion_data_cache'):
                minions = self.cache.list('minions')
                if not minions:
                    return cache
                for minion in minions:
                    total = self.cache.fetch('minions/{0}'.format(minion), 'data')
                    if 'pillar' in total:
                        if self.pillar_keys:
                            # Initialize the per-minion dict before filling
                            # in selected keys; indexing an absent minion
                            # entry directly raised KeyError here.
                            cache['pillar'][minion] = {}
                            for key in self.pillar_keys:
                                if key in total['pillar']:
                                    cache['pillar'][minion][key] = total['pillar'][key]
                        else:
                            cache['pillar'][minion] = total['pillar']
                    else:
                        cache['pillar'][minion] = {}
                    if 'grains' in total:
                        if self.grain_keys:
                            # Same fix as above for filtered grain keys.
                            cache['grains'][minion] = {}
                            for key in self.grain_keys:
                                if key in total['grains']:
                                    cache['grains'][minion][key] = total['grains'][key]
                        else:
                            cache['grains'][minion] = total['grains']
                    else:
                        cache['grains'][minion] = {}
        return cache

    def start_runtime(self):
        '''
        Start the system!

        Loops forever, invoking call_runtime and sleeping
        ``thorium_interval`` seconds between iterations; exceptions are
        logged and the loop continues.
        '''
        while True:
            try:
                self.call_runtime()
            except Exception:
                log.error('Exception in Thorium: ', exc_info=True)
            time.sleep(self.opts['thorium_interval'])

    def get_chunks(self, exclude=None, whitelist=None):
        '''
        Compile the top file and return the lowstate for the thorium runtime
        to iterate over

        :param exclude: state ids to exclude; a comma-delimited string is
            split into a list.
        :param whitelist: restrict compilation to these matches.
        :raises SaltRenderError: if no top file matches or any
            verification/render errors accumulate.
        '''
        ret = {}
        err = []
        try:
            top = self.get_top()
        except SaltRenderError:
            # Do not bind the exception to a name: ``as err`` shadowed
            # (and on Python 3 deleted) the ``err`` error list above.
            return ret
        except Exception:
            trb = traceback.format_exc()
            err.append(trb)
            return err
        err += self.verify_tops(top)
        matches = self.top_matches(top)
        if not matches:
            msg = 'No Top file found!'
            raise SaltRenderError(msg)
        matches = self.matches_whitelist(matches, whitelist)
        high, errors = self.render_highstate(matches)
        if exclude:
            if isinstance(exclude, six.string_types):
                exclude = exclude.split(',')
            if '__exclude__' in high:
                high['__exclude__'].extend(exclude)
            else:
                high['__exclude__'] = exclude
        err += errors
        high, ext_errors = self.state.reconcile_extend(high)
        err += ext_errors
        err += self.state.verify_high(high)
        if err:
            raise SaltRenderError(err)
        return self.state.compile_high_data(high)

    def get_events(self):
        '''
        iterate over the available events and return a list of events

        Drains the master event bus (1 second wait per poll) until no
        further events are pending.
        '''
        ret = []
        while True:
            event = self.event.get_event(wait=1, full=True)
            if event is None:
                return ret
            ret.append(event)
|
saltstack/salt
|
salt/modules/container_resource.py
|
_validate
|
python
|
def _validate(wrapped):
    '''
    Decorator for common function argument validation
    '''
    @functools.wraps(wrapped)
    def wrapper(*args, **kwargs):
        # Map of container technology -> the exec drivers it supports.
        supported = {
            'docker': ('lxc-attach', 'nsenter', 'docker-exec'),
            'lxc': ('lxc-attach',),
            'nspawn': ('nsenter',),
        }
        container_type = kwargs.get('container_type')
        exec_driver = kwargs.get('exec_driver')
        if container_type not in supported:
            raise SaltInvocationError(
                'Invalid container type \'{0}\'. Valid types are: {1}'
                .format(container_type, ', '.join(sorted(supported)))
            )
        drivers = supported[container_type]
        if exec_driver not in drivers:
            raise SaltInvocationError(
                'Invalid command execution driver. Valid drivers are: {0}'
                .format(', '.join(drivers))
            )
        # lxc-attach is only usable when the binary is actually installed.
        if exec_driver == 'lxc-attach' \
                and not salt.utils.path.which('lxc-attach'):
            raise SaltInvocationError(
                'The \'lxc-attach\' execution driver has been chosen, but '
                'lxc-attach is not available. LXC may not be installed.'
            )
        return wrapped(*args, **salt.utils.args.clean_kwargs(**kwargs))
    return wrapper
|
Decorator for common function argument validation
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/container_resource.py#L35-L64
| null |
# -*- coding: utf-8 -*-
'''
Common resources for LXC and systemd-nspawn containers
.. versionadded:: 2015.8.0
These functions are not designed to be called directly, but instead from the
:mod:`lxc <salt.modules.lxc>`, :mod:`nspawn <salt.modules.nspawn>`, and
:mod:`docker <salt.modules.docker>` execution modules. They provide for
common logic to be re-used for common actions.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import copy
import logging
import os
import pipes
import time
import traceback
# Import salt libs
import salt.utils.args
import salt.utils.path
import salt.utils.vt
from salt.exceptions import CommandExecutionError, SaltInvocationError
log = logging.getLogger(__name__)
PATH = 'PATH=/bin:/usr/bin:/sbin:/usr/sbin:/opt/bin:' \
'/usr/local/bin:/usr/local/sbin'
def _nsenter(pid):
'''
Return the nsenter command to attach to the named container
'''
return (
'nsenter --target {0} --mount --uts --ipc --net --pid'
.format(pid)
)
def _get_md5(name, path, run_func):
'''
Get the MD5 checksum of a file from a container
'''
output = run_func(name,
'md5sum {0}'.format(pipes.quote(path)),
ignore_retcode=True)['stdout']
try:
return output.split()[0]
except IndexError:
# Destination file does not exist or could not be accessed
return None
def cache_file(source):
    '''
    Wrapper for cp.cache_file which raises an error if the file was unable to
    be cached.

    CLI Example:

    .. code-block:: bash

        salt myminion container_resource.cache_file salt://foo/bar/baz.txt
    '''
    try:
        # Non-string sources have no startswith() and are rejected below.
        is_salt_url = source.startswith('salt://')
    except AttributeError:
        raise SaltInvocationError('Invalid source file {0}'.format(source))
    if not is_salt_url:
        # Local paths (and e.g. Docker image refs) pass through untouched;
        # Docker has its own code to pull down images from the web.
        return source
    cached_source = __salt__['cp.cache_file'](source)
    if not cached_source:
        raise CommandExecutionError(
            'Unable to cache {0}'.format(source)
        )
    return cached_source
@_validate
def run(name,
        cmd,
        container_type=None,
        exec_driver=None,
        output=None,
        no_start=False,
        stdin=None,
        python_shell=True,
        output_loglevel='debug',
        ignore_retcode=False,
        path=None,
        use_vt=False,
        keep_env=None):
    '''
    Common logic for running shell commands in containers

    name
        container name

    cmd
        command to run inside the container

    output
        ``None`` returns stdout only (cmd.run); one of ``stdout``,
        ``stderr``, ``retcode`` or ``all`` returns a cmd.run_all dict

    keep_env
        ``True`` keeps the whole host environment; a list/tuple or
        comma-delimited string keeps only those variables; otherwise the
        environment is cleared (with a sane PATH restored)

    path
        path to the container parent (for LXC only)
        default: /var/lib/lxc (system default)

    CLI Example:

    .. code-block:: bash

        salt myminion container_resource.run mycontainer 'ps aux' container_type=docker exec_driver=nsenter output=stdout
    '''
    # Pick the execution function: a plain string result vs a full dict.
    valid_output = ('stdout', 'stderr', 'retcode', 'all')
    if output is None:
        cmd_func = 'cmd.run'
    elif output not in valid_output:
        raise SaltInvocationError(
            '\'output\' param must be one of the following: {0}'
            .format(', '.join(valid_output))
        )
    else:
        cmd_func = 'cmd.run_all'
    # Normalize keep_env into ``to_keep``, the list of env var names to
    # forward into the container (empty when clearing or keeping all).
    if keep_env is None or isinstance(keep_env, bool):
        to_keep = []
    elif not isinstance(keep_env, (list, tuple)):
        try:
            to_keep = keep_env.split(',')
        except AttributeError:
            log.warning('Invalid keep_env value, ignoring')
            to_keep = []
    else:
        to_keep = keep_env
    # Build the host-side command line for the chosen exec driver.
    if exec_driver == 'lxc-attach':
        full_cmd = 'lxc-attach '
        if path:
            full_cmd += '-P {0} '.format(pipes.quote(path))
        if keep_env is not True:
            # keep_env=True means: do not clear the environment at all.
            full_cmd += '--clear-env '
            if 'PATH' not in to_keep:
                full_cmd += '--set-var {0} '.format(PATH)
                # --clear-env results in a very restrictive PATH
                # (/bin:/usr/bin), use a good fallback.
        full_cmd += ' '.join(
            ['--set-var {0}={1}'.format(x, pipes.quote(os.environ[x]))
             for x in to_keep
             if x in os.environ]
        )
        full_cmd += ' -n {0} -- {1}'.format(pipes.quote(name), cmd)
    elif exec_driver == 'nsenter':
        pid = __salt__['{0}.pid'.format(container_type)](name)
        full_cmd = (
            'nsenter --target {0} --mount --uts --ipc --net --pid -- '
            .format(pid)
        )
        if keep_env is not True:
            full_cmd += 'env -i '
            if 'PATH' not in to_keep:
                full_cmd += '{0} '.format(PATH)
        full_cmd += ' '.join(
            ['{0}={1}'.format(x, pipes.quote(os.environ[x]))
             for x in to_keep
             if x in os.environ]
        )
        full_cmd += ' {0}'.format(cmd)
    elif exec_driver == 'docker-exec':
        # We're using docker exec on the CLI as opposed to via docker-py, since
        # the Docker API doesn't return stdout and stderr separately.
        full_cmd = 'docker exec '
        if stdin:
            full_cmd += '-i '
        full_cmd += '{0} '.format(name)
        if keep_env is not True:
            full_cmd += 'env -i '
            if 'PATH' not in to_keep:
                full_cmd += '{0} '.format(PATH)
        full_cmd += ' '.join(
            ['{0}={1}'.format(x, pipes.quote(os.environ[x]))
             for x in to_keep
             if x in os.environ]
        )
        full_cmd += ' {0}'.format(cmd)
    if not use_vt:
        ret = __salt__[cmd_func](full_cmd,
                                 stdin=stdin,
                                 python_shell=python_shell,
                                 output_loglevel=output_loglevel,
                                 ignore_retcode=ignore_retcode)
    else:
        # Pseudo-terminal path: drive the command through salt's VT and
        # collect the streams manually.
        stdout, stderr = '', ''
        proc = salt.utils.vt.Terminal(
            full_cmd,
            shell=python_shell,
            log_stdin_level='quiet' if output_loglevel == 'quiet' else 'info',
            log_stdout_level=output_loglevel,
            log_stderr_level=output_loglevel,
            log_stdout=True,
            log_stderr=True,
            stream_stdout=False,
            stream_stderr=False
        )
        # Consume output
        try:
            while proc.has_unread_data:
                try:
                    cstdout, cstderr = proc.recv()
                    if cstdout:
                        stdout += cstdout
                    if cstderr:
                        if output is None:
                            # Plain-string mode folds stderr into stdout.
                            stdout += cstderr
                        else:
                            stderr += cstderr
                    time.sleep(0.5)
                except KeyboardInterrupt:
                    break
            # NOTE(review): 'pid' is a hard-coded placeholder value here,
            # not the child's real PID.
            ret = stdout if output is None \
                else {'retcode': proc.exitstatus,
                      'pid': 2,
                      'stdout': stdout,
                      'stderr': stderr}
        except salt.utils.vt.TerminalException:
            trace = traceback.format_exc()
            log.error(trace)
            ret = stdout if output is None \
                else {'retcode': 127,
                      'pid': 2,
                      'stdout': stdout,
                      'stderr': stderr}
        finally:
            # Always tear the VT down, even on error.
            proc.terminate()
    return ret
@_validate
def copy_to(name,
            source,
            dest,
            container_type=None,
            path=None,
            exec_driver=None,
            overwrite=False,
            makedirs=False):
    '''
    Common logic for copying files to containers

    name
        container name

    source
        source file; ``salt://`` URLs are cached first (see cache_file)

    dest
        absolute destination path inside the container; if it is an
        existing directory the source basename is appended

    overwrite
        replace an existing destination file

    makedirs
        create the destination's parent directory if missing

    path
        path to the container parent (for LXC only)
        default: /var/lib/lxc (system default)

    Returns ``True`` if the file inside the container ends up with the
    same MD5 checksum as the source.

    CLI Example:

    .. code-block:: bash

        salt myminion container_resource.copy_to mycontainer /local/file/path /container/file/path container_type=docker exec_driver=nsenter
    '''
    # Get the appropriate functions
    state = __salt__['{0}.state'.format(container_type)]

    def run_all(*args, **akwargs):
        # Wrapper that forwards the LXC parent ``path`` when not supplied.
        akwargs = copy.deepcopy(akwargs)
        if container_type in ['lxc'] and 'path' not in akwargs:
            akwargs['path'] = path
        return __salt__['{0}.run_all'.format(container_type)](
            *args, **akwargs)

    state_kwargs = {}
    cmd_kwargs = {'ignore_retcode': True}
    if container_type in ['lxc']:
        cmd_kwargs['path'] = path
        state_kwargs['path'] = path

    def _state(name):
        # NOTE: shadows the outer ``name`` parameter (same value here).
        if state_kwargs:
            return state(name, **state_kwargs)
        else:
            return state(name)

    c_state = _state(name)
    if c_state != 'running':
        raise CommandExecutionError(
            'Container \'{0}\' is not running'.format(name)
        )
    local_file = cache_file(source)
    source_dir, source_name = os.path.split(local_file)
    # Source file sanity checks
    if not os.path.isabs(local_file):
        raise SaltInvocationError('Source path must be absolute')
    elif not os.path.exists(local_file):
        raise SaltInvocationError(
            'Source file {0} does not exist'.format(local_file)
        )
    elif not os.path.isfile(local_file):
        raise SaltInvocationError('Source must be a regular file')
    # Destination file sanity checks
    if not os.path.isabs(dest):
        raise SaltInvocationError('Destination path must be absolute')
    if run_all(name,
               'test -d {0}'.format(pipes.quote(dest)),
               **cmd_kwargs)['retcode'] == 0:
        # Destination is a directory, full path to dest file will include the
        # basename of the source file.
        dest = os.path.join(dest, source_name)
    else:
        # Destination was not a directory. We will check to see if the parent
        # dir is a directory, and then (if makedirs=True) attempt to create the
        # parent directory.
        dest_dir, dest_name = os.path.split(dest)
        if run_all(name,
                   'test -d {0}'.format(pipes.quote(dest_dir)),
                   **cmd_kwargs)['retcode'] != 0:
            if makedirs:
                result = run_all(name,
                                 'mkdir -p {0}'.format(pipes.quote(dest_dir)),
                                 **cmd_kwargs)
                if result['retcode'] != 0:
                    error = ('Unable to create destination directory {0} in '
                             'container \'{1}\''.format(dest_dir, name))
                    if result['stderr']:
                        error += ': {0}'.format(result['stderr'])
                    raise CommandExecutionError(error)
            else:
                raise SaltInvocationError(
                    'Directory {0} does not exist on {1} container \'{2}\''
                    .format(dest_dir, container_type, name)
                )
    if not overwrite and run_all(name,
                                 'test -e {0}'.format(pipes.quote(dest)),
                                 **cmd_kwargs)['retcode'] == 0:
        raise CommandExecutionError(
            'Destination path {0} already exists. Use overwrite=True to '
            'overwrite it'.format(dest)
        )
    # Before we try to replace the file, compare checksums.
    source_md5 = __salt__['file.get_sum'](local_file, 'md5')
    if source_md5 == _get_md5(name, dest, run_all):
        log.debug('%s and %s:%s are the same file, skipping copy', source, name, dest)
        return True
    log.debug('Copying %s to %s container \'%s\' as %s',
              source, container_type, name, dest)
    # Using cat here instead of opening the file, reading it into memory,
    # and passing it as stdin to run(). This will keep down memory
    # usage for the minion and make the operation run quicker.
    if exec_driver == 'lxc-attach':
        lxcattach = 'lxc-attach'
        if path:
            lxcattach += ' -P {0}'.format(pipes.quote(path))
        copy_cmd = (
            'cat "{0}" | {4} --clear-env --set-var {1} -n {2} -- '
            'tee "{3}"'.format(local_file, PATH, name, dest, lxcattach)
        )
    elif exec_driver == 'nsenter':
        pid = __salt__['{0}.pid'.format(container_type)](name)
        copy_cmd = (
            'cat "{0}" | {1} env -i {2} tee "{3}"'
            .format(local_file, _nsenter(pid), PATH, dest)
        )
    elif exec_driver == 'docker-exec':
        copy_cmd = (
            'cat "{0}" | docker exec -i {1} env -i {2} tee "{3}"'
            .format(local_file, name, PATH, dest)
        )
    __salt__['cmd.run'](copy_cmd, python_shell=True, output_loglevel='quiet')
    # Success == the in-container copy now matches the source checksum.
    return source_md5 == _get_md5(name, dest, run_all)
|
saltstack/salt
|
salt/modules/container_resource.py
|
cache_file
|
python
|
def cache_file(source):
'''
Wrapper for cp.cache_file which raises an error if the file was unable to
be cached.
CLI Example:
.. code-block:: bash
salt myminion container_resource.cache_file salt://foo/bar/baz.txt
'''
try:
# Don't just use cp.cache_file for this. Docker has its own code to
# pull down images from the web.
if source.startswith('salt://'):
cached_source = __salt__['cp.cache_file'](source)
if not cached_source:
raise CommandExecutionError(
'Unable to cache {0}'.format(source)
)
return cached_source
except AttributeError:
raise SaltInvocationError('Invalid source file {0}'.format(source))
return source
|
Wrapper for cp.cache_file which raises an error if the file was unable to
be cached.
CLI Example:
.. code-block:: bash
salt myminion container_resource.cache_file salt://foo/bar/baz.txt
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/container_resource.py#L91-L114
| null |
# -*- coding: utf-8 -*-
'''
Common resources for LXC and systemd-nspawn containers
.. versionadded:: 2015.8.0
These functions are not designed to be called directly, but instead from the
:mod:`lxc <salt.modules.lxc>`, :mod:`nspawn <salt.modules.nspawn>`, and
:mod:`docker <salt.modules.docker>` execution modules. They provide for
common logic to be re-used for common actions.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import copy
import logging
import os
import pipes
import time
import traceback
# Import salt libs
import salt.utils.args
import salt.utils.path
import salt.utils.vt
from salt.exceptions import CommandExecutionError, SaltInvocationError
log = logging.getLogger(__name__)
PATH = 'PATH=/bin:/usr/bin:/sbin:/usr/sbin:/opt/bin:' \
'/usr/local/bin:/usr/local/sbin'
def _validate(wrapped):
'''
Decorator for common function argument validation
'''
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
container_type = kwargs.get('container_type')
exec_driver = kwargs.get('exec_driver')
valid_driver = {
'docker': ('lxc-attach', 'nsenter', 'docker-exec'),
'lxc': ('lxc-attach',),
'nspawn': ('nsenter',),
}
if container_type not in valid_driver:
raise SaltInvocationError(
'Invalid container type \'{0}\'. Valid types are: {1}'
.format(container_type, ', '.join(sorted(valid_driver)))
)
if exec_driver not in valid_driver[container_type]:
raise SaltInvocationError(
'Invalid command execution driver. Valid drivers are: {0}'
.format(', '.join(valid_driver[container_type]))
)
if exec_driver == 'lxc-attach' and not salt.utils.path.which('lxc-attach'):
raise SaltInvocationError(
'The \'lxc-attach\' execution driver has been chosen, but '
'lxc-attach is not available. LXC may not be installed.'
)
return wrapped(*args, **salt.utils.args.clean_kwargs(**kwargs))
return wrapper
def _nsenter(pid):
'''
Return the nsenter command to attach to the named container
'''
return (
'nsenter --target {0} --mount --uts --ipc --net --pid'
.format(pid)
)
def _get_md5(name, path, run_func):
'''
Get the MD5 checksum of a file from a container
'''
output = run_func(name,
'md5sum {0}'.format(pipes.quote(path)),
ignore_retcode=True)['stdout']
try:
return output.split()[0]
except IndexError:
# Destination file does not exist or could not be accessed
return None
@_validate
def run(name,
cmd,
container_type=None,
exec_driver=None,
output=None,
no_start=False,
stdin=None,
python_shell=True,
output_loglevel='debug',
ignore_retcode=False,
path=None,
use_vt=False,
keep_env=None):
'''
Common logic for running shell commands in containers
path
path to the container parent (for LXC only)
default: /var/lib/lxc (system default)
CLI Example:
.. code-block:: bash
salt myminion container_resource.run mycontainer 'ps aux' container_type=docker exec_driver=nsenter output=stdout
'''
valid_output = ('stdout', 'stderr', 'retcode', 'all')
if output is None:
cmd_func = 'cmd.run'
elif output not in valid_output:
raise SaltInvocationError(
'\'output\' param must be one of the following: {0}'
.format(', '.join(valid_output))
)
else:
cmd_func = 'cmd.run_all'
if keep_env is None or isinstance(keep_env, bool):
to_keep = []
elif not isinstance(keep_env, (list, tuple)):
try:
to_keep = keep_env.split(',')
except AttributeError:
log.warning('Invalid keep_env value, ignoring')
to_keep = []
else:
to_keep = keep_env
if exec_driver == 'lxc-attach':
full_cmd = 'lxc-attach '
if path:
full_cmd += '-P {0} '.format(pipes.quote(path))
if keep_env is not True:
full_cmd += '--clear-env '
if 'PATH' not in to_keep:
full_cmd += '--set-var {0} '.format(PATH)
# --clear-env results in a very restrictive PATH
# (/bin:/usr/bin), use a good fallback.
full_cmd += ' '.join(
['--set-var {0}={1}'.format(x, pipes.quote(os.environ[x]))
for x in to_keep
if x in os.environ]
)
full_cmd += ' -n {0} -- {1}'.format(pipes.quote(name), cmd)
elif exec_driver == 'nsenter':
pid = __salt__['{0}.pid'.format(container_type)](name)
full_cmd = (
'nsenter --target {0} --mount --uts --ipc --net --pid -- '
.format(pid)
)
if keep_env is not True:
full_cmd += 'env -i '
if 'PATH' not in to_keep:
full_cmd += '{0} '.format(PATH)
full_cmd += ' '.join(
['{0}={1}'.format(x, pipes.quote(os.environ[x]))
for x in to_keep
if x in os.environ]
)
full_cmd += ' {0}'.format(cmd)
elif exec_driver == 'docker-exec':
# We're using docker exec on the CLI as opposed to via docker-py, since
# the Docker API doesn't return stdout and stderr separately.
full_cmd = 'docker exec '
if stdin:
full_cmd += '-i '
full_cmd += '{0} '.format(name)
if keep_env is not True:
full_cmd += 'env -i '
if 'PATH' not in to_keep:
full_cmd += '{0} '.format(PATH)
full_cmd += ' '.join(
['{0}={1}'.format(x, pipes.quote(os.environ[x]))
for x in to_keep
if x in os.environ]
)
full_cmd += ' {0}'.format(cmd)
if not use_vt:
ret = __salt__[cmd_func](full_cmd,
stdin=stdin,
python_shell=python_shell,
output_loglevel=output_loglevel,
ignore_retcode=ignore_retcode)
else:
stdout, stderr = '', ''
proc = salt.utils.vt.Terminal(
full_cmd,
shell=python_shell,
log_stdin_level='quiet' if output_loglevel == 'quiet' else 'info',
log_stdout_level=output_loglevel,
log_stderr_level=output_loglevel,
log_stdout=True,
log_stderr=True,
stream_stdout=False,
stream_stderr=False
)
# Consume output
try:
while proc.has_unread_data:
try:
cstdout, cstderr = proc.recv()
if cstdout:
stdout += cstdout
if cstderr:
if output is None:
stdout += cstderr
else:
stderr += cstderr
time.sleep(0.5)
except KeyboardInterrupt:
break
ret = stdout if output is None \
else {'retcode': proc.exitstatus,
'pid': 2,
'stdout': stdout,
'stderr': stderr}
except salt.utils.vt.TerminalException:
trace = traceback.format_exc()
log.error(trace)
ret = stdout if output is None \
else {'retcode': 127,
'pid': 2,
'stdout': stdout,
'stderr': stderr}
finally:
proc.terminate()
return ret
@_validate
def copy_to(name,
source,
dest,
container_type=None,
path=None,
exec_driver=None,
overwrite=False,
makedirs=False):
'''
Common logic for copying files to containers
path
path to the container parent (for LXC only)
default: /var/lib/lxc (system default)
CLI Example:
.. code-block:: bash
salt myminion container_resource.copy_to mycontainer /local/file/path /container/file/path container_type=docker exec_driver=nsenter
'''
# Get the appropriate functions
state = __salt__['{0}.state'.format(container_type)]
def run_all(*args, **akwargs):
akwargs = copy.deepcopy(akwargs)
if container_type in ['lxc'] and 'path' not in akwargs:
akwargs['path'] = path
return __salt__['{0}.run_all'.format(container_type)](
*args, **akwargs)
state_kwargs = {}
cmd_kwargs = {'ignore_retcode': True}
if container_type in ['lxc']:
cmd_kwargs['path'] = path
state_kwargs['path'] = path
def _state(name):
if state_kwargs:
return state(name, **state_kwargs)
else:
return state(name)
c_state = _state(name)
if c_state != 'running':
raise CommandExecutionError(
'Container \'{0}\' is not running'.format(name)
)
local_file = cache_file(source)
source_dir, source_name = os.path.split(local_file)
# Source file sanity checks
if not os.path.isabs(local_file):
raise SaltInvocationError('Source path must be absolute')
elif not os.path.exists(local_file):
raise SaltInvocationError(
'Source file {0} does not exist'.format(local_file)
)
elif not os.path.isfile(local_file):
raise SaltInvocationError('Source must be a regular file')
# Destination file sanity checks
if not os.path.isabs(dest):
raise SaltInvocationError('Destination path must be absolute')
if run_all(name,
'test -d {0}'.format(pipes.quote(dest)),
**cmd_kwargs)['retcode'] == 0:
# Destination is a directory, full path to dest file will include the
# basename of the source file.
dest = os.path.join(dest, source_name)
else:
# Destination was not a directory. We will check to see if the parent
# dir is a directory, and then (if makedirs=True) attempt to create the
# parent directory.
dest_dir, dest_name = os.path.split(dest)
if run_all(name,
'test -d {0}'.format(pipes.quote(dest_dir)),
**cmd_kwargs)['retcode'] != 0:
if makedirs:
result = run_all(name,
'mkdir -p {0}'.format(pipes.quote(dest_dir)),
**cmd_kwargs)
if result['retcode'] != 0:
error = ('Unable to create destination directory {0} in '
'container \'{1}\''.format(dest_dir, name))
if result['stderr']:
error += ': {0}'.format(result['stderr'])
raise CommandExecutionError(error)
else:
raise SaltInvocationError(
'Directory {0} does not exist on {1} container \'{2}\''
.format(dest_dir, container_type, name)
)
if not overwrite and run_all(name,
'test -e {0}'.format(pipes.quote(dest)),
**cmd_kwargs)['retcode'] == 0:
raise CommandExecutionError(
'Destination path {0} already exists. Use overwrite=True to '
'overwrite it'.format(dest)
)
# Before we try to replace the file, compare checksums.
source_md5 = __salt__['file.get_sum'](local_file, 'md5')
if source_md5 == _get_md5(name, dest, run_all):
log.debug('%s and %s:%s are the same file, skipping copy', source, name, dest)
return True
log.debug('Copying %s to %s container \'%s\' as %s',
source, container_type, name, dest)
# Using cat here instead of opening the file, reading it into memory,
# and passing it as stdin to run(). This will keep down memory
# usage for the minion and make the operation run quicker.
if exec_driver == 'lxc-attach':
lxcattach = 'lxc-attach'
if path:
lxcattach += ' -P {0}'.format(pipes.quote(path))
copy_cmd = (
'cat "{0}" | {4} --clear-env --set-var {1} -n {2} -- '
'tee "{3}"'.format(local_file, PATH, name, dest, lxcattach)
)
elif exec_driver == 'nsenter':
pid = __salt__['{0}.pid'.format(container_type)](name)
copy_cmd = (
'cat "{0}" | {1} env -i {2} tee "{3}"'
.format(local_file, _nsenter(pid), PATH, dest)
)
elif exec_driver == 'docker-exec':
copy_cmd = (
'cat "{0}" | docker exec -i {1} env -i {2} tee "{3}"'
.format(local_file, name, PATH, dest)
)
__salt__['cmd.run'](copy_cmd, python_shell=True, output_loglevel='quiet')
return source_md5 == _get_md5(name, dest, run_all)
|
saltstack/salt
|
salt/modules/container_resource.py
|
run
|
python
|
def run(name,
cmd,
container_type=None,
exec_driver=None,
output=None,
no_start=False,
stdin=None,
python_shell=True,
output_loglevel='debug',
ignore_retcode=False,
path=None,
use_vt=False,
keep_env=None):
'''
Common logic for running shell commands in containers
path
path to the container parent (for LXC only)
default: /var/lib/lxc (system default)
CLI Example:
.. code-block:: bash
salt myminion container_resource.run mycontainer 'ps aux' container_type=docker exec_driver=nsenter output=stdout
'''
valid_output = ('stdout', 'stderr', 'retcode', 'all')
if output is None:
cmd_func = 'cmd.run'
elif output not in valid_output:
raise SaltInvocationError(
'\'output\' param must be one of the following: {0}'
.format(', '.join(valid_output))
)
else:
cmd_func = 'cmd.run_all'
if keep_env is None or isinstance(keep_env, bool):
to_keep = []
elif not isinstance(keep_env, (list, tuple)):
try:
to_keep = keep_env.split(',')
except AttributeError:
log.warning('Invalid keep_env value, ignoring')
to_keep = []
else:
to_keep = keep_env
if exec_driver == 'lxc-attach':
full_cmd = 'lxc-attach '
if path:
full_cmd += '-P {0} '.format(pipes.quote(path))
if keep_env is not True:
full_cmd += '--clear-env '
if 'PATH' not in to_keep:
full_cmd += '--set-var {0} '.format(PATH)
# --clear-env results in a very restrictive PATH
# (/bin:/usr/bin), use a good fallback.
full_cmd += ' '.join(
['--set-var {0}={1}'.format(x, pipes.quote(os.environ[x]))
for x in to_keep
if x in os.environ]
)
full_cmd += ' -n {0} -- {1}'.format(pipes.quote(name), cmd)
elif exec_driver == 'nsenter':
pid = __salt__['{0}.pid'.format(container_type)](name)
full_cmd = (
'nsenter --target {0} --mount --uts --ipc --net --pid -- '
.format(pid)
)
if keep_env is not True:
full_cmd += 'env -i '
if 'PATH' not in to_keep:
full_cmd += '{0} '.format(PATH)
full_cmd += ' '.join(
['{0}={1}'.format(x, pipes.quote(os.environ[x]))
for x in to_keep
if x in os.environ]
)
full_cmd += ' {0}'.format(cmd)
elif exec_driver == 'docker-exec':
# We're using docker exec on the CLI as opposed to via docker-py, since
# the Docker API doesn't return stdout and stderr separately.
full_cmd = 'docker exec '
if stdin:
full_cmd += '-i '
full_cmd += '{0} '.format(name)
if keep_env is not True:
full_cmd += 'env -i '
if 'PATH' not in to_keep:
full_cmd += '{0} '.format(PATH)
full_cmd += ' '.join(
['{0}={1}'.format(x, pipes.quote(os.environ[x]))
for x in to_keep
if x in os.environ]
)
full_cmd += ' {0}'.format(cmd)
if not use_vt:
ret = __salt__[cmd_func](full_cmd,
stdin=stdin,
python_shell=python_shell,
output_loglevel=output_loglevel,
ignore_retcode=ignore_retcode)
else:
stdout, stderr = '', ''
proc = salt.utils.vt.Terminal(
full_cmd,
shell=python_shell,
log_stdin_level='quiet' if output_loglevel == 'quiet' else 'info',
log_stdout_level=output_loglevel,
log_stderr_level=output_loglevel,
log_stdout=True,
log_stderr=True,
stream_stdout=False,
stream_stderr=False
)
# Consume output
try:
while proc.has_unread_data:
try:
cstdout, cstderr = proc.recv()
if cstdout:
stdout += cstdout
if cstderr:
if output is None:
stdout += cstderr
else:
stderr += cstderr
time.sleep(0.5)
except KeyboardInterrupt:
break
ret = stdout if output is None \
else {'retcode': proc.exitstatus,
'pid': 2,
'stdout': stdout,
'stderr': stderr}
except salt.utils.vt.TerminalException:
trace = traceback.format_exc()
log.error(trace)
ret = stdout if output is None \
else {'retcode': 127,
'pid': 2,
'stdout': stdout,
'stderr': stderr}
finally:
proc.terminate()
return ret
|
Common logic for running shell commands in containers
path
path to the container parent (for LXC only)
default: /var/lib/lxc (system default)
CLI Example:
.. code-block:: bash
salt myminion container_resource.run mycontainer 'ps aux' container_type=docker exec_driver=nsenter output=stdout
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/container_resource.py#L118-L266
|
[
"def recv(self, maxsize=None):\n '''\n Receive data from the terminal as a (``stdout``, ``stderr``) tuple. If\n any of those is ``None`` we can no longer communicate with the\n terminal's child process.\n '''\n if maxsize is None:\n maxsize = 1024\n elif maxsize < 1:\n maxsize = 1\n return self._recv(maxsize)\n",
"def terminate(self, force=False):\n '''\n This forces a child process to terminate. It starts nicely with\n SIGHUP and SIGINT. If \"force\" is True then moves onto SIGKILL. This\n returns True if the child was terminated. This returns False if the\n child could not be terminated.\n '''\n if not self.closed:\n self.close(terminate=False)\n\n if not self.isalive():\n return True\n try:\n self.send_signal(signal.SIGHUP)\n time.sleep(0.1)\n if not self.isalive():\n return True\n self.send_signal(signal.SIGCONT)\n time.sleep(0.1)\n if not self.isalive():\n return True\n self.send_signal(signal.SIGINT)\n time.sleep(0.1)\n if not self.isalive():\n return True\n if force:\n self.send_signal(signal.SIGKILL)\n time.sleep(0.1)\n if not self.isalive():\n return True\n else:\n return False\n return False\n except OSError:\n # I think there are kernel timing issues that sometimes cause\n # this to happen. I think isalive() reports True, but the\n # process is dead to the kernel.\n # Make one last attempt to see if the kernel is up to date.\n time.sleep(0.1)\n if not self.isalive():\n return True\n else:\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Common resources for LXC and systemd-nspawn containers
.. versionadded:: 2015.8.0
These functions are not designed to be called directly, but instead from the
:mod:`lxc <salt.modules.lxc>`, :mod:`nspawn <salt.modules.nspawn>`, and
:mod:`docker <salt.modules.docker>` execution modules. They provide for
common logic to be re-used for common actions.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import copy
import logging
import os
import pipes
import time
import traceback
# Import salt libs
import salt.utils.args
import salt.utils.path
import salt.utils.vt
from salt.exceptions import CommandExecutionError, SaltInvocationError
log = logging.getLogger(__name__)
PATH = 'PATH=/bin:/usr/bin:/sbin:/usr/sbin:/opt/bin:' \
'/usr/local/bin:/usr/local/sbin'
def _validate(wrapped):
'''
Decorator for common function argument validation
'''
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
container_type = kwargs.get('container_type')
exec_driver = kwargs.get('exec_driver')
valid_driver = {
'docker': ('lxc-attach', 'nsenter', 'docker-exec'),
'lxc': ('lxc-attach',),
'nspawn': ('nsenter',),
}
if container_type not in valid_driver:
raise SaltInvocationError(
'Invalid container type \'{0}\'. Valid types are: {1}'
.format(container_type, ', '.join(sorted(valid_driver)))
)
if exec_driver not in valid_driver[container_type]:
raise SaltInvocationError(
'Invalid command execution driver. Valid drivers are: {0}'
.format(', '.join(valid_driver[container_type]))
)
if exec_driver == 'lxc-attach' and not salt.utils.path.which('lxc-attach'):
raise SaltInvocationError(
'The \'lxc-attach\' execution driver has been chosen, but '
'lxc-attach is not available. LXC may not be installed.'
)
return wrapped(*args, **salt.utils.args.clean_kwargs(**kwargs))
return wrapper
def _nsenter(pid):
'''
Return the nsenter command to attach to the named container
'''
return (
'nsenter --target {0} --mount --uts --ipc --net --pid'
.format(pid)
)
def _get_md5(name, path, run_func):
'''
Get the MD5 checksum of a file from a container
'''
output = run_func(name,
'md5sum {0}'.format(pipes.quote(path)),
ignore_retcode=True)['stdout']
try:
return output.split()[0]
except IndexError:
# Destination file does not exist or could not be accessed
return None
def cache_file(source):
    '''
    Wrapper for cp.cache_file which raises an error if the file was unable to
    be cached.

    CLI Example:

    .. code-block:: bash

        salt myminion container_resource.cache_file salt://foo/bar/baz.txt
    '''
    try:
        if not source.startswith('salt://'):
            # Non-salt sources are passed through untouched.
            return source
        # Don't just use cp.cache_file for this. Docker has its own code to
        # pull down images from the web.
        cached = __salt__['cp.cache_file'](source)
    except AttributeError:
        # source was not a string (no .startswith)
        raise SaltInvocationError('Invalid source file {0}'.format(source))
    if not cached:
        raise CommandExecutionError(
            'Unable to cache {0}'.format(source)
        )
    return cached
@_validate
def copy_to(name,
            source,
            dest,
            container_type=None,
            path=None,
            exec_driver=None,
            overwrite=False,
            makedirs=False):
    '''
    Common logic for copying files to containers

    name
        Name of the target container

    source
        Local path (or ``salt://`` URL) of the file to copy

    dest
        Absolute destination path inside the container. If it is an existing
        directory, the source file's basename is appended.

    path
        path to the container parent (for LXC only)

        default: /var/lib/lxc (system default)

    overwrite
        If False (default), refuse to clobber an existing destination

    makedirs
        If True, create the destination's parent directory if missing

    CLI Example:

    .. code-block:: bash

        salt myminion container_resource.copy_to mycontainer /local/file/path /container/file/path container_type=docker exec_driver=nsenter
    '''
    # NOTE: the original had the @_validate decorator applied twice, which
    # ran validation and clean_kwargs redundantly; a single application
    # is sufficient.

    # Get the appropriate functions
    state = __salt__['{0}.state'.format(container_type)]

    def run_all(*args, **akwargs):
        # LXC needs the container parent dir threaded through every call
        akwargs = copy.deepcopy(akwargs)
        if container_type in ['lxc'] and 'path' not in akwargs:
            akwargs['path'] = path
        return __salt__['{0}.run_all'.format(container_type)](
            *args, **akwargs)

    state_kwargs = {}
    cmd_kwargs = {'ignore_retcode': True}
    if container_type in ['lxc']:
        cmd_kwargs['path'] = path
        state_kwargs['path'] = path

    def _state(name):
        # state() for LXC takes a path kwarg; others take only the name
        if state_kwargs:
            return state(name, **state_kwargs)
        else:
            return state(name)

    c_state = _state(name)
    if c_state != 'running':
        raise CommandExecutionError(
            'Container \'{0}\' is not running'.format(name)
        )

    local_file = cache_file(source)
    # Only the basename is needed; the directory part was previously
    # computed and discarded.
    source_name = os.path.basename(local_file)

    # Source file sanity checks
    if not os.path.isabs(local_file):
        raise SaltInvocationError('Source path must be absolute')
    elif not os.path.exists(local_file):
        raise SaltInvocationError(
            'Source file {0} does not exist'.format(local_file)
        )
    elif not os.path.isfile(local_file):
        raise SaltInvocationError('Source must be a regular file')

    # Destination file sanity checks
    if not os.path.isabs(dest):
        raise SaltInvocationError('Destination path must be absolute')
    if run_all(name,
               'test -d {0}'.format(pipes.quote(dest)),
               **cmd_kwargs)['retcode'] == 0:
        # Destination is a directory, full path to dest file will include the
        # basename of the source file.
        dest = os.path.join(dest, source_name)
    else:
        # Destination was not a directory. We will check to see if the parent
        # dir is a directory, and then (if makedirs=True) attempt to create
        # the parent directory.
        dest_dir = os.path.dirname(dest)
        if run_all(name,
                   'test -d {0}'.format(pipes.quote(dest_dir)),
                   **cmd_kwargs)['retcode'] != 0:
            if makedirs:
                result = run_all(name,
                                 'mkdir -p {0}'.format(pipes.quote(dest_dir)),
                                 **cmd_kwargs)
                if result['retcode'] != 0:
                    error = ('Unable to create destination directory {0} in '
                             'container \'{1}\''.format(dest_dir, name))
                    if result['stderr']:
                        error += ': {0}'.format(result['stderr'])
                    raise CommandExecutionError(error)
            else:
                raise SaltInvocationError(
                    'Directory {0} does not exist on {1} container \'{2}\''
                    .format(dest_dir, container_type, name)
                )

    if not overwrite and run_all(name,
                                 'test -e {0}'.format(pipes.quote(dest)),
                                 **cmd_kwargs)['retcode'] == 0:
        raise CommandExecutionError(
            'Destination path {0} already exists. Use overwrite=True to '
            'overwrite it'.format(dest)
        )

    # Before we try to replace the file, compare checksums.
    source_md5 = __salt__['file.get_sum'](local_file, 'md5')
    if source_md5 == _get_md5(name, dest, run_all):
        log.debug('%s and %s:%s are the same file, skipping copy', source, name, dest)
        return True

    log.debug('Copying %s to %s container \'%s\' as %s',
              source, container_type, name, dest)

    # Using cat here instead of opening the file, reading it into memory,
    # and passing it as stdin to run(). This will keep down memory
    # usage for the minion and make the operation run quicker.
    if exec_driver == 'lxc-attach':
        lxcattach = 'lxc-attach'
        if path:
            lxcattach += ' -P {0}'.format(pipes.quote(path))
        copy_cmd = (
            'cat "{0}" | {4} --clear-env --set-var {1} -n {2} -- '
            'tee "{3}"'.format(local_file, PATH, name, dest, lxcattach)
        )
    elif exec_driver == 'nsenter':
        pid = __salt__['{0}.pid'.format(container_type)](name)
        copy_cmd = (
            'cat "{0}" | {1} env -i {2} tee "{3}"'
            .format(local_file, _nsenter(pid), PATH, dest)
        )
    elif exec_driver == 'docker-exec':
        copy_cmd = (
            'cat "{0}" | docker exec -i {1} env -i {2} tee "{3}"'
            .format(local_file, name, PATH, dest)
        )
    __salt__['cmd.run'](copy_cmd, python_shell=True, output_loglevel='quiet')
    # Success == file inside the container now matches the source checksum
    return source_md5 == _get_md5(name, dest, run_all)
|
saltstack/salt
|
salt/modules/container_resource.py
|
copy_to
|
python
|
def copy_to(name,
source,
dest,
container_type=None,
path=None,
exec_driver=None,
overwrite=False,
makedirs=False):
'''
Common logic for copying files to containers
path
path to the container parent (for LXC only)
default: /var/lib/lxc (system default)
CLI Example:
.. code-block:: bash
salt myminion container_resource.copy_to mycontainer /local/file/path /container/file/path container_type=docker exec_driver=nsenter
'''
# Get the appropriate functions
state = __salt__['{0}.state'.format(container_type)]
def run_all(*args, **akwargs):
akwargs = copy.deepcopy(akwargs)
if container_type in ['lxc'] and 'path' not in akwargs:
akwargs['path'] = path
return __salt__['{0}.run_all'.format(container_type)](
*args, **akwargs)
state_kwargs = {}
cmd_kwargs = {'ignore_retcode': True}
if container_type in ['lxc']:
cmd_kwargs['path'] = path
state_kwargs['path'] = path
def _state(name):
if state_kwargs:
return state(name, **state_kwargs)
else:
return state(name)
c_state = _state(name)
if c_state != 'running':
raise CommandExecutionError(
'Container \'{0}\' is not running'.format(name)
)
local_file = cache_file(source)
source_dir, source_name = os.path.split(local_file)
# Source file sanity checks
if not os.path.isabs(local_file):
raise SaltInvocationError('Source path must be absolute')
elif not os.path.exists(local_file):
raise SaltInvocationError(
'Source file {0} does not exist'.format(local_file)
)
elif not os.path.isfile(local_file):
raise SaltInvocationError('Source must be a regular file')
# Destination file sanity checks
if not os.path.isabs(dest):
raise SaltInvocationError('Destination path must be absolute')
if run_all(name,
'test -d {0}'.format(pipes.quote(dest)),
**cmd_kwargs)['retcode'] == 0:
# Destination is a directory, full path to dest file will include the
# basename of the source file.
dest = os.path.join(dest, source_name)
else:
# Destination was not a directory. We will check to see if the parent
# dir is a directory, and then (if makedirs=True) attempt to create the
# parent directory.
dest_dir, dest_name = os.path.split(dest)
if run_all(name,
'test -d {0}'.format(pipes.quote(dest_dir)),
**cmd_kwargs)['retcode'] != 0:
if makedirs:
result = run_all(name,
'mkdir -p {0}'.format(pipes.quote(dest_dir)),
**cmd_kwargs)
if result['retcode'] != 0:
error = ('Unable to create destination directory {0} in '
'container \'{1}\''.format(dest_dir, name))
if result['stderr']:
error += ': {0}'.format(result['stderr'])
raise CommandExecutionError(error)
else:
raise SaltInvocationError(
'Directory {0} does not exist on {1} container \'{2}\''
.format(dest_dir, container_type, name)
)
if not overwrite and run_all(name,
'test -e {0}'.format(pipes.quote(dest)),
**cmd_kwargs)['retcode'] == 0:
raise CommandExecutionError(
'Destination path {0} already exists. Use overwrite=True to '
'overwrite it'.format(dest)
)
# Before we try to replace the file, compare checksums.
source_md5 = __salt__['file.get_sum'](local_file, 'md5')
if source_md5 == _get_md5(name, dest, run_all):
log.debug('%s and %s:%s are the same file, skipping copy', source, name, dest)
return True
log.debug('Copying %s to %s container \'%s\' as %s',
source, container_type, name, dest)
# Using cat here instead of opening the file, reading it into memory,
# and passing it as stdin to run(). This will keep down memory
# usage for the minion and make the operation run quicker.
if exec_driver == 'lxc-attach':
lxcattach = 'lxc-attach'
if path:
lxcattach += ' -P {0}'.format(pipes.quote(path))
copy_cmd = (
'cat "{0}" | {4} --clear-env --set-var {1} -n {2} -- '
'tee "{3}"'.format(local_file, PATH, name, dest, lxcattach)
)
elif exec_driver == 'nsenter':
pid = __salt__['{0}.pid'.format(container_type)](name)
copy_cmd = (
'cat "{0}" | {1} env -i {2} tee "{3}"'
.format(local_file, _nsenter(pid), PATH, dest)
)
elif exec_driver == 'docker-exec':
copy_cmd = (
'cat "{0}" | docker exec -i {1} env -i {2} tee "{3}"'
.format(local_file, name, PATH, dest)
)
__salt__['cmd.run'](copy_cmd, python_shell=True, output_loglevel='quiet')
return source_md5 == _get_md5(name, dest, run_all)
|
Common logic for copying files to containers
path
path to the container parent (for LXC only)
default: /var/lib/lxc (system default)
CLI Example:
.. code-block:: bash
salt myminion container_resource.copy_to mycontainer /local/file/path /container/file/path container_type=docker exec_driver=nsenter
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/container_resource.py#L270-L404
|
[
"def cache_file(source):\n '''\n Wrapper for cp.cache_file which raises an error if the file was unable to\n be cached.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt myminion container_resource.cache_file salt://foo/bar/baz.txt\n '''\n try:\n # Don't just use cp.cache_file for this. Docker has its own code to\n # pull down images from the web.\n if source.startswith('salt://'):\n cached_source = __salt__['cp.cache_file'](source)\n if not cached_source:\n raise CommandExecutionError(\n 'Unable to cache {0}'.format(source)\n )\n return cached_source\n except AttributeError:\n raise SaltInvocationError('Invalid source file {0}'.format(source))\n return source\n",
"def _state(name):\n if state_kwargs:\n return state(name, **state_kwargs)\n else:\n return state(name)\n"
] |
# -*- coding: utf-8 -*-
'''
Common resources for LXC and systemd-nspawn containers
.. versionadded:: 2015.8.0
These functions are not designed to be called directly, but instead from the
:mod:`lxc <salt.modules.lxc>`, :mod:`nspawn <salt.modules.nspawn>`, and
:mod:`docker <salt.modules.docker>` execution modules. They provide for
common logic to be re-used for common actions.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import copy
import logging
import os
import pipes
import time
import traceback
# Import salt libs
import salt.utils.args
import salt.utils.path
import salt.utils.vt
from salt.exceptions import CommandExecutionError, SaltInvocationError
log = logging.getLogger(__name__)
# Minimal sane PATH exported into the container when the execution driver
# clears the environment (lxc-attach --clear-env / env -i).
PATH = 'PATH=/bin:/usr/bin:/sbin:/usr/sbin:/opt/bin:' \
       '/usr/local/bin:/usr/local/sbin'
def _validate(wrapped):
    '''
    Decorator for common function argument validation
    '''
    @functools.wraps(wrapped)
    def wrapper(*args, **kwargs):
        drivers_by_type = {
            'docker': ('lxc-attach', 'nsenter', 'docker-exec'),
            'lxc': ('lxc-attach',),
            'nspawn': ('nsenter',),
        }
        container_type = kwargs.get('container_type')
        exec_driver = kwargs.get('exec_driver')
        try:
            allowed = drivers_by_type[container_type]
        except KeyError:
            raise SaltInvocationError(
                'Invalid container type \'{0}\'. Valid types are: {1}'
                .format(container_type, ', '.join(sorted(drivers_by_type)))
            )
        if exec_driver not in allowed:
            raise SaltInvocationError(
                'Invalid command execution driver. Valid drivers are: {0}'
                .format(', '.join(allowed))
            )
        if exec_driver == 'lxc-attach' and not salt.utils.path.which('lxc-attach'):
            raise SaltInvocationError(
                'The \'lxc-attach\' execution driver has been chosen, but '
                'lxc-attach is not available. LXC may not be installed.'
            )
        return wrapped(*args, **salt.utils.args.clean_kwargs(**kwargs))
    return wrapper
def _nsenter(pid):
'''
Return the nsenter command to attach to the named container
'''
return (
'nsenter --target {0} --mount --uts --ipc --net --pid'
.format(pid)
)
def _get_md5(name, path, run_func):
'''
Get the MD5 checksum of a file from a container
'''
output = run_func(name,
'md5sum {0}'.format(pipes.quote(path)),
ignore_retcode=True)['stdout']
try:
return output.split()[0]
except IndexError:
# Destination file does not exist or could not be accessed
return None
def cache_file(source):
    '''
    Wrapper for cp.cache_file which raises an error if the file was unable to
    be cached.

    CLI Example:

    .. code-block:: bash

        salt myminion container_resource.cache_file salt://foo/bar/baz.txt
    '''
    try:
        # Don't just use cp.cache_file for this. Docker has its own code to
        # pull down images from the web.
        if source.startswith('salt://'):
            cached = __salt__['cp.cache_file'](source)
            if cached:
                return cached
            raise CommandExecutionError(
                'Unable to cache {0}'.format(source)
            )
    except AttributeError:
        # source had no .startswith, i.e. it was not a string
        raise SaltInvocationError('Invalid source file {0}'.format(source))
    # Anything that is not a salt:// URL is returned untouched.
    return source
@_validate
def run(name,
        cmd,
        container_type=None,
        exec_driver=None,
        output=None,
        no_start=False,
        stdin=None,
        python_shell=True,
        output_loglevel='debug',
        ignore_retcode=False,
        path=None,
        use_vt=False,
        keep_env=None):
    '''
    Common logic for running shell commands in containers

    name
        Name of the target container

    cmd
        Shell command to run inside the container

    output
        One of ``stdout``, ``stderr``, ``retcode``, ``all`` to get a dict of
        results via cmd.run_all; ``None`` (default) returns plain cmd.run
        output

    path
        path to the container parent (for LXC only)

        default: /var/lib/lxc (system default)

    keep_env
        ``True`` keeps the whole host environment; a list/tuple or
        comma-separated string keeps only those variables; otherwise the
        environment is cleared (PATH re-added unless kept explicitly)

    use_vt
        Run the command through a pseudo-terminal instead of cmd.run

    CLI Example:

    .. code-block:: bash

        salt myminion container_resource.run mycontainer 'ps aux' container_type=docker exec_driver=nsenter output=stdout
    '''
    # NOTE(review): the no_start parameter is accepted but never used in this
    # body — confirm whether callers rely on it.
    valid_output = ('stdout', 'stderr', 'retcode', 'all')
    if output is None:
        cmd_func = 'cmd.run'
    elif output not in valid_output:
        raise SaltInvocationError(
            '\'output\' param must be one of the following: {0}'
            .format(', '.join(valid_output))
        )
    else:
        cmd_func = 'cmd.run_all'

    # Normalize keep_env into a list of variable names to preserve.
    if keep_env is None or isinstance(keep_env, bool):
        to_keep = []
    elif not isinstance(keep_env, (list, tuple)):
        try:
            to_keep = keep_env.split(',')
        except AttributeError:
            log.warning('Invalid keep_env value, ignoring')
            to_keep = []
    else:
        to_keep = keep_env

    # Build the full host-side command line for the chosen execution driver.
    if exec_driver == 'lxc-attach':
        full_cmd = 'lxc-attach '
        if path:
            full_cmd += '-P {0} '.format(pipes.quote(path))
        if keep_env is not True:
            full_cmd += '--clear-env '
            if 'PATH' not in to_keep:
                full_cmd += '--set-var {0} '.format(PATH)
                # --clear-env results in a very restrictive PATH
                # (/bin:/usr/bin), use a good fallback.
        full_cmd += ' '.join(
            ['--set-var {0}={1}'.format(x, pipes.quote(os.environ[x]))
             for x in to_keep
             if x in os.environ]
        )
        full_cmd += ' -n {0} -- {1}'.format(pipes.quote(name), cmd)
    elif exec_driver == 'nsenter':
        pid = __salt__['{0}.pid'.format(container_type)](name)
        full_cmd = (
            'nsenter --target {0} --mount --uts --ipc --net --pid -- '
            .format(pid)
        )
        if keep_env is not True:
            full_cmd += 'env -i '
            if 'PATH' not in to_keep:
                full_cmd += '{0} '.format(PATH)
        full_cmd += ' '.join(
            ['{0}={1}'.format(x, pipes.quote(os.environ[x]))
             for x in to_keep
             if x in os.environ]
        )
        full_cmd += ' {0}'.format(cmd)
    elif exec_driver == 'docker-exec':
        # We're using docker exec on the CLI as opposed to via docker-py, since
        # the Docker API doesn't return stdout and stderr separately.
        full_cmd = 'docker exec '
        if stdin:
            full_cmd += '-i '
        full_cmd += '{0} '.format(name)
        if keep_env is not True:
            full_cmd += 'env -i '
            if 'PATH' not in to_keep:
                full_cmd += '{0} '.format(PATH)
        full_cmd += ' '.join(
            ['{0}={1}'.format(x, pipes.quote(os.environ[x]))
             for x in to_keep
             if x in os.environ]
        )
        full_cmd += ' {0}'.format(cmd)

    if not use_vt:
        # Simple path: delegate to cmd.run / cmd.run_all on the host.
        ret = __salt__[cmd_func](full_cmd,
                                 stdin=stdin,
                                 python_shell=python_shell,
                                 output_loglevel=output_loglevel,
                                 ignore_retcode=ignore_retcode)
    else:
        # PTY path: drive the command through salt.utils.vt and collect
        # output incrementally.
        stdout, stderr = '', ''
        proc = salt.utils.vt.Terminal(
            full_cmd,
            shell=python_shell,
            log_stdin_level='quiet' if output_loglevel == 'quiet' else 'info',
            log_stdout_level=output_loglevel,
            log_stderr_level=output_loglevel,
            log_stdout=True,
            log_stderr=True,
            stream_stdout=False,
            stream_stderr=False
        )
        # Consume output
        try:
            while proc.has_unread_data:
                try:
                    cstdout, cstderr = proc.recv()
                    if cstdout:
                        stdout += cstdout
                    if cstderr:
                        # With output=None everything is folded into stdout,
                        # matching cmd.run's combined-output behavior.
                        if output is None:
                            stdout += cstderr
                        else:
                            stderr += cstderr
                    time.sleep(0.5)
                except KeyboardInterrupt:
                    break
            # NOTE(review): 'pid': 2 is a placeholder value, not the real
            # child PID — confirm downstream consumers do not rely on it.
            ret = stdout if output is None \
                else {'retcode': proc.exitstatus,
                      'pid': 2,
                      'stdout': stdout,
                      'stderr': stderr}
        except salt.utils.vt.TerminalException:
            trace = traceback.format_exc()
            log.error(trace)
            # 127 conventionally signals "command not found" / failure here.
            ret = stdout if output is None \
                else {'retcode': 127,
                      'pid': 2,
                      'stdout': stdout,
                      'stderr': stderr}
        finally:
            proc.terminate()
    return ret
@_validate
|
saltstack/salt
|
salt/executors/docker.py
|
execute
|
python
|
def execute(opts, data, func, args, kwargs):
    '''
    Directly calls the given function with arguments
    '''
    fun = data['fun']
    if fun == 'saltutil.find_job':
        # Job lookups run on the proxy itself rather than in the container.
        return __executors__['direct_call.execute'](opts, data, func, args, kwargs)
    container = opts['proxy']['name']
    if fun in DOCKER_MOD_MAP:
        # Re-route mapped state functions to their docker.* counterparts,
        # prepending the container name as the first argument.
        mapped_func = __salt__[DOCKER_MOD_MAP[fun]]
        return __executors__['direct_call.execute'](
            opts, data, mapped_func, [container] + args, kwargs)
    # Everything else is executed inside the container via docker.call.
    return __salt__['docker.call'](container, fun, *args, **kwargs)
|
Directly calls the given function with arguments
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/executors/docker.py#L28-L36
| null |
# -*- coding: utf-8 -*-
'''
Docker executor module
.. versionadded: 2019.2.0
Used with the docker proxy minion.
'''
from __future__ import absolute_import, unicode_literals
# Loader name under which this executor is registered
__virtualname__ = 'docker'

# State-module functions that are re-routed to their docker.* equivalents
# (they operate on the container by name instead of running inside it)
DOCKER_MOD_MAP = {
    'state.sls': 'docker.sls',
    'state.apply': 'docker.apply',
    'state.highstate': 'docker.highstate',
}
def __virtual__():
    '''
    Only load on Docker proxy minions.
    '''
    if 'proxy' not in __opts__:
        return False, 'Docker executor is only meant to be used with Docker Proxy Minions'
    proxytype = __opts__.get('proxy', {}).get('proxytype')
    if proxytype != __virtualname__:
        return False, 'Proxytype does not match: {0}'.format(__virtualname__)
    return True
def allow_missing_func(function):  # pylint: disable=unused-argument
    '''
    Allow all calls to be passed through to docker container.

    The docker call will use direct_call, which will return back if the module
    was unable to be run.
    '''
    # Unconditionally permissive: the container-side call reports failures.
    return True
|
saltstack/salt
|
salt/cli/salt.py
|
SaltCMD.run
|
python
|
def run(self):
    '''
    Execute the salt command line.

    Parses CLI args, builds a LocalClient, dispatches the command (batch,
    preview, async, progress, or normal CLI mode), prints returns, and sets
    the process exit code from the minion retcodes.
    '''
    import salt.client
    self.parse_args()

    if self.config['log_level'] not in ('quiet', ):
        # Setup file logging!
        self.setup_logfile_logger()
        verify_log(self.config)

    try:
        # We don't need to bail on config file permission errors
        # if the CLI process is run with the -a flag
        skip_perm_errors = self.options.eauth != ''
        self.local_client = salt.client.get_local_client(
            self.get_config_file_path(),
            skip_perm_errors=skip_perm_errors,
            auto_reconnect=True)
    except SaltClientError as exc:
        self.exit(2, '{0}\n'.format(exc))
        return

    if self.options.batch or self.options.static:
        # _run_batch() will handle all output and
        # exit with the appropriate error condition
        # Execution will not continue past this point
        # in batch mode.
        self._run_batch()
        return

    if self.options.preview_target:
        # --preview-target: just show which minions would be hit.
        minion_list = self._preview_target()
        self._output_ret(minion_list, self.config.get('output', 'nested'))
        return

    if self.options.timeout <= 0:
        # Fall back to the client's configured timeout.
        self.options.timeout = self.local_client.opts['timeout']

    kwargs = {
        'tgt': self.config['tgt'],
        'fun': self.config['fun'],
        'arg': self.config['arg'],
        'timeout': self.options.timeout,
        'show_timeout': self.options.show_timeout,
        'show_jid': self.options.show_jid}

    if 'token' in self.config:
        import salt.utils.files
        try:
            # Prefer the local root key when readable; otherwise use the
            # eauth token from config.
            with salt.utils.files.fopen(os.path.join(self.config['cachedir'], '.root_key'), 'r') as fp_:
                kwargs['key'] = fp_.readline()
        except IOError:
            kwargs['token'] = self.config['token']

    kwargs['delimiter'] = self.options.delimiter

    if self.selected_target_option:
        kwargs['tgt_type'] = self.selected_target_option
    else:
        kwargs['tgt_type'] = 'glob'

    # If batch_safe_limit is set, check minions matching target and
    # potentially switch to batch execution
    if self.options.batch_safe_limit > 1:
        if len(self._preview_target()) >= self.options.batch_safe_limit:
            salt.utils.stringutils.print_cli('\nNOTICE: Too many minions targeted, switching to batch execution.')
            self.options.batch = self.options.batch_safe_size
            self._run_batch()
            return

    # 'return' is a Python keyword, hence getattr instead of attribute access.
    if getattr(self.options, 'return'):
        kwargs['ret'] = getattr(self.options, 'return')

    if getattr(self.options, 'return_config'):
        kwargs['ret_config'] = getattr(self.options, 'return_config')

    if getattr(self.options, 'return_kwargs'):
        kwargs['ret_kwargs'] = yamlify_arg(
            getattr(self.options, 'return_kwargs'))

    if getattr(self.options, 'module_executors'):
        kwargs['module_executors'] = yamlify_arg(getattr(self.options, 'module_executors'))

    if getattr(self.options, 'executor_opts'):
        kwargs['executor_opts'] = yamlify_arg(getattr(self.options, 'executor_opts'))

    if getattr(self.options, 'metadata'):
        kwargs['metadata'] = yamlify_arg(
            getattr(self.options, 'metadata'))

    # If using eauth and a token hasn't already been loaded into
    # kwargs, prompt the user to enter auth credentials
    if 'token' not in kwargs and 'key' not in kwargs and self.options.eauth:
        # This is expensive. Don't do it unless we need to.
        import salt.auth
        resolver = salt.auth.Resolver(self.config)
        res = resolver.cli(self.options.eauth)
        if self.options.mktoken and res:
            tok = resolver.token_cli(
                self.options.eauth,
                res
            )
            if tok:
                kwargs['token'] = tok.get('token', '')
        if not res:
            sys.stderr.write('ERROR: Authentication failed\n')
            sys.exit(2)
        kwargs.update(res)
        kwargs['eauth'] = self.options.eauth

    if self.config['async']:
        # Fire-and-forget: print the JID and return immediately.
        jid = self.local_client.cmd_async(**kwargs)
        salt.utils.stringutils.print_cli('Executed command with job ID: {0}'.format(jid))
        return

    # local will be None when there was an error
    if not self.local_client:
        return

    retcodes = []
    errors = []

    try:
        if self.options.subset:
            cmd_func = self.local_client.cmd_subset
            kwargs['sub'] = self.options.subset
            kwargs['cli'] = True
        else:
            cmd_func = self.local_client.cmd_cli

        if self.options.progress:
            kwargs['progress'] = True
            self.config['progress'] = True
            ret = {}
            # NOTE(review): if cmd_func yields nothing, 'out' is never bound
            # and _progress_end(out) below would raise NameError — confirm.
            for progress in cmd_func(**kwargs):
                out = 'progress'
                try:
                    self._progress_ret(progress, out)
                except LoaderError as exc:
                    raise SaltSystemExit(exc)
                if 'return_count' not in progress:
                    ret.update(progress)
            self._progress_end(out)
            self._print_returns_summary(ret)
        elif self.config['fun'] == 'sys.doc':
            ret = {}
            out = ''
            # NOTE(review): 'retcode' is only bound inside the loop; an empty
            # result set would leave it undefined at the call below — confirm.
            for full_ret in self.local_client.cmd_cli(**kwargs):
                ret_, out, retcode = self._format_ret(full_ret)
                ret.update(ret_)
            self._output_ret(ret, out, retcode=retcode)
        else:
            if self.options.verbose:
                kwargs['verbose'] = True
            ret = {}
            for full_ret in cmd_func(**kwargs):
                try:
                    ret_, out, retcode = self._format_ret(full_ret)
                    retcodes.append(retcode)
                    self._output_ret(ret_, out, retcode=retcode)
                    ret.update(full_ret)
                except KeyError:
                    # Malformed return payloads are collected for the
                    # errors summary rather than aborting the run.
                    errors.append(full_ret)

        # Returns summary
        if self.config['cli_summary'] is True:
            if self.config['fun'] != 'sys.doc':
                if self.options.output is None:
                    self._print_returns_summary(ret)
                    self._print_errors_summary(errors)

        # NOTE: Return code is set here based on if all minions
        # returned 'ok' with a retcode of 0.
        # This is the final point before the 'salt' cmd returns,
        # which is why we set the retcode here.
        if not all(exit_code == salt.defaults.exitcodes.EX_OK for exit_code in retcodes):
            sys.stderr.write('ERROR: Minions returned with non-zero exit code\n')
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
    except (AuthenticationError,
            AuthorizationError,
            SaltInvocationError,
            EauthAuthenticationError,
            SaltClientError) as exc:
        ret = six.text_type(exc)
        self._output_ret(ret, '', retcode=1)
Execute the salt command line
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/salt.py#L36-L224
|
[
"def get_local_client(\n c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),\n mopts=None,\n skip_perm_errors=False,\n io_loop=None,\n auto_reconnect=False):\n '''\n .. versionadded:: 2014.7.0\n\n Read in the config and return the correct LocalClient object based on\n the configured transport\n\n :param IOLoop io_loop: io_loop used for events.\n Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n if mopts:\n opts = mopts\n else:\n # Late import to prevent circular import\n import salt.config\n opts = salt.config.client_config(c_path)\n\n # TODO: AIO core is separate from transport\n return LocalClient(\n mopts=opts,\n skip_perm_errors=skip_perm_errors,\n io_loop=io_loop,\n auto_reconnect=auto_reconnect)\n",
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def print_cli(msg, retries=10, step=0.01):\n '''\n Wrapper around print() that suppresses tracebacks on broken pipes (i.e.\n when salt output is piped to less and less is stopped prematurely).\n '''\n while retries:\n try:\n try:\n print(msg)\n except UnicodeEncodeError:\n print(msg.encode('utf-8'))\n except IOError as exc:\n err = \"{0}\".format(exc)\n if exc.errno != errno.EPIPE:\n if (\n (\"temporarily unavailable\" in err or\n exc.errno in (errno.EAGAIN,)) and\n retries\n ):\n time.sleep(step)\n retries -= 1\n continue\n else:\n raise\n break\n",
"def verify_log(opts):\n '''\n If an insecre logging configuration is found, show a warning\n '''\n level = LOG_LEVELS.get(str(opts.get('log_level')).lower(), logging.NOTSET)\n\n if level < logging.INFO:\n log.warning('Insecure logging configuration detected! Sensitive data may be logged.')\n",
"def yamlify_arg(arg):\n '''\n yaml.safe_load the arg\n '''\n if not isinstance(arg, six.string_types):\n return arg\n\n if arg.strip() == '':\n # Because YAML loads empty (or all whitespace) strings as None, we\n # return the original string\n # >>> import yaml\n # >>> yaml.load('') is None\n # True\n # >>> yaml.load(' ') is None\n # True\n return arg\n\n elif '_' in arg and all([x in '0123456789_' for x in arg.strip()]):\n # When the stripped string includes just digits and underscores, the\n # underscores are ignored and the digits are combined together and\n # loaded as an int. We don't want that, so return the original value.\n return arg\n\n try:\n # Explicit late import to avoid circular import. DO NOT MOVE THIS.\n import salt.utils.yaml\n original_arg = arg\n if '#' in arg:\n # Only yamlify if it parses into a non-string type, to prevent\n # loss of content due to # as comment character\n parsed_arg = salt.utils.yaml.safe_load(arg)\n if isinstance(parsed_arg, six.string_types) or parsed_arg is None:\n return arg\n return parsed_arg\n if arg == 'None':\n arg = None\n else:\n arg = salt.utils.yaml.safe_load(arg)\n\n if isinstance(arg, dict):\n # dicts must be wrapped in curly braces\n if (isinstance(original_arg, six.string_types) and\n not original_arg.startswith('{')):\n return original_arg\n else:\n return arg\n\n elif isinstance(arg, list):\n # lists must be wrapped in brackets\n if (isinstance(original_arg, six.string_types) and\n not original_arg.startswith('[')):\n return original_arg\n else:\n return arg\n\n elif arg is None \\\n or isinstance(arg, (list, float, six.integer_types, six.string_types)):\n # yaml.safe_load will load '|' as '', don't let it do that.\n if arg == '' and original_arg in ('|',):\n return original_arg\n # yaml.safe_load will treat '#' as a comment, so a value of '#'\n # will become None. 
Keep this value from being stomped as well.\n elif arg is None and original_arg.strip().startswith('#'):\n return original_arg\n else:\n return arg\n else:\n # we don't support this type\n return original_arg\n except Exception:\n # In case anything goes wrong...\n return original_arg\n",
"def cli(self, eauth):\n '''\n Execute the CLI options to fill in the extra data needed for the\n defined eauth system\n '''\n ret = {}\n if not eauth:\n print('External authentication system has not been specified')\n return ret\n fstr = '{0}.auth'.format(eauth)\n if fstr not in self.auth:\n print(('The specified external authentication system \"{0}\" is '\n 'not available').format(eauth))\n print(\"Available eauth types: {0}\".format(\", \".join(self.auth.file_mapping.keys())))\n return ret\n\n args = salt.utils.args.arg_lookup(self.auth[fstr])\n for arg in args['args']:\n if arg in self.opts:\n ret[arg] = self.opts[arg]\n elif arg.startswith('pass'):\n ret[arg] = getpass.getpass('{0}: '.format(arg))\n else:\n ret[arg] = input('{0}: '.format(arg))\n for kwarg, default in list(args['kwargs'].items()):\n if kwarg in self.opts:\n ret['kwarg'] = self.opts[kwarg]\n else:\n ret[kwarg] = input('{0} [{1}]: '.format(kwarg, default))\n\n # Use current user if empty\n if 'username' in ret and not ret['username']:\n ret['username'] = salt.utils.user.get_user()\n\n return ret\n",
"def token_cli(self, eauth, load):\n '''\n Create the token from the CLI and request the correct data to\n authenticate via the passed authentication mechanism\n '''\n load['cmd'] = 'mk_token'\n load['eauth'] = eauth\n tdata = self._send_token_request(load)\n if 'token' not in tdata:\n return tdata\n try:\n with salt.utils.files.set_umask(0o177):\n with salt.utils.files.fopen(self.opts['token_file'], 'w+') as fp_:\n fp_.write(tdata['token'])\n except (IOError, OSError):\n pass\n return tdata\n",
"def _preview_target(self):\n '''\n Return a list of minions from a given target\n '''\n return self.local_client.gather_minions(self.config['tgt'], self.selected_target_option or 'glob')\n",
"def _run_batch(self):\n import salt.cli.batch\n eauth = {}\n if 'token' in self.config:\n eauth['token'] = self.config['token']\n\n # If using eauth and a token hasn't already been loaded into\n # kwargs, prompt the user to enter auth credentials\n if 'token' not in eauth and self.options.eauth:\n # This is expensive. Don't do it unless we need to.\n import salt.auth\n resolver = salt.auth.Resolver(self.config)\n res = resolver.cli(self.options.eauth)\n if self.options.mktoken and res:\n tok = resolver.token_cli(\n self.options.eauth,\n res\n )\n if tok:\n eauth['token'] = tok.get('token', '')\n if not res:\n sys.stderr.write('ERROR: Authentication failed\\n')\n sys.exit(2)\n eauth.update(res)\n eauth['eauth'] = self.options.eauth\n\n if self.options.static:\n\n if not self.options.batch:\n self.config['batch'] = '100%'\n\n try:\n batch = salt.cli.batch.Batch(self.config, eauth=eauth, quiet=True)\n except SaltClientError:\n sys.exit(2)\n\n ret = {}\n\n for res in batch.run():\n ret.update(res)\n\n self._output_ret(ret, '')\n\n else:\n try:\n self.config['batch'] = self.options.batch\n batch = salt.cli.batch.Batch(self.config, eauth=eauth, parser=self.options)\n except SaltClientError:\n # We will print errors to the console further down the stack\n sys.exit(1)\n # Printing the output is already taken care of in run() itself\n retcode = 0\n for res in batch.run():\n for ret in six.itervalues(res):\n job_retcode = salt.utils.job.get_retcode(ret)\n if job_retcode > retcode:\n # Exit with the highest retcode we find\n retcode = job_retcode\n sys.exit(retcode)\n",
"def _print_errors_summary(self, errors):\n if errors:\n salt.utils.stringutils.print_cli('\\n')\n salt.utils.stringutils.print_cli('---------------------------')\n salt.utils.stringutils.print_cli('Errors')\n salt.utils.stringutils.print_cli('---------------------------')\n for error in errors:\n salt.utils.stringutils.print_cli(self._format_error(error))\n",
"def _print_returns_summary(self, ret):\n '''\n Display returns summary\n '''\n return_counter = 0\n not_return_counter = 0\n not_return_minions = []\n not_response_minions = []\n not_connected_minions = []\n failed_minions = []\n for each_minion in ret:\n minion_ret = ret[each_minion]\n if isinstance(minion_ret, dict) and 'ret' in minion_ret:\n minion_ret = ret[each_minion].get('ret')\n if (\n isinstance(minion_ret, six.string_types)\n and minion_ret.startswith(\"Minion did not return\")\n ):\n if \"Not connected\" in minion_ret:\n not_connected_minions.append(each_minion)\n elif \"No response\" in minion_ret:\n not_response_minions.append(each_minion)\n not_return_counter += 1\n not_return_minions.append(each_minion)\n else:\n return_counter += 1\n if self._get_retcode(ret[each_minion]):\n failed_minions.append(each_minion)\n salt.utils.stringutils.print_cli('\\n')\n salt.utils.stringutils.print_cli('-------------------------------------------')\n salt.utils.stringutils.print_cli('Summary')\n salt.utils.stringutils.print_cli('-------------------------------------------')\n salt.utils.stringutils.print_cli('# of minions targeted: {0}'.format(return_counter + not_return_counter))\n salt.utils.stringutils.print_cli('# of minions returned: {0}'.format(return_counter))\n salt.utils.stringutils.print_cli('# of minions that did not return: {0}'.format(not_return_counter))\n salt.utils.stringutils.print_cli('# of minions with errors: {0}'.format(len(failed_minions)))\n if self.options.verbose:\n if not_connected_minions:\n salt.utils.stringutils.print_cli('Minions not connected: {0}'.format(\" \".join(not_connected_minions)))\n if not_response_minions:\n salt.utils.stringutils.print_cli('Minions not responding: {0}'.format(\" \".join(not_response_minions)))\n if failed_minions:\n salt.utils.stringutils.print_cli('Minions with failures: {0}'.format(\" \".join(failed_minions)))\n salt.utils.stringutils.print_cli('-------------------------------------------')\n",
"def _progress_end(self, out):\n import salt.output\n salt.output.progress_end(self.progress_bar)\n",
"def _progress_ret(self, progress, out):\n '''\n Print progress events\n '''\n import salt.output\n # Get the progress bar\n if not hasattr(self, 'progress_bar'):\n try:\n self.progress_bar = salt.output.get_progress(self.config, out, progress)\n except Exception:\n raise LoaderError('\\nWARNING: Install the `progressbar` python package. '\n 'Requested job was still run but output cannot be displayed.\\n')\n salt.output.update_progress(self.config, progress, self.progress_bar, out)\n",
"def _output_ret(self, ret, out, retcode=0):\n '''\n Print the output from a single return to the terminal\n '''\n import salt.output\n # Handle special case commands\n if self.config['fun'] == 'sys.doc' and not isinstance(ret, Exception):\n self._print_docs(ret)\n else:\n # Determine the proper output method and run it\n salt.output.display_output(ret,\n out=out,\n opts=self.config,\n _retcode=retcode)\n if not ret:\n sys.stderr.write('ERROR: No return received\\n')\n sys.exit(2)\n",
"def _format_ret(self, full_ret):\n '''\n Take the full return data and format it to simple output\n '''\n ret = {}\n out = ''\n retcode = 0\n for key, data in six.iteritems(full_ret):\n ret[key] = data['ret']\n if 'out' in data:\n out = data['out']\n ret_retcode = self._get_retcode(data)\n if ret_retcode > retcode:\n retcode = ret_retcode\n return ret, out, retcode\n"
] |
class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
    '''
    The execution of a salt command happens here
    '''
    def _preview_target(self):
        '''
        Return a list of minions from a given target

        Resolves ``self.config['tgt']`` through the local client using the
        selected target option (glob matching by default) without running
        any command on the matched minions.
        '''
        return self.local_client.gather_minions(self.config['tgt'], self.selected_target_option or 'glob')
    def _run_batch(self):
        '''
        Execute the command in batch mode (rolling subsets of minions).

        Gathers eauth credentials if needed, then either aggregates all
        returns before printing (``--static``) or streams results as each
        batch completes.  Never returns normally: every code path ends in
        ``sys.exit``.
        '''
        import salt.cli.batch
        eauth = {}
        if 'token' in self.config:
            eauth['token'] = self.config['token']
        # If using eauth and a token hasn't already been loaded into
        # kwargs, prompt the user to enter auth credentials
        if 'token' not in eauth and self.options.eauth:
            # This is expensive. Don't do it unless we need to.
            import salt.auth
            resolver = salt.auth.Resolver(self.config)
            res = resolver.cli(self.options.eauth)
            if self.options.mktoken and res:
                tok = resolver.token_cli(
                    self.options.eauth,
                    res
                )
                if tok:
                    eauth['token'] = tok.get('token', '')
            if not res:
                sys.stderr.write('ERROR: Authentication failed\n')
                sys.exit(2)
            eauth.update(res)
            eauth['eauth'] = self.options.eauth
        if self.options.static:
            # --static: run quietly, collect every batch's returns into one
            # dict, and print a single consolidated result at the end.
            if not self.options.batch:
                self.config['batch'] = '100%'
            try:
                batch = salt.cli.batch.Batch(self.config, eauth=eauth, quiet=True)
            except SaltClientError:
                sys.exit(2)
            ret = {}
            for res in batch.run():
                ret.update(res)
            self._output_ret(ret, '')
        else:
            try:
                self.config['batch'] = self.options.batch
                batch = salt.cli.batch.Batch(self.config, eauth=eauth, parser=self.options)
            except SaltClientError:
                # We will print errors to the console further down the stack
                sys.exit(1)
            # Printing the output is already taken care of in run() itself
            retcode = 0
            for res in batch.run():
                for ret in six.itervalues(res):
                    job_retcode = salt.utils.job.get_retcode(ret)
                    if job_retcode > retcode:
                        # Exit with the highest retcode we find
                        retcode = job_retcode
            sys.exit(retcode)
    def _print_errors_summary(self, errors):
        # Print a trailing "Errors" section listing every minion error dict
        # collected while formatting returns; silently a no-op when empty.
        if errors:
            salt.utils.stringutils.print_cli('\n')
            salt.utils.stringutils.print_cli('---------------------------')
            salt.utils.stringutils.print_cli('Errors')
            salt.utils.stringutils.print_cli('---------------------------')
            for error in errors:
                salt.utils.stringutils.print_cli(self._format_error(error))
    def _print_returns_summary(self, ret):
        '''
        Display returns summary

        Partitions minions in ``ret`` into returned vs. non-returned (and,
        within non-returned, "Not connected" vs. "No response"), counts
        failures via :meth:`_get_retcode`, and prints a formatted summary.
        Verbose mode additionally lists the affected minion IDs.
        '''
        return_counter = 0
        not_return_counter = 0
        not_return_minions = []
        not_response_minions = []
        not_connected_minions = []
        failed_minions = []
        for each_minion in ret:
            minion_ret = ret[each_minion]
            # Unwrap the 'ret' payload when the full return dict is present.
            if isinstance(minion_ret, dict) and 'ret' in minion_ret:
                minion_ret = ret[each_minion].get('ret')
            if (
                    isinstance(minion_ret, six.string_types)
                    and minion_ret.startswith("Minion did not return")
            ):
                # Non-returning minions are reported as sentinel strings.
                if "Not connected" in minion_ret:
                    not_connected_minions.append(each_minion)
                elif "No response" in minion_ret:
                    not_response_minions.append(each_minion)
                not_return_counter += 1
                not_return_minions.append(each_minion)
            else:
                return_counter += 1
                if self._get_retcode(ret[each_minion]):
                    failed_minions.append(each_minion)
        salt.utils.stringutils.print_cli('\n')
        salt.utils.stringutils.print_cli('-------------------------------------------')
        salt.utils.stringutils.print_cli('Summary')
        salt.utils.stringutils.print_cli('-------------------------------------------')
        salt.utils.stringutils.print_cli('# of minions targeted: {0}'.format(return_counter + not_return_counter))
        salt.utils.stringutils.print_cli('# of minions returned: {0}'.format(return_counter))
        salt.utils.stringutils.print_cli('# of minions that did not return: {0}'.format(not_return_counter))
        salt.utils.stringutils.print_cli('# of minions with errors: {0}'.format(len(failed_minions)))
        if self.options.verbose:
            if not_connected_minions:
                salt.utils.stringutils.print_cli('Minions not connected: {0}'.format(" ".join(not_connected_minions)))
            if not_response_minions:
                salt.utils.stringutils.print_cli('Minions not responding: {0}'.format(" ".join(not_response_minions)))
            if failed_minions:
                salt.utils.stringutils.print_cli('Minions with failures: {0}'.format(" ".join(failed_minions)))
        salt.utils.stringutils.print_cli('-------------------------------------------')
    def _progress_end(self, out):
        # Finalize the progress bar created lazily in _progress_ret.
        import salt.output
        salt.output.progress_end(self.progress_bar)
    def _progress_ret(self, progress, out):
        '''
        Print progress events

        Lazily creates ``self.progress_bar`` on first call; raises
        ``LoaderError`` if the progress outputter is unavailable (e.g. the
        ``progressbar`` package is not installed).
        '''
        import salt.output
        # Get the progress bar
        if not hasattr(self, 'progress_bar'):
            try:
                self.progress_bar = salt.output.get_progress(self.config, out, progress)
            except Exception:
                raise LoaderError('\nWARNING: Install the `progressbar` python package. '
                                  'Requested job was still run but output cannot be displayed.\n')
        salt.output.update_progress(self.config, progress, self.progress_bar, out)
    def _output_ret(self, ret, out, retcode=0):
        '''
        Print the output from a single return to the terminal

        ``sys.doc`` results get special docstring formatting; everything
        else is routed through the configured outputter.  Exits the process
        with status 2 when no return was received at all.
        '''
        import salt.output
        # Handle special case commands
        if self.config['fun'] == 'sys.doc' and not isinstance(ret, Exception):
            self._print_docs(ret)
        else:
            # Determine the proper output method and run it
            salt.output.display_output(ret,
                                       out=out,
                                       opts=self.config,
                                       _retcode=retcode)
        if not ret:
            sys.stderr.write('ERROR: No return received\n')
            sys.exit(2)
    def _format_ret(self, full_ret):
        '''
        Take the full return data and format it to simple output

        Returns a ``(ret, out, retcode)`` tuple where ``ret`` maps minion
        IDs to their 'ret' payloads, ``out`` is the last outputter name seen
        in the data, and ``retcode`` is the highest retcode across minions.
        '''
        ret = {}
        out = ''
        retcode = 0
        for key, data in six.iteritems(full_ret):
            ret[key] = data['ret']
            if 'out' in data:
                out = data['out']
            ret_retcode = self._get_retcode(data)
            if ret_retcode > retcode:
                retcode = ret_retcode
        return ret, out, retcode
    def _get_retcode(self, ret):
        '''
        Determine a retcode for a given return
        '''
        retcode = 0
        # if there is a dict with retcode, use that
        if isinstance(ret, dict) and ret.get('retcode', 0) != 0:
            # retcode may itself be a dict of per-item codes; take the worst.
            if isinstance(ret.get('retcode', 0), dict):
                return max(six.itervalues(ret.get('retcode', {0: 0})))
            return ret['retcode']
        # if its a boolean, False means 1
        elif isinstance(ret, bool) and not ret:
            return 1
        return retcode
    def _format_error(self, minion_error):
        # Format the first (minion, error_doc) pair as a one-line message.
        # NOTE(review): if minion_error is empty, 'error' is never bound and
        # the return raises UnboundLocalError — callers appear to always
        # pass a single-entry dict; confirm before relying on this.
        for minion, error_doc in six.iteritems(minion_error):
            error = 'Minion [{0}] encountered exception \'{1}\''.format(minion, error_doc['message'])
        return error
    def _print_docs(self, ret):
        '''
        Print out the docstrings for all of the functions on the minions
        '''
        import salt.output
        docs = {}
        if not ret:
            self.exit(2, 'No minions found to gather docs from\n')
        if isinstance(ret, six.string_types):
            self.exit(2, '{0}\n'.format(ret))
        for host in ret:
            # Skip minions that did not return or whose value was trimmed.
            if isinstance(ret[host], six.string_types) \
                    and (ret[host].startswith("Minion did not return")
                         or ret[host] == 'VALUE_TRIMMED'):
                continue
            # First non-empty docstring wins for each function name.
            for fun in ret[host]:
                if fun not in docs and ret[host][fun]:
                    docs[fun] = ret[host][fun]
        if self.options.output:
            for fun in sorted(docs):
                salt.output.display_output({fun: docs[fun]}, 'nested', self.config)
        else:
            for fun in sorted(docs):
                salt.utils.stringutils.print_cli('{0}:'.format(fun))
                salt.utils.stringutils.print_cli(docs[fun])
                salt.utils.stringutils.print_cli('')
|
saltstack/salt
|
salt/cli/salt.py
|
SaltCMD._print_returns_summary
|
python
|
def _print_returns_summary(self, ret):
    '''
    Display returns summary

    Splits the minions in ``ret`` into those that returned and those that
    did not (further classified as not connected / not responding), tallies
    failures via ``self._get_retcode``, and prints a formatted summary.
    Verbose mode also lists the affected minion IDs.
    '''
    returned_count = 0
    missing = []
    no_response = []
    not_connected = []
    failed = []
    for minion_id in ret:
        payload = ret[minion_id]
        # Unwrap the 'ret' field when the full return dict is present.
        if isinstance(payload, dict) and 'ret' in payload:
            payload = ret[minion_id].get('ret')
        did_not_return = (
            isinstance(payload, six.string_types)
            and payload.startswith("Minion did not return")
        )
        if did_not_return:
            if "Not connected" in payload:
                not_connected.append(minion_id)
            elif "No response" in payload:
                no_response.append(minion_id)
            missing.append(minion_id)
        else:
            returned_count += 1
            if self._get_retcode(ret[minion_id]):
                failed.append(minion_id)
    emit = salt.utils.stringutils.print_cli
    emit('\n')
    emit('-------------------------------------------')
    emit('Summary')
    emit('-------------------------------------------')
    emit('# of minions targeted: {0}'.format(returned_count + len(missing)))
    emit('# of minions returned: {0}'.format(returned_count))
    emit('# of minions that did not return: {0}'.format(len(missing)))
    emit('# of minions with errors: {0}'.format(len(failed)))
    if self.options.verbose:
        if not_connected:
            emit('Minions not connected: {0}'.format(" ".join(not_connected)))
        if no_response:
            emit('Minions not responding: {0}'.format(" ".join(no_response)))
        if failed:
            emit('Minions with failures: {0}'.format(" ".join(failed)))
    emit('-------------------------------------------')
|
Display returns summary
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/salt.py#L301-L344
| null |
class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
    '''
    The execution of a salt command happens here
    '''
    def run(self):
        '''
        Execute the salt command line

        Parses CLI args, builds a LocalClient, dispatches to batch/preview/
        async modes when requested, otherwise runs the command and prints
        each minion's return as it arrives.  Exits non-zero when any minion
        returns a non-zero retcode.
        '''
        import salt.client
        self.parse_args()
        if self.config['log_level'] not in ('quiet', ):
            # Setup file logging!
            self.setup_logfile_logger()
            verify_log(self.config)
        try:
            # We don't need to bail on config file permission errors
            # if the CLI process is run with the -a flag
            skip_perm_errors = self.options.eauth != ''
            self.local_client = salt.client.get_local_client(
                self.get_config_file_path(),
                skip_perm_errors=skip_perm_errors,
                auto_reconnect=True)
        except SaltClientError as exc:
            self.exit(2, '{0}\n'.format(exc))
            return
        if self.options.batch or self.options.static:
            # _run_batch() will handle all output and
            # exit with the appropriate error condition
            # Execution will not continue past this point
            # in batch mode.
            self._run_batch()
            return
        if self.options.preview_target:
            # --preview-target: just show which minions would match.
            minion_list = self._preview_target()
            self._output_ret(minion_list, self.config.get('output', 'nested'))
            return
        if self.options.timeout <= 0:
            # Fall back to the client's configured timeout.
            self.options.timeout = self.local_client.opts['timeout']
        kwargs = {
            'tgt': self.config['tgt'],
            'fun': self.config['fun'],
            'arg': self.config['arg'],
            'timeout': self.options.timeout,
            'show_timeout': self.options.show_timeout,
            'show_jid': self.options.show_jid}
        if 'token' in self.config:
            import salt.utils.files
            # Prefer the master's root key when readable; otherwise fall
            # back to the provided eauth token.
            try:
                with salt.utils.files.fopen(os.path.join(self.config['cachedir'], '.root_key'), 'r') as fp_:
                    kwargs['key'] = fp_.readline()
            except IOError:
                kwargs['token'] = self.config['token']
        kwargs['delimiter'] = self.options.delimiter
        if self.selected_target_option:
            kwargs['tgt_type'] = self.selected_target_option
        else:
            kwargs['tgt_type'] = 'glob'
        # If batch_safe_limit is set, check minions matching target and
        # potentially switch to batch execution
        if self.options.batch_safe_limit > 1:
            if len(self._preview_target()) >= self.options.batch_safe_limit:
                salt.utils.stringutils.print_cli('\nNOTICE: Too many minions targeted, switching to batch execution.')
                self.options.batch = self.options.batch_safe_size
                self._run_batch()
                return
        # 'return' is a Python keyword, hence getattr for these options.
        if getattr(self.options, 'return'):
            kwargs['ret'] = getattr(self.options, 'return')
        if getattr(self.options, 'return_config'):
            kwargs['ret_config'] = getattr(self.options, 'return_config')
        if getattr(self.options, 'return_kwargs'):
            kwargs['ret_kwargs'] = yamlify_arg(
                getattr(self.options, 'return_kwargs'))
        if getattr(self.options, 'module_executors'):
            kwargs['module_executors'] = yamlify_arg(getattr(self.options, 'module_executors'))
        if getattr(self.options, 'executor_opts'):
            kwargs['executor_opts'] = yamlify_arg(getattr(self.options, 'executor_opts'))
        if getattr(self.options, 'metadata'):
            kwargs['metadata'] = yamlify_arg(
                getattr(self.options, 'metadata'))
        # If using eauth and a token hasn't already been loaded into
        # kwargs, prompt the user to enter auth credentials
        if 'token' not in kwargs and 'key' not in kwargs and self.options.eauth:
            # This is expensive. Don't do it unless we need to.
            import salt.auth
            resolver = salt.auth.Resolver(self.config)
            res = resolver.cli(self.options.eauth)
            if self.options.mktoken and res:
                tok = resolver.token_cli(
                    self.options.eauth,
                    res
                )
                if tok:
                    kwargs['token'] = tok.get('token', '')
            if not res:
                sys.stderr.write('ERROR: Authentication failed\n')
                sys.exit(2)
            kwargs.update(res)
            kwargs['eauth'] = self.options.eauth
        if self.config['async']:
            # Fire and forget: print the JID and return immediately.
            jid = self.local_client.cmd_async(**kwargs)
            salt.utils.stringutils.print_cli('Executed command with job ID: {0}'.format(jid))
            return
        # local will be None when there was an error
        if not self.local_client:
            return
        retcodes = []
        errors = []
        try:
            if self.options.subset:
                cmd_func = self.local_client.cmd_subset
                kwargs['sub'] = self.options.subset
                kwargs['cli'] = True
            else:
                cmd_func = self.local_client.cmd_cli
            if self.options.progress:
                kwargs['progress'] = True
                self.config['progress'] = True
                ret = {}
                for progress in cmd_func(**kwargs):
                    out = 'progress'
                    try:
                        self._progress_ret(progress, out)
                    except LoaderError as exc:
                        raise SaltSystemExit(exc)
                    if 'return_count' not in progress:
                        ret.update(progress)
                self._progress_end(out)
                self._print_returns_summary(ret)
            elif self.config['fun'] == 'sys.doc':
                # sys.doc: merge all minions' docstrings before printing.
                ret = {}
                out = ''
                for full_ret in self.local_client.cmd_cli(**kwargs):
                    ret_, out, retcode = self._format_ret(full_ret)
                    ret.update(ret_)
                self._output_ret(ret, out, retcode=retcode)
            else:
                if self.options.verbose:
                    kwargs['verbose'] = True
                ret = {}
                for full_ret in cmd_func(**kwargs):
                    try:
                        ret_, out, retcode = self._format_ret(full_ret)
                        retcodes.append(retcode)
                        self._output_ret(ret_, out, retcode=retcode)
                        ret.update(full_ret)
                    except KeyError:
                        errors.append(full_ret)
            # Returns summary
            if self.config['cli_summary'] is True:
                if self.config['fun'] != 'sys.doc':
                    if self.options.output is None:
                        self._print_returns_summary(ret)
                        self._print_errors_summary(errors)
            # NOTE: Return code is set here based on if all minions
            # returned 'ok' with a retcode of 0.
            # This is the final point before the 'salt' cmd returns,
            # which is why we set the retcode here.
            if not all(exit_code == salt.defaults.exitcodes.EX_OK for exit_code in retcodes):
                sys.stderr.write('ERROR: Minions returned with non-zero exit code\n')
                sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        except (AuthenticationError,
                AuthorizationError,
                SaltInvocationError,
                EauthAuthenticationError,
                SaltClientError) as exc:
            ret = six.text_type(exc)
            self._output_ret(ret, '', retcode=1)
    def _preview_target(self):
        '''
        Return a list of minions from a given target

        Resolves ``self.config['tgt']`` through the local client using the
        selected target option (glob by default) without running a command.
        '''
        return self.local_client.gather_minions(self.config['tgt'], self.selected_target_option or 'glob')
    def _run_batch(self):
        '''
        Execute the command in batch mode (rolling subsets of minions).

        Gathers eauth credentials if needed, then either aggregates all
        returns (``--static``) or streams them batch by batch.  Every code
        path terminates the process via ``sys.exit``.
        '''
        import salt.cli.batch
        eauth = {}
        if 'token' in self.config:
            eauth['token'] = self.config['token']
        # If using eauth and a token hasn't already been loaded into
        # kwargs, prompt the user to enter auth credentials
        if 'token' not in eauth and self.options.eauth:
            # This is expensive. Don't do it unless we need to.
            import salt.auth
            resolver = salt.auth.Resolver(self.config)
            res = resolver.cli(self.options.eauth)
            if self.options.mktoken and res:
                tok = resolver.token_cli(
                    self.options.eauth,
                    res
                )
                if tok:
                    eauth['token'] = tok.get('token', '')
            if not res:
                sys.stderr.write('ERROR: Authentication failed\n')
                sys.exit(2)
            eauth.update(res)
            eauth['eauth'] = self.options.eauth
        if self.options.static:
            # --static: run quietly and print one consolidated result.
            if not self.options.batch:
                self.config['batch'] = '100%'
            try:
                batch = salt.cli.batch.Batch(self.config, eauth=eauth, quiet=True)
            except SaltClientError:
                sys.exit(2)
            ret = {}
            for res in batch.run():
                ret.update(res)
            self._output_ret(ret, '')
        else:
            try:
                self.config['batch'] = self.options.batch
                batch = salt.cli.batch.Batch(self.config, eauth=eauth, parser=self.options)
            except SaltClientError:
                # We will print errors to the console further down the stack
                sys.exit(1)
            # Printing the output is already taken care of in run() itself
            retcode = 0
            for res in batch.run():
                for ret in six.itervalues(res):
                    job_retcode = salt.utils.job.get_retcode(ret)
                    if job_retcode > retcode:
                        # Exit with the highest retcode we find
                        retcode = job_retcode
            sys.exit(retcode)
    def _print_errors_summary(self, errors):
        # Print a trailing "Errors" section for collected minion error
        # dicts; no-op when there are none.
        if errors:
            salt.utils.stringutils.print_cli('\n')
            salt.utils.stringutils.print_cli('---------------------------')
            salt.utils.stringutils.print_cli('Errors')
            salt.utils.stringutils.print_cli('---------------------------')
            for error in errors:
                salt.utils.stringutils.print_cli(self._format_error(error))
    def _print_returns_summary(self, ret):
        '''
        Display returns summary

        Partitions minions into returned vs. non-returned (not connected /
        not responding), counts failures via :meth:`_get_retcode`, and
        prints a formatted summary; verbose mode lists minion IDs too.
        '''
        return_counter = 0
        not_return_counter = 0
        not_return_minions = []
        not_response_minions = []
        not_connected_minions = []
        failed_minions = []
        for each_minion in ret:
            minion_ret = ret[each_minion]
            # Unwrap the 'ret' payload when the full return dict is present.
            if isinstance(minion_ret, dict) and 'ret' in minion_ret:
                minion_ret = ret[each_minion].get('ret')
            if (
                    isinstance(minion_ret, six.string_types)
                    and minion_ret.startswith("Minion did not return")
            ):
                if "Not connected" in minion_ret:
                    not_connected_minions.append(each_minion)
                elif "No response" in minion_ret:
                    not_response_minions.append(each_minion)
                not_return_counter += 1
                not_return_minions.append(each_minion)
            else:
                return_counter += 1
                if self._get_retcode(ret[each_minion]):
                    failed_minions.append(each_minion)
        salt.utils.stringutils.print_cli('\n')
        salt.utils.stringutils.print_cli('-------------------------------------------')
        salt.utils.stringutils.print_cli('Summary')
        salt.utils.stringutils.print_cli('-------------------------------------------')
        salt.utils.stringutils.print_cli('# of minions targeted: {0}'.format(return_counter + not_return_counter))
        salt.utils.stringutils.print_cli('# of minions returned: {0}'.format(return_counter))
        salt.utils.stringutils.print_cli('# of minions that did not return: {0}'.format(not_return_counter))
        salt.utils.stringutils.print_cli('# of minions with errors: {0}'.format(len(failed_minions)))
        if self.options.verbose:
            if not_connected_minions:
                salt.utils.stringutils.print_cli('Minions not connected: {0}'.format(" ".join(not_connected_minions)))
            if not_response_minions:
                salt.utils.stringutils.print_cli('Minions not responding: {0}'.format(" ".join(not_response_minions)))
            if failed_minions:
                salt.utils.stringutils.print_cli('Minions with failures: {0}'.format(" ".join(failed_minions)))
        salt.utils.stringutils.print_cli('-------------------------------------------')
    def _progress_end(self, out):
        # Finalize the progress bar created lazily in _progress_ret.
        import salt.output
        salt.output.progress_end(self.progress_bar)
    def _progress_ret(self, progress, out):
        '''
        Print progress events

        Lazily creates ``self.progress_bar`` on first use; raises
        ``LoaderError`` when the progress outputter cannot be loaded (e.g.
        the ``progressbar`` package is missing).
        '''
        import salt.output
        # Get the progress bar
        if not hasattr(self, 'progress_bar'):
            try:
                self.progress_bar = salt.output.get_progress(self.config, out, progress)
            except Exception:
                raise LoaderError('\nWARNING: Install the `progressbar` python package. '
                                  'Requested job was still run but output cannot be displayed.\n')
        salt.output.update_progress(self.config, progress, self.progress_bar, out)
    def _output_ret(self, ret, out, retcode=0):
        '''
        Print the output from a single return to the terminal

        ``sys.doc`` results are formatted as docstrings; everything else is
        routed through the configured outputter.  Exits with status 2 when
        no return was received.
        '''
        import salt.output
        # Handle special case commands
        if self.config['fun'] == 'sys.doc' and not isinstance(ret, Exception):
            self._print_docs(ret)
        else:
            # Determine the proper output method and run it
            salt.output.display_output(ret,
                                       out=out,
                                       opts=self.config,
                                       _retcode=retcode)
        if not ret:
            sys.stderr.write('ERROR: No return received\n')
            sys.exit(2)
    def _format_ret(self, full_ret):
        '''
        Take the full return data and format it to simple output

        Returns ``(ret, out, retcode)``: minion-id -> 'ret' payload, the
        last outputter name seen, and the highest retcode across minions.
        '''
        ret = {}
        out = ''
        retcode = 0
        for key, data in six.iteritems(full_ret):
            ret[key] = data['ret']
            if 'out' in data:
                out = data['out']
            ret_retcode = self._get_retcode(data)
            if ret_retcode > retcode:
                retcode = ret_retcode
        return ret, out, retcode
    def _get_retcode(self, ret):
        '''
        Determine a retcode for a given return
        '''
        retcode = 0
        # if there is a dict with retcode, use that
        if isinstance(ret, dict) and ret.get('retcode', 0) != 0:
            # retcode may itself be a dict of per-item codes; use the worst.
            if isinstance(ret.get('retcode', 0), dict):
                return max(six.itervalues(ret.get('retcode', {0: 0})))
            return ret['retcode']
        # if its a boolean, False means 1
        elif isinstance(ret, bool) and not ret:
            return 1
        return retcode
    def _format_error(self, minion_error):
        # Format the first (minion, error_doc) pair as a one-line message.
        # NOTE(review): 'error' is unbound if minion_error is empty, which
        # would raise UnboundLocalError — callers appear to pass a single
        # entry dict; confirm before relying on this.
        for minion, error_doc in six.iteritems(minion_error):
            error = 'Minion [{0}] encountered exception \'{1}\''.format(minion, error_doc['message'])
        return error
    def _print_docs(self, ret):
        '''
        Print out the docstrings for all of the functions on the minions
        '''
        import salt.output
        docs = {}
        if not ret:
            self.exit(2, 'No minions found to gather docs from\n')
        if isinstance(ret, six.string_types):
            self.exit(2, '{0}\n'.format(ret))
        for host in ret:
            # Skip minions that did not return or whose value was trimmed.
            if isinstance(ret[host], six.string_types) \
                    and (ret[host].startswith("Minion did not return")
                         or ret[host] == 'VALUE_TRIMMED'):
                continue
            # First non-empty docstring wins for each function name.
            for fun in ret[host]:
                if fun not in docs and ret[host][fun]:
                    docs[fun] = ret[host][fun]
        if self.options.output:
            for fun in sorted(docs):
                salt.output.display_output({fun: docs[fun]}, 'nested', self.config)
        else:
            for fun in sorted(docs):
                salt.utils.stringutils.print_cli('{0}:'.format(fun))
                salt.utils.stringutils.print_cli(docs[fun])
                salt.utils.stringutils.print_cli('')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.