repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
saltstack/salt
|
salt/modules/snapper.py
|
diff_jid
|
python
|
def diff_jid(jid, config='root'):
'''
Returns the changes applied by a `jid`
jid
The job id to lookup
config
Configuration name.
CLI Example:
.. code-block:: bash
salt '*' snapper.diff_jid jid=20160607130930720112
'''
pre_snapshot, post_snapshot = _get_jid_snapshots(jid, config=config)
return diff(config, num_pre=pre_snapshot, num_post=post_snapshot)
|
Returns the changes applied by a `jid`
jid
The job id to lookup
config
Configuration name.
CLI Example:
.. code-block:: bash
salt '*' snapper.diff_jid jid=20160607130930720112
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/snapper.py#L855-L872
|
[
"def diff(config='root', filename=None, num_pre=None, num_post=None):\n '''\n Returns the differences between two snapshots\n\n config\n Configuration name.\n\n filename\n if not provided the showing differences between snapshots for\n all \"text\" files\n\n num_pre\n first snapshot ID to compare. Default is last snapshot\n\n num_post\n last snapshot ID to compare. Default is 0 (current state)\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' snapper.diff\n salt '*' snapper.diff filename=/var/log/snapper.log num_pre=19 num_post=20\n '''\n try:\n pre, post = _get_num_interval(config, num_pre, num_post)\n\n files = changed_files(config, pre, post)\n if filename:\n files = [filename] if filename in files else []\n\n SUBVOLUME = list_configs()[config]['SUBVOLUME']\n pre_mount = snapper.MountSnapshot(config, pre, False) if pre else SUBVOLUME\n post_mount = snapper.MountSnapshot(config, post, False) if post else SUBVOLUME\n\n files_diff = dict()\n for filepath in [filepath for filepath in files if not os.path.isdir(filepath)]:\n\n _filepath = filepath\n if filepath.startswith(SUBVOLUME):\n _filepath = filepath[len(SUBVOLUME):]\n\n # Just in case, removing possible double '/' from the final file paths\n pre_file = os.path.normpath(pre_mount + \"/\" + _filepath).replace(\"//\", \"/\")\n post_file = os.path.normpath(post_mount + \"/\" + _filepath).replace(\"//\", \"/\")\n\n if os.path.isfile(pre_file):\n pre_file_exists = True\n with salt.utils.files.fopen(pre_file) as rfh:\n pre_file_content = [salt.utils.stringutils.to_unicode(_l)\n for _l in rfh.readlines()]\n else:\n pre_file_content = []\n pre_file_exists = False\n\n if os.path.isfile(post_file):\n post_file_exists = True\n with salt.utils.files.fopen(post_file) as rfh:\n post_file_content = [salt.utils.stringutils.to_unicode(_l)\n for _l in rfh.readlines()]\n else:\n post_file_content = []\n post_file_exists = False\n\n if _is_text_file(pre_file) or _is_text_file(post_file):\n files_diff[filepath] = {\n 
'comment': \"text file changed\",\n 'diff': ''.join(difflib.unified_diff(pre_file_content,\n post_file_content,\n fromfile=pre_file,\n tofile=post_file))}\n\n if pre_file_exists and not post_file_exists:\n files_diff[filepath]['comment'] = \"text file deleted\"\n if not pre_file_exists and post_file_exists:\n files_diff[filepath]['comment'] = \"text file created\"\n\n elif not _is_text_file(pre_file) and not _is_text_file(post_file):\n # This is a binary file\n files_diff[filepath] = {'comment': \"binary file changed\"}\n if pre_file_exists:\n files_diff[filepath]['old_sha256_digest'] = __salt__['hashutil.sha256_digest'](''.join(pre_file_content))\n if post_file_exists:\n files_diff[filepath]['new_sha256_digest'] = __salt__['hashutil.sha256_digest'](''.join(post_file_content))\n if post_file_exists and not pre_file_exists:\n files_diff[filepath]['comment'] = \"binary file created\"\n if pre_file_exists and not post_file_exists:\n files_diff[filepath]['comment'] = \"binary file deleted\"\n\n if pre:\n snapper.UmountSnapshot(config, pre, False)\n if post:\n snapper.UmountSnapshot(config, post, False)\n return files_diff\n except dbus.DBusException as exc:\n raise CommandExecutionError(\n 'Error encountered while showing differences between snapshots: {0}'\n .format(_dbus_exception_to_reason(exc, locals()))\n )\n",
"def _get_jid_snapshots(jid, config='root'):\n '''\n Returns pre/post snapshots made by a given Salt jid\n\n Looks for 'salt_jid' entries into snapshots userdata which are created\n when 'snapper.run' is executed.\n '''\n jid_snapshots = [x for x in list_snapshots(config) if x['userdata'].get(\"salt_jid\") == jid]\n pre_snapshot = [x for x in jid_snapshots if x['type'] == \"pre\"]\n post_snapshot = [x for x in jid_snapshots if x['type'] == \"post\"]\n\n if not pre_snapshot or not post_snapshot:\n raise CommandExecutionError(\"Jid '{0}' snapshots not found\".format(jid))\n\n return (\n pre_snapshot[0]['id'],\n post_snapshot[0]['id']\n )\n"
] |
# -*- coding: utf-8 -*-
'''
Module to manage filesystem snapshots with snapper
.. versionadded:: 2016.11.0
:codeauthor: Duncan Mac-Vicar P. <dmacvicar@suse.de>
:codeauthor: Pablo Suárez Hernández <psuarezhernandez@suse.de>
:depends: ``dbus`` Python module.
:depends: ``snapper`` http://snapper.io, available in most distros
:maturity: new
:platform: Linux
'''
from __future__ import absolute_import, unicode_literals, print_function

# Import Python libs
import difflib
import logging
import os
import subprocess
import time
try:
    from pwd import getpwuid
    HAS_PWD = True
except ImportError:
    HAS_PWD = False

# Import Salt libs
from salt.exceptions import CommandExecutionError
import salt.utils.files
import salt.utils.stringutils

# import 3rd party libs
from salt.ext import six
try:
    import dbus  # pylint: disable=wrong-import-order
    HAS_DBUS = True
except ImportError:
    HAS_DBUS = False
# Mapping of the status bitmask bits reported by snapper's D-Bus
# GetFiles() call to human readable change descriptions; decomposed by
# status_to_string() below.
DBUS_STATUS_MAP = {
    1: "created",
    2: "deleted",
    4: "type changed",
    8: "modified",
    16: "permission changed",
    32: "owner changed",
    64: "group changed",
    128: "extended attributes changed",
    256: "ACL info changed",
}
# D-Bus coordinates of the snapper service.
SNAPPER_DBUS_OBJECT = 'org.opensuse.Snapper'
SNAPPER_DBUS_PATH = '/org/opensuse/Snapper'
SNAPPER_DBUS_INTERFACE = 'org.opensuse.Snapper'
# pylint: disable=invalid-name
log = logging.getLogger(__name__)
# Module-level D-Bus handles, populated at import time below.  When the
# system bus or the snapper service is unavailable they stay None and the
# corresponding *_error variable records why; __virtual__() reports it.
bus = None
system_bus_error = None
snapper = None
snapper_error = None
if HAS_DBUS:
    try:
        bus = dbus.SystemBus()
    except dbus.DBusException as exc:
        log.warning(exc)
        system_bus_error = exc
    else:
        # Only build the proxy when the snapper service can actually be
        # activated on this bus.
        if SNAPPER_DBUS_OBJECT in bus.list_activatable_names():
            try:
                snapper = dbus.Interface(bus.get_object(SNAPPER_DBUS_OBJECT,
                                                        SNAPPER_DBUS_PATH),
                                         dbus_interface=SNAPPER_DBUS_INTERFACE)
            except (dbus.DBusException, ValueError) as exc:
                log.warning(exc)
                snapper_error = exc
        else:
            snapper_error = 'snapper is missing'
# pylint: enable=invalid-name
def __virtual__():
    '''
    Only load when the dbus module, a system bus connection, the snapper
    D-Bus service and the pwd module are all available.
    '''
    error_msg = 'The snapper module cannot be loaded: {0}'
    if not HAS_DBUS:
        return False, error_msg.format('missing python dbus module')
    elif not bus:
        # Check the bus before snapper: when SystemBus() failed, 'snapper'
        # is also unset, and system_bus_error holds the real reason.
        # (Previously 'snapper' was checked first, reporting None.)
        return False, error_msg.format(system_bus_error)
    elif not snapper:
        return False, error_msg.format(snapper_error)
    elif not HAS_PWD:
        return False, error_msg.format('pwd module not available')
    return 'snapper'
def _snapshot_to_data(snapshot):
'''
Returns snapshot data from a D-Bus response.
A snapshot D-Bus response is a dbus.Struct containing the
information related to a snapshot:
[id, type, pre_snapshot, timestamp, user, description,
cleanup_algorithm, userdata]
id: dbus.UInt32
type: dbus.UInt16
pre_snapshot: dbus.UInt32
timestamp: dbus.Int64
user: dbus.UInt32
description: dbus.String
cleaup_algorithm: dbus.String
userdata: dbus.Dictionary
'''
data = {}
data['id'] = snapshot[0]
data['type'] = ['single', 'pre', 'post'][snapshot[1]]
if data['type'] == 'post':
data['pre'] = snapshot[2]
if snapshot[3] != -1:
data['timestamp'] = snapshot[3]
else:
data['timestamp'] = int(time.time())
data['user'] = getpwuid(snapshot[4])[0]
data['description'] = snapshot[5]
data['cleanup'] = snapshot[6]
data['userdata'] = {}
for key, value in snapshot[7].items():
data['userdata'][key] = value
return data
def _dbus_exception_to_reason(exc, args):
'''
Returns a error message from a snapper DBusException
'''
error = exc.get_dbus_name()
if error == 'error.unknown_config':
return "Unknown configuration '{0}'".format(args['config'])
elif error == 'error.illegal_snapshot':
return 'Invalid snapshot'
else:
return exc.get_dbus_name()
def list_snapshots(config='root'):
    '''
    List available snapshots

    CLI example:

    .. code-block:: bash

        salt '*' snapper.list_snapshots config=myconfig
    '''
    try:
        return [_snapshot_to_data(snap)
                for snap in snapper.ListSnapshots(config)]
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while listing snapshots: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def get_snapshot(number=0, config='root'):
    '''
    Get detailed information about a given snapshot

    CLI example:

    .. code-block:: bash

        salt '*' snapper.get_snapshot 1
    '''
    try:
        raw = snapper.GetSnapshot(config, int(number))
        return _snapshot_to_data(raw)
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while retrieving snapshot: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def list_configs():
    '''
    List all available configs

    CLI example:

    .. code-block:: bash

        salt '*' snapper.list_configs
    '''
    try:
        # Each D-Bus entry is (name, path, attributes); expose name -> attrs.
        return {entry[0]: entry[2] for entry in snapper.ListConfigs()}
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while listing configurations: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def _config_filter(value):
if isinstance(value, bool):
return 'yes' if value else 'no'
return value
def set_config(name='root', **kwargs):
    '''
    Set configuration values

    CLI example:

    .. code-block:: bash

        salt '*' snapper.set_config SYNC_ACL=True

    Keys are case insensitive as they will be always uppercased to snapper
    convention. The above example is equivalent to:

    .. code-block:: bash

        salt '*' snapper.set_config sync_acl=True
    '''
    try:
        # Skip loader-injected '__*' kwargs; uppercase the keys and map
        # booleans to the 'yes'/'no' strings snapper expects.
        data = {}
        for key, value in kwargs.items():
            if key.startswith('__'):
                continue
            data[key.upper()] = _config_filter(value)
        snapper.SetConfig(name, data)
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while setting configuration {0}: {1}'
            .format(name, _dbus_exception_to_reason(exc, locals()))
        )
    return True
def _get_last_snapshot(config='root'):
    '''
    Return the most recently created snapshot for the given config.
    '''
    by_id = sorted(list_snapshots(config), key=lambda snap: snap['id'])
    return by_id[-1]
def status_to_string(dbus_status):
    '''
    Converts a numeric dbus snapper status into a string

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.status_to_string <dbus_status>
    '''
    # The status is a bitmask; collect the description of every set bit
    # in ascending bit order (DBUS_STATUS_MAP keys are the bit values).
    return [description
            for bit, description in sorted(DBUS_STATUS_MAP.items())
            if dbus_status & bit]
def get_config(name='root'):
    '''
    Retrieves all values from a given configuration

    CLI example:

    .. code-block:: bash

        salt '*' snapper.get_config
    '''
    try:
        return snapper.GetConfig(name)
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while retrieving configuration: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def create_config(name=None,
                  subvolume=None,
                  fstype=None,
                  template=None,
                  extra_opts=None):
    '''
    Creates a new Snapper configuration

    name
        Name of the new Snapper configuration.

    subvolume
        Path to the related subvolume.

    fstype
        Filesystem type of the subvolume.

    template
        Configuration template to use. (Default: default)

    extra_opts
        Extra Snapper configuration opts dictionary. It will override the
        values provided by the given template (if any).

    CLI example:

    .. code-block:: bash

        salt '*' snapper.create_config name=myconfig subvolume=/foo/bar/ fstype=btrfs
        salt '*' snapper.create_config name=myconfig subvolume=/foo/bar/ fstype=btrfs template="default"
        salt '*' snapper.create_config name=myconfig subvolume=/foo/bar/ fstype=btrfs extra_opts='{"NUMBER_CLEANUP": False}'
    '''
    # The first three arguments are mandatory; fail early with a clear
    # message naming the missing one.
    for argname, argvalue in (("name", name),
                              ("subvolume", subvolume),
                              ("fstype", fstype)):
        if not argvalue:
            raise CommandExecutionError(
                'You must provide a "{0}" for the new configuration'.format(argname)
            )
    try:
        # An empty template name makes snapper fall back to its default.
        snapper.CreateConfig(name, subvolume, fstype, template or "")
        if extra_opts:
            set_config(name, **extra_opts)
        return get_config(name)
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while creating the new configuration: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def create_snapshot(config='root', snapshot_type='single', pre_number=None,
                    description=None, cleanup_algorithm='number', userdata=None,
                    **kwargs):
    '''
    Creates an snapshot

    config
        Configuration name.

    snapshot_type
        Specifies the type of the new snapshot. Possible values are
        single, pre and post.

    pre_number
        For post snapshots the number of the pre snapshot must be
        provided.

    description
        Description for the snapshot. If not given, the salt job will be used.

    cleanup_algorithm
        Set the cleanup algorithm for the snapshot.

        number
            Deletes old snapshots when a certain number of snapshots
            is reached.

        timeline
            Deletes old snapshots but keeps a number of hourly,
            daily, weekly, monthly and yearly snapshots.

        empty-pre-post
            Deletes pre/post snapshot pairs with empty diffs.

    userdata
        Set userdata for the snapshot (key-value pairs).

    Returns the number of the created snapshot.

    CLI example:

    .. code-block:: bash

        salt '*' snapper.create_snapshot
    '''
    if not userdata:
        userdata = {}
    # When called through the loader, __pub_jid carries the salt job id;
    # record it in the snapshot userdata and default description.
    jid = kwargs.get('__pub_jid')
    if description is None and jid is not None:
        description = 'salt job {0}'.format(jid)
    if jid is not None:
        userdata['salt_jid'] = jid
    new_nr = None
    try:
        if snapshot_type == 'single':
            new_nr = snapper.CreateSingleSnapshot(config, description,
                                                  cleanup_algorithm, userdata)
        elif snapshot_type == 'pre':
            new_nr = snapper.CreatePreSnapshot(config, description,
                                               cleanup_algorithm, userdata)
        elif snapshot_type == 'post':
            if pre_number is None:
                # Fixed: the two adjacent string literals previously
                # concatenated without a space ("...needs to bespecified").
                raise CommandExecutionError(
                    "pre snapshot number 'pre_number' needs to be "
                    "specified for snapshots of the 'post' type")
            new_nr = snapper.CreatePostSnapshot(config, pre_number, description,
                                                cleanup_algorithm, userdata)
        else:
            raise CommandExecutionError(
                "Invalid snapshot type '{0}'".format(snapshot_type))
    except dbus.DBusException as exc:
        # Fixed: the message previously said "listing changed files"
        # (copy-pasted from status()).
        raise CommandExecutionError(
            'Error encountered while creating the snapshot: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
    return new_nr
def delete_snapshot(snapshots_ids=None, config="root"):
    '''
    Deletes an snapshot

    config
        Configuration name. (Default: root)

    snapshots_ids
        List of the snapshots IDs to be deleted.

    CLI example:

    .. code-block:: bash

        salt '*' snapper.delete_snapshot 54
        salt '*' snapper.delete_snapshot config=root 54
        salt '*' snapper.delete_snapshot config=root snapshots_ids=[54,55,56]
    '''
    if not snapshots_ids:
        raise CommandExecutionError('Error: No snapshot ID has been provided')
    try:
        # Accept a single id as well as a list of ids.
        if not isinstance(snapshots_ids, list):
            snapshots_ids = [snapshots_ids]
        existing_ids = set(x['id'] for x in list_snapshots(config))
        missing = set(snapshots_ids) - existing_ids
        if missing:
            raise CommandExecutionError(
                "Error: Snapshots '{0}' not found".format(", ".join(
                    [six.text_type(x) for x in missing]))
            )
        snapper.DeleteSnapshots(config, snapshots_ids)
        return {config: {"ids": snapshots_ids, "status": "deleted"}}
    except dbus.DBusException as exc:
        raise CommandExecutionError(_dbus_exception_to_reason(exc, locals()))
def modify_snapshot(snapshot_id=None,
                    description=None,
                    userdata=None,
                    cleanup=None,
                    config="root"):
    '''
    Modify attributes of an existing snapshot.

    config
        Configuration name. (Default: root)

    snapshot_id
        ID of the snapshot to be modified.

    cleanup
        Change the cleanup method of the snapshot. (str)

    description
        Change the description of the snapshot. (str)

    userdata
        Change the userdata dictionary of the snapshot. (dict)

    CLI example:

    .. code-block:: bash

        salt '*' snapper.modify_snapshot 54 description="my snapshot description"
        salt '*' snapper.modify_snapshot 54 userdata='{"foo": "bar"}'
        salt '*' snapper.modify_snapshot snapshot_id=54 cleanup="number"
    '''
    if not snapshot_id:
        raise CommandExecutionError('Error: No snapshot ID has been provided')
    current = get_snapshot(config=config, number=snapshot_id)
    # Fall back to the current value for any attribute the caller did not
    # explicitly provide.
    if description is None:
        description = current['description']
    if cleanup is None:
        cleanup = current['cleanup']
    if userdata is None:
        userdata = current['userdata']
    try:
        snapper.SetSnapshot(config,
                            snapshot_id,
                            description,
                            cleanup,
                            userdata)
        return get_snapshot(config=config, number=snapshot_id)
    except dbus.DBusException as exc:
        raise CommandExecutionError(_dbus_exception_to_reason(exc, locals()))
def _get_num_interval(config, num_pre, num_post):
'''
Returns numerical interval based on optionals num_pre, num_post values
'''
post = int(num_post) if num_post else 0
pre = int(num_pre) if num_pre is not None else _get_last_snapshot(config)['id']
return pre, post
def _is_text_file(filename):
'''
Checks if a file is a text file
'''
type_of_file = os.popen('file -bi {0}'.format(filename), 'r').read()
return type_of_file.startswith('text')
def run(function, *args, **kwargs):
    '''
    Runs a function from an execution module creating pre and post snapshots
    and associating the salt job id with those snapshots for easy undo and
    cleanup.

    function
        Salt function to call.

    config
        Configuration name. (default: "root")

    description
        A description for the snapshots. (default: None)

    userdata
        Data to include in the snapshot metadata. (default: None)

    cleanup_algorithm
        Snapper cleanup algorithm. (default: "number")

    `*args`
        args for the function to call. (default: None)

    `**kwargs`
        kwargs for the function to call (default: None)

    The example below would append text to /etc/motd using the file.append
    module, and will create two snapshots, pre and post, with the associated
    metadata. The jid will be available as salt_jid in the userdata of the
    snapshots, so the changes can be inspected or undone immediately
    afterwards (see diff_jid/undo_jid).

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.run file.append args='["/etc/motd", "some text"]'
    '''
    # Pull the snapper-specific options out of kwargs; everything left is
    # destined either for the wrapped function or for create_snapshot.
    config = kwargs.pop("config", "root")
    description = kwargs.pop("description", "snapper.run[{0}]".format(function))
    cleanup_algorithm = kwargs.pop("cleanup_algorithm", "number")
    userdata = kwargs.pop("userdata", {})
    # Split the remainder: plain kwargs go to the wrapped function, while
    # loader-injected '__*' ones (e.g. __pub_jid) are forwarded to
    # create_snapshot so the snapshots get tagged with the salt jid.
    func_kwargs = dict((k, v) for k, v in kwargs.items() if not k.startswith('__'))
    kwargs = dict((k, v) for k, v in kwargs.items() if k.startswith('__'))
    pre_nr = __salt__['snapper.create_snapshot'](
        config=config,
        snapshot_type='pre',
        description=description,
        cleanup_algorithm=cleanup_algorithm,
        userdata=userdata,
        **kwargs)
    # NOTE(review): the pre snapshot is created before validating that
    # 'function' exists, so an invalid name leaves an unmatched pre
    # snapshot behind -- confirm whether this ordering is intentional.
    if function not in __salt__:
        raise CommandExecutionError(
            'function "{0}" does not exist'.format(function)
        )
    try:
        ret = __salt__[function](*args, **func_kwargs)
    except CommandExecutionError as exc:
        # Keep going so the post snapshot is still taken; surface the
        # error (plus the function's docstring) as the return value.
        ret = "\n".join([six.text_type(exc), __salt__[function].__doc__])
    __salt__['snapper.create_snapshot'](
        config=config,
        snapshot_type='post',
        pre_number=pre_nr,
        description=description,
        cleanup_algorithm=cleanup_algorithm,
        userdata=userdata,
        **kwargs)
    return ret
def status(config='root', num_pre=None, num_post=None):
    '''
    Returns a comparison between two snapshots

    config
        Configuration name.

    num_pre
        first snapshot ID to compare. Default is last snapshot

    num_post
        last snapshot ID to compare. Default is 0 (current state)

    CLI example:

    .. code-block:: bash

        salt '*' snapper.status
        salt '*' snapper.status num_pre=19 num_post=20
    '''
    try:
        pre, post = _get_num_interval(config, num_pre, num_post)
        snapper.CreateComparison(config, int(pre), int(post))
        subvolume = list_configs()[config]['SUBVOLUME']
        result = {}
        for entry in snapper.GetFiles(config, int(pre), int(post)):
            path, change_mask = entry[0], entry[1]
            # Strip a leading SUBVOLUME prefix so re-joining below cannot
            # produce a path starting with a double '/'.
            if path.startswith(subvolume):
                path = path[len(subvolume):]
            result[os.path.normpath(subvolume + path)] = {
                'status': status_to_string(change_mask)}
        return result
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while listing changed files: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def changed_files(config='root', num_pre=None, num_post=None):
    '''
    Returns the files changed between two snapshots

    config
        Configuration name.

    num_pre
        first snapshot ID to compare. Default is last snapshot

    num_post
        last snapshot ID to compare. Default is 0 (current state)

    CLI example:

    .. code-block:: bash

        salt '*' snapper.changed_files
        salt '*' snapper.changed_files num_pre=19 num_post=20
    '''
    # The changed file paths are the keys of the status() comparison.
    comparison = status(config, num_pre, num_post)
    return comparison.keys()
def undo(config='root', files=None, num_pre=None, num_post=None):
    '''
    Undo all file changes that happened between num_pre and num_post, leaving
    the files into the state of num_pre.

    .. warning::
        If one of the files has changes after num_post, they will be overwritten
        The snapshots are used to determine the file list, but the current
        version of the files will be overwritten by the versions in num_pre.

        To undo changes between num_pre and the current version of the
        files use num_post=0.

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.undo
    '''
    pre, post = _get_num_interval(config, num_pre, num_post)
    changes = status(config, pre, post)
    changed = set(changes.keys())
    requested = set(files or changed)
    if not requested.issubset(changed):
        # Report the offending entries: those requested but not changed.
        # (Fixed: previously showed 'changed - requested' -- the wrong
        # set -- and the two message fragments were concatenated without
        # a space, yielding "...presentin the changed filelist".)
        raise CommandExecutionError(
            'Given file list contains files that are not present '
            'in the changed filelist: {0}'.format(requested - changed))
    cmdret = __salt__['cmd.run']('snapper -c {0} undochange {1}..{2} {3}'.format(
        config, pre, post, ' '.join(requested)))
    try:
        # snapper prints e.g. 'create:0 modify:1 delete:0'; turn that
        # into a dict of counters.
        components = cmdret.split(' ')
        ret = {}
        for comp in components:
            key, val = comp.split(':')
            ret[key] = val
        return ret
    except ValueError:
        raise CommandExecutionError(
            'Error while processing Snapper response: {0}'.format(cmdret))
def _get_jid_snapshots(jid, config='root'):
    '''
    Return the (pre, post) snapshot ids created by a given Salt jid.

    Snapshots made through 'snapper.run' carry a 'salt_jid' entry in
    their userdata, which is what this helper matches on.
    '''
    pre_ids = []
    post_ids = []
    for snap in list_snapshots(config):
        if snap['userdata'].get("salt_jid") != jid:
            continue
        if snap['type'] == "pre":
            pre_ids.append(snap['id'])
        elif snap['type'] == "post":
            post_ids.append(snap['id'])
    if not pre_ids or not post_ids:
        raise CommandExecutionError("Jid '{0}' snapshots not found".format(jid))
    return (pre_ids[0], post_ids[0])
def undo_jid(jid, config='root'):
    '''
    Undo the changes applied by a salt job

    jid
        The job id to lookup

    config
        Configuration name.

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.undo_jid jid=20160607130930720112
    '''
    pre_nr, post_nr = _get_jid_snapshots(jid, config=config)
    return undo(config, num_pre=pre_nr, num_post=post_nr)
def diff(config='root', filename=None, num_pre=None, num_post=None):
    '''
    Returns the differences between two snapshots

    config
        Configuration name.

    filename
        if not provided the showing differences between snapshots for
        all "text" files

    num_pre
        first snapshot ID to compare. Default is last snapshot

    num_post
        last snapshot ID to compare. Default is 0 (current state)

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.diff
        salt '*' snapper.diff filename=/var/log/snapper.log num_pre=19 num_post=20
    '''
    # NOTE(review): this function uses salt.utils.stringutils, which is
    # not in this file's import block -- relies on it being pulled in
    # transitively; confirm/add the explicit import.
    try:
        pre, post = _get_num_interval(config, num_pre, num_post)
        files = changed_files(config, pre, post)
        if filename:
            # Restrict the comparison to the single requested file (only
            # if it actually changed).
            files = [filename] if filename in files else []
        SUBVOLUME = list_configs()[config]['SUBVOLUME']
        # Snapshot number 0 means the live filesystem: compare against the
        # subvolume itself instead of mounting a snapshot.
        pre_mount = snapper.MountSnapshot(config, pre, False) if pre else SUBVOLUME
        post_mount = snapper.MountSnapshot(config, post, False) if post else SUBVOLUME
        files_diff = dict()
        # Directories are skipped; only regular files are diffed.
        for filepath in [filepath for filepath in files if not os.path.isdir(filepath)]:
            # Make the path relative to the subvolume so it can be joined
            # onto either mount point.
            _filepath = filepath
            if filepath.startswith(SUBVOLUME):
                _filepath = filepath[len(SUBVOLUME):]
            # Just in case, removing possible double '/' from the final file paths
            pre_file = os.path.normpath(pre_mount + "/" + _filepath).replace("//", "/")
            post_file = os.path.normpath(post_mount + "/" + _filepath).replace("//", "/")
            # Read both versions of the file (missing side -> empty list),
            # remembering which side exists to classify create/delete.
            if os.path.isfile(pre_file):
                pre_file_exists = True
                with salt.utils.files.fopen(pre_file) as rfh:
                    pre_file_content = [salt.utils.stringutils.to_unicode(_l)
                                        for _l in rfh.readlines()]
            else:
                pre_file_content = []
                pre_file_exists = False
            if os.path.isfile(post_file):
                post_file_exists = True
                with salt.utils.files.fopen(post_file) as rfh:
                    post_file_content = [salt.utils.stringutils.to_unicode(_l)
                                         for _l in rfh.readlines()]
            else:
                post_file_content = []
                post_file_exists = False
            # Text on either side -> unified diff; binary on both sides ->
            # just report sha256 digests of whichever versions exist.
            if _is_text_file(pre_file) or _is_text_file(post_file):
                files_diff[filepath] = {
                    'comment': "text file changed",
                    'diff': ''.join(difflib.unified_diff(pre_file_content,
                                                         post_file_content,
                                                         fromfile=pre_file,
                                                         tofile=post_file))}
                if pre_file_exists and not post_file_exists:
                    files_diff[filepath]['comment'] = "text file deleted"
                if not pre_file_exists and post_file_exists:
                    files_diff[filepath]['comment'] = "text file created"
            elif not _is_text_file(pre_file) and not _is_text_file(post_file):
                # This is a binary file
                files_diff[filepath] = {'comment': "binary file changed"}
                if pre_file_exists:
                    files_diff[filepath]['old_sha256_digest'] = __salt__['hashutil.sha256_digest'](''.join(pre_file_content))
                if post_file_exists:
                    files_diff[filepath]['new_sha256_digest'] = __salt__['hashutil.sha256_digest'](''.join(post_file_content))
                if post_file_exists and not pre_file_exists:
                    files_diff[filepath]['comment'] = "binary file created"
                if pre_file_exists and not post_file_exists:
                    files_diff[filepath]['comment'] = "binary file deleted"
        # Unmount whatever snapshots were mounted above (0 = live fs,
        # nothing to unmount).
        if pre:
            snapper.UmountSnapshot(config, pre, False)
        if post:
            snapper.UmountSnapshot(config, post, False)
        return files_diff
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while showing differences between snapshots: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def create_baseline(tag="baseline", config='root'):
    '''
    Creates a snapshot marked as baseline

    tag
        Tag name for the baseline

    config
        Configuration name.

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.create_baseline
        salt '*' snapper.create_baseline my_custom_baseline
    '''
    # The tag is stored in the snapshot userdata so baselines can be
    # located later.
    userdata = {"baseline_tag": tag}
    return __salt__['snapper.create_snapshot'](
        config=config,
        snapshot_type='single',
        description="baseline snapshot",
        cleanup_algorithm="number",
        userdata=userdata)
|
saltstack/salt
|
salt/modules/snapper.py
|
create_baseline
|
python
|
def create_baseline(tag="baseline", config='root'):
'''
Creates a snapshot marked as baseline
tag
Tag name for the baseline
config
Configuration name.
CLI Example:
.. code-block:: bash
salt '*' snapper.create_baseline
salt '*' snapper.create_baseline my_custom_baseline
'''
return __salt__['snapper.create_snapshot'](config=config,
snapshot_type='single',
description="baseline snapshot",
cleanup_algorithm="number",
userdata={"baseline_tag": tag})
|
Creates a snapshot marked as baseline
tag
Tag name for the baseline
config
Configuration name.
CLI Example:
.. code-block:: bash
salt '*' snapper.create_baseline
salt '*' snapper.create_baseline my_custom_baseline
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/snapper.py#L875-L896
| null |
# -*- coding: utf-8 -*-
'''
Module to manage filesystem snapshots with snapper
.. versionadded:: 2016.11.0
:codeauthor: Duncan Mac-Vicar P. <dmacvicar@suse.de>
:codeauthor: Pablo Suárez Hernández <psuarezhernandez@suse.de>
:depends: ``dbus`` Python module.
:depends: ``snapper`` http://snapper.io, available in most distros
:maturity: new
:platform: Linux
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
import os
import time
import difflib
try:
from pwd import getpwuid
HAS_PWD = True
except ImportError:
HAS_PWD = False
from salt.exceptions import CommandExecutionError
import salt.utils.files
# import 3rd party libs
from salt.ext import six
try:
import dbus # pylint: disable=wrong-import-order
HAS_DBUS = True
except ImportError:
HAS_DBUS = False
DBUS_STATUS_MAP = {
1: "created",
2: "deleted",
4: "type changed",
8: "modified",
16: "permission changed",
32: "owner changed",
64: "group changed",
128: "extended attributes changed",
256: "ACL info changed",
}
SNAPPER_DBUS_OBJECT = 'org.opensuse.Snapper'
SNAPPER_DBUS_PATH = '/org/opensuse/Snapper'
SNAPPER_DBUS_INTERFACE = 'org.opensuse.Snapper'
# pylint: disable=invalid-name
log = logging.getLogger(__name__)
bus = None
system_bus_error = None
snapper = None
snapper_error = None
if HAS_DBUS:
try:
bus = dbus.SystemBus()
except dbus.DBusException as exc:
log.warning(exc)
system_bus_error = exc
else:
if SNAPPER_DBUS_OBJECT in bus.list_activatable_names():
try:
snapper = dbus.Interface(bus.get_object(SNAPPER_DBUS_OBJECT,
SNAPPER_DBUS_PATH),
dbus_interface=SNAPPER_DBUS_INTERFACE)
except (dbus.DBusException, ValueError) as exc:
log.warning(exc)
snapper_error = exc
else:
snapper_error = 'snapper is missing'
# pylint: enable=invalid-name
def __virtual__():
error_msg = 'The snapper module cannot be loaded: {0}'
if not HAS_DBUS:
return False, error_msg.format('missing python dbus module')
elif not snapper:
return False, error_msg.format(snapper_error)
elif not bus:
return False, error_msg.format(system_bus_error)
elif not HAS_PWD:
return False, error_msg.format('pwd module not available')
return 'snapper'
def _snapshot_to_data(snapshot):
'''
Returns snapshot data from a D-Bus response.
A snapshot D-Bus response is a dbus.Struct containing the
information related to a snapshot:
[id, type, pre_snapshot, timestamp, user, description,
cleanup_algorithm, userdata]
id: dbus.UInt32
type: dbus.UInt16
pre_snapshot: dbus.UInt32
timestamp: dbus.Int64
user: dbus.UInt32
description: dbus.String
cleaup_algorithm: dbus.String
userdata: dbus.Dictionary
'''
data = {}
data['id'] = snapshot[0]
data['type'] = ['single', 'pre', 'post'][snapshot[1]]
if data['type'] == 'post':
data['pre'] = snapshot[2]
if snapshot[3] != -1:
data['timestamp'] = snapshot[3]
else:
data['timestamp'] = int(time.time())
data['user'] = getpwuid(snapshot[4])[0]
data['description'] = snapshot[5]
data['cleanup'] = snapshot[6]
data['userdata'] = {}
for key, value in snapshot[7].items():
data['userdata'][key] = value
return data
def _dbus_exception_to_reason(exc, args):
'''
Returns a error message from a snapper DBusException
'''
error = exc.get_dbus_name()
if error == 'error.unknown_config':
return "Unknown configuration '{0}'".format(args['config'])
elif error == 'error.illegal_snapshot':
return 'Invalid snapshot'
else:
return exc.get_dbus_name()
def list_snapshots(config='root'):
'''
List available snapshots
CLI example:
.. code-block:: bash
salt '*' snapper.list_snapshots config=myconfig
'''
try:
snapshots = snapper.ListSnapshots(config)
return [_snapshot_to_data(s) for s in snapshots]
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while listing snapshots: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
)
def get_snapshot(number=0, config='root'):
'''
Get detailed information about a given snapshot
CLI example:
.. code-block:: bash
salt '*' snapper.get_snapshot 1
'''
try:
snapshot = snapper.GetSnapshot(config, int(number))
return _snapshot_to_data(snapshot)
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while retrieving snapshot: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
)
def list_configs():
    '''
    List all available configs

    CLI example:

    .. code-block:: bash

        salt '*' snapper.list_configs
    '''
    try:
        # Each entry is (name, <unused>, attributes); map name -> attributes.
        return {entry[0]: entry[2] for entry in snapper.ListConfigs()}
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while listing configurations: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def _config_filter(value):
if isinstance(value, bool):
return 'yes' if value else 'no'
return value
def set_config(name='root', **kwargs):
    '''
    Set configuration values

    CLI example:

    .. code-block:: bash

        salt '*' snapper.set_config SYNC_ACL=True

    Keys are case insensitive as they will be always uppercased to snapper
    convention. The above example is equivalent to:

    .. code-block:: bash

        salt '*' snapper.set_config sync_acl=True
    '''
    # Drop salt-internal '__*' kwargs, uppercase the keys and translate
    # booleans into the 'yes'/'no' strings snapper understands.
    payload = {}
    for key, value in kwargs.items():
        if key.startswith('__'):
            continue
        payload[key.upper()] = _config_filter(value)
    try:
        snapper.SetConfig(name, payload)
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while setting configuration {0}: {1}'
            .format(name, _dbus_exception_to_reason(exc, locals()))
        )
    return True
def _get_last_snapshot(config='root'):
    '''
    Return the most recently created snapshot (the one with the highest id).
    '''
    snapshots = list_snapshots(config)
    snapshots.sort(key=lambda snap: snap['id'])
    return snapshots[-1]
def status_to_string(dbus_status):
    '''
    Converts a numeric dbus snapper status into a string

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.status_to_string <dbus_status>
    '''
    # Extract each of the nine status bits and keep only the ones that
    # are set; DBUS_STATUS_MAP is keyed by the single-bit values.
    flags = [dbus_status & (1 << bit) for bit in range(9)]
    return [DBUS_STATUS_MAP[flag] for flag in flags if flag]
def get_config(name='root'):
    '''
    Retrieves all values from a given configuration

    CLI example:

    .. code-block:: bash

        salt '*' snapper.get_config
    '''
    try:
        return snapper.GetConfig(name)
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while retrieving configuration: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def create_config(name=None,
                  subvolume=None,
                  fstype=None,
                  template=None,
                  extra_opts=None):
    '''
    Creates a new Snapper configuration

    name
        Name of the new Snapper configuration.
    subvolume
        Path to the related subvolume.
    fstype
        Filesystem type of the subvolume.
    template
        Configuration template to use. (Default: default)
    extra_opts
        Extra Snapper configuration opts dictionary. It will override the values provided
        by the given template (if any).

    CLI example:

    .. code-block:: bash

        salt '*' snapper.create_config name=myconfig subvolume=/foo/bar/ fstype=btrfs
        salt '*' snapper.create_config name=myconfig subvolume=/foo/bar/ fstype=btrfs template="default"
        salt '*' snapper.create_config name=myconfig subvolume=/foo/bar/ fstype=btrfs extra_opts='{"NUMBER_CLEANUP": False}'
    '''
    # The first three arguments are mandatory; fail fast with a clear
    # message naming the missing one.
    for argname, argvalue in (('name', name),
                              ('subvolume', subvolume),
                              ('fstype', fstype)):
        if not argvalue:
            raise CommandExecutionError(
                'You must provide a "{0}" for the new configuration'.format(argname)
            )

    try:
        # snapper expects an empty string when no template is given.
        snapper.CreateConfig(name, subvolume, fstype, template or "")
        if extra_opts:
            set_config(name, **extra_opts)
        return get_config(name)
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while creating the new configuration: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def create_snapshot(config='root', snapshot_type='single', pre_number=None,
                    description=None, cleanup_algorithm='number', userdata=None,
                    **kwargs):
    '''
    Creates an snapshot

    config
        Configuration name.
    snapshot_type
        Specifies the type of the new snapshot. Possible values are
        single, pre and post.
    pre_number
        For post snapshots the number of the pre snapshot must be
        provided.
    description
        Description for the snapshot. If not given, the salt job will be used.
    cleanup_algorithm
        Set the cleanup algorithm for the snapshot.

        number
            Deletes old snapshots when a certain number of snapshots
            is reached.
        timeline
            Deletes old snapshots but keeps a number of hourly,
            daily, weekly, monthly and yearly snapshots.
        empty-pre-post
            Deletes pre/post snapshot pairs with empty diffs.
    userdata
        Set userdata for the snapshot (key-value pairs).

    Returns the number of the created snapshot.

    CLI example:

    .. code-block:: bash

        salt '*' snapper.create_snapshot
    '''
    if not userdata:
        userdata = {}

    # Tag the snapshot with the salt job id (when present) so it can be
    # found later by undo_jid/diff_jid.
    jid = kwargs.get('__pub_jid')
    if description is None and jid is not None:
        description = 'salt job {0}'.format(jid)

    if jid is not None:
        userdata['salt_jid'] = jid

    new_nr = None
    try:
        if snapshot_type == 'single':
            new_nr = snapper.CreateSingleSnapshot(config, description,
                                                  cleanup_algorithm, userdata)
        elif snapshot_type == 'pre':
            new_nr = snapper.CreatePreSnapshot(config, description,
                                               cleanup_algorithm, userdata)
        elif snapshot_type == 'post':
            if pre_number is None:
                # BUGFIX: the original concatenation lacked a trailing
                # space and rendered as "needs to bespecified".
                raise CommandExecutionError(
                    "pre snapshot number 'pre_number' needs to be "
                    "specified for snapshots of the 'post' type")
            new_nr = snapper.CreatePostSnapshot(config, pre_number, description,
                                                cleanup_algorithm, userdata)
        else:
            raise CommandExecutionError(
                "Invalid snapshot type '{0}'".format(snapshot_type))
    except dbus.DBusException as exc:
        # BUGFIX: the original message ("listing changed files") was
        # copy-pasted from another function; report the failing operation.
        raise CommandExecutionError(
            'Error encountered while creating snapshot: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
    return new_nr
def delete_snapshot(snapshots_ids=None, config="root"):
    '''
    Deletes an snapshot

    config
        Configuration name. (Default: root)
    snapshots_ids
        List of the snapshots IDs to be deleted.

    CLI example:

    .. code-block:: bash

        salt '*' snapper.delete_snapshot 54
        salt '*' snapper.delete_snapshot config=root 54
        salt '*' snapper.delete_snapshot config=root snapshots_ids=[54,55,56]
    '''
    if not snapshots_ids:
        raise CommandExecutionError('Error: No snapshot ID has been provided')
    try:
        # Accept a single id as well as a list of ids.
        if not isinstance(snapshots_ids, list):
            snapshots_ids = [snapshots_ids]
        existing_ids = set(snap['id'] for snap in list_snapshots(config))
        unknown_ids = set(snapshots_ids) - existing_ids
        if unknown_ids:
            raise CommandExecutionError(
                "Error: Snapshots '{0}' not found".format(
                    ", ".join(six.text_type(snap_id) for snap_id in unknown_ids))
            )
        snapper.DeleteSnapshots(config, snapshots_ids)
        return {config: {"ids": snapshots_ids, "status": "deleted"}}
    except dbus.DBusException as exc:
        raise CommandExecutionError(_dbus_exception_to_reason(exc, locals()))
def modify_snapshot(snapshot_id=None,
                    description=None,
                    userdata=None,
                    cleanup=None,
                    config="root"):
    '''
    Modify attributes of an existing snapshot.

    config
        Configuration name. (Default: root)
    snapshot_id
        ID of the snapshot to be modified.
    cleanup
        Change the cleanup method of the snapshot. (str)
    description
        Change the description of the snapshot. (str)
    userdata
        Change the userdata dictionary of the snapshot. (dict)

    CLI example:

    .. code-block:: bash

        salt '*' snapper.modify_snapshot 54 description="my snapshot description"
        salt '*' snapper.modify_snapshot 54 userdata='{"foo": "bar"}'
        salt '*' snapper.modify_snapshot snapshot_id=54 cleanup="number"
    '''
    if not snapshot_id:
        raise CommandExecutionError('Error: No snapshot ID has been provided')

    current = get_snapshot(config=config, number=snapshot_id)
    try:
        # Fall back to the snapshot's current value for any attribute the
        # caller did not explicitly provide.
        new_description = current['description'] if description is None else description
        new_cleanup = current['cleanup'] if cleanup is None else cleanup
        new_userdata = current['userdata'] if userdata is None else userdata

        snapper.SetSnapshot(config,
                            snapshot_id,
                            new_description,
                            new_cleanup,
                            new_userdata)
        return get_snapshot(config=config, number=snapshot_id)
    except dbus.DBusException as exc:
        raise CommandExecutionError(_dbus_exception_to_reason(exc, locals()))
def _get_num_interval(config, num_pre, num_post):
'''
Returns numerical interval based on optionals num_pre, num_post values
'''
post = int(num_post) if num_post else 0
pre = int(num_pre) if num_pre is not None else _get_last_snapshot(config)['id']
return pre, post
def _is_text_file(filename):
    '''
    Return True if ``file -bi`` reports a text MIME type for ``filename``.

    BUGFIX: the filename is now shell-quoted before interpolation, so
    paths containing spaces or shell metacharacters no longer break the
    command or allow shell injection.
    '''
    try:
        from shlex import quote as _quote  # Python 3
    except ImportError:
        from pipes import quote as _quote  # Python 2
    type_of_file = os.popen('file -bi {0}'.format(_quote(filename)), 'r').read()
    return type_of_file.startswith('text')
def run(function, *args, **kwargs):
    '''
    Runs a function from an execution module creating pre and post snapshots
    and associating the salt job id with those snapshots for easy undo and
    cleanup.

    function
        Salt function to call.
    config
        Configuration name. (default: "root")
    description
        A description for the snapshots. (default: None)
    userdata
        Data to include in the snapshot metadata. (default: None)
    cleanup_algorithm
        Snapper cleanup algorithm. (default: "number")
    `*args`
        args for the function to call. (default: None)
    `**kwargs`
        kwargs for the function to call (default: None)

    This would run append text to /etc/motd using the file.append
    module, and will create two snapshots, pre and post with the associated
    metadata. The jid will be available as salt_jid in the userdata of the
    snapshot.

    You can immediately see the changes

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.run file.append args='["/etc/motd", "some text"]'
    '''
    # Pop snapper-specific options so they are not forwarded to the
    # wrapped function.
    config = kwargs.pop("config", "root")
    description = kwargs.pop("description", "snapper.run[{0}]".format(function))
    cleanup_algorithm = kwargs.pop("cleanup_algorithm", "number")
    userdata = kwargs.pop("userdata", {})

    # Split the remaining kwargs: '__*' entries (e.g. __pub_jid) go to
    # create_snapshot so the jid ends up in the snapshot userdata; the
    # rest are the wrapped function's own kwargs.
    func_kwargs = dict((k, v) for k, v in kwargs.items() if not k.startswith('__'))
    kwargs = dict((k, v) for k, v in kwargs.items() if k.startswith('__'))

    pre_nr = __salt__['snapper.create_snapshot'](
        config=config,
        snapshot_type='pre',
        description=description,
        cleanup_algorithm=cleanup_algorithm,
        userdata=userdata,
        **kwargs)

    if function not in __salt__:
        raise CommandExecutionError(
            'function "{0}" does not exist'.format(function)
        )

    try:
        ret = __salt__[function](*args, **func_kwargs)
    except CommandExecutionError as exc:
        # On failure return the error plus the function's docstring so the
        # post snapshot is still taken and the pre/post pair stays intact.
        ret = "\n".join([six.text_type(exc), __salt__[function].__doc__])

    __salt__['snapper.create_snapshot'](
        config=config,
        snapshot_type='post',
        pre_number=pre_nr,
        description=description,
        cleanup_algorithm=cleanup_algorithm,
        userdata=userdata,
        **kwargs)
    return ret
def status(config='root', num_pre=None, num_post=None):
    '''
    Returns a comparison between two snapshots

    config
        Configuration name.
    num_pre
        first snapshot ID to compare. Default is last snapshot
    num_post
        last snapshot ID to compare. Default is 0 (current state)

    CLI example:

    .. code-block:: bash

        salt '*' snapper.status
        salt '*' snapper.status num_pre=19 num_post=20
    '''
    try:
        pre, post = _get_num_interval(config, num_pre, num_post)
        snapper.CreateComparison(config, int(pre), int(post))
        entries = snapper.GetFiles(config, int(pre), int(post))
        subvolume = list_configs()[config]['SUBVOLUME']
        status_ret = {}
        for entry in entries:
            filepath, state = entry[0], entry[1]
            # Strip a leading SUBVOLUME prefix so the normalized path
            # never starts with a double '/'.
            if filepath.startswith(subvolume):
                filepath = filepath[len(subvolume):]
            key = os.path.normpath(subvolume + filepath)
            status_ret[key] = {'status': status_to_string(state)}
        return status_ret
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while listing changed files: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def changed_files(config='root', num_pre=None, num_post=None):
    '''
    Returns the files changed between two snapshots

    config
        Configuration name.
    num_pre
        first snapshot ID to compare. Default is last snapshot
    num_post
        last snapshot ID to compare. Default is 0 (current state)

    CLI example:

    .. code-block:: bash

        salt '*' snapper.changed_files
        salt '*' snapper.changed_files num_pre=19 num_post=20
    '''
    changes = status(config, num_pre, num_post)
    return changes.keys()
def undo(config='root', files=None, num_pre=None, num_post=None):
    '''
    Undo all file changes that happened between num_pre and num_post, leaving
    the files into the state of num_pre.

    .. warning::
        If one of the files has changes after num_post, they will be overwritten
        The snapshots are used to determine the file list, but the current
        version of the files will be overwritten by the versions in num_pre.

        You to undo changes between num_pre and the current version of the
        files use num_post=0.

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.undo
    '''
    pre, post = _get_num_interval(config, num_pre, num_post)

    changes = status(config, pre, post)
    changed = set(changes.keys())
    requested = set(files or changed)

    if not requested.issubset(changed):
        # BUGFIX: report the offending files (requested but not changed);
        # the original printed 'changed - requested', which is always empty
        # here, and its message lacked a space ("presentin").
        raise CommandExecutionError(
            'Given file list contains files that are not present '
            'in the changed filelist: {0}'.format(requested - changed))

    cmdret = __salt__['cmd.run']('snapper -c {0} undochange {1}..{2} {3}'.format(
        config, pre, post, ' '.join(requested)))

    try:
        # snapper prints space-separated 'key:value' counters
        # (e.g. 'create:0 modify:1 delete:0'); turn them into a dict.
        components = cmdret.split(' ')
        ret = {}
        for comp in components:
            key, val = comp.split(':')
            ret[key] = val
        return ret
    except ValueError:
        raise CommandExecutionError(
            'Error while processing Snapper response: {0}'.format(cmdret))
def _get_jid_snapshots(jid, config='root'):
    '''
    Return the (pre, post) snapshot ids created by a given Salt jid.

    Snapshots made through ``snapper.run`` carry a 'salt_jid' entry in
    their userdata; that entry is what is matched here.
    '''
    jid_snapshots = [snap for snap in list_snapshots(config)
                     if snap['userdata'].get("salt_jid") == jid]

    pre_snapshots = [snap for snap in jid_snapshots if snap['type'] == "pre"]
    post_snapshots = [snap for snap in jid_snapshots if snap['type'] == "post"]

    if not pre_snapshots or not post_snapshots:
        raise CommandExecutionError("Jid '{0}' snapshots not found".format(jid))

    return (pre_snapshots[0]['id'], post_snapshots[0]['id'])
def undo_jid(jid, config='root'):
    '''
    Undo the changes applied by a salt job

    jid
        The job id to lookup
    config
        Configuration name.

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.undo_jid jid=20160607130930720112
    '''
    pre_nr, post_nr = _get_jid_snapshots(jid, config=config)
    return undo(config, num_pre=pre_nr, num_post=post_nr)
def diff(config='root', filename=None, num_pre=None, num_post=None):
    '''
    Returns the differences between two snapshots

    config
        Configuration name.

    filename
        if not provided the showing differences between snapshots for
        all "text" files

    num_pre
        first snapshot ID to compare. Default is last snapshot

    num_post
        last snapshot ID to compare. Default is 0 (current state)

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.diff
        salt '*' snapper.diff filename=/var/log/snapper.log num_pre=19 num_post=20
    '''
    try:
        pre, post = _get_num_interval(config, num_pre, num_post)

        files = changed_files(config, pre, post)
        if filename:
            files = [filename] if filename in files else []

        # Mount each snapshot read-only; a snapshot number of 0 means the
        # live filesystem, so the subvolume path is used directly instead.
        SUBVOLUME = list_configs()[config]['SUBVOLUME']
        pre_mount = snapper.MountSnapshot(config, pre, False) if pre else SUBVOLUME
        post_mount = snapper.MountSnapshot(config, post, False) if post else SUBVOLUME

        files_diff = dict()
        for filepath in [filepath for filepath in files if not os.path.isdir(filepath)]:

            # Strip a leading SUBVOLUME prefix so the path can be re-rooted
            # under each snapshot's mount point.
            _filepath = filepath
            if filepath.startswith(SUBVOLUME):
                _filepath = filepath[len(SUBVOLUME):]

            # Just in case, removing possible double '/' from the final file paths
            pre_file = os.path.normpath(pre_mount + "/" + _filepath).replace("//", "/")
            post_file = os.path.normpath(post_mount + "/" + _filepath).replace("//", "/")

            # Read both versions (as lists of unicode lines) when they
            # exist; a missing side indicates creation or deletion.
            if os.path.isfile(pre_file):
                pre_file_exists = True
                with salt.utils.files.fopen(pre_file) as rfh:
                    pre_file_content = [salt.utils.stringutils.to_unicode(_l)
                                        for _l in rfh.readlines()]
            else:
                pre_file_content = []
                pre_file_exists = False

            if os.path.isfile(post_file):
                post_file_exists = True
                with salt.utils.files.fopen(post_file) as rfh:
                    post_file_content = [salt.utils.stringutils.to_unicode(_l)
                                         for _l in rfh.readlines()]
            else:
                post_file_content = []
                post_file_exists = False

            # Treat the pair as text if either side is text; NOTE(review):
            # a file that is text on one side and binary on the other falls
            # into this branch and is diffed as text.
            if _is_text_file(pre_file) or _is_text_file(post_file):
                files_diff[filepath] = {
                    'comment': "text file changed",
                    'diff': ''.join(difflib.unified_diff(pre_file_content,
                                                         post_file_content,
                                                         fromfile=pre_file,
                                                         tofile=post_file))}

                if pre_file_exists and not post_file_exists:
                    files_diff[filepath]['comment'] = "text file deleted"
                if not pre_file_exists and post_file_exists:
                    files_diff[filepath]['comment'] = "text file created"

            elif not _is_text_file(pre_file) and not _is_text_file(post_file):
                # This is a binary file
                files_diff[filepath] = {'comment': "binary file changed"}
                if pre_file_exists:
                    files_diff[filepath]['old_sha256_digest'] = __salt__['hashutil.sha256_digest'](''.join(pre_file_content))
                if post_file_exists:
                    files_diff[filepath]['new_sha256_digest'] = __salt__['hashutil.sha256_digest'](''.join(post_file_content))
                if post_file_exists and not pre_file_exists:
                    files_diff[filepath]['comment'] = "binary file created"
                if pre_file_exists and not post_file_exists:
                    files_diff[filepath]['comment'] = "binary file deleted"

        # Unmount whatever was mounted above (0 was never mounted).
        # NOTE(review): if an exception occurs mid-loop the snapshots stay
        # mounted; a try/finally would be safer -- confirm before changing.
        if pre:
            snapper.UmountSnapshot(config, pre, False)
        if post:
            snapper.UmountSnapshot(config, post, False)
        return files_diff
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while showing differences between snapshots: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def diff_jid(jid, config='root'):
    '''
    Returns the changes applied by a `jid`

    jid
        The job id to lookup
    config
        Configuration name.

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.diff_jid jid=20160607130930720112
    '''
    pre_nr, post_nr = _get_jid_snapshots(jid, config=config)
    return diff(config, num_pre=pre_nr, num_post=post_nr)
|
saltstack/salt
|
salt/states/environ.py
|
setenv
|
python
|
def setenv(name,
           value,
           false_unsets=False,
           clear_all=False,
           update_minion=False,
           permanent=False):
    '''
    Set the salt process environment variables.

    name
        The environment key to set. Must be a string.

    value
        Either a string or dict. When string, it will be the value
        set for the environment key of 'name' above.
        When a dict, each key/value pair represents an environment
        variable to set.

    false_unsets
        If a key's value is False and false_unsets is True, then the
        key will be removed from the salt processes environment dict
        entirely. If a key's value is False and false_unsets is not
        True, then the key's value will be set to an empty string.
        Default: False

    clear_all
        USE WITH CAUTION! This option can unset environment variables
        needed for salt to function properly.
        If clear_all is True, then any environment variables not
        defined in the environ dict will be deleted.
        Default: False

    update_minion
        If True, apply these environ changes to the main salt-minion
        process. If False, the environ changes will only affect the
        current salt subprocess.
        Default: False

    permanent
        On Windows minions this will set the environment variable in the
        registry so that it is always added as a environment variable when
        applications open. If you want to set the variable to HKLM instead of
        HKCU just pass in "HKLM" for this parameter. On all other minion types
        this will be ignored. Note: This will only take affect on applications
        opened after this has been set.

    Example:

    .. code-block:: yaml

        a_string_env:
          environ.setenv:
            - name: foo
            - value: bar
            - update_minion: True

        a_dict_env:
          environ.setenv:
            - name: does_not_matter
            - value:
                foo: bar
                baz: quux
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    # Normalize 'value' into a dict of variables to manage: a string (or
    # False) applies to 'name' alone; a dict is taken as-is.
    environ = {}
    if isinstance(value, six.string_types) or value is False:
        environ[name] = value
    elif isinstance(value, dict):
        environ = value
    else:
        ret['result'] = False
        ret['comment'] = 'Environ value must be string, dict or False'
        return ret

    if clear_all is True:
        # Any keys not in 'environ' dict supplied by user will be unset
        to_unset = [key for key in os.environ if key not in environ]
        for key in to_unset:
            if false_unsets is not True:
                # This key value will change to ''
                ret['changes'].update({key: ''})
            else:
                # We're going to delete the key
                ret['changes'].update({key: None})

    current_environ = dict(os.environ)
    already_set = []
    for key, val in six.iteritems(environ):
        if val is False:
            # We unset this key from the environment if
            # false_unsets is True. Otherwise we want to set
            # the value to ''
            # NOTE(review): key_exists is redefined each iteration so it
            # closes over the current 'key'; it is called immediately below,
            # so the late-binding closure pitfall does not apply here.
            def key_exists():
                # On Windows, a variable can exist only in the registry
                # (permanent storage) without being in os.environ.
                if salt.utils.platform.is_windows():
                    permanent_hive = 'HKCU'
                    permanent_key = 'Environment'
                    if permanent == 'HKLM':
                        permanent_hive = 'HKLM'
                        permanent_key = r'SYSTEM\CurrentControlSet\Control\Session Manager\Environment'

                    out = __utils__['reg.read_value'](permanent_hive, permanent_key, _norm_key(key))
                    return out['success'] is True
                else:
                    return False
            if current_environ.get(_norm_key(key), None) is None and not key_exists():
                # The key does not exist in environment
                if false_unsets is not True:
                    # This key will be added with value ''
                    ret['changes'].update({key: ''})
            else:
                # The key exists.
                if false_unsets is not True:
                    # Check to see if the value will change
                    if current_environ.get(_norm_key(key), None) != '':
                        # This key value will change to ''
                        ret['changes'].update({key: ''})
                else:
                    # We're going to delete the key
                    ret['changes'].update({key: None})
        elif current_environ.get(_norm_key(key), '') == val:
            already_set.append(key)
        else:
            ret['changes'].update({key: val})

    if __opts__['test']:
        if ret['changes']:
            ret['comment'] = 'Environ values will be changed'
        else:
            ret['comment'] = 'Environ values are already set with the correct values'
        return ret

    if ret['changes']:
        # Apply the changes through the environ execution module; it
        # returns the dict of what was actually set (falsy on failure).
        environ_ret = __salt__['environ.setenv'](environ,
                                                 false_unsets,
                                                 clear_all,
                                                 update_minion,
                                                 permanent)
        if not environ_ret:
            ret['result'] = False
            ret['comment'] = 'Failed to set environ variables'
            return ret
        ret['result'] = True
        ret['changes'] = environ_ret
        ret['comment'] = 'Environ values were set'
    else:
        ret['comment'] = 'Environ values were already set with the correct values'
    return ret
|
Set the salt process environment variables.
name
The environment key to set. Must be a string.
value
Either a string or dict. When string, it will be the value
set for the environment key of 'name' above.
When a dict, each key/value pair represents an environment
variable to set.
false_unsets
If a key's value is False and false_unsets is True, then the
key will be removed from the salt processes environment dict
entirely. If a key's value is False and false_unsets is not
True, then the key's value will be set to an empty string.
Default: False
clear_all
USE WITH CAUTION! This option can unset environment variables
needed for salt to function properly.
If clear_all is True, then any environment variables not
defined in the environ dict will be deleted.
Default: False
update_minion
If True, apply these environ changes to the main salt-minion
process. If False, the environ changes will only affect the
current salt subprocess.
Default: False
permanent
On Windows minions this will set the environment variable in the
registry so that it is always added as a environment variable when
applications open. If you want to set the variable to HKLM instead of
HKCU just pass in "HKLM" for this parameter. On all other minion types
this will be ignored. Note: This will only take affect on applications
opened after this has been set.
Example:
.. code-block:: yaml
a_string_env:
environ.setenv:
- name: foo
- value: bar
- update_minion: True
a_dict_env:
environ.setenv:
- name: does_not_matter
- value:
foo: bar
baz: quux
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/environ.py#L34-L184
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def _norm_key(key):\n '''\n Normalize windows environment keys\n '''\n if salt.utils.platform.is_windows():\n return key.upper()\n return key\n",
"def key_exists():\n if salt.utils.platform.is_windows():\n permanent_hive = 'HKCU'\n permanent_key = 'Environment'\n if permanent == 'HKLM':\n permanent_hive = 'HKLM'\n permanent_key = r'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment'\n\n out = __utils__['reg.read_value'](permanent_hive, permanent_key, _norm_key(key))\n return out['success'] is True\n else:\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Support for getting and setting the environment variables
of the current salt process.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
# Import Salt libs
import salt.utils.platform
# Import 3rd-party libs
from salt.ext import six
def __virtual__():
'''
No dependency checks, and not renaming, just return True
'''
return True
def _norm_key(key):
    '''
    Normalize an environment variable name: Windows treats names
    case-insensitively, so uppercase there; elsewhere keep it as-is.
    '''
    return key.upper() if salt.utils.platform.is_windows() else key
|
saltstack/salt
|
salt/modules/znc.py
|
_makepass
|
python
|
def _makepass(password, hasher='sha256'):
'''
Create a znc compatible hashed password
'''
# Setup the hasher
if hasher == 'sha256':
h = hashlib.sha256(password)
elif hasher == 'md5':
h = hashlib.md5(password)
else:
return NotImplemented
c = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
"0123456789!?.,:;/*-+_()"
r = {
'Method': h.name,
'Salt': ''.join(random.SystemRandom().choice(c) for x in range(20)),
}
# Salt the password hash
h.update(r['Salt'])
r['Hash'] = h.hexdigest()
return r
|
Create a znc compatible hashed password
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/znc.py#L34-L58
| null |
# -*- coding: utf-8 -*-
'''
znc - An advanced IRC bouncer
.. versionadded:: 2014.7.0
Provides an interface to basic ZNC functionality
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import hashlib
import logging
import os.path
import random
import signal
# Import salt libs
import salt.utils.path
from salt.ext.six.moves import range
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Load this module only when the ``znc`` binary is available on the
    minion's PATH.
    '''
    if not salt.utils.path.which('znc'):
        return (False, "Module znc: znc binary not found")
    return 'znc'
def buildmod(*modules):
    '''
    Build module using znc-buildmod

    CLI Example:

    .. code-block:: bash

        salt '*' znc.buildmod module.cpp [...]
    '''
    # Refuse to run when any of the given module files is missing.
    missing = [mod for mod in modules if not os.path.exists(mod)]
    if missing:
        return 'Error: The file ({0}) does not exist.'.format(', '.join(missing))

    out = __salt__['cmd.run'](['znc-buildmod'] + list(modules),
                              python_shell=False).splitlines()
    # znc-buildmod prints its result on the last line.
    return out[-1]
def dumpconf():
    '''
    Write the active configuration state to config file by sending the
    running znc process a SIGUSR1.

    CLI Example:

    .. code-block:: bash

        salt '*' znc.dumpconf
    '''
    pkill = __salt__['ps.pkill']
    return pkill('znc', signal=signal.SIGUSR1)
def rehashconf():
    '''
    Rehash the active configuration state from config file by sending the
    running znc process a SIGHUP.

    CLI Example:

    .. code-block:: bash

        salt '*' znc.rehashconf
    '''
    pkill = __salt__['ps.pkill']
    return pkill('znc', signal=signal.SIGHUP)
def version():
    '''
    Return server version from znc --version

    CLI Example:

    .. code-block:: bash

        salt '*' znc.version
    '''
    out = __salt__['cmd.run'](['znc', '--version'],
                              python_shell=False).splitlines()
    # The first output line carries the version, separated from the rest
    # by ' - '; keep only the leading part.
    return out[0].split(' - ')[0]
|
saltstack/salt
|
salt/modules/znc.py
|
buildmod
|
python
|
def buildmod(*modules):
'''
Build module using znc-buildmod
CLI Example:
.. code-block:: bash
salt '*' znc.buildmod module.cpp [...]
'''
# Check if module files are missing
missing = [module for module in modules if not os.path.exists(module)]
if missing:
return 'Error: The file ({0}) does not exist.'.format(', '.join(missing))
cmd = ['znc-buildmod']
cmd.extend(modules)
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
return out[-1]
|
Build module using znc-buildmod
CLI Example:
.. code-block:: bash
salt '*' znc.buildmod module.cpp [...]
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/znc.py#L61-L79
| null |
# -*- coding: utf-8 -*-
'''
znc - An advanced IRC bouncer
.. versionadded:: 2014.7.0
Provides an interface to basic ZNC functionality
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import hashlib
import logging
import os.path
import random
import signal
# Import salt libs
import salt.utils.path
from salt.ext.six.moves import range
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load the module if znc is installed
'''
if salt.utils.path.which('znc'):
return 'znc'
return (False, "Module znc: znc binary not found")
def _makepass(password, hasher='sha256'):
'''
Create a znc compatible hashed password
'''
# Setup the hasher
if hasher == 'sha256':
h = hashlib.sha256(password)
elif hasher == 'md5':
h = hashlib.md5(password)
else:
return NotImplemented
c = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
"0123456789!?.,:;/*-+_()"
r = {
'Method': h.name,
'Salt': ''.join(random.SystemRandom().choice(c) for x in range(20)),
}
# Salt the password hash
h.update(r['Salt'])
r['Hash'] = h.hexdigest()
return r
def dumpconf():
'''
Write the active configuration state to config file
CLI Example:
.. code-block:: bash
salt '*' znc.dumpconf
'''
return __salt__['ps.pkill']('znc', signal=signal.SIGUSR1)
def rehashconf():
'''
Rehash the active configuration state from config file
CLI Example:
.. code-block:: bash
salt '*' znc.rehashconf
'''
return __salt__['ps.pkill']('znc', signal=signal.SIGHUP)
def version():
'''
Return server version from znc --version
CLI Example:
.. code-block:: bash
salt '*' znc.version
'''
cmd = ['znc', '--version']
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
ret = out[0].split(' - ')
return ret[0]
|
saltstack/salt
|
salt/modules/znc.py
|
version
|
python
|
def version():
'''
Return server version from znc --version
CLI Example:
.. code-block:: bash
salt '*' znc.version
'''
cmd = ['znc', '--version']
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
ret = out[0].split(' - ')
return ret[0]
|
Return server version from znc --version
CLI Example:
.. code-block:: bash
salt '*' znc.version
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/znc.py#L108-L121
| null |
# -*- coding: utf-8 -*-
'''
znc - An advanced IRC bouncer
.. versionadded:: 2014.7.0
Provides an interface to basic ZNC functionality
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import hashlib
import logging
import os.path
import random
import signal
# Import salt libs
import salt.utils.path
from salt.ext.six.moves import range
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load the module if znc is installed
'''
if salt.utils.path.which('znc'):
return 'znc'
return (False, "Module znc: znc binary not found")
def _makepass(password, hasher='sha256'):
'''
Create a znc compatible hashed password
'''
# Setup the hasher
if hasher == 'sha256':
h = hashlib.sha256(password)
elif hasher == 'md5':
h = hashlib.md5(password)
else:
return NotImplemented
c = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
"0123456789!?.,:;/*-+_()"
r = {
'Method': h.name,
'Salt': ''.join(random.SystemRandom().choice(c) for x in range(20)),
}
# Salt the password hash
h.update(r['Salt'])
r['Hash'] = h.hexdigest()
return r
def buildmod(*modules):
'''
Build module using znc-buildmod
CLI Example:
.. code-block:: bash
salt '*' znc.buildmod module.cpp [...]
'''
# Check if module files are missing
missing = [module for module in modules if not os.path.exists(module)]
if missing:
return 'Error: The file ({0}) does not exist.'.format(', '.join(missing))
cmd = ['znc-buildmod']
cmd.extend(modules)
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
return out[-1]
def dumpconf():
'''
Write the active configuration state to config file
CLI Example:
.. code-block:: bash
salt '*' znc.dumpconf
'''
return __salt__['ps.pkill']('znc', signal=signal.SIGUSR1)
def rehashconf():
'''
Rehash the active configuration state from config file
CLI Example:
.. code-block:: bash
salt '*' znc.rehashconf
'''
return __salt__['ps.pkill']('znc', signal=signal.SIGHUP)
|
saltstack/salt
|
salt/returners/odbc.py
|
_get_conn
|
python
|
def _get_conn(ret=None):
'''
Return a MSSQL connection.
'''
_options = _get_options(ret)
dsn = _options.get('dsn')
user = _options.get('user')
passwd = _options.get('passwd')
return pyodbc.connect('DSN={0};UID={1};PWD={2}'.format(
dsn,
user,
passwd))
|
Return a MSSQL connection.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/odbc.py#L169-L181
|
[
"def _get_options(ret=None):\n '''\n Get the odbc options from salt.\n '''\n attrs = {'dsn': 'dsn',\n 'user': 'user',\n 'passwd': 'passwd'}\n\n _options = salt.returners.get_returner_options('returner.{0}'.format(__virtualname__),\n ret,\n attrs,\n __salt__=__salt__,\n __opts__=__opts__)\n return _options\n"
] |
# -*- coding: utf-8 -*-
'''
Return data to an ODBC compliant server. This driver was
developed with Microsoft SQL Server in mind, but theoretically
could be used to return data to any compliant ODBC database
as long as there is a working ODBC driver for it on your
minion platform.
:maintainer: C. R. Oldham (cr@saltstack.com)
:maturity: New
:depends: unixodbc, pyodbc, freetds (for SQL Server)
:platform: all
To enable this returner the minion will need
On Linux:
unixodbc (http://www.unixodbc.org)
pyodbc (`pip install pyodbc`)
The FreeTDS ODBC driver for SQL Server (http://www.freetds.org)
or another compatible ODBC driver
On Windows:
TBD
unixODBC and FreeTDS need to be configured via /etc/odbcinst.ini and
/etc/odbc.ini.
/etc/odbcinst.ini::
[TDS]
Description=TDS
Driver=/usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so
(Note the above Driver line needs to point to the location of the FreeTDS
shared library. This example is for Ubuntu 14.04.)
/etc/odbc.ini::
[TS]
Description = "Salt Returner"
Driver=TDS
Server = <your server ip or fqdn>
Port = 1433
Database = salt
Trace = No
Also you need the following values configured in the minion or master config.
Configure as you see fit::
returner.odbc.dsn: 'TS'
returner.odbc.user: 'salt'
returner.odbc.passwd: 'salt'
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location::
alternative.returner.odbc.dsn: 'TS'
alternative.returner.odbc.user: 'salt'
alternative.returner.odbc.passwd: 'salt'
Running the following commands against Microsoft SQL Server in the desired
database as the appropriate user should create the database tables
correctly. Replace with equivalent SQL for other ODBC-compliant servers
.. code-block:: sql
--
-- Table structure for table 'jids'
--
if OBJECT_ID('dbo.jids', 'U') is not null
DROP TABLE dbo.jids
CREATE TABLE dbo.jids (
jid varchar(255) PRIMARY KEY,
load varchar(MAX) NOT NULL
);
--
-- Table structure for table 'salt_returns'
--
IF OBJECT_ID('dbo.salt_returns', 'U') IS NOT NULL
DROP TABLE dbo.salt_returns;
CREATE TABLE dbo.salt_returns (
added datetime not null default (getdate()),
fun varchar(100) NOT NULL,
jid varchar(255) NOT NULL,
retval varchar(MAX) NOT NULL,
id varchar(255) NOT NULL,
success bit default(0) NOT NULL,
full_ret varchar(MAX)
);
CREATE INDEX salt_returns_added on dbo.salt_returns(added);
CREATE INDEX salt_returns_id on dbo.salt_returns(id);
CREATE INDEX salt_returns_jid on dbo.salt_returns(jid);
CREATE INDEX salt_returns_fun on dbo.salt_returns(fun);
To use this returner, append '--return odbc' to the salt command.
.. code-block:: bash
salt '*' status.diskusage --return odbc
To use the alternative configuration, append '--return_config alternative' to the salt command.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' test.ping --return odbc --return_config alternative
To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command.
.. versionadded:: 2016.3.0
.. code-block:: bash
salt '*' test.ping --return odbc --return_kwargs '{"dsn": "dsn-name"}'
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
import salt.utils.jid
import salt.utils.json
import salt.returners
# FIXME We'll need to handle this differently for Windows.
# Import third party libs
try:
import pyodbc
#import psycopg2.extras
HAS_ODBC = True
except ImportError:
HAS_ODBC = False
# Define the module's virtual name
__virtualname__ = 'odbc'
def __virtual__():
    '''
    Load this returner only when the pyodbc bindings imported successfully.
    '''
    if HAS_ODBC:
        return True
    return False, 'Could not import odbc returner; pyodbc is not installed.'
def _get_options(ret=None):
    '''
    Get the odbc options from salt.
    '''
    # Map of option names to the keys used in the master/minion config.
    attrs = {
        'dsn': 'dsn',
        'user': 'user',
        'passwd': 'passwd',
    }
    return salt.returners.get_returner_options(
        'returner.{0}'.format(__virtualname__),
        ret,
        attrs,
        __salt__=__salt__,
        __opts__=__opts__)
def _close_conn(conn):
    '''
    Commit any pending transaction and close the ODBC connection.

    conn
        An open ``pyodbc`` connection as returned by ``_get_conn``.
    '''
    # Commit first so queued INSERTs are flushed before the handle closes.
    conn.commit()
    conn.close()
def returner(ret):
    '''
    Return data to an odbc server
    '''
    sql = '''INSERT INTO salt_returns
            (fun, jid, retval, id, success, full_ret)
            VALUES (?, ?, ?, ?, ?, ?)'''
    # Serialize the return payload once, before touching the connection.
    params = (
        ret['fun'],
        ret['jid'],
        salt.utils.json.dumps(ret['return']),
        ret['id'],
        ret['success'],
        salt.utils.json.dumps(ret),
    )
    conn = _get_conn(ret)
    cur = conn.cursor()
    cur.execute(sql, params)
    _close_conn(conn)
def save_load(jid, load, minions=None):
    '''
    Save the load to the specified jid id
    '''
    serialized = salt.utils.json.dumps(load)
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    cur.execute('''INSERT INTO jids (jid, load) VALUES (?, ?)''',
                (jid, serialized))
    _close_conn(conn)
def save_minions(jid, minions, syndic_id=None):  # pylint: disable=unused-argument
    '''
    Included for API consistency

    The odbc returner does not record which minions a job targeted, so
    this is a deliberate no-op; it exists only so the returner exposes
    the full interface the master expects.
    '''
    pass
def get_load(jid):
    '''
    Return the load data that marks a specified jid

    jid
        The job id to look up in the ``jids`` table.

    Returns the deserialized load, or an empty dict when the jid is
    unknown.
    '''
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    sql = '''SELECT load FROM jids WHERE jid = ?;'''
    cur.execute(sql, (jid,))
    data = cur.fetchone()
    try:
        if data:
            # fetchone() yields a row object; the serialized load is its
            # first (and only) column. The old code passed the whole row
            # to json loads.
            return salt.utils.json.loads(data[0])
        return {}
    finally:
        # The old code returned before _close_conn when a row was found,
        # leaking the connection; always commit and close on the way out.
        _close_conn(conn)
def get_jid(jid):
    '''
    Return the information returned when the specified job id was executed
    '''
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    cur.execute('''SELECT id, full_ret FROM salt_returns WHERE jid = ?''',
                (jid,))
    rows = cur.fetchall()
    # Map each minion id to its deserialized full return document.
    ret = dict(
        (minion, salt.utils.json.loads(full_ret)) for minion, full_ret in rows
    )
    _close_conn(conn)
    return ret
def get_fun(fun):
    '''
    Return a dict of the last function called for all minions
    '''
    sql = '''SELECT s.id,s.jid, s.full_ret
            FROM salt_returns s
            JOIN ( SELECT MAX(jid) AS jid FROM salt_returns GROUP BY fun, id) max
            ON s.jid = max.jid
            WHERE s.fun = ?
            '''
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    cur.execute(sql, (fun,))
    rows = cur.fetchall()
    # Keep only the newest return per minion (the JOIN above selects it).
    ret = dict(
        (minion, salt.utils.json.loads(retval)) for minion, _, retval in rows
    )
    _close_conn(conn)
    return ret
def get_jids():
    '''
    Return a list of all job ids
    '''
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    cur.execute('''SELECT distinct jid, load FROM jids''')
    jids = {}
    for jid, load in cur.fetchall():
        # Attach the human-readable start time derived from the jid.
        jids[jid] = salt.utils.jid.format_jid_instance(
            jid, salt.utils.json.loads(load))
    _close_conn(conn)
    return jids
def get_minions():
    '''
    Return a list of minions
    '''
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    cur.execute('''SELECT DISTINCT id FROM salt_returns''')
    # Each row is a one-column tuple holding a minion id.
    minions = [row[0] for row in cur.fetchall()]
    _close_conn(conn)
    return minions
def prep_jid(nocache=False, passed_jid=None):  # pylint: disable=unused-argument
    '''
    Do any work necessary to prepare a JID, including sending a custom id
    '''
    # Honor a caller-supplied jid; otherwise mint a fresh one.
    if passed_jid is None:
        return salt.utils.jid.gen_jid(__opts__)
    return passed_jid
|
saltstack/salt
|
salt/returners/odbc.py
|
returner
|
python
|
def returner(ret):
'''
Return data to an odbc server
'''
conn = _get_conn(ret)
cur = conn.cursor()
sql = '''INSERT INTO salt_returns
(fun, jid, retval, id, success, full_ret)
VALUES (?, ?, ?, ?, ?, ?)'''
cur.execute(
sql, (
ret['fun'],
ret['jid'],
salt.utils.json.dumps(ret['return']),
ret['id'],
ret['success'],
salt.utils.json.dumps(ret)
)
)
_close_conn(conn)
|
Return data to an odbc server
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/odbc.py#L192-L211
|
[
"def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n",
"def _get_conn(ret=None):\n '''\n Return a MSSQL connection.\n '''\n _options = _get_options(ret)\n dsn = _options.get('dsn')\n user = _options.get('user')\n passwd = _options.get('passwd')\n\n return pyodbc.connect('DSN={0};UID={1};PWD={2}'.format(\n dsn,\n user,\n passwd))\n",
"def _close_conn(conn):\n '''\n Close the MySQL connection\n '''\n conn.commit()\n conn.close()\n"
] |
# -*- coding: utf-8 -*-
'''
Return data to an ODBC compliant server. This driver was
developed with Microsoft SQL Server in mind, but theoretically
could be used to return data to any compliant ODBC database
as long as there is a working ODBC driver for it on your
minion platform.
:maintainer: C. R. Oldham (cr@saltstack.com)
:maturity: New
:depends: unixodbc, pyodbc, freetds (for SQL Server)
:platform: all
To enable this returner the minion will need
On Linux:
unixodbc (http://www.unixodbc.org)
pyodbc (`pip install pyodbc`)
The FreeTDS ODBC driver for SQL Server (http://www.freetds.org)
or another compatible ODBC driver
On Windows:
TBD
unixODBC and FreeTDS need to be configured via /etc/odbcinst.ini and
/etc/odbc.ini.
/etc/odbcinst.ini::
[TDS]
Description=TDS
Driver=/usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so
(Note the above Driver line needs to point to the location of the FreeTDS
shared library. This example is for Ubuntu 14.04.)
/etc/odbc.ini::
[TS]
Description = "Salt Returner"
Driver=TDS
Server = <your server ip or fqdn>
Port = 1433
Database = salt
Trace = No
Also you need the following values configured in the minion or master config.
Configure as you see fit::
returner.odbc.dsn: 'TS'
returner.odbc.user: 'salt'
returner.odbc.passwd: 'salt'
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location::
alternative.returner.odbc.dsn: 'TS'
alternative.returner.odbc.user: 'salt'
alternative.returner.odbc.passwd: 'salt'
Running the following commands against Microsoft SQL Server in the desired
database as the appropriate user should create the database tables
correctly. Replace with equivalent SQL for other ODBC-compliant servers
.. code-block:: sql
--
-- Table structure for table 'jids'
--
if OBJECT_ID('dbo.jids', 'U') is not null
DROP TABLE dbo.jids
CREATE TABLE dbo.jids (
jid varchar(255) PRIMARY KEY,
load varchar(MAX) NOT NULL
);
--
-- Table structure for table 'salt_returns'
--
IF OBJECT_ID('dbo.salt_returns', 'U') IS NOT NULL
DROP TABLE dbo.salt_returns;
CREATE TABLE dbo.salt_returns (
added datetime not null default (getdate()),
fun varchar(100) NOT NULL,
jid varchar(255) NOT NULL,
retval varchar(MAX) NOT NULL,
id varchar(255) NOT NULL,
success bit default(0) NOT NULL,
full_ret varchar(MAX)
);
CREATE INDEX salt_returns_added on dbo.salt_returns(added);
CREATE INDEX salt_returns_id on dbo.salt_returns(id);
CREATE INDEX salt_returns_jid on dbo.salt_returns(jid);
CREATE INDEX salt_returns_fun on dbo.salt_returns(fun);
To use this returner, append '--return odbc' to the salt command.
.. code-block:: bash
salt '*' status.diskusage --return odbc
To use the alternative configuration, append '--return_config alternative' to the salt command.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' test.ping --return odbc --return_config alternative
To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command.
.. versionadded:: 2016.3.0
.. code-block:: bash
salt '*' test.ping --return odbc --return_kwargs '{"dsn": "dsn-name"}'
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
import salt.utils.jid
import salt.utils.json
import salt.returners
# FIXME We'll need to handle this differently for Windows.
# Import third party libs
try:
import pyodbc
#import psycopg2.extras
HAS_ODBC = True
except ImportError:
HAS_ODBC = False
# Define the module's virtual name
__virtualname__ = 'odbc'
def __virtual__():
if not HAS_ODBC:
return False, 'Could not import odbc returner; pyodbc is not installed.'
return True
def _get_options(ret=None):
'''
Get the odbc options from salt.
'''
attrs = {'dsn': 'dsn',
'user': 'user',
'passwd': 'passwd'}
_options = salt.returners.get_returner_options('returner.{0}'.format(__virtualname__),
ret,
attrs,
__salt__=__salt__,
__opts__=__opts__)
return _options
def _get_conn(ret=None):
    '''
    Return a MSSQL connection.
    '''
    opts = _get_options(ret)
    # Assemble the ODBC connection string from the configured credentials.
    conn_str = 'DSN={0};UID={1};PWD={2}'.format(
        opts.get('dsn'), opts.get('user'), opts.get('passwd'))
    return pyodbc.connect(conn_str)
def _close_conn(conn):
'''
Close the MySQL connection
'''
conn.commit()
conn.close()
def save_load(jid, load, minions=None):
'''
Save the load to the specified jid id
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''INSERT INTO jids (jid, load) VALUES (?, ?)'''
cur.execute(sql, (jid, salt.utils.json.dumps(load)))
_close_conn(conn)
def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument
'''
Included for API consistency
'''
pass
def get_load(jid):
'''
Return the load data that marks a specified jid
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT load FROM jids WHERE jid = ?;'''
cur.execute(sql, (jid,))
data = cur.fetchone()
if data:
return salt.utils.json.loads(data)
_close_conn(conn)
return {}
def get_jid(jid):
'''
Return the information returned when the specified job id was executed
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT id, full_ret FROM salt_returns WHERE jid = ?'''
cur.execute(sql, (jid,))
data = cur.fetchall()
ret = {}
if data:
for minion, full_ret in data:
ret[minion] = salt.utils.json.loads(full_ret)
_close_conn(conn)
return ret
def get_fun(fun):
'''
Return a dict of the last function called for all minions
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT s.id,s.jid, s.full_ret
FROM salt_returns s
JOIN ( SELECT MAX(jid) AS jid FROM salt_returns GROUP BY fun, id) max
ON s.jid = max.jid
WHERE s.fun = ?
'''
cur.execute(sql, (fun,))
data = cur.fetchall()
ret = {}
if data:
for minion, _, retval in data:
ret[minion] = salt.utils.json.loads(retval)
_close_conn(conn)
return ret
def get_jids():
'''
Return a list of all job ids
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT distinct jid, load FROM jids'''
cur.execute(sql)
data = cur.fetchall()
ret = {}
for jid, load in data:
ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load))
_close_conn(conn)
return ret
def get_minions():
'''
Return a list of minions
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT DISTINCT id FROM salt_returns'''
cur.execute(sql)
data = cur.fetchall()
ret = []
for minion in data:
ret.append(minion[0])
_close_conn(conn)
return ret
def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument
'''
Do any work necessary to prepare a JID, including sending a custom id
'''
return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__)
|
saltstack/salt
|
salt/returners/odbc.py
|
get_jid
|
python
|
def get_jid(jid):
'''
Return the information returned when the specified job id was executed
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT id, full_ret FROM salt_returns WHERE jid = ?'''
cur.execute(sql, (jid,))
data = cur.fetchall()
ret = {}
if data:
for minion, full_ret in data:
ret[minion] = salt.utils.json.loads(full_ret)
_close_conn(conn)
return ret
|
Return the information returned when the specified job id was executed
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/odbc.py#L249-L264
|
[
"def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n",
"def _get_conn(ret=None):\n '''\n Return a MSSQL connection.\n '''\n _options = _get_options(ret)\n dsn = _options.get('dsn')\n user = _options.get('user')\n passwd = _options.get('passwd')\n\n return pyodbc.connect('DSN={0};UID={1};PWD={2}'.format(\n dsn,\n user,\n passwd))\n",
"def _close_conn(conn):\n '''\n Close the MySQL connection\n '''\n conn.commit()\n conn.close()\n"
] |
# -*- coding: utf-8 -*-
'''
Return data to an ODBC compliant server. This driver was
developed with Microsoft SQL Server in mind, but theoretically
could be used to return data to any compliant ODBC database
as long as there is a working ODBC driver for it on your
minion platform.
:maintainer: C. R. Oldham (cr@saltstack.com)
:maturity: New
:depends: unixodbc, pyodbc, freetds (for SQL Server)
:platform: all
To enable this returner the minion will need
On Linux:
unixodbc (http://www.unixodbc.org)
pyodbc (`pip install pyodbc`)
The FreeTDS ODBC driver for SQL Server (http://www.freetds.org)
or another compatible ODBC driver
On Windows:
TBD
unixODBC and FreeTDS need to be configured via /etc/odbcinst.ini and
/etc/odbc.ini.
/etc/odbcinst.ini::
[TDS]
Description=TDS
Driver=/usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so
(Note the above Driver line needs to point to the location of the FreeTDS
shared library. This example is for Ubuntu 14.04.)
/etc/odbc.ini::
[TS]
Description = "Salt Returner"
Driver=TDS
Server = <your server ip or fqdn>
Port = 1433
Database = salt
Trace = No
Also you need the following values configured in the minion or master config.
Configure as you see fit::
returner.odbc.dsn: 'TS'
returner.odbc.user: 'salt'
returner.odbc.passwd: 'salt'
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location::
alternative.returner.odbc.dsn: 'TS'
alternative.returner.odbc.user: 'salt'
alternative.returner.odbc.passwd: 'salt'
Running the following commands against Microsoft SQL Server in the desired
database as the appropriate user should create the database tables
correctly. Replace with equivalent SQL for other ODBC-compliant servers
.. code-block:: sql
--
-- Table structure for table 'jids'
--
if OBJECT_ID('dbo.jids', 'U') is not null
DROP TABLE dbo.jids
CREATE TABLE dbo.jids (
jid varchar(255) PRIMARY KEY,
load varchar(MAX) NOT NULL
);
--
-- Table structure for table 'salt_returns'
--
IF OBJECT_ID('dbo.salt_returns', 'U') IS NOT NULL
DROP TABLE dbo.salt_returns;
CREATE TABLE dbo.salt_returns (
added datetime not null default (getdate()),
fun varchar(100) NOT NULL,
jid varchar(255) NOT NULL,
retval varchar(MAX) NOT NULL,
id varchar(255) NOT NULL,
success bit default(0) NOT NULL,
full_ret varchar(MAX)
);
CREATE INDEX salt_returns_added on dbo.salt_returns(added);
CREATE INDEX salt_returns_id on dbo.salt_returns(id);
CREATE INDEX salt_returns_jid on dbo.salt_returns(jid);
CREATE INDEX salt_returns_fun on dbo.salt_returns(fun);
To use this returner, append '--return odbc' to the salt command.
.. code-block:: bash
salt '*' status.diskusage --return odbc
To use the alternative configuration, append '--return_config alternative' to the salt command.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' test.ping --return odbc --return_config alternative
To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command.
.. versionadded:: 2016.3.0
.. code-block:: bash
salt '*' test.ping --return odbc --return_kwargs '{"dsn": "dsn-name"}'
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
import salt.utils.jid
import salt.utils.json
import salt.returners
# FIXME We'll need to handle this differently for Windows.
# Import third party libs
try:
import pyodbc
#import psycopg2.extras
HAS_ODBC = True
except ImportError:
HAS_ODBC = False
# Define the module's virtual name
__virtualname__ = 'odbc'
def __virtual__():
if not HAS_ODBC:
return False, 'Could not import odbc returner; pyodbc is not installed.'
return True
def _get_options(ret=None):
'''
Get the odbc options from salt.
'''
attrs = {'dsn': 'dsn',
'user': 'user',
'passwd': 'passwd'}
_options = salt.returners.get_returner_options('returner.{0}'.format(__virtualname__),
ret,
attrs,
__salt__=__salt__,
__opts__=__opts__)
return _options
def _get_conn(ret=None):
'''
Return a MSSQL connection.
'''
_options = _get_options(ret)
dsn = _options.get('dsn')
user = _options.get('user')
passwd = _options.get('passwd')
return pyodbc.connect('DSN={0};UID={1};PWD={2}'.format(
dsn,
user,
passwd))
def _close_conn(conn):
'''
Close the MySQL connection
'''
conn.commit()
conn.close()
def returner(ret):
'''
Return data to an odbc server
'''
conn = _get_conn(ret)
cur = conn.cursor()
sql = '''INSERT INTO salt_returns
(fun, jid, retval, id, success, full_ret)
VALUES (?, ?, ?, ?, ?, ?)'''
cur.execute(
sql, (
ret['fun'],
ret['jid'],
salt.utils.json.dumps(ret['return']),
ret['id'],
ret['success'],
salt.utils.json.dumps(ret)
)
)
_close_conn(conn)
def save_load(jid, load, minions=None):
'''
Save the load to the specified jid id
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''INSERT INTO jids (jid, load) VALUES (?, ?)'''
cur.execute(sql, (jid, salt.utils.json.dumps(load)))
_close_conn(conn)
def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument
'''
Included for API consistency
'''
pass
def get_load(jid):
'''
Return the load data that marks a specified jid
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT load FROM jids WHERE jid = ?;'''
cur.execute(sql, (jid,))
data = cur.fetchone()
if data:
return salt.utils.json.loads(data)
_close_conn(conn)
return {}
def get_fun(fun):
'''
Return a dict of the last function called for all minions
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT s.id,s.jid, s.full_ret
FROM salt_returns s
JOIN ( SELECT MAX(jid) AS jid FROM salt_returns GROUP BY fun, id) max
ON s.jid = max.jid
WHERE s.fun = ?
'''
cur.execute(sql, (fun,))
data = cur.fetchall()
ret = {}
if data:
for minion, _, retval in data:
ret[minion] = salt.utils.json.loads(retval)
_close_conn(conn)
return ret
def get_jids():
'''
Return a list of all job ids
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT distinct jid, load FROM jids'''
cur.execute(sql)
data = cur.fetchall()
ret = {}
for jid, load in data:
ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load))
_close_conn(conn)
return ret
def get_minions():
'''
Return a list of minions
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT DISTINCT id FROM salt_returns'''
cur.execute(sql)
data = cur.fetchall()
ret = []
for minion in data:
ret.append(minion[0])
_close_conn(conn)
return ret
def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument
'''
Do any work necessary to prepare a JID, including sending a custom id
'''
return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__)
|
saltstack/salt
|
salt/returners/odbc.py
|
get_fun
|
python
|
def get_fun(fun):
'''
Return a dict of the last function called for all minions
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT s.id,s.jid, s.full_ret
FROM salt_returns s
JOIN ( SELECT MAX(jid) AS jid FROM salt_returns GROUP BY fun, id) max
ON s.jid = max.jid
WHERE s.fun = ?
'''
cur.execute(sql, (fun,))
data = cur.fetchall()
ret = {}
if data:
for minion, _, retval in data:
ret[minion] = salt.utils.json.loads(retval)
_close_conn(conn)
return ret
|
Return a dict of the last function called for all minions
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/odbc.py#L267-L288
|
[
"def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n",
"def _get_conn(ret=None):\n '''\n Return a MSSQL connection.\n '''\n _options = _get_options(ret)\n dsn = _options.get('dsn')\n user = _options.get('user')\n passwd = _options.get('passwd')\n\n return pyodbc.connect('DSN={0};UID={1};PWD={2}'.format(\n dsn,\n user,\n passwd))\n",
"def _close_conn(conn):\n '''\n Close the MySQL connection\n '''\n conn.commit()\n conn.close()\n"
] |
# -*- coding: utf-8 -*-
'''
Return data to an ODBC compliant server. This driver was
developed with Microsoft SQL Server in mind, but theoretically
could be used to return data to any compliant ODBC database
as long as there is a working ODBC driver for it on your
minion platform.
:maintainer: C. R. Oldham (cr@saltstack.com)
:maturity: New
:depends: unixodbc, pyodbc, freetds (for SQL Server)
:platform: all
To enable this returner the minion will need
On Linux:
unixodbc (http://www.unixodbc.org)
pyodbc (`pip install pyodbc`)
The FreeTDS ODBC driver for SQL Server (http://www.freetds.org)
or another compatible ODBC driver
On Windows:
TBD
unixODBC and FreeTDS need to be configured via /etc/odbcinst.ini and
/etc/odbc.ini.
/etc/odbcinst.ini::
[TDS]
Description=TDS
Driver=/usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so
(Note the above Driver line needs to point to the location of the FreeTDS
shared library. This example is for Ubuntu 14.04.)
/etc/odbc.ini::
[TS]
Description = "Salt Returner"
Driver=TDS
Server = <your server ip or fqdn>
Port = 1433
Database = salt
Trace = No
Also you need the following values configured in the minion or master config.
Configure as you see fit::
returner.odbc.dsn: 'TS'
returner.odbc.user: 'salt'
returner.odbc.passwd: 'salt'
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location::
alternative.returner.odbc.dsn: 'TS'
alternative.returner.odbc.user: 'salt'
alternative.returner.odbc.passwd: 'salt'
Running the following commands against Microsoft SQL Server in the desired
database as the appropriate user should create the database tables
correctly. Replace with equivalent SQL for other ODBC-compliant servers
.. code-block:: sql
--
-- Table structure for table 'jids'
--
if OBJECT_ID('dbo.jids', 'U') is not null
DROP TABLE dbo.jids
CREATE TABLE dbo.jids (
jid varchar(255) PRIMARY KEY,
load varchar(MAX) NOT NULL
);
--
-- Table structure for table 'salt_returns'
--
IF OBJECT_ID('dbo.salt_returns', 'U') IS NOT NULL
DROP TABLE dbo.salt_returns;
CREATE TABLE dbo.salt_returns (
added datetime not null default (getdate()),
fun varchar(100) NOT NULL,
jid varchar(255) NOT NULL,
retval varchar(MAX) NOT NULL,
id varchar(255) NOT NULL,
success bit default(0) NOT NULL,
full_ret varchar(MAX)
);
CREATE INDEX salt_returns_added on dbo.salt_returns(added);
CREATE INDEX salt_returns_id on dbo.salt_returns(id);
CREATE INDEX salt_returns_jid on dbo.salt_returns(jid);
CREATE INDEX salt_returns_fun on dbo.salt_returns(fun);
To use this returner, append '--return odbc' to the salt command.
.. code-block:: bash
salt '*' status.diskusage --return odbc
To use the alternative configuration, append '--return_config alternative' to the salt command.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' test.ping --return odbc --return_config alternative
To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command.
.. versionadded:: 2016.3.0
.. code-block:: bash
salt '*' test.ping --return odbc --return_kwargs '{"dsn": "dsn-name"}'
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
import salt.utils.jid
import salt.utils.json
import salt.returners
# FIXME We'll need to handle this differently for Windows.
# Import third party libs
try:
import pyodbc
#import psycopg2.extras
HAS_ODBC = True
except ImportError:
HAS_ODBC = False
# Define the module's virtual name
__virtualname__ = 'odbc'
def __virtual__():
if not HAS_ODBC:
return False, 'Could not import odbc returner; pyodbc is not installed.'
return True
def _get_options(ret=None):
'''
Get the odbc options from salt.
'''
attrs = {'dsn': 'dsn',
'user': 'user',
'passwd': 'passwd'}
_options = salt.returners.get_returner_options('returner.{0}'.format(__virtualname__),
ret,
attrs,
__salt__=__salt__,
__opts__=__opts__)
return _options
def _get_conn(ret=None):
'''
Return a MSSQL connection.
'''
_options = _get_options(ret)
dsn = _options.get('dsn')
user = _options.get('user')
passwd = _options.get('passwd')
return pyodbc.connect('DSN={0};UID={1};PWD={2}'.format(
dsn,
user,
passwd))
def _close_conn(conn):
'''
Close the MySQL connection
'''
conn.commit()
conn.close()
def returner(ret):
'''
Return data to an odbc server
'''
conn = _get_conn(ret)
cur = conn.cursor()
sql = '''INSERT INTO salt_returns
(fun, jid, retval, id, success, full_ret)
VALUES (?, ?, ?, ?, ?, ?)'''
cur.execute(
sql, (
ret['fun'],
ret['jid'],
salt.utils.json.dumps(ret['return']),
ret['id'],
ret['success'],
salt.utils.json.dumps(ret)
)
)
_close_conn(conn)
def save_load(jid, load, minions=None):
'''
Save the load to the specified jid id
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''INSERT INTO jids (jid, load) VALUES (?, ?)'''
cur.execute(sql, (jid, salt.utils.json.dumps(load)))
_close_conn(conn)
def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument
'''
Included for API consistency
'''
pass
def get_load(jid):
'''
Return the load data that marks a specified jid
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT load FROM jids WHERE jid = ?;'''
cur.execute(sql, (jid,))
data = cur.fetchone()
if data:
return salt.utils.json.loads(data)
_close_conn(conn)
return {}
def get_jid(jid):
'''
Return the information returned when the specified job id was executed
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT id, full_ret FROM salt_returns WHERE jid = ?'''
cur.execute(sql, (jid,))
data = cur.fetchall()
ret = {}
if data:
for minion, full_ret in data:
ret[minion] = salt.utils.json.loads(full_ret)
_close_conn(conn)
return ret
def get_jids():
'''
Return a list of all job ids
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT distinct jid, load FROM jids'''
cur.execute(sql)
data = cur.fetchall()
ret = {}
for jid, load in data:
ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load))
_close_conn(conn)
return ret
def get_minions():
    '''
    Return a list of minions

    Collects every distinct minion id that has ever returned a job.
    '''
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    cur.execute('''SELECT DISTINCT id FROM salt_returns''')
    # each row is a 1-tuple; unwrap the single id column
    minions = [row[0] for row in cur.fetchall()]
    _close_conn(conn)
    return minions
def prep_jid(nocache=False, passed_jid=None):  # pylint: disable=unused-argument
    '''
    Do any work necessary to prepare a JID, including sending a custom id
    '''
    # honor a caller-supplied jid; otherwise generate a fresh one
    if passed_jid is None:
        return salt.utils.jid.gen_jid(__opts__)
    return passed_jid
|
saltstack/salt
|
salt/returners/odbc.py
|
get_jids
|
python
|
def get_jids():
    '''
    Return a list of all job ids

    Builds a mapping of jid -> formatted job instance from the jids table.
    '''
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    cur.execute('''SELECT distinct jid, load FROM jids''')
    jobs = {}
    for job_id, job_load in cur.fetchall():
        jobs[job_id] = salt.utils.jid.format_jid_instance(
            job_id, salt.utils.json.loads(job_load))
    _close_conn(conn)
    return jobs
|
Return a list of all job ids
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/odbc.py#L291-L305
|
[
"def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n",
"def _get_conn(ret=None):\n '''\n Return a MSSQL connection.\n '''\n _options = _get_options(ret)\n dsn = _options.get('dsn')\n user = _options.get('user')\n passwd = _options.get('passwd')\n\n return pyodbc.connect('DSN={0};UID={1};PWD={2}'.format(\n dsn,\n user,\n passwd))\n",
"def format_jid_instance(jid, job):\n '''\n Format the jid correctly\n '''\n ret = format_job_instance(job)\n ret.update({'StartTime': jid_to_time(jid)})\n return ret\n",
"def _close_conn(conn):\n '''\n Close the MySQL connection\n '''\n conn.commit()\n conn.close()\n"
] |
# -*- coding: utf-8 -*-
'''
Return data to an ODBC compliant server. This driver was
developed with Microsoft SQL Server in mind, but theoretically
could be used to return data to any compliant ODBC database
as long as there is a working ODBC driver for it on your
minion platform.
:maintainer: C. R. Oldham (cr@saltstack.com)
:maturity: New
:depends: unixodbc, pyodbc, freetds (for SQL Server)
:platform: all
To enable this returner the minion will need
On Linux:
unixodbc (http://www.unixodbc.org)
pyodbc (`pip install pyodbc`)
The FreeTDS ODBC driver for SQL Server (http://www.freetds.org)
or another compatible ODBC driver
On Windows:
TBD
unixODBC and FreeTDS need to be configured via /etc/odbcinst.ini and
/etc/odbc.ini.
/etc/odbcinst.ini::
[TDS]
Description=TDS
Driver=/usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so
(Note the above Driver line needs to point to the location of the FreeTDS
shared library. This example is for Ubuntu 14.04.)
/etc/odbc.ini::
[TS]
Description = "Salt Returner"
Driver=TDS
Server = <your server ip or fqdn>
Port = 1433
Database = salt
Trace = No
Also you need the following values configured in the minion or master config.
Configure as you see fit::
returner.odbc.dsn: 'TS'
returner.odbc.user: 'salt'
returner.odbc.passwd: 'salt'
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location::
alternative.returner.odbc.dsn: 'TS'
alternative.returner.odbc.user: 'salt'
alternative.returner.odbc.passwd: 'salt'
Running the following commands against Microsoft SQL Server in the desired
database as the appropriate user should create the database tables
correctly. Replace with equivalent SQL for other ODBC-compliant servers
.. code-block:: sql
--
-- Table structure for table 'jids'
--
if OBJECT_ID('dbo.jids', 'U') is not null
DROP TABLE dbo.jids
CREATE TABLE dbo.jids (
jid varchar(255) PRIMARY KEY,
load varchar(MAX) NOT NULL
);
--
-- Table structure for table 'salt_returns'
--
IF OBJECT_ID('dbo.salt_returns', 'U') IS NOT NULL
DROP TABLE dbo.salt_returns;
CREATE TABLE dbo.salt_returns (
added datetime not null default (getdate()),
fun varchar(100) NOT NULL,
jid varchar(255) NOT NULL,
retval varchar(MAX) NOT NULL,
id varchar(255) NOT NULL,
success bit default(0) NOT NULL,
full_ret varchar(MAX)
);
CREATE INDEX salt_returns_added on dbo.salt_returns(added);
CREATE INDEX salt_returns_id on dbo.salt_returns(id);
CREATE INDEX salt_returns_jid on dbo.salt_returns(jid);
CREATE INDEX salt_returns_fun on dbo.salt_returns(fun);
To use this returner, append '--return odbc' to the salt command.
.. code-block:: bash
salt '*' status.diskusage --return odbc
To use the alternative configuration, append '--return_config alternative' to the salt command.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' test.ping --return odbc --return_config alternative
To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command.
.. versionadded:: 2016.3.0
.. code-block:: bash
salt '*' test.ping --return odbc --return_kwargs '{"dsn": "dsn-name"}'
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
import salt.utils.jid
import salt.utils.json
import salt.returners
# FIXME We'll need to handle this differently for Windows.
# Import third party libs
try:
import pyodbc
#import psycopg2.extras
HAS_ODBC = True
except ImportError:
HAS_ODBC = False
# Define the module's virtual name
__virtualname__ = 'odbc'
def __virtual__():
    '''
    Only load this returner when pyodbc imported successfully.
    '''
    if HAS_ODBC:
        return True
    return False, 'Could not import odbc returner; pyodbc is not installed.'
def _get_options(ret=None):
    '''
    Get the odbc options from salt.

    Resolves dsn/user/passwd from the (possibly alternative) returner config.
    '''
    attrs = {'dsn': 'dsn',
             'user': 'user',
             'passwd': 'passwd'}
    return salt.returners.get_returner_options(
        'returner.{0}'.format(__virtualname__),
        ret,
        attrs,
        __salt__=__salt__,
        __opts__=__opts__,
    )
def _get_conn(ret=None):
    '''
    Return a MSSQL connection.
    '''
    opts = _get_options(ret)
    # assemble the ODBC connection string from the resolved options
    return pyodbc.connect('DSN={0};UID={1};PWD={2}'.format(
        opts.get('dsn'),
        opts.get('user'),
        opts.get('passwd')))
def _close_conn(conn):
'''
Close the MySQL connection
'''
conn.commit()
conn.close()
def returner(ret):
    '''
    Return data to an odbc server

    Writes one row per job return into salt_returns.
    '''
    conn = _get_conn(ret)
    cursor = conn.cursor()
    cursor.execute(
        '''INSERT INTO salt_returns
        (fun, jid, retval, id, success, full_ret)
        VALUES (?, ?, ?, ?, ?, ?)''',
        (ret['fun'],
         ret['jid'],
         salt.utils.json.dumps(ret['return']),
         ret['id'],
         ret['success'],
         salt.utils.json.dumps(ret)))
    _close_conn(conn)
def save_load(jid, load, minions=None):
    '''
    Save the load to the specified jid id
    '''
    conn = _get_conn(ret=None)
    cursor = conn.cursor()
    payload = salt.utils.json.dumps(load)
    cursor.execute('''INSERT INTO jids (jid, load) VALUES (?, ?)''',
                   (jid, payload))
    _close_conn(conn)
def save_minions(jid, minions, syndic_id=None):  # pylint: disable=unused-argument
    '''
    Included for API consistency

    Deliberately does nothing; the minion list is not persisted here.
    '''
    return None
def get_load(jid):
    '''
    Return the load data that marks a specified jid

    jid is looked up in the jids table; {} is returned for an unknown jid.
    '''
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    sql = '''SELECT load FROM jids WHERE jid = ?;'''
    cur.execute(sql, (jid,))
    data = cur.fetchone()
    # Bug fix: the original returned before calling _close_conn whenever a
    # row was found, leaking the connection on every successful lookup.
    _close_conn(conn)
    if data:
        # Bug fix: fetchone() returns a row object, not a string; decode the
        # first (only) column instead of handing the row to json.loads.
        return salt.utils.json.loads(data[0])
    return {}
def get_jid(jid):
    '''
    Return the information returned when the specified job id was executed
    '''
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    cur.execute('''SELECT id, full_ret FROM salt_returns WHERE jid = ?''',
                (jid,))
    ret = {}
    for minion_id, full_ret in cur.fetchall():
        ret[minion_id] = salt.utils.json.loads(full_ret)
    _close_conn(conn)
    return ret
def get_fun(fun):
    '''
    Return a dict of the last function called for all minions
    '''
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    # join against the max jid per (fun, id) so only the latest run survives
    sql = '''SELECT s.id,s.jid, s.full_ret
        FROM salt_returns s
        JOIN ( SELECT MAX(jid) AS jid FROM salt_returns GROUP BY fun, id) max
        ON s.jid = max.jid
        WHERE s.fun = ?
        '''
    cur.execute(sql, (fun,))
    rows = cur.fetchall()
    # jid column is unused; keep minion -> decoded return
    ret = {minion: salt.utils.json.loads(retval) for minion, _, retval in rows}
    _close_conn(conn)
    return ret
def get_minions():
    '''
    Return a list of minions
    '''
    conn = _get_conn(ret=None)
    cursor = conn.cursor()
    cursor.execute('''SELECT DISTINCT id FROM salt_returns''')
    minion_ids = []
    for row in cursor.fetchall():
        minion_ids.append(row[0])
    _close_conn(conn)
    return minion_ids
def prep_jid(nocache=False, passed_jid=None):  # pylint: disable=unused-argument
    '''
    Do any work necessary to prepare a JID, including sending a custom id
    '''
    return salt.utils.jid.gen_jid(__opts__) if passed_jid is None else passed_jid
|
saltstack/salt
|
salt/states/smartos.py
|
_load_config
|
python
|
def _load_config():
    '''
    Loads and parses /usbkey/config

    Returns a dict of lowercase property names to their (unquoted) values.
    '''
    config = {}
    if os.path.isfile('/usbkey/config'):
        with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:
            for raw_line in config_file:
                line = salt.utils.stringutils.to_unicode(raw_line)
                # skip comments and lines without an assignment
                if line[0] == '#' or '=' not in line:
                    continue
                fields = line.split('=')
                config[fields[0].lower()] = fields[1].strip().strip('"')
    log.debug('smartos.config - read /usbkey/config: %s', config)
    return config
|
Loads and parses /usbkey/config
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/smartos.py#L185-L202
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def to_unicode(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str or unicode, return unicode (str for python 3)\n '''\n def _normalize(s):\n return unicodedata.normalize('NFC', s) if normalize else s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n exc = None\n if six.PY3:\n if isinstance(s, str):\n return _normalize(s)\n elif isinstance(s, (bytes, bytearray)):\n return _normalize(to_str(s, encoding, errors))\n raise TypeError('expected str, bytes, or bytearray')\n else:\n # This needs to be str and not six.string_types, since if the string is\n # already a unicode type, it does not need to be decoded (and doing so\n # will raise an exception).\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n return _normalize(s)\n elif isinstance(s, (str, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str or bytearray')\n"
] |
# -*- coding: utf-8 -*-
'''
Management of SmartOS Standalone Compute Nodes
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: vmadm, imgadm
:platform: smartos
.. versionadded:: 2016.3.0
.. code-block:: yaml
vmtest.example.org:
smartos.vm_present:
- config:
reprovision: true
- vmconfig:
image_uuid: c02a2044-c1bd-11e4-bd8c-dfc1db8b0182
brand: joyent
alias: vmtest
quota: 5
max_physical_memory: 512
tags:
label: 'test vm'
owner: 'sjorge'
nics:
"82:1b:8e:49:e9:12":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.123/16
- 192.168.2.123/24
vlan_id: 10
"82:1b:8e:49:e9:13":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
filesystems:
"/bigdata":
source: "/bulk/data"
type: lofs
options:
- ro
- nodevices
kvmtest.example.org:
smartos.vm_present:
- vmconfig:
brand: kvm
alias: kvmtest
cpu_type: host
ram: 512
vnc_port: 9
tags:
label: 'test kvm'
owner: 'sjorge'
disks:
disk0
size: 2048
model: virtio
compression: lz4
boot: true
nics:
"82:1b:8e:49:e9:15":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
docker.example.org:
smartos.vm_present:
- config:
auto_import: true
reprovision: true
- vmconfig:
image_uuid: emby/embyserver:latest
brand: lx
alias: mydockervm
quota: 5
max_physical_memory: 1024
tags:
label: 'my emby docker'
owner: 'sjorge'
resolvers:
- 172.16.1.1
nics:
"82:1b:8e:49:e9:18":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.118/24
vlan_id: 10
filesystems:
"/config:
source: "/vmdata/emby_config"
type: lofs
options:
- nodevices
cleanup_images:
smartos.image_vacuum
.. note::
Keep in mind that when removing properties from vmconfig they will not get
removed from the vm's current configuration, except for nics, disk, tags, ...
they get removed via add_*, set_*, update_*, and remove_*. Properties must
be manually reset to their default value.
The same behavior as when using 'vmadm update'.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import logging
import json
import os
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.files
# Import 3rd party libs
from salt.ext import six
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = 'smartos'
def __virtual__():
    '''
    Provides smartos state provided for SmartOS
    '''
    # both vmadm and imgadm execution modules must be loadable on this host
    if 'vmadm.create' not in __salt__ or 'imgadm.list' not in __salt__:
        return (
            False,
            '{0} state module can only be loaded on SmartOS compute nodes'.format(
                __virtualname__
            )
        )
    return True
def _split_docker_uuid(uuid):
'''
Split a smartos docker uuid into repo and tag
'''
if uuid:
uuid = uuid.split(':')
if len(uuid) == 2:
tag = uuid[1]
repo = uuid[0]
return repo, tag
return None, None
def _is_uuid(uuid):
'''
Check if uuid is a valid smartos uuid
Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8
'''
if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:
return True
return False
def _is_docker_uuid(uuid):
    '''
    Check if uuid is a valid smartos docker uuid

    Example plexinc/pms-docker:plexpass
    '''
    repo, tag = _split_docker_uuid(uuid)
    # truthy repo or tag means a usable repo:tag pair was parsed
    return bool(repo or tag)
def _write_config(config):
    '''
    writes /usbkey/config

    Properties are written sorted by name; values containing whitespace are
    wrapped in double quotes. Returns False when the file cannot be written.
    '''
    try:
        with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as cfg_fh:
            cfg_fh.write("#\n# This file was generated by salt\n#\n")
            for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
                # quote values containing spaces, unless already quoted
                if ' ' in six.text_type(config[prop]):
                    if not config[prop].startswith('"') or not config[prop].endswith('"'):
                        config[prop] = '"{0}"'.format(config[prop])
                cfg_fh.write(
                    salt.utils.stringutils.to_str(
                        "{0}={1}\n".format(prop, config[prop])
                    )
                )
        log.debug('smartos.config - wrote /usbkey/config: %s', config)
    except IOError:
        return False
    return True
def _parse_vmconfig(config, instances):
    '''
    Parse vm_present vm config

    Plain properties are copied through; properties named in *instances*
    are unrolled from a mapping into a list of per-instance dicts, with the
    mapping key written back under the configured id field.
    '''
    if not isinstance(config, (salt.utils.odict.OrderedDict)):
        log.error('smartos.vm_present::parse_vmconfig - failed to parse')
        return None

    vmconfig = salt.utils.odict.OrderedDict()
    for prop, value in config.items():
        if prop not in instances:
            vmconfig[prop] = value
            continue
        if not isinstance(value, (salt.utils.odict.OrderedDict)):
            continue
        vmconfig[prop] = []
        for instance, instance_config in value.items():
            instance_config[instances[prop]] = instance
            ## some property are lowercase
            if 'mac' in instance_config:
                instance_config['mac'] = instance_config['mac'].lower()
            vmconfig[prop].append(instance_config)
    return vmconfig
def _get_instance_changes(current, state):
    '''
    Return the properties that differ between two configuration dicts.

    Differences whose old or new value is the empty string are dropped.
    '''
    # Compute the diff once; the original called compare_dicts a second time
    # just to drive the loop, and built two key sets it never used.
    changed = salt.utils.data.compare_dicts(current, state)
    for change in list(changed):
        if changed[change]['old'] == "" or changed[change]['new'] == "":
            del changed[change]
    return changed
def _copy_lx_vars(vmconfig):
    '''
    Copy kernel_version and docker:* variables from the source image
    manifest into an lx zone's vmconfig.

    NOTE: documentation on dockerinit:
    https://github.com/joyent/smartos-live/blob/master/src/dockerinit/README.md
    '''
    if 'image_uuid' not in vmconfig:
        return vmconfig

    # retrieve tags and type from image
    manifest = __salt__['imgadm.get'](vmconfig['image_uuid']).get('manifest', {})
    img_type = manifest.get('type', 'zone-dataset')
    img_tags = manifest.get('tags', {})

    # copy kernel_version (if not specified in vmconfig)
    if 'kernel_version' not in vmconfig and 'kernel_version' in img_tags:
        vmconfig['kernel_version'] = img_tags['kernel_version']

    # copy docker vars
    if img_type == 'docker':
        vmconfig['docker'] = True
        vmconfig['kernel_version'] = vmconfig.get('kernel_version', '4.3.0')
        if 'internal_metadata' not in vmconfig:
            vmconfig['internal_metadata'] = {}

        for tag in img_tags.get('docker:config', {}):
            val = img_tags['docker:config'][tag]
            key = 'docker:{0}'.format(tag.lower())

            # skip empty values
            if not val:
                continue

            # skip or merge user-supplied values
            if key == 'docker:env':
                try:
                    val_config = json.loads(
                        vmconfig['internal_metadata'].get(key, "")
                    )
                except ValueError:
                    val_config = []

                for config_env_var in val_config if isinstance(val_config, list) else json.loads(val_config):
                    config_env_var = config_env_var.split('=')
                    # user-provided env vars win over the image's defaults
                    for img_env_var in val:
                        if img_env_var.startswith('{0}='.format(config_env_var[0])):
                            val.remove(img_env_var)
                    val.append('='.join(config_env_var))
            elif key in vmconfig['internal_metadata']:
                continue

            if isinstance(val, list):
                # string-encoded JSON arrays
                vmconfig['internal_metadata'][key] = json.dumps(val)
            else:
                vmconfig['internal_metadata'][key] = val

    return vmconfig
def config_present(name, value):
    '''
    Ensure configuration property is set to value in /usbkey/config

    name : string
        name of property
    value : string
        value of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # load current configuration
    config = _load_config()

    # normalize bool and None to their config-file representation
    if isinstance(value, (bool)):
        value = 'true' if value else 'false'
    if not value:
        value = ""

    ret['result'] = True
    if name in config and six.text_type(config[name]) == six.text_type(value):
        # nothing to change
        ret['comment'] = 'property {0} already has value "{1}"'.format(name, value)
    elif name in config:
        # update property
        ret['comment'] = 'updated property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value
    else:
        # add property
        ret['comment'] = 'added property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value

    # apply change if needed
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)

    return ret
def config_absent(name):
    '''
    Ensure configuration property is absent in /usbkey/config

    name : string
        name of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    config = _load_config()

    ret['result'] = True
    if name in config:
        # delete property
        del config[name]
        ret['changes'][name] = None
        ret['comment'] = 'property {0} deleted'.format(name)
    else:
        # already absent
        ret['comment'] = 'property {0} is absent'.format(name)

    # apply change if needed
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)

    return ret
def source_present(name, source_type='imgapi'):
    '''
    Ensure an image source is present on the computenode

    name : string
        source url
    source_type : string
        source type (imgapi or docker)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name in __salt__['imgadm.sources']():
        # source is present
        ret['result'] = True
        ret['comment'] = 'image source {0} is present'.format(name)
        return ret

    # add new source
    res = {}
    if __opts__['test']:
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_add'](name, source_type)
        ret['result'] = (name in res)

    if ret['result']:
        ret['comment'] = 'image source {0} added'.format(name)
        ret['changes'][name] = 'added'
    else:
        ret['comment'] = 'image source {0} not added'.format(name)
        if 'Error' in res:
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])

    return ret
def source_absent(name):
    '''
    Ensure an image source is absent on the computenode

    name : string
        source url
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name not in __salt__['imgadm.sources']():
        # source is absent
        ret['result'] = True
        ret['comment'] = 'image source {0} is absent'.format(name)
        return ret

    # remove source
    res = {}
    if __opts__['test']:
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_delete'](name)
        ret['result'] = (name not in res)

    if ret['result']:
        ret['comment'] = 'image source {0} deleted'.format(name)
        ret['changes'][name] = 'deleted'
    else:
        ret['comment'] = 'image source {0} not deleted'.format(name)
        if 'Error' in res:
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])

    return ret
def image_present(name):
    '''
    Ensure image is present on the computenode

    name : string
        uuid of image
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    is_docker = _is_docker_uuid(name)

    if is_docker and __salt__['imgadm.docker_to_uuid'](name):
        # docker image was imported
        ret['result'] = True
        ret['comment'] = 'image {0} ({1}) is present'.format(
            name,
            __salt__['imgadm.docker_to_uuid'](name),
        )
        return ret

    if name in __salt__['imgadm.list']():
        # image was already imported
        ret['result'] = True
        ret['comment'] = 'image {0} is present'.format(name)
        return ret

    # NOTE: we cannot query available docker images, so take the name on faith
    candidates = [name] if is_docker else __salt__['imgadm.avail']()

    if name not in candidates:
        ret['result'] = False
        ret['comment'] = 'image {0} does not exists'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = True
        res = {}
        if is_docker:
            res['00000000-0000-0000-0000-000000000000'] = name
        else:
            res[name] = candidates[name]
    else:
        res = __salt__['imgadm.import'](name)
        if _is_uuid(name):
            ret['result'] = (name in res)
        elif is_docker:
            ret['result'] = __salt__['imgadm.docker_to_uuid'](name) is not None

    if ret['result']:
        ret['comment'] = 'image {0} imported'.format(name)
        ret['changes'] = res
    else:
        ret['comment'] = 'image {0} was unable to be imported'.format(name)

    return ret
def image_absent(name):
    '''
    Ensure image is absent on the computenode

    name : string
        uuid of image

    .. note::
        computenode.image_absent will only remove the image if it is not used
        by a vm.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # resolve the name to an imported image uuid
    uuid = None
    if _is_uuid(name):
        uuid = name
    if _is_docker_uuid(name):
        uuid = __salt__['imgadm.docker_to_uuid'](name)

    if not uuid or uuid not in __salt__['imgadm.list']():
        # image not imported
        ret['result'] = True
        ret['comment'] = 'image {0} is absent'.format(name)
    elif uuid in __salt__['vmadm.list'](order='image_uuid'):
        # refuse to delete an image a vm still references
        ret['result'] = False
        ret['comment'] = 'image {0} currently in use by a vm'.format(name)
    else:
        # delete image
        # Fix: initialize image_count before the test-mode branch; it is read
        # below even when __opts__['test'] skips the deletion path.
        image_count = 0
        if __opts__['test']:
            ret['result'] = True
        else:
            image = __salt__['imgadm.get'](uuid)
            if image['manifest']['name'] == 'docker-layer':
                # NOTE: docker images are made of multiple layers, loop over them
                while image:
                    image_count += 1
                    __salt__['imgadm.delete'](image['manifest']['uuid'])
                    if 'origin' in image['manifest']:
                        image = __salt__['imgadm.get'](image['manifest']['origin'])
                    else:
                        image = None
            else:
                # NOTE: normal images can just be delete
                __salt__['imgadm.delete'](uuid)
            ret['result'] = uuid not in __salt__['imgadm.list']()

        if image_count:
            ret['comment'] = 'image {0} and {1} children deleted'.format(name, image_count)
        else:
            ret['comment'] = 'image {0} deleted'.format(name)
        ret['changes'][name] = None

    return ret
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present

    .. warning::
        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # uuids of images that must be kept
    keep = []

    # keep images installed via an image_present state on this host
    for state in __salt__['state.show_lowstate']():
        # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue
        if state['state'] != __virtualname__:
            continue
        if state['fun'] not in ['image_present']:
            continue
        if 'name' in state:
            if _is_uuid(state['name']):
                keep.append(state['name'])
            elif _is_docker_uuid(state['name']):
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                keep.append(state['name'])

    # keep images in use by vms
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in keep:
            keep.append(image_uuid)

    # purge every other imported image
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in keep:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':
            # NOTE: docker images are made of multiple layers, loop over them
            while image:
                image_uuid = image['manifest']['uuid']
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                if 'origin' in image['manifest']:
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:
            # NOTE: normal images can just be delete
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'

    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'

    return ret
def vm_present(name, vmconfig, config=None):
    '''
    Ensure vm is present on the computenode

    name : string
        hostname of vm
    vmconfig : dict
        options to set for the vm
    config : dict
        fine grain control over vm_present

    .. note::
        The following configuration properties can be toggled in the config parameter.
          - kvm_reboot (true) - reboots of kvm zones if needed for a config update
          - auto_import (false) - automatic importing of missing images
          - auto_lx_vars (true) - copy kernel_version and docker:* variables from image
          - reprovision (false) - reprovision on image_uuid changes
          - enforce_tags (true) - false = add tags only, true = add, update, and remove tags
          - enforce_routes (true) - false = add tags only, true = add, update, and remove routes
          - enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
          - enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata

    .. note::
        State ID is used as hostname. Hostnames must be unique.

    .. note::
        If hostname is provided in vmconfig this will take precedence over the State ID.
        This allows multiple states to be applied to the same vm.

    .. note::
        The following instances should have a unique ID.
          - nic : mac
          - filesystem: target
          - disk : path or diskN for zvols

        e.g. disk0 will be the first disk added, disk1 the 2nd,...

    .. versionchanged:: 2019.2.0
        Added support for docker image uuids, added auto_lx_vars configuration, documented some missing configuration options.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # config defaults (overridden by the user-supplied config dict)
    state_config = config if config else {}
    config = {
        'kvm_reboot': True,
        'auto_import': False,
        'auto_lx_vars': True,
        'reprovision': False,
        'enforce_tags': True,
        'enforce_routes': True,
        'enforce_internal_metadata': True,
        'enforce_customer_metadata': True,
    }
    config.update(state_config)
    log.debug('smartos.vm_present::%s::config - %s', name, config)
    # map special vmconfig parameters
    #  collections have set/remove handlers
    #  instances have add/update/remove handlers and a unique id
    vmconfig_type = {
        'collection': [
            'tags',
            'customer_metadata',
            'internal_metadata',
            'routes'
        ],
        'instance': {
            'nics': 'mac',
            'disks': 'path',
            'filesystems': 'target'
        },
        'create_only': [
            'filesystems'
        ]
    }
    # internal_metadata keys owned by dockerinit that must survive updates
    vmconfig_docker_keep = [
        'docker:id',
        'docker:restartcount',
    ]
    # internal_metadata keys stored as string-encoded JSON arrays
    vmconfig_docker_array = [
        'docker:env',
        'docker:cmd',
        'docker:entrypoint',
    ]
    # parse vmconfig
    vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
    log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)
    # set hostname if needed
    if 'hostname' not in vmconfig:
        vmconfig['hostname'] = name
    # prepare image_uuid
    if 'image_uuid' in vmconfig:
        # NOTE: lookup uuid from docker uuid (normal uuid's are passed through unmodified)
        #       we must do this again if we end up importing a missing image later!
        docker_uuid = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
        vmconfig['image_uuid'] = docker_uuid if docker_uuid else vmconfig['image_uuid']
        # NOTE: import image (if missing and allowed)
        if vmconfig['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](vmconfig['image_uuid'])
                    vmconfig['image_uuid'] = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
                    if vmconfig['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])
    # prepare disk.*.image_uuid
    for disk in vmconfig['disks'] if 'disks' in vmconfig else []:
        if 'image_uuid' in disk and disk['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](disk['image_uuid'])
                    if disk['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(disk['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(disk['image_uuid'])
    # docker json-array handling: serialize list values to JSON strings
    if 'internal_metadata' in vmconfig:
        for var in vmconfig_docker_array:
            if var not in vmconfig['internal_metadata']:
                continue
            if isinstance(vmconfig['internal_metadata'][var], list):
                vmconfig['internal_metadata'][var] = json.dumps(
                    vmconfig['internal_metadata'][var]
                )
    # copy lx variables
    if vmconfig['brand'] == 'lx' and config['auto_lx_vars']:
        # NOTE: we can only copy the lx vars after the image has been imported
        vmconfig = _copy_lx_vars(vmconfig)
    # quick abort if things look wrong
    # NOTE: use explicit check for false, otherwise None also matches!
    if ret['result'] is False:
        return ret
    # check if vm exists
    if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
        # update vm
        ret['result'] = True
        # expand vmconfig into desired state / current state / pending changeset
        vmconfig = {
            'state': vmconfig,
            'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
            'changed': {},
            'reprovision_uuid': None
        }
        # prepare reprovision: keep the desired image aside and compare on the
        # current one so image_uuid alone never shows up in the changeset
        if 'image_uuid' in vmconfig['state']:
            vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
            vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']
        # disks need some special care: map short zvol names onto the full
        # paths vmadm reports, drop the path for disks that do not exist yet
        if 'disks' in vmconfig['state']:
            new_disks = []
            for disk in vmconfig['state']['disks']:
                path = False
                if 'disks' in vmconfig['current']:
                    for cdisk in vmconfig['current']['disks']:
                        if cdisk['path'].endswith(disk['path']):
                            path = cdisk['path']
                            break
                if not path:
                    del disk['path']
                else:
                    disk['path'] = path
                new_disks.append(disk)
            vmconfig['state']['disks'] = new_disks
        # process properties
        for prop in vmconfig['state']:
            # skip special vmconfig_types
            if prop in vmconfig_type['instance'] or \
                prop in vmconfig_type['collection'] or \
                prop in vmconfig_type['create_only']:
                continue
            # skip unchanged properties
            # (scalar values are compared as strings to ignore type drift)
            if prop in vmconfig['current']:
                if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
                    if vmconfig['current'][prop] == vmconfig['state'][prop]:
                        continue
                else:
                    if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
                        continue
            # add property to changeset
            vmconfig['changed'][prop] = vmconfig['state'][prop]
        # process collections
        for collection in vmconfig_type['collection']:
            # skip create only collections
            if collection in vmconfig_type['create_only']:
                continue
            # enforcement: when false only additions happen, no update/remove
            enforce = config['enforce_{0}'.format(collection)]
            log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)
            # dockerinit handling
            if collection == 'internal_metadata' and vmconfig['state'].get('docker', False):
                if 'internal_metadata' not in vmconfig['state']:
                    vmconfig['state']['internal_metadata'] = {}
                # preserve some docker specific metadata (added and needed by dockerinit)
                for var in vmconfig_docker_keep:
                    val = vmconfig['current'].get(collection, {}).get(var, None)
                    if val is not None:
                        vmconfig['state']['internal_metadata'][var] = val
            # process add and update for collection
            if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                for prop in vmconfig['state'][collection]:
                    # skip unchanged properties
                    if prop in vmconfig['current'][collection] and \
                        vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
                        continue
                    # skip update if not enforcing
                    if not enforce and prop in vmconfig['current'][collection]:
                        continue
                    # create set_ dict
                    if 'set_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['set_{0}'.format(collection)] = {}
                    # add property to changeset
                    vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]
            # process remove for collection
            if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
                for prop in vmconfig['current'][collection]:
                    # skip if exists in state
                    if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                        if prop in vmconfig['state'][collection]:
                            continue
                    # create remove_ array
                    if 'remove_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['remove_{0}'.format(collection)] = []
                    # remove property
                    vmconfig['changed']['remove_{0}'.format(collection)].append(prop)
        # process instances
        for instance in vmconfig_type['instance']:
            # skip create only instances
            if instance in vmconfig_type['create_only']:
                continue
            # add or update instances
            if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                for state_cfg in vmconfig['state'][instance]:
                    add_instance = True
                    # find instance with matching ids
                    for current_cfg in vmconfig['current'][instance]:
                        if vmconfig_type['instance'][instance] not in state_cfg:
                            continue
                        if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                            # ids have matched, disable add instance
                            add_instance = False
                            changed = _get_instance_changes(current_cfg, state_cfg)
                            update_cfg = {}
                            # handle changes
                            for prop in changed:
                                update_cfg[prop] = state_cfg[prop]
                            # handle new properties
                            for prop in state_cfg:
                                # skip empty props like ips, options,..
                                if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
                                    continue
                                if prop not in current_cfg:
                                    update_cfg[prop] = state_cfg[prop]
                            # update instance
                            if update_cfg:
                                # create update_ array
                                if 'update_{0}'.format(instance) not in vmconfig['changed']:
                                    vmconfig['changed']['update_{0}'.format(instance)] = []
                                update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
                                vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)
                    if add_instance:
                        # create add_ array
                        if 'add_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['add_{0}'.format(instance)] = []
                        # add instance
                        vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)
            # remove instances
            if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
                for current_cfg in vmconfig['current'][instance]:
                    remove_instance = True
                    # find instance with matching ids
                    if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                        for state_cfg in vmconfig['state'][instance]:
                            if vmconfig_type['instance'][instance] not in state_cfg:
                                continue
                            if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                                # keep instance if matched
                                remove_instance = False
                    if remove_instance:
                        # create remove_ array
                        if 'remove_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['remove_{0}'.format(instance)] = []
                        # remove instance
                        vmconfig['changed']['remove_{0}'.format(instance)].append(
                            current_cfg[vmconfig_type['instance'][instance]]
                        )
        # update vm if we have pending changes
        kvm_needs_start = False
        if not __opts__['test'] and vmconfig['changed']:
            # stop kvm if disk updates and kvm_reboot
            if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
                if 'add_disks' in vmconfig['changed'] or \
                    'update_disks' in vmconfig['changed'] or \
                    'remove_disks' in vmconfig['changed']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        kvm_needs_start = True
                        __salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')
            # do update
            rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['result'] = False
                ret['comment'] = "{0}".format(rret['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
        if ret['result']:
            if __opts__['test']:
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
            if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
                ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
                if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        __salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
                    if kvm_needs_start:
                        __salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
            else:
                ret['changes'] = {}
                ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])
            # reprovision (if required and allowed)
            if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
                if config['reprovision']:
                    rret = __salt__['vmadm.reprovision'](
                        vm=vmconfig['state']['hostname'],
                        key='hostname',
                        image=vmconfig['reprovision_uuid']
                    )
                    if not isinstance(rret, (bool)) and 'Error' in rret:
                        ret['result'] = False
                        ret['comment'] = 'vm {0} updated, reprovision failed'.format(
                            vmconfig['state']['hostname']
                        )
                    else:
                        ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
                        if vmconfig['state']['hostname'] not in ret['changes']:
                            ret['changes'][vmconfig['state']['hostname']] = {}
                        ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
                else:
                    log.warning('smartos.vm_present::%s::reprovision - '
                                'image_uuid in state does not match current, '
                                'reprovision not allowed',
                                name)
        else:
            ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['comment'] = "{0}".format(rret['Error'])
    else:
        # check required image installed
        ret['result'] = True
        # disks need some special care: path must not be set on creation
        if 'disks' in vmconfig:
            new_disks = []
            for disk in vmconfig['disks']:
                if 'path' in disk:
                    del disk['path']
                new_disks.append(disk)
            vmconfig['disks'] = new_disks
        # create vm
        if ret['result']:
            uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True
            if not isinstance(uuid, (bool)) and 'Error' in uuid:
                ret['result'] = False
                ret['comment'] = "{0}".format(uuid['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['hostname']] = vmconfig
                ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])
    return ret
def vm_absent(name, archive=False):
    '''
    Ensure vm is absent on the computenode

    name : string
        hostname of vm
    archive : boolean
        toggle archiving of vm on removal

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if name not in __salt__['vmadm.list'](order='hostname'):
        # nothing to do, the vm does not exist
        ret['result'] = True
        ret['comment'] = 'vm {0} is absent'.format(name)
        return ret
    if __opts__['test']:
        ret['result'] = True
    else:
        # flag the vm for archiving before removal when requested
        if archive:
            __salt__['vmadm.update'](vm=name, key='hostname', archive_on_delete=True)
        ret['result'] = __salt__['vmadm.delete'](name, key='hostname')
    # a non-bool result carrying an Error key signals a failed delete
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to delete vm {0}'.format(name)
    else:
        ret['comment'] = 'vm {0} deleted'.format(name)
        ret['changes'][name] = None
    return ret
def vm_running(name):
    '''
    Ensure vm is in the running state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if name in __salt__['vmadm.list'](order='hostname', search='state=running'):
        # already in the desired state
        ret['result'] = True
        ret['comment'] = 'vm {0} already running'.format(name)
        return ret
    # start the vm (no-op when running in test mode)
    ret['result'] = True if __opts__['test'] else __salt__['vmadm.start'](name, key='hostname')
    # a non-bool result carrying an Error key signals a failed start
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to start {0}'.format(name)
    else:
        ret['changes'][name] = 'running'
        ret['comment'] = 'vm {0} started'.format(name)
    return ret
def vm_stopped(name):
    '''
    Ensure vm is in the stopped state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if name in __salt__['vmadm.list'](order='hostname', search='state=stopped'):
        # already in the desired state
        ret['result'] = True
        ret['comment'] = 'vm {0} already stopped'.format(name)
        return ret
    # stop the vm (no-op when running in test mode)
    ret['result'] = True if __opts__['test'] else __salt__['vmadm.stop'](name, key='hostname')
    # a non-bool result carrying an Error key signals a failed stop
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to stop {0}'.format(name)
    else:
        ret['changes'][name] = 'stopped'
        ret['comment'] = 'vm {0} stopped'.format(name)
    return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
saltstack/salt
|
salt/states/smartos.py
|
_write_config
|
python
|
def _write_config(config):
    '''
    writes /usbkey/config

    Returns True on success, False when the file could not be written.
    '''
    try:
        # atomic_open writes to a temp file and renames on close, so a failed
        # write never leaves a truncated /usbkey/config behind
        with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:
            config_file.write("#\n# This file was generated by salt\n#\n")
            # emit properties in sorted order for a stable file layout
            for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
                if ' ' in six.text_type(config[prop]):
                    # quote values containing whitespace, unless already fully quoted
                    if not config[prop].startswith('"') or not config[prop].endswith('"'):
                        config[prop] = '"{0}"'.format(config[prop])
                config_file.write(
                    salt.utils.stringutils.to_str(
                        "{0}={1}\n".format(prop, config[prop])
                    )
                )
        log.debug('smartos.config - wrote /usbkey/config: %s', config)
    except IOError:
        return False
    return True
|
writes /usbkey/config
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/smartos.py#L205-L225
|
[
"def to_str(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str, bytes, bytearray, or unicode (py2), return str\n '''\n def _normalize(s):\n try:\n return unicodedata.normalize('NFC', s) if normalize else s\n except TypeError:\n return s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n # This shouldn't be six.string_types because if we're on PY2 and we already\n # have a string, we should just return it.\n if isinstance(s, str):\n return _normalize(s)\n\n exc = None\n if six.PY3:\n if isinstance(s, (bytes, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytes, or bytearray not {}'.format(type(s)))\n else:\n if isinstance(s, bytearray):\n return str(s) # future lint: disable=blacklisted-function\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n for enc in encoding:\n try:\n return _normalize(s).encode(enc, errors)\n except UnicodeEncodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytearray, or unicode')\n",
"def atomic_open(filename, mode='w'):\n '''\n Works like a regular `open()` but writes updates into a temporary\n file instead of the given file and moves it over when the file is\n closed. The file returned behaves as if it was a regular Python\n '''\n if mode in ('r', 'rb', 'r+', 'rb+', 'a', 'ab'):\n raise TypeError('Read or append modes don\\'t work with atomic_open')\n kwargs = {\n 'prefix': '.___atomic_write',\n 'dir': os.path.dirname(filename),\n 'delete': False,\n }\n if six.PY3 and 'b' not in mode:\n kwargs['newline'] = ''\n ntf = tempfile.NamedTemporaryFile(mode, **kwargs)\n return _AtomicWFile(ntf, ntf.name, filename)\n"
] |
# -*- coding: utf-8 -*-
'''
Management of SmartOS Standalone Compute Nodes
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: vmadm, imgadm
:platform: smartos
.. versionadded:: 2016.3.0
.. code-block:: yaml
vmtest.example.org:
smartos.vm_present:
- config:
reprovision: true
- vmconfig:
image_uuid: c02a2044-c1bd-11e4-bd8c-dfc1db8b0182
brand: joyent
alias: vmtest
quota: 5
max_physical_memory: 512
tags:
label: 'test vm'
owner: 'sjorge'
nics:
"82:1b:8e:49:e9:12":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.123/16
- 192.168.2.123/24
vlan_id: 10
"82:1b:8e:49:e9:13":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
filesystems:
"/bigdata":
source: "/bulk/data"
type: lofs
options:
- ro
- nodevices
kvmtest.example.org:
smartos.vm_present:
- vmconfig:
brand: kvm
alias: kvmtest
cpu_type: host
ram: 512
vnc_port: 9
tags:
label: 'test kvm'
owner: 'sjorge'
disks:
disk0:
size: 2048
model: virtio
compression: lz4
boot: true
nics:
"82:1b:8e:49:e9:15":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
docker.example.org:
smartos.vm_present:
- config:
auto_import: true
reprovision: true
- vmconfig:
image_uuid: emby/embyserver:latest
brand: lx
alias: mydockervm
quota: 5
max_physical_memory: 1024
tags:
label: 'my emby docker'
owner: 'sjorge'
resolvers:
- 172.16.1.1
nics:
"82:1b:8e:49:e9:18":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.118/24
vlan_id: 10
filesystems:
"/config:
source: "/vmdata/emby_config"
type: lofs
options:
- nodevices
cleanup_images:
smartos.image_vacuum
.. note::
Keep in mind that when removing properties from vmconfig they will not get
removed from the vm's current configuration, except for nics, disk, tags, ...
they get removed via add_*, set_*, update_*, and remove_*. Properties must
be manually reset to their default value.
The same behavior as when using 'vmadm update'.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import json
import logging
import os
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.files
import salt.utils.odict
import salt.utils.stringutils
# Import 3rd party libs
from salt.ext import six
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = 'smartos'
def __virtual__():
    '''
    Provides smartos state provided for SmartOS
    '''
    # only load when both the vmadm and imgadm execution modules are available
    if 'vmadm.create' not in __salt__ or 'imgadm.list' not in __salt__:
        return (
            False,
            '{0} state module can only be loaded on SmartOS compute nodes'.format(
                __virtualname__
            )
        )
    return True
def _split_docker_uuid(uuid):
    '''
    Split a smartos docker uuid into repo and tag

    Returns (None, None) for anything that is not exactly ``repo:tag``.
    '''
    if not uuid:
        return None, None
    parts = uuid.split(':')
    if len(parts) != 2:
        return None, None
    return parts[0], parts[1]
def _is_uuid(uuid):
    '''
    Check if uuid is a valid smartos uuid

    Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8
    '''
    if not uuid:
        return False
    # shape check only: five dash-separated groups of the expected lengths
    return [len(part) for part in uuid.split('-')] == [8, 4, 4, 4, 12]
def _is_docker_uuid(uuid):
    '''
    Check if uuid is a valid smartos docker uuid

    Example plexinc/pms-docker:plexpass
    '''
    # _split_docker_uuid yields (None, None) for non-docker uuids
    repo, tag = _split_docker_uuid(uuid)
    return bool(repo or tag)
def _load_config():
    '''
    Loads and parses /usbkey/config

    Returns an empty dict when the file does not exist. Keys are
    lowercased; surrounding whitespace and double quotes are stripped
    from values.
    '''
    config = {}
    if os.path.isfile('/usbkey/config'):
        with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:
            for optval in config_file:
                optval = salt.utils.stringutils.to_unicode(optval)
                # skip comment lines
                if optval[0] == '#':
                    continue
                # skip lines that are not key=value pairs
                if '=' not in optval:
                    continue
                # NOTE: split only on the first '=' so values that themselves
                #       contain '=' are preserved intact
                optval = optval.split('=', 1)
                config[optval[0].lower()] = optval[1].strip().strip('"')
    log.debug('smartos.config - read /usbkey/config: %s', config)
    return config
def _parse_vmconfig(config, instances):
    '''
    Parse vm_present vm config

    Flattens instance mappings (nics, disks, filesystems) into lists of
    dicts, injecting each mapping key as the instance's unique id property.
    Returns None when config is not an OrderedDict.
    '''
    if not isinstance(config, (salt.utils.odict.OrderedDict)):
        log.error('smartos.vm_present::parse_vmconfig - failed to parse')
        return None
    vmconfig = salt.utils.odict.OrderedDict()
    for prop, value in config.items():
        if prop not in instances:
            # plain property, copy through unchanged
            vmconfig[prop] = value
            continue
        if not isinstance(value, (salt.utils.odict.OrderedDict)):
            continue
        entries = []
        for instance_id, instance_config in value.items():
            # the mapping key is the instance's unique id (mac, path, target)
            instance_config[instances[prop]] = instance_id
            ## some property are lowercase
            if 'mac' in instance_config:
                instance_config['mac'] = instance_config['mac'].lower()
            entries.append(instance_config)
        vmconfig[prop] = entries
    return vmconfig
def _get_instance_changes(current, state):
    '''
    get modified properties

    Returns the compare_dicts changeset between ``current`` and ``state``,
    with entries dropped when either the old or the new value is the empty
    string (those represent unset properties, not real changes).
    '''
    # NOTE: the original computed current/state key sets that were never used
    #       and called compare_dicts twice; both redundancies removed
    changed = salt.utils.data.compare_dicts(current, state)
    # iterate over a snapshot of the keys so we can delete while looping
    for change in list(changed):
        if changed[change]['old'] == "" or changed[change]['new'] == "":
            del changed[change]
    return changed
def _copy_lx_vars(vmconfig):
    '''
    Copy kernel_version and docker:* metadata from the source image's
    manifest into ``vmconfig`` for lx-branded zones.

    User-provided values in internal_metadata win over image values,
    except docker:env entries which are merged (user entries override
    matching image entries by variable name).
    '''
    # NOTE: documentation on dockerinit: https://github.com/joyent/smartos-live/blob/master/src/dockerinit/README.md
    if 'image_uuid' in vmconfig:
        # NOTE: retrieve tags and type from image
        imgconfig = __salt__['imgadm.get'](vmconfig['image_uuid']).get('manifest', {})
        imgtype = imgconfig.get('type', 'zone-dataset')
        imgtags = imgconfig.get('tags', {})
        # NOTE: copy kernel_version (if not specified in vmconfig)
        if 'kernel_version' not in vmconfig and 'kernel_version' in imgtags:
            vmconfig['kernel_version'] = imgtags['kernel_version']
        # NOTE: copy docker vars
        if imgtype == 'docker':
            vmconfig['docker'] = True
            vmconfig['kernel_version'] = vmconfig.get('kernel_version', '4.3.0')
            if 'internal_metadata' not in vmconfig:
                vmconfig['internal_metadata'] = {}
            for var in imgtags.get('docker:config', {}):
                val = imgtags['docker:config'][var]
                var = 'docker:{0}'.format(var.lower())
                # NOTE: skip empty values
                if not val:
                    continue
                # NOTE: skip or merge user values
                if var == 'docker:env':
                    # missing/malformed user metadata raises ValueError and
                    # falls back to an empty env list
                    try:
                        val_config = json.loads(
                            vmconfig['internal_metadata'].get(var, "")
                        )
                    except ValueError as e:
                        val_config = []
                    # user env entries replace matching image entries by name
                    for config_env_var in val_config if isinstance(val_config, list) else json.loads(val_config):
                        config_env_var = config_env_var.split('=')
                        for img_env_var in val:
                            if img_env_var.startswith('{0}='.format(config_env_var[0])):
                                val.remove(img_env_var)
                        val.append('='.join(config_env_var))
                elif var in vmconfig['internal_metadata']:
                    continue
                if isinstance(val, list):
                    # NOTE: string-encoded JSON arrays
                    vmconfig['internal_metadata'][var] = json.dumps(val)
                else:
                    vmconfig['internal_metadata'][var] = val
    return vmconfig
def config_present(name, value):
    '''
    Ensure configuration property is set to value in /usbkey/config

    name : string
        name of property
    value : string
        value of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    # load configuration
    config = _load_config()
    # normalize bool and None values to their string form
    if isinstance(value, (bool)):
        value = 'true' if value else 'false'
    if not value:
        value = ""
    if name not in config:
        # add property
        ret['comment'] = 'added property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value
    elif six.text_type(config[name]) != six.text_type(value):
        # update property
        ret['comment'] = 'updated property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value
    else:
        # already in the desired state
        ret['comment'] = 'property {0} already has value "{1}"'.format(name, value)
    # persist the configuration unless running in test mode
    if ret['changes'] and not __opts__['test']:
        ret['result'] = _write_config(config)
    return ret
def config_absent(name):
    '''
    Ensure configuration property is absent in /usbkey/config

    name : string
        name of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    # load configuration
    config = _load_config()
    if name not in config:
        # already in the desired state
        ret['comment'] = 'property {0} is absent'.format(name)
    else:
        # delete property
        del config[name]
        ret['comment'] = 'property {0} deleted'.format(name)
        ret['changes'][name] = None
    # persist the configuration unless running in test mode
    if ret['changes'] and not __opts__['test']:
        ret['result'] = _write_config(config)
    return ret
def source_present(name, source_type='imgapi'):
    '''
    Ensure an image source is present on the computenode

    name : string
        source url
    source_type : string
        source type (imgapi or docker)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if name in __salt__['imgadm.sources']():
        # source already configured
        ret['result'] = True
        ret['comment'] = 'image source {0} is present'.format(name)
        return ret
    # register the new source (skipped in test mode)
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_add'](name, source_type)
        ret['result'] = (name in res)
    if ret['result']:
        ret['comment'] = 'image source {0} added'.format(name)
        ret['changes'][name] = 'added'
    else:
        ret['comment'] = 'image source {0} not added'.format(name)
        if 'Error' in res:
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
    return ret
def source_absent(name):
    '''
    Ensure an image source is absent on the computenode

    name : string
        source url
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if name not in __salt__['imgadm.sources']():
        # already in the desired state
        ret['result'] = True
        ret['comment'] = 'image source {0} is absent'.format(name)
        return ret
    # drop the source (skipped in test mode)
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_delete'](name)
        ret['result'] = (name not in res)
    if ret['result']:
        ret['comment'] = 'image source {0} deleted'.format(name)
        ret['changes'][name] = 'deleted'
    else:
        ret['comment'] = 'image source {0} not deleted'.format(name)
        if 'Error' in res:
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
    return ret
def image_present(name):
    '''
    Ensure image is present on the computenode

    name : string
        uuid of image (plain smartos uuid or docker repo:tag)

    NOTE(review): if ``name`` is in available_images but is neither a
    plain uuid nor a docker uuid, the non-test import branch leaves
    ret['result'] as None — confirm whether that case can occur.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if _is_docker_uuid(name) and __salt__['imgadm.docker_to_uuid'](name):
        # docker image was imported
        ret['result'] = True
        ret['comment'] = 'image {0} ({1}) is present'.format(
            name,
            __salt__['imgadm.docker_to_uuid'](name),
        )
    elif name in __salt__['imgadm.list']():
        # image was already imported
        ret['result'] = True
        ret['comment'] = 'image {0} is present'.format(name)
    else:
        # add image
        if _is_docker_uuid(name):
            # NOTE: we cannot query available docker images
            available_images = [name]
        else:
            available_images = __salt__['imgadm.avail']()
        if name in available_images:
            if __opts__['test']:
                # fabricate an import result for the changes dict
                ret['result'] = True
                res = {}
                if _is_docker_uuid(name):
                    res['00000000-0000-0000-0000-000000000000'] = name
                else:
                    res[name] = available_images[name]
            else:
                res = __salt__['imgadm.import'](name)
                if _is_uuid(name):
                    ret['result'] = (name in res)
                elif _is_docker_uuid(name):
                    # docker imports are verified via the uuid lookup
                    ret['result'] = __salt__['imgadm.docker_to_uuid'](name) is not None
            if ret['result']:
                ret['comment'] = 'image {0} imported'.format(name)
                ret['changes'] = res
            else:
                ret['comment'] = 'image {0} was unable to be imported'.format(name)
        else:
            ret['result'] = False
            ret['comment'] = 'image {0} does not exists'.format(name)
    return ret
def image_absent(name):
    '''
    Ensure image is absent on the computenode

    name : string
        uuid of image (plain smartos uuid or docker repo:tag)

    .. note::
        computenode.image_absent will only remove the image if it is not used
        by a vm.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # resolve name to a plain smartos uuid
    uuid = None
    if _is_uuid(name):
        uuid = name
    if _is_docker_uuid(name):
        uuid = __salt__['imgadm.docker_to_uuid'](name)
    if not uuid or uuid not in __salt__['imgadm.list']():
        # image not imported
        ret['result'] = True
        ret['comment'] = 'image {0} is absent'.format(name)
    else:
        # check if image in use by vm
        if uuid in __salt__['vmadm.list'](order='image_uuid'):
            ret['result'] = False
            ret['comment'] = 'image {0} currently in use by a vm'.format(name)
        else:
            # delete image
            if __opts__['test']:
                ret['result'] = True
            else:
                image = __salt__['imgadm.get'](uuid)
                image_count = 0
                if image['manifest']['name'] == 'docker-layer':
                    # NOTE: docker images are made of multiple layers, loop over them
                    #       by following each layer's 'origin' back to the base
                    while image:
                        image_count += 1
                        __salt__['imgadm.delete'](image['manifest']['uuid'])
                        if 'origin' in image['manifest']:
                            image = __salt__['imgadm.get'](image['manifest']['origin'])
                        else:
                            image = None
                else:
                    # NOTE: normal images can just be deleted
                    __salt__['imgadm.delete'](uuid)
                # success means the uuid is gone from the image list
                ret['result'] = uuid not in __salt__['imgadm.list']()
                if image_count:
                    ret['comment'] = 'image {0} and {1} children deleted'.format(name, image_count)
                else:
                    ret['comment'] = 'image {0} deleted'.format(name)
                ret['changes'][name] = None
    return ret
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present

    .. warning::
        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # list of images to keep
    images = []

    # retrieve image_present state data for host
    for state in __salt__['state.show_lowstate']():
        # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue

        # skip if not from this state module
        if state['state'] != __virtualname__:
            continue

        # skip if not image_present
        if state['fun'] not in ['image_present']:
            continue

        # keep images installed via image_present
        if 'name' in state:
            if _is_uuid(state['name']):
                images.append(state['name'])
            elif _is_docker_uuid(state['name']):
                # map the docker repo:tag to its imported uuid
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                images.append(state['name'])

    # retrieve images in use by vms
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in images:
            images.append(image_uuid)

    # purge unused images
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in images:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':
            # NOTE: docker images are made of multiple layers, loop over them
            while image:
                image_uuid = image['manifest']['uuid']
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                if 'origin' in image['manifest']:
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:
            # NOTE: normal images can just be delete
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'

    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'

    return ret
def vm_present(name, vmconfig, config=None):
    '''
    Ensure vm is present on the computenode

    name : string
        hostname of vm
    vmconfig : dict
        options to set for the vm
    config : dict
        fine grain control over vm_present

    .. note::
        The following configuration properties can be toggled in the config parameter.
          - kvm_reboot (true) - reboots of kvm zones if needed for a config update
          - auto_import (false) - automatic importing of missing images
          - auto_lx_vars (true) - copy kernel_version and docker:* variables from image
          - reprovision (false) - reprovision on image_uuid changes
          - enforce_tags (true) - false = add tags only, true = add, update, and remove tags
          - enforce_routes (true) - false = add tags only, true = add, update, and remove routes
          - enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
          - enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata

    .. note::
        State ID is used as hostname. Hostnames must be unique.

    .. note::
        If hostname is provided in vmconfig this will take president over the State ID.
        This allows multiple states to be applied to the same vm.

    .. note::
        The following instances should have a unique ID.
          - nic : mac
          - filesystem: target
          - disk : path or diskN for zvols

        e.g. disk0 will be the first disk added, disk1 the 2nd,...

    .. versionchanged:: 2019.2.0
        Added support for docker image uuids, added auto_lx_vars configuration, documented some missing configuration options.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # config defaults
    state_config = config if config else {}
    config = {
        'kvm_reboot': True,
        'auto_import': False,
        'auto_lx_vars': True,
        'reprovision': False,
        'enforce_tags': True,
        'enforce_routes': True,
        'enforce_internal_metadata': True,
        'enforce_customer_metadata': True,
    }
    config.update(state_config)
    log.debug('smartos.vm_present::%s::config - %s', name, config)

    # map special vmconfig parameters
    #  collections have set/remove handlers
    #  instances have add/update/remove handlers and a unique id
    vmconfig_type = {
        'collection': [
            'tags',
            'customer_metadata',
            'internal_metadata',
            'routes'
        ],
        'instance': {
            'nics': 'mac',
            'disks': 'path',
            'filesystems': 'target'
        },
        'create_only': [
            'filesystems'
        ]
    }
    # internal_metadata keys dockerinit owns; preserved across updates
    vmconfig_docker_keep = [
        'docker:id',
        'docker:restartcount',
    ]
    # internal_metadata keys stored as string-encoded JSON arrays
    vmconfig_docker_array = [
        'docker:env',
        'docker:cmd',
        'docker:entrypoint',
    ]

    # parse vmconfig
    vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
    log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)

    # set hostname if needed
    if 'hostname' not in vmconfig:
        vmconfig['hostname'] = name

    # prepare image_uuid
    if 'image_uuid' in vmconfig:
        # NOTE: lookup uuid from docker uuid (normal uuid's are passed through unmodified)
        #       we must do this again if we end up importing a missing image later!
        docker_uuid = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
        vmconfig['image_uuid'] = docker_uuid if docker_uuid else vmconfig['image_uuid']

        # NOTE: import image (if missing and allowed)
        if vmconfig['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](vmconfig['image_uuid'])
                    vmconfig['image_uuid'] = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
                    if vmconfig['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])

    # prepare disk.*.image_uuid
    for disk in vmconfig['disks'] if 'disks' in vmconfig else []:
        if 'image_uuid' in disk and disk['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](disk['image_uuid'])
                    if disk['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(disk['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(disk['image_uuid'])

    # docker json-array handling
    if 'internal_metadata' in vmconfig:
        for var in vmconfig_docker_array:
            if var not in vmconfig['internal_metadata']:
                continue
            if isinstance(vmconfig['internal_metadata'][var], list):
                vmconfig['internal_metadata'][var] = json.dumps(
                    vmconfig['internal_metadata'][var]
                )

    # copy lx variables
    if vmconfig['brand'] == 'lx' and config['auto_lx_vars']:
        # NOTE: we can only copy the lx vars after the image has been imported
        vmconfig = _copy_lx_vars(vmconfig)

    # quick abort if things look wrong
    # NOTE: use explicit check for false, otherwise None also matches!
    if ret['result'] is False:
        return ret

    # check if vm exists
    if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
        # update vm
        ret['result'] = True

        # expand vmconfig
        vmconfig = {
            'state': vmconfig,
            'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
            'changed': {},
            'reprovision_uuid': None
        }

        # prepare reprovision
        # (keep the requested image aside; vmadm.update may not change image_uuid)
        if 'image_uuid' in vmconfig['state']:
            vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
            vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']

        # disks need some special care
        # (map the short state path onto the full zvol path of the live config)
        if 'disks' in vmconfig['state']:
            new_disks = []
            for disk in vmconfig['state']['disks']:
                path = False
                if 'disks' in vmconfig['current']:
                    for cdisk in vmconfig['current']['disks']:
                        if cdisk['path'].endswith(disk['path']):
                            path = cdisk['path']
                            break
                if not path:
                    del disk['path']
                else:
                    disk['path'] = path
                new_disks.append(disk)
            vmconfig['state']['disks'] = new_disks

        # process properties
        for prop in vmconfig['state']:
            # skip special vmconfig_types
            if prop in vmconfig_type['instance'] or \
                    prop in vmconfig_type['collection'] or \
                    prop in vmconfig_type['create_only']:
                continue

            # skip unchanged properties
            if prop in vmconfig['current']:
                if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
                    if vmconfig['current'][prop] == vmconfig['state'][prop]:
                        continue
                else:
                    # compare scalars as strings (vmadm returns strings for most values)
                    if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
                        continue

            # add property to changeset
            vmconfig['changed'][prop] = vmconfig['state'][prop]

        # process collections
        for collection in vmconfig_type['collection']:
            # skip create only collections
            if collection in vmconfig_type['create_only']:
                continue

            # enforcement
            enforce = config['enforce_{0}'.format(collection)]
            log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)

            # dockerinit handling
            if collection == 'internal_metadata' and vmconfig['state'].get('docker', False):
                if 'internal_metadata' not in vmconfig['state']:
                    vmconfig['state']['internal_metadata'] = {}

                # preserve some docker specific metadata (added and needed by dockerinit)
                for var in vmconfig_docker_keep:
                    val = vmconfig['current'].get(collection, {}).get(var, None)
                    if val is not None:
                        vmconfig['state']['internal_metadata'][var] = val

            # process add and update for collection
            if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                for prop in vmconfig['state'][collection]:
                    # skip unchanged properties
                    if prop in vmconfig['current'][collection] and \
                            vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
                        continue

                    # skip update if not enforcing
                    if not enforce and prop in vmconfig['current'][collection]:
                        continue

                    # create set_ dict
                    if 'set_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['set_{0}'.format(collection)] = {}

                    # add property to changeset
                    vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]

            # process remove for collection
            if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
                for prop in vmconfig['current'][collection]:
                    # skip if exists in state
                    if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                        if prop in vmconfig['state'][collection]:
                            continue

                    # create remove_ array
                    if 'remove_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['remove_{0}'.format(collection)] = []

                    # remove property
                    vmconfig['changed']['remove_{0}'.format(collection)].append(prop)

        # process instances
        for instance in vmconfig_type['instance']:
            # skip create only instances
            if instance in vmconfig_type['create_only']:
                continue

            # add or update instances
            if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                for state_cfg in vmconfig['state'][instance]:
                    add_instance = True

                    # find instance with matching ids
                    for current_cfg in vmconfig['current'][instance]:
                        if vmconfig_type['instance'][instance] not in state_cfg:
                            continue

                        if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                            # ids have matched, disable add instance
                            add_instance = False

                            changed = _get_instance_changes(current_cfg, state_cfg)
                            update_cfg = {}

                            # handle changes
                            for prop in changed:
                                update_cfg[prop] = state_cfg[prop]

                            # handle new properties
                            for prop in state_cfg:
                                # skip empty props like ips, options,..
                                if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
                                    continue

                                if prop not in current_cfg:
                                    update_cfg[prop] = state_cfg[prop]

                            # update instance
                            if update_cfg:
                                # create update_ array
                                if 'update_{0}'.format(instance) not in vmconfig['changed']:
                                    vmconfig['changed']['update_{0}'.format(instance)] = []

                                update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
                                vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)

                    if add_instance:
                        # create add_ array
                        if 'add_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['add_{0}'.format(instance)] = []

                        # add instance
                        vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)

            # remove instances
            if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
                for current_cfg in vmconfig['current'][instance]:
                    remove_instance = True

                    # find instance with matching ids
                    if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                        for state_cfg in vmconfig['state'][instance]:
                            if vmconfig_type['instance'][instance] not in state_cfg:
                                continue

                            if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                                # keep instance if matched
                                remove_instance = False

                    if remove_instance:
                        # create remove_ array
                        if 'remove_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['remove_{0}'.format(instance)] = []

                        # remove instance
                        vmconfig['changed']['remove_{0}'.format(instance)].append(
                            current_cfg[vmconfig_type['instance'][instance]]
                        )

        # update vm if we have pending changes
        kvm_needs_start = False
        if not __opts__['test'] and vmconfig['changed']:
            # stop kvm if disk updates and kvm_reboot
            if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
                if 'add_disks' in vmconfig['changed'] or \
                        'update_disks' in vmconfig['changed'] or \
                        'remove_disks' in vmconfig['changed']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        kvm_needs_start = True
                        __salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')

            # do update
            rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['result'] = False
                ret['comment'] = "{0}".format(rret['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']

        if ret['result']:
            if __opts__['test']:
                # under test=True, report the pending changeset without applying it
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']

            if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
                ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
                if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        __salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
                    if kvm_needs_start:
                        __salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
            else:
                ret['changes'] = {}
                ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])

            # reprovision (if required and allowed)
            if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
                if config['reprovision']:
                    rret = __salt__['vmadm.reprovision'](
                        vm=vmconfig['state']['hostname'],
                        key='hostname',
                        image=vmconfig['reprovision_uuid']
                    )
                    if not isinstance(rret, (bool)) and 'Error' in rret:
                        ret['result'] = False
                        ret['comment'] = 'vm {0} updated, reprovision failed'.format(
                            vmconfig['state']['hostname']
                        )
                    else:
                        ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
                        if vmconfig['state']['hostname'] not in ret['changes']:
                            ret['changes'][vmconfig['state']['hostname']] = {}
                        ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
                else:
                    log.warning('smartos.vm_present::%s::reprovision - '
                                'image_uuid in state does not match current, '
                                'reprovision not allowed',
                                name)
        else:
            ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['comment'] = "{0}".format(rret['Error'])
    else:
        # check required image installed
        ret['result'] = True

        # disks need some special care
        # (on create, vmadm assigns the zvol path itself)
        if 'disks' in vmconfig:
            new_disks = []
            for disk in vmconfig['disks']:
                if 'path' in disk:
                    del disk['path']
                new_disks.append(disk)
            vmconfig['disks'] = new_disks

        # create vm
        if ret['result']:
            uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True
            if not isinstance(uuid, (bool)) and 'Error' in uuid:
                ret['result'] = False
                ret['comment'] = "{0}".format(uuid['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['hostname']] = vmconfig
                ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])

    return ret
def vm_absent(name, archive=False):
    '''
    Ensure vm is absent on the computenode

    name : string
        hostname of vm
    archive : boolean
        toggle archiving of vm on removal

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name not in __salt__['vmadm.list'](order='hostname'):
        # we're good
        ret['result'] = True
        ret['comment'] = 'vm {0} is absent'.format(name)
    else:
        # delete vm
        if not __opts__['test']:
            # set archive to true if needed
            if archive:
                __salt__['vmadm.update'](vm=name, key='hostname', archive_on_delete=True)

            ret['result'] = __salt__['vmadm.delete'](name, key='hostname')
        else:
            ret['result'] = True

        # vmadm.delete returns a dict with an 'Error' key on failure
        if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
            ret['result'] = False
            ret['comment'] = 'failed to delete vm {0}'.format(name)
        else:
            ret['comment'] = 'vm {0} deleted'.format(name)
            ret['changes'][name] = None

    return ret
def vm_running(name):
    '''
    Ensure vm is in the running state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    running_vms = __salt__['vmadm.list'](order='hostname', search='state=running')
    if name in running_vms:
        # already up, nothing to do
        ret['result'] = True
        ret['comment'] = 'vm {0} already running'.format(name)
        return ret

    # start the vm (no-op under test=True)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.start'](name, key='hostname')

    # vmadm.start returns a dict with an 'Error' key on failure
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to start {0}'.format(name)
    else:
        ret['changes'][name] = 'running'
        ret['comment'] = 'vm {0} started'.format(name)

    return ret
def vm_stopped(name):
    '''
    Ensure vm is in the stopped state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    stopped_vms = __salt__['vmadm.list'](order='hostname', search='state=stopped')
    if name in stopped_vms:
        # already stopped, nothing to do
        ret['result'] = True
        ret['comment'] = 'vm {0} already stopped'.format(name)
        return ret

    # stop the vm (no-op under test=True)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.stop'](name, key='hostname')

    # vmadm.stop returns a dict with an 'Error' key on failure
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to stop {0}'.format(name)
    else:
        ret['changes'][name] = 'stopped'
        ret['comment'] = 'vm {0} stopped'.format(name)

    return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
saltstack/salt
|
salt/states/smartos.py
|
_parse_vmconfig
|
python
|
def _parse_vmconfig(config, instances):
    '''
    Parse vm_present vm config

    Turns the instance mappings (nics, disks, filesystems) from the
    state's dict-of-dicts layout into the list-of-dicts layout that
    vmadm expects, injecting each unique id key (mac, path, target)
    into its instance entry. Returns None when ``config`` is not an
    OrderedDict.
    '''
    vmconfig = None

    if isinstance(config, (salt.utils.odict.OrderedDict)):
        vmconfig = salt.utils.odict.OrderedDict()
        for prop in config:
            if prop not in instances:
                # plain property, copied verbatim
                vmconfig[prop] = config[prop]
            else:
                if not isinstance(config[prop], (salt.utils.odict.OrderedDict)):
                    continue
                vmconfig[prop] = []
                for instance in config[prop]:
                    instance_config = config[prop][instance]
                    # inject the unique id (dict key) into the instance entry
                    instance_config[instances[prop]] = instance
                    ## some property are lowercase
                    if 'mac' in instance_config:
                        instance_config['mac'] = instance_config['mac'].lower()
                    vmconfig[prop].append(instance_config)
    else:
        log.error('smartos.vm_present::parse_vmconfig - failed to parse')

    return vmconfig
|
Parse vm_present vm config
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/smartos.py#L228-L253
| null |
# -*- coding: utf-8 -*-
'''
Management of SmartOS Standalone Compute Nodes
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: vmadm, imgadm
:platform: smartos
.. versionadded:: 2016.3.0
.. code-block:: yaml
vmtest.example.org:
smartos.vm_present:
- config:
reprovision: true
- vmconfig:
image_uuid: c02a2044-c1bd-11e4-bd8c-dfc1db8b0182
brand: joyent
alias: vmtest
quota: 5
max_physical_memory: 512
tags:
label: 'test vm'
owner: 'sjorge'
nics:
"82:1b:8e:49:e9:12":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.123/16
- 192.168.2.123/24
vlan_id: 10
"82:1b:8e:49:e9:13":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
filesystems:
"/bigdata":
source: "/bulk/data"
type: lofs
options:
- ro
- nodevices
kvmtest.example.org:
smartos.vm_present:
- vmconfig:
brand: kvm
alias: kvmtest
cpu_type: host
ram: 512
vnc_port: 9
tags:
label: 'test kvm'
owner: 'sjorge'
disks:
disk0:
size: 2048
model: virtio
compression: lz4
boot: true
nics:
"82:1b:8e:49:e9:15":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
docker.example.org:
smartos.vm_present:
- config:
auto_import: true
reprovision: true
- vmconfig:
image_uuid: emby/embyserver:latest
brand: lx
alias: mydockervm
quota: 5
max_physical_memory: 1024
tags:
label: 'my emby docker'
owner: 'sjorge'
resolvers:
- 172.16.1.1
nics:
"82:1b:8e:49:e9:18":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.118/24
vlan_id: 10
filesystems:
"/config:
source: "/vmdata/emby_config"
type: lofs
options:
- nodevices
cleanup_images:
smartos.image_vacuum
.. note::
Keep in mind that when removing properties from vmconfig they will not get
removed from the vm's current configuration, except for nics, disk, tags, ...
they get removed via add_*, set_*, update_*, and remove_*. Properties must
be manually reset to their default value.
The same behavior as when using 'vmadm update'.
'''
from __future__ import absolute_import, unicode_literals, print_function

# Import Python libs
import json
import logging
import os

# Import Salt libs
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.files
import salt.utils.odict
import salt.utils.stringutils

# Import 3rd party libs
from salt.ext import six
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = 'smartos'
def __virtual__():
    '''
    Provides smartos state provided for SmartOS
    '''
    # only load when both vmadm and imgadm execution modules are present
    if 'vmadm.create' not in __salt__ or 'imgadm.list' not in __salt__:
        return (
            False,
            '{0} state module can only be loaded on SmartOS compute nodes'.format(
                __virtualname__
            )
        )
    return True
def _split_docker_uuid(uuid):
'''
Split a smartos docker uuid into repo and tag
'''
if uuid:
uuid = uuid.split(':')
if len(uuid) == 2:
tag = uuid[1]
repo = uuid[0]
return repo, tag
return None, None
def _is_uuid(uuid):
'''
Check if uuid is a valid smartos uuid
Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8
'''
if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:
return True
return False
def _is_docker_uuid(uuid):
'''
Check if uuid is a valid smartos docker uuid
Example plexinc/pms-docker:plexpass
'''
repo, tag = _split_docker_uuid(uuid)
return not (not repo and not tag)
def _load_config():
    '''
    Loads and parses /usbkey/config

    Comment lines (starting with ``#``) and lines without ``=`` are
    skipped; keys are lowercased, values stripped of whitespace and
    surrounding double quotes. Returns an empty dict when the file does
    not exist.
    '''
    config = {}

    if os.path.isfile('/usbkey/config'):
        with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:
            for optval in config_file:
                optval = salt.utils.stringutils.to_unicode(optval)
                if optval[0] == '#':
                    continue
                if '=' not in optval:
                    continue
                # BUGFIX: split on the first '=' only so values that
                # themselves contain '=' are not truncated
                key, value = optval.split('=', 1)
                config[key.lower()] = value.strip().strip('"')
    log.debug('smartos.config - read /usbkey/config: %s', config)
    return config
def _write_config(config):
    '''
    writes /usbkey/config

    Serializes ``config`` sorted by key, double-quoting any value that
    contains a space. Returns True on success, False on IOError.
    '''
    try:
        # atomic_open writes to a temp file and renames it into place
        with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:
            config_file.write("#\n# This file was generated by salt\n#\n")
            for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
                if ' ' in six.text_type(config[prop]):
                    # quote values containing spaces (unless already quoted)
                    if not config[prop].startswith('"') or not config[prop].endswith('"'):
                        config[prop] = '"{0}"'.format(config[prop])
                config_file.write(
                    salt.utils.stringutils.to_str(
                        "{0}={1}\n".format(prop, config[prop])
                    )
                )
        log.debug('smartos.config - wrote /usbkey/config: %s', config)
    except IOError:
        return False

    return True
def _get_instance_changes(current, state):
'''
get modified properties
'''
# get keys
current_keys = set(current.keys())
state_keys = set(state.keys())
# compare configs
changed = salt.utils.data.compare_dicts(current, state)
for change in salt.utils.data.compare_dicts(current, state):
if change in changed and changed[change]['old'] == "":
del changed[change]
if change in changed and changed[change]['new'] == "":
del changed[change]
return changed
def _copy_lx_vars(vmconfig):
    '''
    Copy kernel_version and docker:* variables from the source image's
    manifest tags into ``vmconfig`` for lx-branded zones.
    '''
    # NOTE: documentation on dockerinit: https://github.com/joyent/smartos-live/blob/master/src/dockerinit/README.md
    if 'image_uuid' in vmconfig:
        # NOTE: retrieve tags and type from image
        imgconfig = __salt__['imgadm.get'](vmconfig['image_uuid']).get('manifest', {})
        imgtype = imgconfig.get('type', 'zone-dataset')
        imgtags = imgconfig.get('tags', {})

        # NOTE: copy kernel_version (if not specified in vmconfig)
        if 'kernel_version' not in vmconfig and 'kernel_version' in imgtags:
            vmconfig['kernel_version'] = imgtags['kernel_version']

        # NOTE: copy docker vars
        if imgtype == 'docker':
            vmconfig['docker'] = True
            vmconfig['kernel_version'] = vmconfig.get('kernel_version', '4.3.0')
            if 'internal_metadata' not in vmconfig:
                vmconfig['internal_metadata'] = {}

            for var in imgtags.get('docker:config', {}):
                val = imgtags['docker:config'][var]
                var = 'docker:{0}'.format(var.lower())

                # NOTE: skip empty values
                if not val:
                    continue

                # NOTE: skip or merge user values
                if var == 'docker:env':
                    # user-provided env vars override the image's defaults;
                    # json.loads("") raises ValueError, handled as "no user value"
                    try:
                        val_config = json.loads(
                            vmconfig['internal_metadata'].get(var, "")
                        )
                    except ValueError as e:
                        val_config = []

                    for config_env_var in val_config if isinstance(val_config, list) else json.loads(val_config):
                        config_env_var = config_env_var.split('=')
                        for img_env_var in val:
                            if img_env_var.startswith('{0}='.format(config_env_var[0])):
                                val.remove(img_env_var)
                        val.append('='.join(config_env_var))
                elif var in vmconfig['internal_metadata']:
                    continue

                if isinstance(val, list):
                    # NOTE: string-encoded JSON arrays
                    vmconfig['internal_metadata'][var] = json.dumps(val)
                else:
                    vmconfig['internal_metadata'][var] = val

    return vmconfig
def config_present(name, value):
    '''
    Ensure configuration property is set to value in /usbkey/config

    name : string
        name of property
    value : string
        value of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # load configuration
    config = _load_config()

    # handle bool and None value
    if isinstance(value, (bool)):
        value = 'true' if value else 'false'
    if not value:
        value = ""

    if name in config:
        if six.text_type(config[name]) == six.text_type(value):
            # we're good
            ret['result'] = True
            ret['comment'] = 'property {0} already has value "{1}"'.format(name, value)
        else:
            # update property
            ret['result'] = True
            ret['comment'] = 'updated property {0} with value "{1}"'.format(name, value)
            ret['changes'][name] = value
            config[name] = value
    else:
        # add property
        ret['result'] = True
        ret['comment'] = 'added property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value

    # apply change if needed (skipped under test=True)
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)

    return ret
def config_absent(name):
    '''
    Ensure configuration property is absent in /usbkey/config

    name : string
        name of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    config = _load_config()

    if name not in config:
        # nothing to remove
        ret['result'] = True
        ret['comment'] = 'property {0} is absent'.format(name)
    else:
        # drop the property from the parsed configuration
        del config[name]
        ret['result'] = True
        ret['comment'] = 'property {0} deleted'.format(name)
        ret['changes'][name] = None

    # persist the change unless running under test=True
    if ret['changes'] and not __opts__['test']:
        ret['result'] = _write_config(config)

    return ret
def source_present(name, source_type='imgapi'):
    '''
    Ensure an image source is present on the computenode

    name : string
        source url
    source_type : string
        source type (imgapi or docker)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name in __salt__['imgadm.sources']():
        # source is present
        ret['result'] = True
        ret['comment'] = 'image source {0} is present'.format(name)
    else:
        # add new source
        if __opts__['test']:
            res = {}
            ret['result'] = True
        else:
            res = __salt__['imgadm.source_add'](name, source_type)
            # imgadm.source_add returns the updated source list on success
            ret['result'] = (name in res)

        if ret['result']:
            ret['comment'] = 'image source {0} added'.format(name)
            ret['changes'][name] = 'added'
        else:
            ret['comment'] = 'image source {0} not added'.format(name)
            if 'Error' in res:
                ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])

    return ret
def source_absent(name):
    '''
    Ensure an image source is absent on the computenode

    name : string
        source url
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name not in __salt__['imgadm.sources']():
        # source is absent
        ret['result'] = True
        ret['comment'] = 'image source {0} is absent'.format(name)
    else:
        # remove source
        if __opts__['test']:
            res = {}
            ret['result'] = True
        else:
            res = __salt__['imgadm.source_delete'](name)
            # imgadm.source_delete returns the remaining source list
            ret['result'] = (name not in res)

        if ret['result']:
            ret['comment'] = 'image source {0} deleted'.format(name)
            ret['changes'][name] = 'deleted'
        else:
            ret['comment'] = 'image source {0} not deleted'.format(name)
            if 'Error' in res:
                ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])

    return ret
def image_present(name):
    '''
    Ensure image is present on the computenode

    name : string
        uuid of image
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if _is_docker_uuid(name) and __salt__['imgadm.docker_to_uuid'](name):
        # docker image was imported
        ret['result'] = True
        ret['comment'] = 'image {0} ({1}) is present'.format(
            name,
            __salt__['imgadm.docker_to_uuid'](name),
        )
    elif name in __salt__['imgadm.list']():
        # image was already imported
        ret['result'] = True
        ret['comment'] = 'image {0} is present'.format(name)
    else:
        # add image
        if _is_docker_uuid(name):
            # NOTE: we cannot query available docker images
            available_images = [name]
        else:
            available_images = __salt__['imgadm.avail']()

        if name in available_images:
            if __opts__['test']:
                ret['result'] = True
                # fake an import result for the changes dict
                res = {}
                if _is_docker_uuid(name):
                    res['00000000-0000-0000-0000-000000000000'] = name
                else:
                    res[name] = available_images[name]
            else:
                res = __salt__['imgadm.import'](name)
                if _is_uuid(name):
                    ret['result'] = (name in res)
                elif _is_docker_uuid(name):
                    # docker imports resolve to a new uuid; check the mapping
                    ret['result'] = __salt__['imgadm.docker_to_uuid'](name) is not None
            if ret['result']:
                ret['comment'] = 'image {0} imported'.format(name)
                ret['changes'] = res
            else:
                ret['comment'] = 'image {0} was unable to be imported'.format(name)
        else:
            ret['result'] = False
            ret['comment'] = 'image {0} does not exists'.format(name)

    return ret
def image_absent(name):
    '''
    Ensure image is absent on the computenode

    name : string
        uuid of image

    .. note::
        computenode.image_absent will only remove the image if it is not used
        by a vm.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # resolve the argument to an imported image uuid
    # (docker "repo:tag" names are mapped via imgadm.docker_to_uuid)
    uuid = None
    if _is_uuid(name):
        uuid = name
    if _is_docker_uuid(name):
        uuid = __salt__['imgadm.docker_to_uuid'](name)

    if not uuid or uuid not in __salt__['imgadm.list']():
        # image not imported
        ret['result'] = True
        ret['comment'] = 'image {0} is absent'.format(name)
    else:
        # check if image in use by vm
        if uuid in __salt__['vmadm.list'](order='image_uuid'):
            ret['result'] = False
            ret['comment'] = 'image {0} currently in use by a vm'.format(name)
        else:
            # delete image
            if __opts__['test']:
                ret['result'] = True
            else:
                image = __salt__['imgadm.get'](uuid)
                image_count = 0
                if image['manifest']['name'] == 'docker-layer':
                    # NOTE: docker images are made of multiple layers, loop over them
                    while image:
                        image_count += 1
                        __salt__['imgadm.delete'](image['manifest']['uuid'])
                        if 'origin' in image['manifest']:
                            # follow the layer chain to its origin
                            image = __salt__['imgadm.get'](image['manifest']['origin'])
                        else:
                            image = None
                else:
                    # NOTE: normal images can just be delete
                    __salt__['imgadm.delete'](uuid)

                # re-check the image list to confirm the deletion took effect
                ret['result'] = uuid not in __salt__['imgadm.list']()
                if image_count:
                    ret['comment'] = 'image {0} and {1} children deleted'.format(name, image_count)
                else:
                    ret['comment'] = 'image {0} deleted'.format(name)
                ret['changes'][name] = None

    return ret
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present

    .. warning::
        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # list of images to keep
    images = []
    # retrieve image_present state data for host
    for state in __salt__['state.show_lowstate']():
        # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue
        # skip if not from this state module
        if state['state'] != __virtualname__:
            continue
        # skip if not image_present
        if state['fun'] not in ['image_present']:
            continue
        # keep images installed via image_present
        if 'name' in state:
            if _is_uuid(state['name']):
                images.append(state['name'])
            elif _is_docker_uuid(state['name']):
                # docker references must be resolved to plain uuids first
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                images.append(state['name'])
    # retrieve images in use by vms
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in images:
            images.append(image_uuid)
    # purge unused images
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in images:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':
            # NOTE: docker images are made of multiple layers, loop over them
            # following the 'origin' chain; each deleted layer is recorded
            while image:
                image_uuid = image['manifest']['uuid']
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                if 'origin' in image['manifest']:
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:
            # NOTE: normal images can just be deleted
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'
    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'
    return ret
def vm_present(name, vmconfig, config=None):
    '''
    Ensure vm is present on the computenode

    name : string
        hostname of vm
    vmconfig : dict
        options to set for the vm
    config : dict
        fine grain control over vm_present

    .. note::
        The following configuration properties can be toggled in the config parameter.
          - kvm_reboot (true)                - reboots of kvm zones if needed for a config update
          - auto_import (false)              - automatic importing of missing images
          - auto_lx_vars (true)              - copy kernel_version and docker:* variables from image
          - reprovision (false)              - reprovision on image_uuid changes
          - enforce_tags (true)              - false = add tags only, true = add, update, and remove tags
          - enforce_routes (true)            - false = add tags only, true = add, update, and remove routes
          - enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
          - enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata

    .. note::
        State ID is used as hostname. Hostnames must be unique.

    .. note::
        If hostname is provided in vmconfig this will take precedence over the State ID.
        This allows multiple states to be applied to the same vm.

    .. note::
        The following instances should have a unique ID.
          - nic : mac
          - filesystem: target
          - disk : path or diskN for zvols

        e.g. disk0 will be the first disk added, disk1 the 2nd,...

    .. versionchanged:: 2019.2.0
        Added support for docker image uuids, added auto_lx_vars configuration, documented some missing configuration options.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # config defaults (state-provided config overrides these)
    state_config = config if config else {}
    config = {
        'kvm_reboot': True,
        'auto_import': False,
        'auto_lx_vars': True,
        'reprovision': False,
        'enforce_tags': True,
        'enforce_routes': True,
        'enforce_internal_metadata': True,
        'enforce_customer_metadata': True,
    }
    config.update(state_config)
    log.debug('smartos.vm_present::%s::config - %s', name, config)
    # map special vmconfig parameters
    #   collections have set/remove handlers
    #   instances have add/update/remove handlers and a unique id
    vmconfig_type = {
        'collection': [
            'tags',
            'customer_metadata',
            'internal_metadata',
            'routes'
        ],
        'instance': {
            'nics': 'mac',
            'disks': 'path',
            'filesystems': 'target'
        },
        'create_only': [
            'filesystems'
        ]
    }
    # internal_metadata keys written by dockerinit that must survive updates
    vmconfig_docker_keep = [
        'docker:id',
        'docker:restartcount',
    ]
    # internal_metadata keys that vmadm expects as string-encoded JSON arrays
    vmconfig_docker_array = [
        'docker:env',
        'docker:cmd',
        'docker:entrypoint',
    ]
    # parse vmconfig
    vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
    log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)
    # set hostname if needed
    if 'hostname' not in vmconfig:
        vmconfig['hostname'] = name
    # prepare image_uuid
    if 'image_uuid' in vmconfig:
        # NOTE: lookup uuid from docker uuid (normal uuid's are passed through unmodified)
        #       we must do this again if we end up importing a missing image later!
        docker_uuid = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
        vmconfig['image_uuid'] = docker_uuid if docker_uuid else vmconfig['image_uuid']
        # NOTE: import image (if missing and allowed)
        if vmconfig['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](vmconfig['image_uuid'])
                    vmconfig['image_uuid'] = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
                    if vmconfig['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])
    # prepare disk.*.image_uuid
    for disk in vmconfig['disks'] if 'disks' in vmconfig else []:
        if 'image_uuid' in disk and disk['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](disk['image_uuid'])
                    if disk['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(disk['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(disk['image_uuid'])
    # docker json-array handling: serialize list values to JSON strings
    if 'internal_metadata' in vmconfig:
        for var in vmconfig_docker_array:
            if var not in vmconfig['internal_metadata']:
                continue
            if isinstance(vmconfig['internal_metadata'][var], list):
                vmconfig['internal_metadata'][var] = json.dumps(
                    vmconfig['internal_metadata'][var]
                )
    # copy lx variables
    if vmconfig['brand'] == 'lx' and config['auto_lx_vars']:
        # NOTE: we can only copy the lx vars after the image has been imported
        vmconfig = _copy_lx_vars(vmconfig)
    # quick abort if things look wrong
    # NOTE: use explicit check for false, otherwise None also matches!
    if ret['result'] is False:
        return ret
    # check if vm exists
    if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
        # update vm
        ret['result'] = True
        # expand vmconfig: 'state' is the requested config, 'current' the live one,
        # 'changed' accumulates the vmadm.update payload
        vmconfig = {
            'state': vmconfig,
            'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
            'changed': {},
            'reprovision_uuid': None
        }
        # prepare reprovision
        if 'image_uuid' in vmconfig['state']:
            vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
            vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']
        # disks need some special care: match requested disks to live zvol paths
        if 'disks' in vmconfig['state']:
            new_disks = []
            for disk in vmconfig['state']['disks']:
                path = False
                if 'disks' in vmconfig['current']:
                    for cdisk in vmconfig['current']['disks']:
                        if cdisk['path'].endswith(disk['path']):
                            path = cdisk['path']
                            break
                if not path:
                    del disk['path']
                else:
                    disk['path'] = path
                new_disks.append(disk)
            vmconfig['state']['disks'] = new_disks
        # process properties
        for prop in vmconfig['state']:
            # skip special vmconfig_types
            if prop in vmconfig_type['instance'] or \
                prop in vmconfig_type['collection'] or \
                prop in vmconfig_type['create_only']:
                continue
            # skip unchanged properties (scalars compared as strings)
            if prop in vmconfig['current']:
                if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
                    if vmconfig['current'][prop] == vmconfig['state'][prop]:
                        continue
                else:
                    if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
                        continue
            # add property to changeset
            vmconfig['changed'][prop] = vmconfig['state'][prop]
        # process collections
        for collection in vmconfig_type['collection']:
            # skip create only collections
            if collection in vmconfig_type['create_only']:
                continue
            # enforcement
            enforce = config['enforce_{0}'.format(collection)]
            log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)
            # dockerinit handling
            if collection == 'internal_metadata' and vmconfig['state'].get('docker', False):
                if 'internal_metadata' not in vmconfig['state']:
                    vmconfig['state']['internal_metadata'] = {}
                # preserve some docker specific metadata (added and needed by dockerinit)
                for var in vmconfig_docker_keep:
                    val = vmconfig['current'].get(collection, {}).get(var, None)
                    if val is not None:
                        vmconfig['state']['internal_metadata'][var] = val
            # process add and update for collection
            if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                for prop in vmconfig['state'][collection]:
                    # skip unchanged properties
                    if prop in vmconfig['current'][collection] and \
                        vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
                        continue
                    # skip update if not enforcing
                    if not enforce and prop in vmconfig['current'][collection]:
                        continue
                    # create set_ dict
                    if 'set_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['set_{0}'.format(collection)] = {}
                    # add property to changeset
                    vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]
            # process remove for collection
            if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
                for prop in vmconfig['current'][collection]:
                    # skip if exists in state
                    if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                        if prop in vmconfig['state'][collection]:
                            continue
                    # create remove_ array
                    if 'remove_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['remove_{0}'.format(collection)] = []
                    # remove property
                    vmconfig['changed']['remove_{0}'.format(collection)].append(prop)
        # process instances (nics/disks matched by their unique id field)
        for instance in vmconfig_type['instance']:
            # skip create only instances
            if instance in vmconfig_type['create_only']:
                continue
            # add or update instances
            if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                for state_cfg in vmconfig['state'][instance]:
                    add_instance = True
                    # find instance with matching ids
                    for current_cfg in vmconfig['current'][instance]:
                        if vmconfig_type['instance'][instance] not in state_cfg:
                            continue
                        if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                            # ids have matched, disable add instance
                            add_instance = False
                            changed = _get_instance_changes(current_cfg, state_cfg)
                            update_cfg = {}
                            # handle changes
                            for prop in changed:
                                update_cfg[prop] = state_cfg[prop]
                            # handle new properties
                            for prop in state_cfg:
                                # skip empty props like ips, options,..
                                if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
                                    continue
                                if prop not in current_cfg:
                                    update_cfg[prop] = state_cfg[prop]
                            # update instance
                            if update_cfg:
                                # create update_ array
                                if 'update_{0}'.format(instance) not in vmconfig['changed']:
                                    vmconfig['changed']['update_{0}'.format(instance)] = []
                                update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
                                vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)
                    if add_instance:
                        # create add_ array
                        if 'add_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['add_{0}'.format(instance)] = []
                        # add instance
                        vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)
            # remove instances
            if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
                for current_cfg in vmconfig['current'][instance]:
                    remove_instance = True
                    # find instance with matching ids
                    if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                        for state_cfg in vmconfig['state'][instance]:
                            if vmconfig_type['instance'][instance] not in state_cfg:
                                continue
                            if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                                # keep instance if matched
                                remove_instance = False
                    if remove_instance:
                        # create remove_ array
                        if 'remove_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['remove_{0}'.format(instance)] = []
                        # remove instance
                        vmconfig['changed']['remove_{0}'.format(instance)].append(
                            current_cfg[vmconfig_type['instance'][instance]]
                        )
        # update vm if we have pending changes
        kvm_needs_start = False
        if not __opts__['test'] and vmconfig['changed']:
            # stop kvm if disk updates and kvm_reboot
            if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
                if 'add_disks' in vmconfig['changed'] or \
                    'update_disks' in vmconfig['changed'] or \
                    'remove_disks' in vmconfig['changed']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        kvm_needs_start = True
                        __salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')
            # do update
            rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['result'] = False
                ret['comment'] = "{0}".format(rret['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
        if ret['result']:
            if __opts__['test']:
                # in test mode report what would have been sent to vmadm.update
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
            if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
                ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
                if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        __salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
                if kvm_needs_start:
                    __salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
            else:
                ret['changes'] = {}
                ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])
            # reprovision (if required and allowed)
            if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
                if config['reprovision']:
                    rret = __salt__['vmadm.reprovision'](
                        vm=vmconfig['state']['hostname'],
                        key='hostname',
                        image=vmconfig['reprovision_uuid']
                    )
                    if not isinstance(rret, (bool)) and 'Error' in rret:
                        ret['result'] = False
                        ret['comment'] = 'vm {0} updated, reprovision failed'.format(
                            vmconfig['state']['hostname']
                        )
                    else:
                        ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
                        if vmconfig['state']['hostname'] not in ret['changes']:
                            ret['changes'][vmconfig['state']['hostname']] = {}
                        ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
                else:
                    log.warning('smartos.vm_present::%s::reprovision - '
                                'image_uuid in state does not match current, '
                                'reprovision not allowed',
                                name)
        else:
            ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['comment'] = "{0}".format(rret['Error'])
    else:
        # check required image installed
        ret['result'] = True
        # disks need some special care: path is assigned by vmadm on create
        if 'disks' in vmconfig:
            new_disks = []
            for disk in vmconfig['disks']:
                if 'path' in disk:
                    del disk['path']
                new_disks.append(disk)
            vmconfig['disks'] = new_disks
        # create vm
        if ret['result']:
            uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True
            if not isinstance(uuid, (bool)) and 'Error' in uuid:
                ret['result'] = False
                ret['comment'] = "{0}".format(uuid['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['hostname']] = vmconfig
                ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])
    return ret
def vm_absent(name, archive=False):
    '''
    Ensure vm is absent on the computenode

    name : string
        hostname of vm
    archive : boolean
        toggle archiving of vm on removal

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    if name not in __salt__['vmadm.list'](order='hostname'):
        # nothing to do, the vm is already gone
        ret['result'] = True
        ret['comment'] = 'vm {0} is absent'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = True
    else:
        if archive:
            # flag the vm so vmadm archives it on deletion
            __salt__['vmadm.update'](vm=name, key='hostname', archive_on_delete=True)
        ret['result'] = __salt__['vmadm.delete'](name, key='hostname')

    # a dict result carrying 'Error' signals a failed delete
    delete_failed = not isinstance(ret['result'], bool) and ret['result'].get('Error')
    if delete_failed:
        ret['result'] = False
        ret['comment'] = 'failed to delete vm {0}'.format(name)
    else:
        ret['comment'] = 'vm {0} deleted'.format(name)
        ret['changes'][name] = None
    return ret
def vm_running(name):
    '''
    Ensure vm is in the running state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    if name in __salt__['vmadm.list'](order='hostname', search='state=running'):
        # already in the desired state
        ret['result'] = True
        ret['comment'] = 'vm {0} already running'.format(name)
        return ret

    # issue the start (skipped when running in test mode)
    outcome = True if __opts__['test'] else __salt__['vmadm.start'](name, key='hostname')
    ret['result'] = outcome
    if not isinstance(outcome, bool) and outcome.get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to start {0}'.format(name)
    else:
        ret['changes'][name] = 'running'
        ret['comment'] = 'vm {0} started'.format(name)
    return ret
def vm_stopped(name):
    '''
    Ensure vm is in the stopped state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    if name in __salt__['vmadm.list'](order='hostname', search='state=stopped'):
        # already in the desired state
        ret['result'] = True
        ret['comment'] = 'vm {0} already stopped'.format(name)
        return ret

    # issue the stop (skipped when running in test mode)
    outcome = True if __opts__['test'] else __salt__['vmadm.stop'](name, key='hostname')
    ret['result'] = outcome
    if not isinstance(outcome, bool) and outcome.get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to stop {0}'.format(name)
    else:
        ret['changes'][name] = 'stopped'
        ret['comment'] = 'vm {0} stopped'.format(name)
    return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
saltstack/salt
|
salt/states/smartos.py
|
_get_instance_changes
|
python
|
def _get_instance_changes(current, state):
    '''
    Return the properties of ``state`` that differ from ``current``.

    current : dict
        configuration currently active on the system
    state : dict
        configuration requested by the state

    The result maps each changed property to ``{'old': ..., 'new': ...}``
    as produced by ``salt.utils.data.compare_dicts``. Entries where either
    side is an empty string are dropped (``compare_dicts`` uses ``""`` for
    keys that only exist on one side).
    '''
    # compare configs once (the original called compare_dicts twice and kept
    # two unused key sets); iterate over a snapshot so deletion is safe
    changed = salt.utils.data.compare_dicts(current, state)
    for change in list(changed):
        if changed[change]['old'] == "" or changed[change]['new'] == "":
            del changed[change]
    return changed
|
get modified properties
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/smartos.py#L256-L272
| null |
# -*- coding: utf-8 -*-
'''
Management of SmartOS Standalone Compute Nodes
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: vmadm, imgadm
:platform: smartos
.. versionadded:: 2016.3.0
.. code-block:: yaml
vmtest.example.org:
smartos.vm_present:
- config:
reprovision: true
- vmconfig:
image_uuid: c02a2044-c1bd-11e4-bd8c-dfc1db8b0182
brand: joyent
alias: vmtest
quota: 5
max_physical_memory: 512
tags:
label: 'test vm'
owner: 'sjorge'
nics:
"82:1b:8e:49:e9:12":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.123/16
- 192.168.2.123/24
vlan_id: 10
"82:1b:8e:49:e9:13":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
filesystems:
"/bigdata":
source: "/bulk/data"
type: lofs
options:
- ro
- nodevices
kvmtest.example.org:
smartos.vm_present:
- vmconfig:
brand: kvm
alias: kvmtest
cpu_type: host
ram: 512
vnc_port: 9
tags:
label: 'test kvm'
owner: 'sjorge'
disks:
disk0
size: 2048
model: virtio
compression: lz4
boot: true
nics:
"82:1b:8e:49:e9:15":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
docker.example.org:
smartos.vm_present:
- config:
auto_import: true
reprovision: true
- vmconfig:
image_uuid: emby/embyserver:latest
brand: lx
alias: mydockervm
quota: 5
max_physical_memory: 1024
tags:
label: 'my emby docker'
owner: 'sjorge'
resolvers:
- 172.16.1.1
nics:
"82:1b:8e:49:e9:18":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.118/24
vlan_id: 10
filesystems:
          "/config":
source: "/vmdata/emby_config"
type: lofs
options:
- nodevices
cleanup_images:
smartos.image_vacuum
.. note::
Keep in mind that when removing properties from vmconfig they will not get
removed from the vm's current configuration, except for nics, disk, tags, ...
they get removed via add_*, set_*, update_*, and remove_*. Properties must
be manually reset to their default value.
The same behavior as when using 'vmadm update'.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import json
import logging
import os
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.files
import salt.utils.odict
import salt.utils.stringutils
# Import 3rd party libs
from salt.ext import six
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = 'smartos'
def __virtual__():
    '''
    Provides smartos state provided for SmartOS
    '''
    # the vmadm/imgadm execution modules only load on SmartOS, so their
    # presence tells us whether this state module is usable here
    if 'vmadm.create' not in __salt__ or 'imgadm.list' not in __salt__:
        return (
            False,
            '{0} state module can only be loaded on SmartOS compute nodes'.format(
                __virtualname__
            )
        )
    return True
def _split_docker_uuid(uuid):
'''
Split a smartos docker uuid into repo and tag
'''
if uuid:
uuid = uuid.split(':')
if len(uuid) == 2:
tag = uuid[1]
repo = uuid[0]
return repo, tag
return None, None
def _is_uuid(uuid):
'''
Check if uuid is a valid smartos uuid
Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8
'''
if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:
return True
return False
def _is_docker_uuid(uuid):
    '''
    Check if uuid is a valid smartos docker uuid

    Example: plexinc/pms-docker:plexpass
    '''
    repo, tag = _split_docker_uuid(uuid)
    # a docker reference needs at least one non-empty component
    return bool(repo or tag)
def _load_config():
    '''
    Loads and parses /usbkey/config

    Returns a dict mapping lowercased property names to their values with
    surrounding quotes stripped. Comment lines and lines without '=' are
    skipped; a missing /usbkey/config yields an empty dict.
    '''
    config = {}

    if os.path.isfile('/usbkey/config'):
        with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:
            for optval in config_file:
                optval = salt.utils.stringutils.to_unicode(optval)
                if optval[0] == '#':
                    continue
                if '=' not in optval:
                    continue
                # split on the first '=' only, so values that themselves
                # contain '=' are preserved intact
                optval = optval.split('=', 1)
                config[optval[0].lower()] = optval[1].strip().strip('"')
    log.debug('smartos.config - read /usbkey/config: %s', config)
    return config
def _write_config(config):
    '''
    writes /usbkey/config

    config : dict
        property name to value mapping to persist

    Returns True on success, False when the file could not be written.
    '''
    try:
        # atomic_open prevents a partially written config on failure
        with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:
            config_file.write("#\n# This file was generated by salt\n#\n")
            # write properties sorted by name
            for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
                # values containing spaces must be quoted
                if ' ' in six.text_type(config[prop]):
                    if not config[prop].startswith('"') or not config[prop].endswith('"'):
                        config[prop] = '"{0}"'.format(config[prop])
                config_file.write(
                    salt.utils.stringutils.to_str(
                        "{0}={1}\n".format(prop, config[prop])
                    )
                )
        log.debug('smartos.config - wrote /usbkey/config: %s', config)
    except IOError:
        return False
    return True
def _parse_vmconfig(config, instances):
    '''
    Parse vm_present vm config

    config : OrderedDict
        raw vmconfig from the state file
    instances : dict
        maps an instance property name (e.g. 'nics') to its unique id
        field (e.g. 'mac')

    Instance mappings are flattened from {id: {...}} dicts into lists of
    dicts with the id folded in under its id field. Returns None when the
    input is not an OrderedDict.
    '''
    vmconfig = None

    if isinstance(config, (salt.utils.odict.OrderedDict)):
        vmconfig = salt.utils.odict.OrderedDict()
        for prop in config:
            if prop not in instances:
                vmconfig[prop] = config[prop]
            else:
                if not isinstance(config[prop], (salt.utils.odict.OrderedDict)):
                    continue
                vmconfig[prop] = []
                for instance in config[prop]:
                    instance_config = config[prop][instance]
                    # fold the mapping key back in as the unique id field
                    instance_config[instances[prop]] = instance
                    ## some property are lowercase
                    if 'mac' in instance_config:
                        instance_config['mac'] = instance_config['mac'].lower()
                    vmconfig[prop].append(instance_config)
    else:
        log.error('smartos.vm_present::parse_vmconfig - failed to parse')
    return vmconfig
def _copy_lx_vars(vmconfig):
    '''
    Copy kernel_version and docker:* metadata from the image into vmconfig.

    Only acts when vmconfig carries an image_uuid; docker:* values already
    set by the user are kept, except docker:env which is merged (user
    entries override the image's entries of the same name).
    '''
    # NOTE: documentation on dockerinit: https://github.com/joyent/smartos-live/blob/master/src/dockerinit/README.md
    if 'image_uuid' in vmconfig:
        # NOTE: retrieve tags and type from image
        imgconfig = __salt__['imgadm.get'](vmconfig['image_uuid']).get('manifest', {})
        imgtype = imgconfig.get('type', 'zone-dataset')
        imgtags = imgconfig.get('tags', {})
        # NOTE: copy kernel_version (if not specified in vmconfig)
        if 'kernel_version' not in vmconfig and 'kernel_version' in imgtags:
            vmconfig['kernel_version'] = imgtags['kernel_version']
        # NOTE: copy docker vars
        if imgtype == 'docker':
            vmconfig['docker'] = True
            vmconfig['kernel_version'] = vmconfig.get('kernel_version', '4.3.0')
            if 'internal_metadata' not in vmconfig:
                vmconfig['internal_metadata'] = {}
            for var in imgtags.get('docker:config', {}):
                val = imgtags['docker:config'][var]
                var = 'docker:{0}'.format(var.lower())
                # NOTE: skip empty values
                if not val:
                    continue
                # NOTE: skip or merge user values
                if var == 'docker:env':
                    # user-provided env is stored as a JSON string; an
                    # unparsable/absent value means no user entries
                    try:
                        val_config = json.loads(
                            vmconfig['internal_metadata'].get(var, "")
                        )
                    except ValueError as e:
                        val_config = []
                    for config_env_var in val_config if isinstance(val_config, list) else json.loads(val_config):
                        config_env_var = config_env_var.split('=')
                        # drop the image's entry of the same name, then
                        # append the user's entry so it wins
                        for img_env_var in val:
                            if img_env_var.startswith('{0}='.format(config_env_var[0])):
                                val.remove(img_env_var)
                        val.append('='.join(config_env_var))
                elif var in vmconfig['internal_metadata']:
                    continue
                if isinstance(val, list):
                    # NOTE: string-encoded JSON arrays
                    vmconfig['internal_metadata'][var] = json.dumps(val)
                else:
                    vmconfig['internal_metadata'][var] = val
    return vmconfig
def config_present(name, value):
    '''
    Ensure configuration property is set to value in /usbkey/config

    name : string
        name of property
    value : string
        value of property
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    # load current configuration
    config = _load_config()

    # normalize booleans and falsy values to their string form
    if isinstance(value, (bool)):
        value = 'true' if value else 'false'
    if not value:
        value = ""

    ret['result'] = True
    if name in config and six.text_type(config[name]) == six.text_type(value):
        # nothing to do, value already matches
        ret['comment'] = 'property {0} already has value "{1}"'.format(name, value)
    else:
        verb = 'updated' if name in config else 'added'
        ret['comment'] = '{0} property {1} with value "{2}"'.format(verb, name, value)
        ret['changes'][name] = value
        config[name] = value

    # persist change if needed
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)
    return ret
def config_absent(name):
    '''
    Ensure configuration property is absent in /usbkey/config

    name : string
        name of property
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    # load current configuration
    config = _load_config()
    ret['result'] = True

    if name not in config:
        # already gone, nothing to do
        ret['comment'] = 'property {0} is absent'.format(name)
    else:
        ret['comment'] = 'property {0} deleted'.format(name)
        ret['changes'][name] = None
        del config[name]

    # persist change if needed
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)
    return ret
def source_present(name, source_type='imgapi'):
    '''
    Ensure an image source is present on the computenode

    name : string
        source url
    source_type : string
        source type (imgapi or docker)
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    if name in __salt__['imgadm.sources']():
        # source already configured
        ret['result'] = True
        ret['comment'] = 'image source {0} is present'.format(name)
        return ret

    # add new source (skipped when running in test mode)
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_add'](name, source_type)
        ret['result'] = (name in res)

    if ret['result']:
        ret['comment'] = 'image source {0} added'.format(name)
        ret['changes'][name] = 'added'
    else:
        ret['comment'] = 'image source {0} not added'.format(name)
        if 'Error' in res:
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
    return ret
def source_absent(name):
    '''
    Ensure an image source is absent on the computenode

    name : string
        source url
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    if name not in __salt__['imgadm.sources']():
        # source already gone
        ret['result'] = True
        ret['comment'] = 'image source {0} is absent'.format(name)
        return ret

    # remove source (skipped when running in test mode)
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_delete'](name)
        ret['result'] = (name not in res)

    if ret['result']:
        ret['comment'] = 'image source {0} deleted'.format(name)
        ret['changes'][name] = 'deleted'
    else:
        ret['comment'] = 'image source {0} not deleted'.format(name)
        if 'Error' in res:
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
    return ret
def image_present(name):
    '''
    Ensure image is present on the computenode

    name : string
        uuid of image (a plain smartos uuid or a docker repo:tag reference)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if _is_docker_uuid(name) and __salt__['imgadm.docker_to_uuid'](name):
        # docker image was imported
        ret['result'] = True
        ret['comment'] = 'image {0} ({1}) is present'.format(
            name,
            __salt__['imgadm.docker_to_uuid'](name),
        )
    elif name in __salt__['imgadm.list']():
        # image was already imported
        ret['result'] = True
        ret['comment'] = 'image {0} is present'.format(name)
    else:
        # add image
        if _is_docker_uuid(name):
            # NOTE: we cannot query available docker images
            available_images = [name]
        else:
            available_images = __salt__['imgadm.avail']()
        if name in available_images:
            if __opts__['test']:
                ret['result'] = True
                res = {}
                if _is_docker_uuid(name):
                    # placeholder uuid: the real one is only known after import
                    res['00000000-0000-0000-0000-000000000000'] = name
                else:
                    res[name] = available_images[name]
            else:
                res = __salt__['imgadm.import'](name)
                if _is_uuid(name):
                    ret['result'] = (name in res)
                elif _is_docker_uuid(name):
                    # docker imports are verified via the uuid lookup
                    ret['result'] = __salt__['imgadm.docker_to_uuid'](name) is not None
            if ret['result']:
                ret['comment'] = 'image {0} imported'.format(name)
                ret['changes'] = res
            else:
                ret['comment'] = 'image {0} was unable to be imported'.format(name)
        else:
            ret['result'] = False
            ret['comment'] = 'image {0} does not exists'.format(name)
    return ret
def image_absent(name):
    '''
    Ensure image is absent on the computenode

    name : string
        uuid of image (a plain smartos uuid or a docker repo:tag reference)

    .. note::
        computenode.image_absent will only remove the image if it is not used
        by a vm.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # resolve the name to a plain image uuid; docker references are looked up
    uuid = None
    if _is_uuid(name):
        uuid = name
    if _is_docker_uuid(name):
        uuid = __salt__['imgadm.docker_to_uuid'](name)
    if not uuid or uuid not in __salt__['imgadm.list']():
        # image not imported
        ret['result'] = True
        ret['comment'] = 'image {0} is absent'.format(name)
    else:
        # check if image in use by vm
        if uuid in __salt__['vmadm.list'](order='image_uuid'):
            ret['result'] = False
            ret['comment'] = 'image {0} currently in use by a vm'.format(name)
        else:
            # delete image
            if __opts__['test']:
                ret['result'] = True
            else:
                image = __salt__['imgadm.get'](uuid)
                image_count = 0
                if image['manifest']['name'] == 'docker-layer':
                    # NOTE: docker images are made of multiple layers, loop over them
                    # by following each layer's 'origin' pointer until none is left
                    while image:
                        image_count += 1
                        __salt__['imgadm.delete'](image['manifest']['uuid'])
                        if 'origin' in image['manifest']:
                            image = __salt__['imgadm.get'](image['manifest']['origin'])
                        else:
                            image = None
                else:
                    # NOTE: normal images can just be deleted
                    __salt__['imgadm.delete'](uuid)
                # success means the uuid no longer shows up in the image list
                ret['result'] = uuid not in __salt__['imgadm.list']()
                if image_count:
                    ret['comment'] = 'image {0} and {1} children deleted'.format(name, image_count)
                else:
                    ret['comment'] = 'image {0} deleted'.format(name)
                ret['changes'][name] = None
    return ret
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present

    .. warning::
        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # list of images to keep
    images = []
    # retrieve image_present state data for host
    for state in __salt__['state.show_lowstate']():
        # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue
        # skip if not from this state module
        if state['state'] != __virtualname__:
            continue
        # skip if not image_present
        if state['fun'] not in ['image_present']:
            continue
        # keep images installed via image_present
        if 'name' in state:
            if _is_uuid(state['name']):
                images.append(state['name'])
            elif _is_docker_uuid(state['name']):
                # docker names must be resolved to their image uuid first
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                images.append(state['name'])
    # retrieve images in use by vms
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in images:
            images.append(image_uuid)
    # purge unused images
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in images:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':
            # NOTE: docker images are made of multiple layers, loop over them
            # NOTE(review): parent layers are deleted even if a parent uuid is
            # itself on the keep list -- confirm this is intended
            while image:
                image_uuid = image['manifest']['uuid']
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                if 'origin' in image['manifest']:
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:
            # NOTE: normal images can just be delete
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'
    # summarize the outcome when nothing failed
    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'
    return ret
def vm_present(name, vmconfig, config=None):
    '''
    Ensure vm is present on the computenode

    name : string
        hostname of vm
    vmconfig : dict
        options to set for the vm
    config : dict
        fine grain control over vm_present

    .. note::
        The following configuration properties can be toggled in the config parameter.
          - kvm_reboot (true)        - reboots of kvm zones if needed for a config update
          - auto_import (false)      - automatic importing of missing images
          - auto_lx_vars (true)      - copy kernel_version and docker:* variables from image
          - reprovision (false)      - reprovision on image_uuid changes
          - enforce_tags (true)      - false = add tags only, true = add, update, and remove tags
          - enforce_routes (true)    - false = add tags only, true = add, update, and remove routes
          - enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
          - enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata

    .. note::
        State ID is used as hostname. Hostnames must be unique.

    .. note::
        If hostname is provided in vmconfig this will take precedence over the State ID.
        This allows multiple states to be applied to the same vm.

    .. note::
        The following instances should have a unique ID.
          - nic : mac
          - filesystem: target
          - disk : path or diskN for zvols

        e.g. disk0 will be the first disk added, disk1 the 2nd,...

    .. versionchanged:: 2019.2.0
        Added support for docker image uuids, added auto_lx_vars configuration, documented some missing configuration options.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # config defaults
    state_config = config if config else {}
    config = {
        'kvm_reboot': True,
        'auto_import': False,
        'auto_lx_vars': True,
        'reprovision': False,
        'enforce_tags': True,
        'enforce_routes': True,
        'enforce_internal_metadata': True,
        'enforce_customer_metadata': True,
    }
    config.update(state_config)
    log.debug('smartos.vm_present::%s::config - %s', name, config)
    # map special vmconfig parameters
    #   collections have set/remove handlers
    #   instances have add/update/remove handlers and a unique id
    vmconfig_type = {
        'collection': [
            'tags',
            'customer_metadata',
            'internal_metadata',
            'routes'
        ],
        'instance': {
            'nics': 'mac',
            'disks': 'path',
            'filesystems': 'target'
        },
        'create_only': [
            'filesystems'
        ]
    }
    # internal_metadata keys that dockerinit owns and that must survive updates
    vmconfig_docker_keep = [
        'docker:id',
        'docker:restartcount',
    ]
    # internal_metadata keys whose values are string-encoded JSON arrays
    vmconfig_docker_array = [
        'docker:env',
        'docker:cmd',
        'docker:entrypoint',
    ]
    # parse vmconfig
    vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
    log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)
    # set hostname if needed
    if 'hostname' not in vmconfig:
        vmconfig['hostname'] = name
    # prepare image_uuid
    if 'image_uuid' in vmconfig:
        # NOTE: lookup uuid from docker uuid (normal uuid's are passed through unmodified)
        #       we must do this again if we end up importing a missing image later!
        docker_uuid = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
        vmconfig['image_uuid'] = docker_uuid if docker_uuid else vmconfig['image_uuid']
        # NOTE: import image (if missing and allowed)
        if vmconfig['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](vmconfig['image_uuid'])
                    vmconfig['image_uuid'] = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
                    if vmconfig['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])
    # prepare disk.*.image_uuid
    for disk in vmconfig['disks'] if 'disks' in vmconfig else []:
        if 'image_uuid' in disk and disk['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](disk['image_uuid'])
                    if disk['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(disk['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(disk['image_uuid'])
    # docker json-array handling
    if 'internal_metadata' in vmconfig:
        for var in vmconfig_docker_array:
            if var not in vmconfig['internal_metadata']:
                continue
            if isinstance(vmconfig['internal_metadata'][var], list):
                # vmadm expects these as string-encoded JSON, not YAML lists
                vmconfig['internal_metadata'][var] = json.dumps(
                    vmconfig['internal_metadata'][var]
                )
    # copy lx variables
    if vmconfig['brand'] == 'lx' and config['auto_lx_vars']:
        # NOTE: we can only copy the lx vars after the image has been imported
        vmconfig = _copy_lx_vars(vmconfig)
    # quick abort if things look wrong
    # NOTE: use explicit check for false, otherwise None also matches!
    if ret['result'] is False:
        return ret
    # check if vm exists
    if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
        # update vm
        ret['result'] = True
        # expand vmconfig
        vmconfig = {
            'state': vmconfig,
            'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
            'changed': {},
            'reprovision_uuid': None
        }
        # prepare reprovision
        if 'image_uuid' in vmconfig['state']:
            vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
            vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']
        # disks need some special care
        if 'disks' in vmconfig['state']:
            new_disks = []
            for disk in vmconfig['state']['disks']:
                path = False
                if 'disks' in vmconfig['current']:
                    for cdisk in vmconfig['current']['disks']:
                        if cdisk['path'].endswith(disk['path']):
                            path = cdisk['path']
                            break
                if not path:
                    del disk['path']
                else:
                    disk['path'] = path
                new_disks.append(disk)
            vmconfig['state']['disks'] = new_disks
        # process properties
        for prop in vmconfig['state']:
            # skip special vmconfig_types
            if prop in vmconfig_type['instance'] or \
                prop in vmconfig_type['collection'] or \
                prop in vmconfig_type['create_only']:
                continue
            # skip unchanged properties
            if prop in vmconfig['current']:
                if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
                    if vmconfig['current'][prop] == vmconfig['state'][prop]:
                        continue
                else:
                    # compare as strings so e.g. int vs str values still match
                    if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
                        continue
            # add property to changeset
            vmconfig['changed'][prop] = vmconfig['state'][prop]
        # process collections
        for collection in vmconfig_type['collection']:
            # skip create only collections
            if collection in vmconfig_type['create_only']:
                continue
            # enforcement
            enforce = config['enforce_{0}'.format(collection)]
            log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)
            # dockerinit handling
            if collection == 'internal_metadata' and vmconfig['state'].get('docker', False):
                if 'internal_metadata' not in vmconfig['state']:
                    vmconfig['state']['internal_metadata'] = {}
                # preserve some docker specific metadata (added and needed by dockerinit)
                for var in vmconfig_docker_keep:
                    val = vmconfig['current'].get(collection, {}).get(var, None)
                    if val is not None:
                        vmconfig['state']['internal_metadata'][var] = val
            # process add and update for collection
            if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                for prop in vmconfig['state'][collection]:
                    # skip unchanged properties
                    if prop in vmconfig['current'][collection] and \
                        vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
                        continue
                    # skip update if not enforcing
                    if not enforce and prop in vmconfig['current'][collection]:
                        continue
                    # create set_ dict
                    if 'set_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['set_{0}'.format(collection)] = {}
                    # add property to changeset
                    vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]
            # process remove for collection
            if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
                for prop in vmconfig['current'][collection]:
                    # skip if exists in state
                    if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                        if prop in vmconfig['state'][collection]:
                            continue
                    # create remove_ array
                    if 'remove_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['remove_{0}'.format(collection)] = []
                    # remove property
                    vmconfig['changed']['remove_{0}'.format(collection)].append(prop)
        # process instances
        for instance in vmconfig_type['instance']:
            # skip create only instances
            if instance in vmconfig_type['create_only']:
                continue
            # add or update instances
            if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                for state_cfg in vmconfig['state'][instance]:
                    add_instance = True
                    # find instance with matching ids
                    for current_cfg in vmconfig['current'][instance]:
                        if vmconfig_type['instance'][instance] not in state_cfg:
                            continue
                        if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                            # ids have matched, disable add instance
                            add_instance = False
                            changed = _get_instance_changes(current_cfg, state_cfg)
                            update_cfg = {}
                            # handle changes
                            for prop in changed:
                                update_cfg[prop] = state_cfg[prop]
                            # handle new properties
                            for prop in state_cfg:
                                # skip empty props like ips, options,..
                                if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
                                    continue
                                if prop not in current_cfg:
                                    update_cfg[prop] = state_cfg[prop]
                            # update instance
                            if update_cfg:
                                # create update_ array
                                if 'update_{0}'.format(instance) not in vmconfig['changed']:
                                    vmconfig['changed']['update_{0}'.format(instance)] = []
                                update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
                                vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)
                    if add_instance:
                        # create add_ array
                        if 'add_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['add_{0}'.format(instance)] = []
                        # add instance
                        vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)
            # remove instances
            if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
                for current_cfg in vmconfig['current'][instance]:
                    remove_instance = True
                    # find instance with matching ids
                    if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                        for state_cfg in vmconfig['state'][instance]:
                            if vmconfig_type['instance'][instance] not in state_cfg:
                                continue
                            if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                                # keep instance if matched
                                remove_instance = False
                    if remove_instance:
                        # create remove_ array
                        if 'remove_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['remove_{0}'.format(instance)] = []
                        # remove instance
                        vmconfig['changed']['remove_{0}'.format(instance)].append(
                            current_cfg[vmconfig_type['instance'][instance]]
                        )
        # update vm if we have pending changes
        kvm_needs_start = False
        if not __opts__['test'] and vmconfig['changed']:
            # stop kvm if disk updates and kvm_reboot
            if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
                if 'add_disks' in vmconfig['changed'] or \
                    'update_disks' in vmconfig['changed'] or \
                    'remove_disks' in vmconfig['changed']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        kvm_needs_start = True
                        __salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')
            # do update
            rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['result'] = False
                ret['comment'] = "{0}".format(rret['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
        if ret['result']:
            if __opts__['test']:
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
            if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
                ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
                # kvm zones need a reboot to pick up config changes
                if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        __salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
                    if kvm_needs_start:
                        __salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
            else:
                ret['changes'] = {}
                ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])
            # reprovision (if required and allowed)
            if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
                if config['reprovision']:
                    rret = __salt__['vmadm.reprovision'](
                        vm=vmconfig['state']['hostname'],
                        key='hostname',
                        image=vmconfig['reprovision_uuid']
                    )
                    if not isinstance(rret, (bool)) and 'Error' in rret:
                        ret['result'] = False
                        ret['comment'] = 'vm {0} updated, reprovision failed'.format(
                            vmconfig['state']['hostname']
                        )
                    else:
                        ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
                        if vmconfig['state']['hostname'] not in ret['changes']:
                            ret['changes'][vmconfig['state']['hostname']] = {}
                        ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
                else:
                    log.warning('smartos.vm_present::%s::reprovision - '
                                'image_uuid in state does not match current, '
                                'reprovision not allowed',
                                name)
        else:
            ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['comment'] = "{0}".format(rret['Error'])
    else:
        # check required image installed
        ret['result'] = True
        # disks need some special care
        if 'disks' in vmconfig:
            new_disks = []
            for disk in vmconfig['disks']:
                if 'path' in disk:
                    del disk['path']
                new_disks.append(disk)
            vmconfig['disks'] = new_disks
        # create vm
        if ret['result']:
            uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True
            if not isinstance(uuid, (bool)) and 'Error' in uuid:
                ret['result'] = False
                ret['comment'] = "{0}".format(uuid['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['hostname']] = vmconfig
                ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])
    return ret
def vm_absent(name, archive=False):
    '''
    Ensure vm is absent on the computenode

    name : string
        hostname of vm
    archive : boolean
        toggle archiving of vm on removal

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # nothing to do if the vm does not exist
    if name not in __salt__['vmadm.list'](order='hostname'):
        ret['result'] = True
        ret['comment'] = 'vm {0} is absent'.format(name)
        return ret
    if __opts__['test']:
        ret['result'] = True
    else:
        # flag the vm for archiving before deletion when requested
        if archive:
            __salt__['vmadm.update'](vm=name, key='hostname', archive_on_delete=True)
        ret['result'] = __salt__['vmadm.delete'](name, key='hostname')
    # vmadm.delete returns a dict with an 'Error' key on failure
    delete_failed = not isinstance(ret['result'], bool) and ret['result'].get('Error')
    if delete_failed:
        ret['result'] = False
        ret['comment'] = 'failed to delete vm {0}'.format(name)
    else:
        ret['comment'] = 'vm {0} deleted'.format(name)
        ret['changes'][name] = None
    return ret
def vm_running(name):
    '''
    Ensure vm is in the running state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # already running? then there is nothing to do
    if name in __salt__['vmadm.list'](order='hostname', search='state=running'):
        ret['result'] = True
        ret['comment'] = 'vm {0} already running'.format(name)
        return ret
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.start'](name, key='hostname')
    # vmadm.start returns a dict with an 'Error' key on failure
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to start {0}'.format(name)
    else:
        ret['changes'][name] = 'running'
        ret['comment'] = 'vm {0} started'.format(name)
    return ret
def vm_stopped(name):
    '''
    Ensure vm is in the stopped state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # already stopped? then there is nothing to do
    if name in __salt__['vmadm.list'](order='hostname', search='state=stopped'):
        ret['result'] = True
        ret['comment'] = 'vm {0} already stopped'.format(name)
        return ret
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.stop'](name, key='hostname')
    # vmadm.stop returns a dict with an 'Error' key on failure
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to stop {0}'.format(name)
    else:
        ret['changes'][name] = 'stopped'
        ret['comment'] = 'vm {0} stopped'.format(name)
    return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
saltstack/salt
|
salt/states/smartos.py
|
config_present
|
python
|
def config_present(name, value):
    '''
    Ensure configuration property is set to value in /usbkey/config

    name : string
        name of property
    value : string
        value of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # load configuration
    config = _load_config()
    # handle bool and None value
    if isinstance(value, (bool)):
        # /usbkey/config stores booleans as the literal strings true/false
        value = 'true' if value else 'false'
    if not value:
        value = ""
    if name in config:
        # compare as text so e.g. int 80 matches the stored string "80"
        if six.text_type(config[name]) == six.text_type(value):
            # we're good
            ret['result'] = True
            ret['comment'] = 'property {0} already has value "{1}"'.format(name, value)
        else:
            # update property
            ret['result'] = True
            ret['comment'] = 'updated property {0} with value "{1}"'.format(name, value)
            ret['changes'][name] = value
            config[name] = value
    else:
        # add property
        ret['result'] = True
        ret['comment'] = 'added property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value
    # apply change if needed (skipped in test mode)
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)
    return ret
|
Ensure configuration property is set to value in /usbkey/config
name : string
name of property
value : string
value of property
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/smartos.py#L329-L376
|
[
"def _load_config():\n '''\n Loads and parses /usbkey/config\n '''\n config = {}\n\n if os.path.isfile('/usbkey/config'):\n with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:\n for optval in config_file:\n optval = salt.utils.stringutils.to_unicode(optval)\n if optval[0] == '#':\n continue\n if '=' not in optval:\n continue\n optval = optval.split('=')\n config[optval[0].lower()] = optval[1].strip().strip('\"')\n log.debug('smartos.config - read /usbkey/config: %s', config)\n return config\n",
"def _write_config(config):\n '''\n writes /usbkey/config\n '''\n try:\n with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:\n config_file.write(\"#\\n# This file was generated by salt\\n#\\n\")\n for prop in salt.utils.odict.OrderedDict(sorted(config.items())):\n if ' ' in six.text_type(config[prop]):\n if not config[prop].startswith('\"') or not config[prop].endswith('\"'):\n config[prop] = '\"{0}\"'.format(config[prop])\n config_file.write(\n salt.utils.stringutils.to_str(\n \"{0}={1}\\n\".format(prop, config[prop])\n )\n )\n log.debug('smartos.config - wrote /usbkey/config: %s', config)\n except IOError:\n return False\n\n return True\n"
] |
# -*- coding: utf-8 -*-
'''
Management of SmartOS Standalone Compute Nodes
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: vmadm, imgadm
:platform: smartos
.. versionadded:: 2016.3.0
.. code-block:: yaml
vmtest.example.org:
smartos.vm_present:
- config:
reprovision: true
- vmconfig:
image_uuid: c02a2044-c1bd-11e4-bd8c-dfc1db8b0182
brand: joyent
alias: vmtest
quota: 5
max_physical_memory: 512
tags:
label: 'test vm'
owner: 'sjorge'
nics:
"82:1b:8e:49:e9:12":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.123/16
- 192.168.2.123/24
vlan_id: 10
"82:1b:8e:49:e9:13":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
filesystems:
"/bigdata":
source: "/bulk/data"
type: lofs
options:
- ro
- nodevices
kvmtest.example.org:
smartos.vm_present:
- vmconfig:
brand: kvm
alias: kvmtest
cpu_type: host
ram: 512
vnc_port: 9
tags:
label: 'test kvm'
owner: 'sjorge'
disks:
        disk0:
size: 2048
model: virtio
compression: lz4
boot: true
nics:
"82:1b:8e:49:e9:15":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
docker.example.org:
smartos.vm_present:
- config:
auto_import: true
reprovision: true
- vmconfig:
image_uuid: emby/embyserver:latest
brand: lx
alias: mydockervm
quota: 5
max_physical_memory: 1024
tags:
label: 'my emby docker'
owner: 'sjorge'
resolvers:
- 172.16.1.1
nics:
"82:1b:8e:49:e9:18":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.118/24
vlan_id: 10
filesystems:
"/config:
source: "/vmdata/emby_config"
type: lofs
options:
- nodevices
cleanup_images:
smartos.image_vacuum
.. note::
Keep in mind that when removing properties from vmconfig they will not get
removed from the vm's current configuration, except for nics, disk, tags, ...
they get removed via add_*, set_*, update_*, and remove_*. Properties must
be manually reset to their default value.
The same behavior as when using 'vmadm update'.
'''
from __future__ import absolute_import, print_function, unicode_literals

# Import Python libs
import json
import logging
import os

# Import Salt libs
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.files
import salt.utils.odict
import salt.utils.stringutils

# Import 3rd party libs
from salt.ext import six
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = 'smartos'
def __virtual__():
    '''
    Load this state module only on SmartOS compute nodes where the
    vmadm and imgadm execution modules are available.
    '''
    have_tools = 'vmadm.create' in __salt__ and 'imgadm.list' in __salt__
    if have_tools:
        return True
    reason = '{0} state module can only be loaded on SmartOS compute nodes'.format(
        __virtualname__
    )
    return (False, reason)
def _split_docker_uuid(uuid):
'''
Split a smartos docker uuid into repo and tag
'''
if uuid:
uuid = uuid.split(':')
if len(uuid) == 2:
tag = uuid[1]
repo = uuid[0]
return repo, tag
return None, None
def _is_uuid(uuid):
'''
Check if uuid is a valid smartos uuid
Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8
'''
if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:
return True
return False
def _is_docker_uuid(uuid):
    '''
    Return True when *uuid* looks like a SmartOS docker uuid, i.e. a
    "repo:tag" pair where at least one side is non-empty.

    Example: plexinc/pms-docker:plexpass
    '''
    if not uuid:
        return False
    parts = uuid.split(':')
    if len(parts) != 2:
        return False
    repo, tag = parts
    return bool(repo or tag)
def _load_config():
    '''
    Load and parse /usbkey/config into a dict.

    Returns an empty dict when the file does not exist. Keys are
    lower-cased; values have surrounding whitespace and double quotes
    stripped. Comment lines (starting with '#') and lines without '='
    are ignored.
    '''
    config = {}

    if os.path.isfile('/usbkey/config'):
        with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:
            for optval in config_file:
                optval = salt.utils.stringutils.to_unicode(optval)
                if optval[0] == '#':
                    continue
                if '=' not in optval:
                    continue
                # split only on the first '=' so values that themselves
                # contain '=' (e.g. base64 data) are not truncated
                optval = optval.split('=', 1)
                config[optval[0].lower()] = optval[1].strip().strip('"')
    log.debug('smartos.config - read /usbkey/config: %s', config)
    return config
def _write_config(config):
    '''
    writes /usbkey/config

    Properties are written sorted by name; values containing spaces are
    double-quoted. Returns True on success, False when the file could
    not be written.
    '''
    try:
        # atomic_open writes to a temp file and renames it into place, so a
        # failed write cannot leave a truncated /usbkey/config behind
        with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:
            config_file.write("#\n# This file was generated by salt\n#\n")
            for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
                # quote values containing spaces (unless already quoted)
                if ' ' in six.text_type(config[prop]):
                    if not config[prop].startswith('"') or not config[prop].endswith('"'):
                        config[prop] = '"{0}"'.format(config[prop])
                config_file.write(
                    salt.utils.stringutils.to_str(
                        "{0}={1}\n".format(prop, config[prop])
                    )
                )
        log.debug('smartos.config - wrote /usbkey/config: %s', config)
    except IOError:
        return False

    return True
def _parse_vmconfig(config, instances):
    '''
    Parse vm_present vm config

    config
        OrderedDict of vm properties as given in the state
    instances
        mapping of instance property name -> unique id key
        (e.g. {'nics': 'mac'}); instance mappings in the state are
        flattened into lists of dicts carrying their id key

    Returns the parsed OrderedDict, or None when config is not an
    OrderedDict (an error is logged in that case).
    '''
    vmconfig = None

    if isinstance(config, (salt.utils.odict.OrderedDict)):
        vmconfig = salt.utils.odict.OrderedDict()
        for prop in config:
            if prop not in instances:
                vmconfig[prop] = config[prop]
            else:
                # instance properties are given as {id: {..}} mappings;
                # skip anything that is not such a mapping
                if not isinstance(config[prop], (salt.utils.odict.OrderedDict)):
                    continue
                vmconfig[prop] = []
                for instance in config[prop]:
                    instance_config = config[prop][instance]
                    # move the mapping key into the dict under its id key
                    instance_config[instances[prop]] = instance
                    ## some property are lowercase
                    if 'mac' in instance_config:
                        instance_config['mac'] = instance_config['mac'].lower()
                    vmconfig[prop].append(instance_config)
    else:
        log.error('smartos.vm_present::parse_vmconfig - failed to parse')

    return vmconfig
def _get_instance_changes(current, state):
    '''
    Return the properties that differ between the current and desired
    instance configs, as produced by salt.utils.data.compare_dicts
    ({key: {'old': ..., 'new': ...}}).

    Entries whose old or new value is the empty string are dropped, so
    additions/removals of empty values are not reported as changes.
    '''
    # compare configs once (the original recomputed this per iteration
    # and kept two unused key-set locals)
    changed = salt.utils.data.compare_dicts(current, state)
    for change in list(changed):
        if changed[change]['old'] == "" or changed[change]['new'] == "":
            del changed[change]

    return changed
def _copy_lx_vars(vmconfig):
    '''
    Copy lx-brand variables from the image manifest into vmconfig:
    kernel_version and, for docker images, the docker:* internal_metadata
    entries dockerinit expects. Returns the (possibly updated) vmconfig.
    '''
    # NOTE: documentation on dockerinit: https://github.com/joyent/smartos-live/blob/master/src/dockerinit/README.md
    if 'image_uuid' in vmconfig:
        # NOTE: retrieve tags and type from image
        imgconfig = __salt__['imgadm.get'](vmconfig['image_uuid']).get('manifest', {})
        imgtype = imgconfig.get('type', 'zone-dataset')
        imgtags = imgconfig.get('tags', {})

        # NOTE: copy kernel_version (if not specified in vmconfig)
        if 'kernel_version' not in vmconfig and 'kernel_version' in imgtags:
            vmconfig['kernel_version'] = imgtags['kernel_version']

        # NOTE: copy docker vars
        if imgtype == 'docker':
            vmconfig['docker'] = True
            vmconfig['kernel_version'] = vmconfig.get('kernel_version', '4.3.0')
            if 'internal_metadata' not in vmconfig:
                vmconfig['internal_metadata'] = {}

            for var in imgtags.get('docker:config', {}):
                val = imgtags['docker:config'][var]
                var = 'docker:{0}'.format(var.lower())

                # NOTE: skip empty values
                if not val:
                    continue

                # NOTE: skip or merge user values
                if var == 'docker:env':
                    # merge user-provided env vars over the image's: a user
                    # value replaces an image value with the same variable name
                    try:
                        val_config = json.loads(
                            vmconfig['internal_metadata'].get(var, "")
                        )
                    except ValueError as e:
                        # missing/invalid stored JSON means no user env vars
                        val_config = []

                    for config_env_var in val_config if isinstance(val_config, list) else json.loads(val_config):
                        config_env_var = config_env_var.split('=')
                        for img_env_var in val:
                            if img_env_var.startswith('{0}='.format(config_env_var[0])):
                                val.remove(img_env_var)
                        val.append('='.join(config_env_var))
                elif var in vmconfig['internal_metadata']:
                    # user-provided value wins for all other docker:* vars
                    continue

                if isinstance(val, list):
                    # NOTE: string-encoded JSON arrays
                    vmconfig['internal_metadata'][var] = json.dumps(val)
                else:
                    vmconfig['internal_metadata'][var] = val

    return vmconfig
def config_absent(name):
    '''
    Ensure configuration property is absent in /usbkey/config

    name : string
        name of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    config = _load_config()
    # nothing to do when the property is not present
    if name not in config:
        ret['result'] = True
        ret['comment'] = 'property {0} is absent'.format(name)
        return ret
    # drop the property and persist (unless running in test mode)
    ret['result'] = True
    ret['comment'] = 'property {0} deleted'.format(name)
    ret['changes'][name] = None
    del config[name]
    if not __opts__['test']:
        ret['result'] = _write_config(config)
    return ret
def source_present(name, source_type='imgapi'):
    '''
    Ensure an image source is present on the computenode

    name : string
        source url
    source_type : string
        source type (imgapi or docker)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # already configured? nothing to do
    if name in __salt__['imgadm.sources']():
        ret['result'] = True
        ret['comment'] = 'image source {0} is present'.format(name)
        return ret
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_add'](name, source_type)
        ret['result'] = name in res
    if ret['result']:
        ret['changes'][name] = 'added'
        ret['comment'] = 'image source {0} added'.format(name)
        return ret
    # report the failure, appending imgadm's error message when available
    ret['comment'] = 'image source {0} not added'.format(name)
    if 'Error' in res:
        ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
    return ret
def source_absent(name):
    '''
    Ensure an image source is absent on the computenode

    name : string
        source url
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # not configured? nothing to do
    if name not in __salt__['imgadm.sources']():
        ret['result'] = True
        ret['comment'] = 'image source {0} is absent'.format(name)
        return ret
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_delete'](name)
        ret['result'] = name not in res
    if ret['result']:
        ret['changes'][name] = 'deleted'
        ret['comment'] = 'image source {0} deleted'.format(name)
        return ret
    # report the failure, appending imgadm's error message when available
    ret['comment'] = 'image source {0} not deleted'.format(name)
    if 'Error' in res:
        ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
    return ret
def image_present(name):
    '''
    Ensure image is present on the computenode

    name : string
        uuid of image
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if _is_docker_uuid(name) and __salt__['imgadm.docker_to_uuid'](name):
        # docker image was imported
        ret['result'] = True
        ret['comment'] = 'image {0} ({1}) is present'.format(
            name,
            __salt__['imgadm.docker_to_uuid'](name),
        )
    elif name in __salt__['imgadm.list']():
        # image was already imported
        ret['result'] = True
        ret['comment'] = 'image {0} is present'.format(name)
    else:
        # add image
        if _is_docker_uuid(name):
            # NOTE: we cannot query available docker images
            available_images = [name]
        else:
            available_images = __salt__['imgadm.avail']()
        if name in available_images:
            if __opts__['test']:
                ret['result'] = True
                res = {}
                if _is_docker_uuid(name):
                    # placeholder uuid -- the real one is only known after import
                    res['00000000-0000-0000-0000-000000000000'] = name
                else:
                    res[name] = available_images[name]
            else:
                res = __salt__['imgadm.import'](name)
                if _is_uuid(name):
                    ret['result'] = (name in res)
                elif _is_docker_uuid(name):
                    # success for docker images means the name now resolves
                    ret['result'] = __salt__['imgadm.docker_to_uuid'](name) is not None
            if ret['result']:
                ret['comment'] = 'image {0} imported'.format(name)
                ret['changes'] = res
            else:
                ret['comment'] = 'image {0} was unable to be imported'.format(name)
        else:
            ret['result'] = False
            ret['comment'] = 'image {0} does not exists'.format(name)
    return ret
def image_absent(name):
    '''
    Ensure image is absent on the computenode
    name : string
        uuid of image (smartos uuid or docker ``repo:tag`` identifier)
    .. note::
        computenode.image_absent will only remove the image if it is not used
        by a vm.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # resolve the smartos uuid (docker identifiers are mapped through imgadm)
    uuid = None
    if _is_uuid(name):
        uuid = name
    if _is_docker_uuid(name):
        uuid = __salt__['imgadm.docker_to_uuid'](name)
    if not uuid or uuid not in __salt__['imgadm.list']():
        # image not imported
        ret['result'] = True
        ret['comment'] = 'image {0} is absent'.format(name)
    else:
        # check if image in use by vm
        if uuid in __salt__['vmadm.list'](order='image_uuid'):
            ret['result'] = False
            ret['comment'] = 'image {0} currently in use by a vm'.format(name)
        else:
            # delete image
            if __opts__['test']:
                ret['result'] = True
            else:
                image = __salt__['imgadm.get'](uuid)
                image_count = 0
                if image['manifest']['name'] == 'docker-layer':
                    # NOTE: docker images are made of multiple layers, loop over them
                    while image:
                        image_count += 1
                        __salt__['imgadm.delete'](image['manifest']['uuid'])
                        if 'origin' in image['manifest']:
                            # walk up the layer chain via the origin uuid
                            image = __salt__['imgadm.get'](image['manifest']['origin'])
                        else:
                            image = None
                else:
                    # NOTE: normal images can just be delete
                    __salt__['imgadm.delete'](uuid)
                ret['result'] = uuid not in __salt__['imgadm.list']()
                if image_count:
                    ret['comment'] = 'image {0} and {1} children deleted'.format(name, image_count)
                else:
                    ret['comment'] = 'image {0} deleted'.format(name)
                ret['changes'][name] = None
    return ret
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present
    .. warning::
        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # list of images to keep
    images = []
    # retrieve image_present state data for host
    for state in __salt__['state.show_lowstate']():
        # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue
        # skip if not from this state module
        if state['state'] != __virtualname__:
            continue
        # skip if not image_present
        if state['fun'] not in ['image_present']:
            continue
        # keep images installed via image_present
        if 'name' in state:
            if _is_uuid(state['name']):
                images.append(state['name'])
            elif _is_docker_uuid(state['name']):
                # map a docker repo:tag to its imported smartos uuid
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                images.append(state['name'])
    # retrieve images in use by vms
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in images:
            images.append(image_uuid)
    # purge unused images
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in images:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':
            # NOTE: docker images are made of multiple layers, loop over them
            while image:
                image_uuid = image['manifest']['uuid']
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                if 'origin' in image['manifest']:
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:
            # NOTE: normal images can just be delete
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'
    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'
    return ret
def vm_present(name, vmconfig, config=None):
    '''
    Ensure vm is present on the computenode
    name : string
        hostname of vm
    vmconfig : dict
        options to set for the vm
    config : dict
        fine grain control over vm_present
    .. note::
        The following configuration properties can be toggled in the config parameter.
          - kvm_reboot (true) - reboots of kvm zones if needed for a config update
          - auto_import (false) - automatic importing of missing images
          - auto_lx_vars (true) - copy kernel_version and docker:* variables from image
          - reprovision (false) - reprovision on image_uuid changes
          - enforce_tags (true) - false = add tags only, true = add, update, and remove tags
          - enforce_routes (true) - false = add tags only, true = add, update, and remove routes
          - enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
          - enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata
    .. note::
        State ID is used as hostname. Hostnames must be unique.
    .. note::
        If hostname is provided in vmconfig this will take precedence over the State ID.
        This allows multiple states to be applied to the same vm.
    .. note::
        The following instances should have a unique ID.
          - nic : mac
          - filesystem: target
          - disk : path or diskN for zvols
        e.g. disk0 will be the first disk added, disk1 the 2nd,...
    .. versionchanged:: 2019.2.0
        Added support for docker image uuids, added auto_lx_vars configuration, documented some missing configuration options.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # config defaults
    state_config = config if config else {}
    config = {
        'kvm_reboot': True,
        'auto_import': False,
        'auto_lx_vars': True,
        'reprovision': False,
        'enforce_tags': True,
        'enforce_routes': True,
        'enforce_internal_metadata': True,
        'enforce_customer_metadata': True,
    }
    config.update(state_config)
    log.debug('smartos.vm_present::%s::config - %s', name, config)
    # map special vmconfig parameters
    #  collections have set/remove handlers
    #  instances have add/update/remove handlers and a unique id
    vmconfig_type = {
        'collection': [
            'tags',
            'customer_metadata',
            'internal_metadata',
            'routes'
        ],
        'instance': {
            'nics': 'mac',
            'disks': 'path',
            'filesystems': 'target'
        },
        'create_only': [
            'filesystems'
        ]
    }
    # docker metadata keys that dockerinit manages and we must not clobber
    vmconfig_docker_keep = [
        'docker:id',
        'docker:restartcount',
    ]
    # docker metadata keys stored as string-encoded JSON arrays
    vmconfig_docker_array = [
        'docker:env',
        'docker:cmd',
        'docker:entrypoint',
    ]
    # parse vmconfig
    vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
    log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)
    # set hostname if needed
    if 'hostname' not in vmconfig:
        vmconfig['hostname'] = name
    # prepare image_uuid
    if 'image_uuid' in vmconfig:
        # NOTE: lookup uuid from docker uuid (normal uuid's are passed through unmodified)
        #       we must do this again if we end up importing a missing image later!
        docker_uuid = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
        vmconfig['image_uuid'] = docker_uuid if docker_uuid else vmconfig['image_uuid']
        # NOTE: import image (if missing and allowed)
        if vmconfig['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](vmconfig['image_uuid'])
                    vmconfig['image_uuid'] = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
                    if vmconfig['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])
    # prepare disk.*.image_uuid
    for disk in vmconfig['disks'] if 'disks' in vmconfig else []:
        if 'image_uuid' in disk and disk['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](disk['image_uuid'])
                    if disk['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(disk['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(disk['image_uuid'])
    # docker json-array handling: serialize list values to JSON strings
    if 'internal_metadata' in vmconfig:
        for var in vmconfig_docker_array:
            if var not in vmconfig['internal_metadata']:
                continue
            if isinstance(vmconfig['internal_metadata'][var], list):
                vmconfig['internal_metadata'][var] = json.dumps(
                    vmconfig['internal_metadata'][var]
                )
    # copy lx variables
    if vmconfig['brand'] == 'lx' and config['auto_lx_vars']:
        # NOTE: we can only copy the lx vars after the image has been imported
        vmconfig = _copy_lx_vars(vmconfig)
    # quick abort if things look wrong
    # NOTE: use explicit check for false, otherwise None also matches!
    if ret['result'] is False:
        return ret
    # check if vm exists
    if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
        # update vm
        ret['result'] = True
        # expand vmconfig: 'state' = desired, 'current' = live config,
        # 'changed' = accumulated vmadm.update arguments
        vmconfig = {
            'state': vmconfig,
            'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
            'changed': {},
            'reprovision_uuid': None
        }
        # prepare reprovision
        if 'image_uuid' in vmconfig['state']:
            vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
            vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']
        # disks need some special care
        if 'disks' in vmconfig['state']:
            new_disks = []
            for disk in vmconfig['state']['disks']:
                path = False
                if 'disks' in vmconfig['current']:
                    for cdisk in vmconfig['current']['disks']:
                        if cdisk['path'].endswith(disk['path']):
                            path = cdisk['path']
                            break
                if not path:
                    del disk['path']
                else:
                    disk['path'] = path
                new_disks.append(disk)
            vmconfig['state']['disks'] = new_disks
        # process properties
        for prop in vmconfig['state']:
            # skip special vmconfig_types
            if prop in vmconfig_type['instance'] or \
               prop in vmconfig_type['collection'] or \
               prop in vmconfig_type['create_only']:
                continue
            # skip unchanged properties
            if prop in vmconfig['current']:
                if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
                    if vmconfig['current'][prop] == vmconfig['state'][prop]:
                        continue
                else:
                    # compare scalars as strings so e.g. int vs str '5' match
                    if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
                        continue
            # add property to changeset
            vmconfig['changed'][prop] = vmconfig['state'][prop]
        # process collections
        for collection in vmconfig_type['collection']:
            # skip create only collections
            if collection in vmconfig_type['create_only']:
                continue
            # enforcement
            enforce = config['enforce_{0}'.format(collection)]
            log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)
            # dockerinit handling
            if collection == 'internal_metadata' and vmconfig['state'].get('docker', False):
                if 'internal_metadata' not in vmconfig['state']:
                    vmconfig['state']['internal_metadata'] = {}
                # preserve some docker specific metadata (added and needed by dockerinit)
                for var in vmconfig_docker_keep:
                    val = vmconfig['current'].get(collection, {}).get(var, None)
                    if val is not None:
                        vmconfig['state']['internal_metadata'][var] = val
            # process add and update for collection
            if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                for prop in vmconfig['state'][collection]:
                    # skip unchanged properties
                    if prop in vmconfig['current'][collection] and \
                       vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
                        continue
                    # skip update if not enforcing
                    if not enforce and prop in vmconfig['current'][collection]:
                        continue
                    # create set_ dict
                    if 'set_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['set_{0}'.format(collection)] = {}
                    # add property to changeset
                    vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]
            # process remove for collection
            if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
                for prop in vmconfig['current'][collection]:
                    # skip if exists in state
                    if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                        if prop in vmconfig['state'][collection]:
                            continue
                    # create remove_ array
                    if 'remove_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['remove_{0}'.format(collection)] = []
                    # remove property
                    vmconfig['changed']['remove_{0}'.format(collection)].append(prop)
        # process instances
        for instance in vmconfig_type['instance']:
            # skip create only instances
            if instance in vmconfig_type['create_only']:
                continue
            # add or update instances
            if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                for state_cfg in vmconfig['state'][instance]:
                    add_instance = True
                    # find instance with matching ids
                    for current_cfg in vmconfig['current'][instance]:
                        if vmconfig_type['instance'][instance] not in state_cfg:
                            continue
                        if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                            # ids have matched, disable add instance
                            add_instance = False
                            changed = _get_instance_changes(current_cfg, state_cfg)
                            update_cfg = {}
                            # handle changes
                            for prop in changed:
                                update_cfg[prop] = state_cfg[prop]
                            # handle new properties
                            for prop in state_cfg:
                                # skip empty props like ips, options,..
                                if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
                                    continue
                                if prop not in current_cfg:
                                    update_cfg[prop] = state_cfg[prop]
                            # update instance
                            if update_cfg:
                                # create update_ array
                                if 'update_{0}'.format(instance) not in vmconfig['changed']:
                                    vmconfig['changed']['update_{0}'.format(instance)] = []
                                update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
                                vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)
                    if add_instance:
                        # create add_ array
                        if 'add_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['add_{0}'.format(instance)] = []
                        # add instance
                        vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)
            # remove instances
            if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
                for current_cfg in vmconfig['current'][instance]:
                    remove_instance = True
                    # find instance with matching ids
                    if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                        for state_cfg in vmconfig['state'][instance]:
                            if vmconfig_type['instance'][instance] not in state_cfg:
                                continue
                            if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                                # keep instance if matched
                                remove_instance = False
                    if remove_instance:
                        # create remove_ array
                        if 'remove_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['remove_{0}'.format(instance)] = []
                        # remove instance
                        vmconfig['changed']['remove_{0}'.format(instance)].append(
                            current_cfg[vmconfig_type['instance'][instance]]
                        )
        # update vm if we have pending changes
        kvm_needs_start = False
        if not __opts__['test'] and vmconfig['changed']:
            # stop kvm if disk updates and kvm_reboot
            if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
                if 'add_disks' in vmconfig['changed'] or \
                   'update_disks' in vmconfig['changed'] or \
                   'remove_disks' in vmconfig['changed']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        kvm_needs_start = True
                        __salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')
            # do update
            rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['result'] = False
                ret['comment'] = "{0}".format(rret['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
        if ret['result']:
            if __opts__['test']:
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
            if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
                ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
                if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        __salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
                    if kvm_needs_start:
                        __salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
            else:
                ret['changes'] = {}
                ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])
            # reprovision (if required and allowed)
            if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
                if config['reprovision']:
                    rret = __salt__['vmadm.reprovision'](
                        vm=vmconfig['state']['hostname'],
                        key='hostname',
                        image=vmconfig['reprovision_uuid']
                    )
                    if not isinstance(rret, (bool)) and 'Error' in rret:
                        ret['result'] = False
                        ret['comment'] = 'vm {0} updated, reprovision failed'.format(
                            vmconfig['state']['hostname']
                        )
                    else:
                        ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
                        if vmconfig['state']['hostname'] not in ret['changes']:
                            ret['changes'][vmconfig['state']['hostname']] = {}
                        ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
                else:
                    log.warning('smartos.vm_present::%s::reprovision - '
                                'image_uuid in state does not match current, '
                                'reprovision not allowed',
                                name)
        else:
            ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['comment'] = "{0}".format(rret['Error'])
    else:
        # check required image installed
        ret['result'] = True
        # disks need some special care
        if 'disks' in vmconfig:
            new_disks = []
            for disk in vmconfig['disks']:
                if 'path' in disk:
                    del disk['path']
                new_disks.append(disk)
            vmconfig['disks'] = new_disks
        # create vm
        if ret['result']:
            uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True
            if not isinstance(uuid, (bool)) and 'Error' in uuid:
                ret['result'] = False
                ret['comment'] = "{0}".format(uuid['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['hostname']] = vmconfig
                ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])
    return ret
def vm_absent(name, archive=False):
    '''
    Ensure vm is absent on the computenode
    name : string
        hostname of vm
    archive : boolean
        toggle archiving of vm on removal
    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    if name not in __salt__['vmadm.list'](order='hostname'):
        # nothing to do, the vm is already gone
        ret['result'] = True
        ret['comment'] = 'vm {0} is absent'.format(name)
        return ret
    # remove the vm (no-op under test mode)
    if __opts__['test']:
        ret['result'] = True
    else:
        if archive:
            # flag the vm so vmadm archives it on deletion
            __salt__['vmadm.update'](vm=name, key='hostname', archive_on_delete=True)
        ret['result'] = __salt__['vmadm.delete'](name, key='hostname')
    # vmadm.delete returns a dict carrying 'Error' on failure
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to delete vm {0}'.format(name)
    else:
        ret['comment'] = 'vm {0} deleted'.format(name)
        ret['changes'][name] = None
    return ret
def vm_running(name):
    '''
    Ensure vm is in the running state on the computenode
    name : string
        hostname of vm
    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    running_vms = __salt__['vmadm.list'](order='hostname', search='state=running')
    if name in running_vms:
        # already in the desired state
        ret['result'] = True
        ret['comment'] = 'vm {0} already running'.format(name)
        return ret
    # start the vm (no-op under test mode)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.start'](name, key='hostname')
    # vmadm.start returns a dict carrying 'Error' on failure
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to start {0}'.format(name)
    else:
        ret['changes'][name] = 'running'
        ret['comment'] = 'vm {0} started'.format(name)
    return ret
def vm_stopped(name):
    '''
    Ensure vm is in the stopped state on the computenode
    name : string
        hostname of vm
    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    stopped_vms = __salt__['vmadm.list'](order='hostname', search='state=stopped')
    if name in stopped_vms:
        # already in the desired state
        ret['result'] = True
        ret['comment'] = 'vm {0} already stopped'.format(name)
        return ret
    # stop the vm (no-op under test mode)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.stop'](name, key='hostname')
    # vmadm.stop returns a dict carrying 'Error' on failure
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to stop {0}'.format(name)
    else:
        ret['changes'][name] = 'stopped'
        ret['comment'] = 'vm {0} stopped'.format(name)
    return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
saltstack/salt
|
salt/states/smartos.py
|
config_absent
|
python
|
def config_absent(name):
'''
Ensure configuration property is absent in /usbkey/config
name : string
name of property
'''
name = name.lower()
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
# load configuration
config = _load_config()
if name in config:
# delete property
ret['result'] = True
ret['comment'] = 'property {0} deleted'.format(name)
ret['changes'][name] = None
del config[name]
else:
# we're good
ret['result'] = True
ret['comment'] = 'property {0} is absent'.format(name)
# apply change if needed
if not __opts__['test'] and ret['changes']:
ret['result'] = _write_config(config)
return ret
|
Ensure configuration property is absent in /usbkey/config
name : string
name of property
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/smartos.py#L379-L411
|
[
"def _load_config():\n '''\n Loads and parses /usbkey/config\n '''\n config = {}\n\n if os.path.isfile('/usbkey/config'):\n with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:\n for optval in config_file:\n optval = salt.utils.stringutils.to_unicode(optval)\n if optval[0] == '#':\n continue\n if '=' not in optval:\n continue\n optval = optval.split('=')\n config[optval[0].lower()] = optval[1].strip().strip('\"')\n log.debug('smartos.config - read /usbkey/config: %s', config)\n return config\n",
"def _write_config(config):\n '''\n writes /usbkey/config\n '''\n try:\n with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:\n config_file.write(\"#\\n# This file was generated by salt\\n#\\n\")\n for prop in salt.utils.odict.OrderedDict(sorted(config.items())):\n if ' ' in six.text_type(config[prop]):\n if not config[prop].startswith('\"') or not config[prop].endswith('\"'):\n config[prop] = '\"{0}\"'.format(config[prop])\n config_file.write(\n salt.utils.stringutils.to_str(\n \"{0}={1}\\n\".format(prop, config[prop])\n )\n )\n log.debug('smartos.config - wrote /usbkey/config: %s', config)\n except IOError:\n return False\n\n return True\n"
] |
# -*- coding: utf-8 -*-
'''
Management of SmartOS Standalone Compute Nodes
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: vmadm, imgadm
:platform: smartos
.. versionadded:: 2016.3.0
.. code-block:: yaml
vmtest.example.org:
smartos.vm_present:
- config:
reprovision: true
- vmconfig:
image_uuid: c02a2044-c1bd-11e4-bd8c-dfc1db8b0182
brand: joyent
alias: vmtest
quota: 5
max_physical_memory: 512
tags:
label: 'test vm'
owner: 'sjorge'
nics:
"82:1b:8e:49:e9:12":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.123/16
- 192.168.2.123/24
vlan_id: 10
"82:1b:8e:49:e9:13":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
filesystems:
"/bigdata":
source: "/bulk/data"
type: lofs
options:
- ro
- nodevices
kvmtest.example.org:
smartos.vm_present:
- vmconfig:
brand: kvm
alias: kvmtest
cpu_type: host
ram: 512
vnc_port: 9
tags:
label: 'test kvm'
owner: 'sjorge'
disks:
disk0
size: 2048
model: virtio
compression: lz4
boot: true
nics:
"82:1b:8e:49:e9:15":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
docker.example.org:
smartos.vm_present:
- config:
auto_import: true
reprovision: true
- vmconfig:
image_uuid: emby/embyserver:latest
brand: lx
alias: mydockervm
quota: 5
max_physical_memory: 1024
tags:
label: 'my emby docker'
owner: 'sjorge'
resolvers:
- 172.16.1.1
nics:
"82:1b:8e:49:e9:18":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.118/24
vlan_id: 10
filesystems:
        "/config":
source: "/vmdata/emby_config"
type: lofs
options:
- nodevices
cleanup_images:
smartos.image_vacuum
.. note::
Keep in mind that when removing properties from vmconfig they will not get
removed from the vm's current configuration, except for nics, disk, tags, ...
they get removed via add_*, set_*, update_*, and remove_*. Properties must
be manually reset to their default value.
The same behavior as when using 'vmadm update'.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import json
import logging
import os
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.files
import salt.utils.odict
import salt.utils.stringutils
# Import 3rd party libs
from salt.ext import six
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = 'smartos'
def __virtual__():
    '''
    Provides smartos state provided for SmartOS
    '''
    # both the vmadm and imgadm execution modules must be loadable
    have_modules = 'vmadm.create' in __salt__ and 'imgadm.list' in __salt__
    if not have_modules:
        reason = '{0} state module can only be loaded on SmartOS compute nodes'.format(
            __virtualname__
        )
        return (False, reason)
    return True
def _split_docker_uuid(uuid):
'''
Split a smartos docker uuid into repo and tag
'''
if uuid:
uuid = uuid.split(':')
if len(uuid) == 2:
tag = uuid[1]
repo = uuid[0]
return repo, tag
return None, None
def _is_uuid(uuid):
'''
Check if uuid is a valid smartos uuid
Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8
'''
if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:
return True
return False
def _is_docker_uuid(uuid):
    '''
    Check if uuid is a valid smartos docker uuid
    Example plexinc/pms-docker:plexpass
    '''
    repo, tag = _split_docker_uuid(uuid)
    # valid when the repo:tag split yielded anything at all
    return bool(repo or tag)
def _load_config():
    '''
    Loads and parses /usbkey/config
    Returns a dict mapping lowercased property names to their unquoted
    string values; empty when the file does not exist.
    '''
    config = {}
    if os.path.isfile('/usbkey/config'):
        with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:
            for optval in config_file:
                optval = salt.utils.stringutils.to_unicode(optval)
                # skip comment lines
                if optval[0] == '#':
                    continue
                # skip lines that are not key=value pairs
                if '=' not in optval:
                    continue
                optval = optval.split('=')
                # NOTE(review): only optval[1] is kept, so values containing
                # '=' are truncated at the second '=' -- confirm intended
                config[optval[0].lower()] = optval[1].strip().strip('"')
    log.debug('smartos.config - read /usbkey/config: %s', config)
    return config
def _write_config(config):
    '''
    writes /usbkey/config
    Returns True on success, False when the file could not be written.
    '''
    try:
        # atomic_open writes to a temporary file and renames it into place
        with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:
            config_file.write("#\n# This file was generated by salt\n#\n")
            for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
                # quote values containing spaces (unless already quoted)
                if ' ' in six.text_type(config[prop]):
                    if not config[prop].startswith('"') or not config[prop].endswith('"'):
                        config[prop] = '"{0}"'.format(config[prop])
                config_file.write(
                    salt.utils.stringutils.to_str(
                        "{0}={1}\n".format(prop, config[prop])
                    )
                )
        log.debug('smartos.config - wrote /usbkey/config: %s', config)
    except IOError:
        return False
    return True
def _parse_vmconfig(config, instances):
    '''
    Parse vm_present vm config
    Flattens instance mappings (nics, disks, filesystems) from a dict keyed
    by their unique id into a list of dicts carrying that id as a property.
    Returns None when config is not an OrderedDict.
    '''
    vmconfig = None
    if isinstance(config, (salt.utils.odict.OrderedDict)):
        vmconfig = salt.utils.odict.OrderedDict()
        for prop in config:
            if prop not in instances:
                # plain property, copied as-is
                vmconfig[prop] = config[prop]
            else:
                if not isinstance(config[prop], (salt.utils.odict.OrderedDict)):
                    continue
                vmconfig[prop] = []
                for instance in config[prop]:
                    instance_config = config[prop][instance]
                    # inject the unique id (e.g. mac, path, target) as a property
                    instance_config[instances[prop]] = instance
                    ## some property are lowercase
                    if 'mac' in instance_config:
                        instance_config['mac'] = instance_config['mac'].lower()
                    vmconfig[prop].append(instance_config)
    else:
        log.error('smartos.vm_present::parse_vmconfig - failed to parse')
    return vmconfig
def _get_instance_changes(current, state):
    '''
    Return the modified properties between two instance configs.
    current : dict
        current (live) instance configuration
    state : dict
        desired instance configuration
    Returns the ``salt.utils.data.compare_dicts`` result with any change
    whose old or new value is the empty string filtered out.
    '''
    # compare configs once (the original called compare_dicts twice and kept
    # two unused key-set variables)
    changed = salt.utils.data.compare_dicts(current, state)
    # iterate over a snapshot of the keys so deleting from 'changed' is safe
    for change in list(changed):
        if changed[change]['old'] == "" or changed[change]['new'] == "":
            del changed[change]
    return changed
def _copy_lx_vars(vmconfig):
    '''
    Copy kernel_version and docker:* tags from the source image into vmconfig.
    Only applies when vmconfig carries an image_uuid; user-supplied values
    take precedence over the image's values.
    '''
    # NOTE: documentation on dockerinit: https://github.com/joyent/smartos-live/blob/master/src/dockerinit/README.md
    if 'image_uuid' in vmconfig:
        # NOTE: retrieve tags and type from image
        imgconfig = __salt__['imgadm.get'](vmconfig['image_uuid']).get('manifest', {})
        imgtype = imgconfig.get('type', 'zone-dataset')
        imgtags = imgconfig.get('tags', {})
        # NOTE: copy kernel_version (if not specified in vmconfig)
        if 'kernel_version' not in vmconfig and 'kernel_version' in imgtags:
            vmconfig['kernel_version'] = imgtags['kernel_version']
        # NOTE: copy docker vars
        if imgtype == 'docker':
            vmconfig['docker'] = True
            vmconfig['kernel_version'] = vmconfig.get('kernel_version', '4.3.0')
            if 'internal_metadata' not in vmconfig:
                vmconfig['internal_metadata'] = {}
            for var in imgtags.get('docker:config', {}):
                val = imgtags['docker:config'][var]
                var = 'docker:{0}'.format(var.lower())
                # NOTE: skip empty values
                if not val:
                    continue
                # NOTE: skip or merge user values
                if var == 'docker:env':
                    try:
                        val_config = json.loads(
                            vmconfig['internal_metadata'].get(var, "")
                        )
                    except ValueError as e:
                        # no (valid) user-supplied docker:env, nothing to merge
                        val_config = []
                    for config_env_var in val_config if isinstance(val_config, list) else json.loads(val_config):
                        config_env_var = config_env_var.split('=')
                        # user-supplied env vars win: drop the image's entry first
                        for img_env_var in val:
                            if img_env_var.startswith('{0}='.format(config_env_var[0])):
                                val.remove(img_env_var)
                        val.append('='.join(config_env_var))
                elif var in vmconfig['internal_metadata']:
                    continue
                if isinstance(val, list):
                    # NOTE: string-encoded JSON arrays
                    vmconfig['internal_metadata'][var] = json.dumps(val)
                else:
                    vmconfig['internal_metadata'][var] = val
    return vmconfig
def config_present(name, value):
    '''
    Ensure configuration property is set to value in /usbkey/config
    name : string
        name of property
    value : string
        value of property
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    # load current configuration
    config = _load_config()
    # normalize booleans and falsy values to their string form
    if isinstance(value, bool):
        value = 'true' if value else 'false'
    if not value:
        value = ""
    if name not in config:
        # add property
        ret['result'] = True
        ret['comment'] = 'added property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value
    elif six.text_type(config[name]) == six.text_type(value):
        # nothing to do
        ret['result'] = True
        ret['comment'] = 'property {0} already has value "{1}"'.format(name, value)
    else:
        # update property
        ret['result'] = True
        ret['comment'] = 'updated property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value
    # persist the change unless running in test mode
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)
    return ret
def source_present(name, source_type='imgapi'):
    '''
    Ensure an image source is present on the computenode
    name : string
        source url
    source_type : string
        source type (imgapi or docker)
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    if name in __salt__['imgadm.sources']():
        # source is present
        ret['result'] = True
        ret['comment'] = 'image source {0} is present'.format(name)
        return ret
    # add the new source (skipped under test mode)
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_add'](name, source_type)
        ret['result'] = (name in res)
    if ret['result']:
        ret['comment'] = 'image source {0} added'.format(name)
        ret['changes'][name] = 'added'
    else:
        ret['comment'] = 'image source {0} not added'.format(name)
        if 'Error' in res:
            # surface imgadm's error text in the state comment
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
    return ret
def source_absent(name):
    '''
    Ensure an image source is absent on the computenode
    name : string
        source url
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    if name not in __salt__['imgadm.sources']():
        # already gone
        ret['result'] = True
        ret['comment'] = 'image source {0} is absent'.format(name)
        return ret
    # delete the source (skipped under test mode)
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_delete'](name)
        ret['result'] = (name not in res)
    if ret['result']:
        ret['comment'] = 'image source {0} deleted'.format(name)
        ret['changes'][name] = 'deleted'
    else:
        ret['comment'] = 'image source {0} not deleted'.format(name)
        if 'Error' in res:
            # surface imgadm's error text in the state comment
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
    return ret
def image_present(name):
    '''
    Ensure image is present on the computenode

    name : string
        uuid of image
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if _is_docker_uuid(name) and __salt__['imgadm.docker_to_uuid'](name):
        # docker image was imported
        ret['result'] = True
        ret['comment'] = 'image {0} ({1}) is present'.format(
            name,
            __salt__['imgadm.docker_to_uuid'](name),
        )
    elif name in __salt__['imgadm.list']():
        # image was already imported
        ret['result'] = True
        ret['comment'] = 'image {0} is present'.format(name)
    else:
        # add image
        if _is_docker_uuid(name):
            # NOTE: we cannot query available docker images
            available_images = [name]
        else:
            available_images = __salt__['imgadm.avail']()

        if name in available_images:
            if __opts__['test']:
                # dry-run: fabricate a plausible import result for the changeset
                ret['result'] = True
                res = {}
                if _is_docker_uuid(name):
                    # docker uuids resolve to a smartos uuid only after import
                    res['00000000-0000-0000-0000-000000000000'] = name
                else:
                    res[name] = available_images[name]
            else:
                res = __salt__['imgadm.import'](name)
                if _is_uuid(name):
                    ret['result'] = (name in res)
                elif _is_docker_uuid(name):
                    # for docker images success is "the name now maps to a uuid"
                    ret['result'] = __salt__['imgadm.docker_to_uuid'](name) is not None
            if ret['result']:
                ret['comment'] = 'image {0} imported'.format(name)
                ret['changes'] = res
            else:
                ret['comment'] = 'image {0} was unable to be imported'.format(name)
        else:
            ret['result'] = False
            ret['comment'] = 'image {0} does not exists'.format(name)

    return ret
def image_absent(name):
    '''
    Ensure image is absent on the computenode

    name : string
        uuid of image

    .. note::
        computenode.image_absent will only remove the image if it is not used
        by a vm.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # resolve name to a smartos uuid (docker names map onto a regular uuid)
    uuid = None
    if _is_uuid(name):
        uuid = name
    if _is_docker_uuid(name):
        uuid = __salt__['imgadm.docker_to_uuid'](name)

    if not uuid or uuid not in __salt__['imgadm.list']():
        # image not imported
        ret['result'] = True
        ret['comment'] = 'image {0} is absent'.format(name)
    else:
        # check if image in use by vm
        if uuid in __salt__['vmadm.list'](order='image_uuid'):
            ret['result'] = False
            ret['comment'] = 'image {0} currently in use by a vm'.format(name)
        else:
            # delete image
            if __opts__['test']:
                ret['result'] = True
            else:
                image = __salt__['imgadm.get'](uuid)
                image_count = 0
                if image['manifest']['name'] == 'docker-layer':
                    # NOTE: docker images are made of multiple layers, loop over them
                    # walk the 'origin' chain, deleting each layer in turn
                    while image:
                        image_count += 1
                        __salt__['imgadm.delete'](image['manifest']['uuid'])
                        if 'origin' in image['manifest']:
                            image = __salt__['imgadm.get'](image['manifest']['origin'])
                        else:
                            image = None
                else:
                    # NOTE: normal images can just be delete
                    __salt__['imgadm.delete'](uuid)

                # verify the deletion actually took effect
                ret['result'] = uuid not in __salt__['imgadm.list']()
                if image_count:
                    ret['comment'] = 'image {0} and {1} children deleted'.format(name, image_count)
                else:
                    ret['comment'] = 'image {0} deleted'.format(name)
                ret['changes'][name] = None

    return ret
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present

    .. warning::
        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # list of images to keep
    images = []

    # retrieve image_present state data for host
    for state in __salt__['state.show_lowstate']():
        # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue

        # skip if not from this state module
        if state['state'] != __virtualname__:
            continue
        # skip if not image_present
        if state['fun'] not in ['image_present']:
            continue
        # keep images installed via image_present
        if 'name' in state:
            if _is_uuid(state['name']):
                images.append(state['name'])
            elif _is_docker_uuid(state['name']):
                # docker names must first be resolved to a smartos uuid
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                images.append(state['name'])

    # retrieve images in use by vms
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in images:
            images.append(image_uuid)

    # purge unused images
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in images:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':
            # NOTE: docker images are made of multiple layers, loop over them
            # follow the 'origin' chain so every layer gets removed
            while image:
                image_uuid = image['manifest']['uuid']
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                if 'origin' in image['manifest']:
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:
            # NOTE: normal images can just be delete
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'

    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'

    return ret
def vm_present(name, vmconfig, config=None):
    '''
    Ensure vm is present on the computenode

    name : string
        hostname of vm
    vmconfig : dict
        options to set for the vm
    config : dict
        fine grain control over vm_present

    .. note::
        The following configuration properties can be toggled in the config parameter.
          - kvm_reboot (true)                - reboots of kvm zones if needed for a config update
          - auto_import (false)              - automatic importing of missing images
          - auto_lx_vars (true)              - copy kernel_version and docker:* variables from image
          - reprovision (false)              - reprovision on image_uuid changes
          - enforce_tags (true)              - false = add tags only, true = add, update, and remove tags
          - enforce_routes (true)            - false = add tags only, true = add, update, and remove routes
          - enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
          - enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata

    .. note::
        State ID is used as hostname. Hostnames must be unique.

    .. note::
        If hostname is provided in vmconfig this will take precedence over the State ID.
        This allows multiple states to be applied to the same vm.

    .. note::
        The following instances should have a unique ID.
          - nic : mac
          - filesystem: target
          - disk : path or diskN for zvols

        e.g. disk0 will be the first disk added, disk1 the 2nd,...

    .. versionchanged:: 2019.2.0
        Added support for docker image uuids, added auto_lx_vars configuration, documented some missing configuration options.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # config defaults
    state_config = config if config else {}
    config = {
        'kvm_reboot': True,
        'auto_import': False,
        'auto_lx_vars': True,
        'reprovision': False,
        'enforce_tags': True,
        'enforce_routes': True,
        'enforce_internal_metadata': True,
        'enforce_customer_metadata': True,
    }
    config.update(state_config)
    log.debug('smartos.vm_present::%s::config - %s', name, config)

    # map special vmconfig parameters
    #  collections have set/remove handlers
    #  instances have add/update/remove handlers and a unique id
    vmconfig_type = {
        'collection': [
            'tags',
            'customer_metadata',
            'internal_metadata',
            'routes'
        ],
        'instance': {
            'nics': 'mac',
            'disks': 'path',
            'filesystems': 'target'
        },
        'create_only': [
            'filesystems'
        ]
    }
    # internal_metadata keys that dockerinit manages and we must not clobber
    vmconfig_docker_keep = [
        'docker:id',
        'docker:restartcount',
    ]
    # internal_metadata keys stored as string-encoded JSON arrays
    vmconfig_docker_array = [
        'docker:env',
        'docker:cmd',
        'docker:entrypoint',
    ]

    # parse vmconfig
    vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
    log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)

    # set hostname if needed
    if 'hostname' not in vmconfig:
        vmconfig['hostname'] = name

    # prepare image_uuid
    if 'image_uuid' in vmconfig:
        # NOTE: lookup uuid from docker uuid (normal uuid's are passed through unmodified)
        #       we must do this again if we end up importing a missing image later!
        docker_uuid = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
        vmconfig['image_uuid'] = docker_uuid if docker_uuid else vmconfig['image_uuid']

        # NOTE: import image (if missing and allowed)
        if vmconfig['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](vmconfig['image_uuid'])
                    vmconfig['image_uuid'] = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
                    if vmconfig['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])

    # prepare disk.*.image_uuid
    for disk in vmconfig['disks'] if 'disks' in vmconfig else []:
        if 'image_uuid' in disk and disk['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](disk['image_uuid'])
                    if disk['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(disk['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(disk['image_uuid'])

    # docker json-array handling
    if 'internal_metadata' in vmconfig:
        for var in vmconfig_docker_array:
            if var not in vmconfig['internal_metadata']:
                continue
            if isinstance(vmconfig['internal_metadata'][var], list):
                vmconfig['internal_metadata'][var] = json.dumps(
                    vmconfig['internal_metadata'][var]
                )

    # copy lx variables
    if vmconfig['brand'] == 'lx' and config['auto_lx_vars']:
        # NOTE: we can only copy the lx vars after the image has been imported
        vmconfig = _copy_lx_vars(vmconfig)

    # quick abort if things look wrong
    # NOTE: use explicit check for false, otherwise None also matches!
    if ret['result'] is False:
        return ret

    # check if vm exists
    if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
        # update vm
        ret['result'] = True

        # expand vmconfig
        # 'state' is the desired config, 'current' what vmadm reports now,
        # 'changed' accumulates the delta that will be fed to vmadm.update
        vmconfig = {
            'state': vmconfig,
            'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
            'changed': {},
            'reprovision_uuid': None
        }

        # prepare reprovision
        if 'image_uuid' in vmconfig['state']:
            vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
            vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']

        # disks need some special care
        if 'disks' in vmconfig['state']:
            new_disks = []
            for disk in vmconfig['state']['disks']:
                path = False
                if 'disks' in vmconfig['current']:
                    for cdisk in vmconfig['current']['disks']:
                        if cdisk['path'].endswith(disk['path']):
                            path = cdisk['path']
                            break
                if not path:
                    del disk['path']
                else:
                    disk['path'] = path
                new_disks.append(disk)
            vmconfig['state']['disks'] = new_disks

        # process properties
        for prop in vmconfig['state']:
            # skip special vmconfig_types
            if prop in vmconfig_type['instance'] or \
                prop in vmconfig_type['collection'] or \
                prop in vmconfig_type['create_only']:
                continue

            # skip unchanged properties
            if prop in vmconfig['current']:
                if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
                    if vmconfig['current'][prop] == vmconfig['state'][prop]:
                        continue
                else:
                    # compare scalars as strings so e.g. 5 matches '5'
                    if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
                        continue

            # add property to changeset
            vmconfig['changed'][prop] = vmconfig['state'][prop]

        # process collections
        for collection in vmconfig_type['collection']:
            # skip create only collections
            if collection in vmconfig_type['create_only']:
                continue

            # enforcement
            enforce = config['enforce_{0}'.format(collection)]
            log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)

            # dockerinit handling
            if collection == 'internal_metadata' and vmconfig['state'].get('docker', False):
                if 'internal_metadata' not in vmconfig['state']:
                    vmconfig['state']['internal_metadata'] = {}

                # preserve some docker specific metadata (added and needed by dockerinit)
                for var in vmconfig_docker_keep:
                    val = vmconfig['current'].get(collection, {}).get(var, None)
                    if val is not None:
                        vmconfig['state']['internal_metadata'][var] = val

            # process add and update for collection
            if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                for prop in vmconfig['state'][collection]:
                    # skip unchanged properties
                    if prop in vmconfig['current'][collection] and \
                        vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
                        continue

                    # skip update if not enforcing
                    if not enforce and prop in vmconfig['current'][collection]:
                        continue

                    # create set_ dict
                    if 'set_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['set_{0}'.format(collection)] = {}

                    # add property to changeset
                    vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]

            # process remove for collection
            if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
                for prop in vmconfig['current'][collection]:
                    # skip if exists in state
                    if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                        if prop in vmconfig['state'][collection]:
                            continue

                    # create remove_ array
                    if 'remove_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['remove_{0}'.format(collection)] = []

                    # remove property
                    vmconfig['changed']['remove_{0}'.format(collection)].append(prop)

        # process instances
        for instance in vmconfig_type['instance']:
            # skip create only instances
            if instance in vmconfig_type['create_only']:
                continue

            # add or update instances
            if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                for state_cfg in vmconfig['state'][instance]:
                    add_instance = True

                    # find instance with matching ids
                    for current_cfg in vmconfig['current'][instance]:
                        if vmconfig_type['instance'][instance] not in state_cfg:
                            continue

                        if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                            # ids have matched, disable add instance
                            add_instance = False

                            changed = _get_instance_changes(current_cfg, state_cfg)
                            update_cfg = {}

                            # handle changes
                            for prop in changed:
                                update_cfg[prop] = state_cfg[prop]

                            # handle new properties
                            for prop in state_cfg:
                                # skip empty props like ips, options,..
                                if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
                                    continue

                                if prop not in current_cfg:
                                    update_cfg[prop] = state_cfg[prop]

                            # update instance
                            if update_cfg:
                                # create update_ array
                                if 'update_{0}'.format(instance) not in vmconfig['changed']:
                                    vmconfig['changed']['update_{0}'.format(instance)] = []

                                update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
                                vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)

                    if add_instance:
                        # create add_ array
                        if 'add_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['add_{0}'.format(instance)] = []

                        # add instance
                        vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)

            # remove instances
            if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
                for current_cfg in vmconfig['current'][instance]:
                    remove_instance = True

                    # find instance with matching ids
                    if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                        for state_cfg in vmconfig['state'][instance]:
                            if vmconfig_type['instance'][instance] not in state_cfg:
                                continue

                            if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                                # keep instance if matched
                                remove_instance = False

                    if remove_instance:
                        # create remove_ array
                        if 'remove_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['remove_{0}'.format(instance)] = []

                        # remove instance
                        vmconfig['changed']['remove_{0}'.format(instance)].append(
                            current_cfg[vmconfig_type['instance'][instance]]
                        )

        # update vm if we have pending changes
        kvm_needs_start = False
        if not __opts__['test'] and vmconfig['changed']:
            # stop kvm if disk updates and kvm_reboot
            # (kvm disk changes require the zone to be down)
            if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
                if 'add_disks' in vmconfig['changed'] or \
                    'update_disks' in vmconfig['changed'] or \
                    'remove_disks' in vmconfig['changed']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        kvm_needs_start = True
                        __salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')

            # do update
            rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['result'] = False
                ret['comment'] = "{0}".format(rret['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']

        if ret['result']:
            if __opts__['test']:
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']

            if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
                ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
                if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        __salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
                if kvm_needs_start:
                    __salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
            else:
                ret['changes'] = {}
                ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])

            # reprovision (if required and allowed)
            if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
                if config['reprovision']:
                    rret = __salt__['vmadm.reprovision'](
                        vm=vmconfig['state']['hostname'],
                        key='hostname',
                        image=vmconfig['reprovision_uuid']
                    )
                    if not isinstance(rret, (bool)) and 'Error' in rret:
                        ret['result'] = False
                        ret['comment'] = 'vm {0} updated, reprovision failed'.format(
                            vmconfig['state']['hostname']
                        )
                    else:
                        ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
                        if vmconfig['state']['hostname'] not in ret['changes']:
                            ret['changes'][vmconfig['state']['hostname']] = {}
                        ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
                else:
                    log.warning('smartos.vm_present::%s::reprovision - '
                                'image_uuid in state does not match current, '
                                'reprovision not allowed',
                                name)
        else:
            ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['comment'] = "{0}".format(rret['Error'])
    else:
        # check required image installed
        ret['result'] = True

        # disks need some special care
        # on create the path is assigned by vmadm, so strip any user value
        if 'disks' in vmconfig:
            new_disks = []
            for disk in vmconfig['disks']:
                if 'path' in disk:
                    del disk['path']
                new_disks.append(disk)
            vmconfig['disks'] = new_disks

        # create vm
        if ret['result']:
            uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True
            if not isinstance(uuid, (bool)) and 'Error' in uuid:
                ret['result'] = False
                ret['comment'] = "{0}".format(uuid['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['hostname']] = vmconfig
                ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])

    return ret
def vm_absent(name, archive=False):
    '''
    Ensure vm is absent on the computenode

    name : string
        hostname of vm
    archive : boolean
        toggle archiving of vm on removal

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name not in __salt__['vmadm.list'](order='hostname'):
        # we're good
        ret['result'] = True
        ret['comment'] = 'vm {0} is absent'.format(name)
    else:
        # delete vm
        if not __opts__['test']:
            # set archive to true if needed
            if archive:
                __salt__['vmadm.update'](vm=name, key='hostname', archive_on_delete=True)

            ret['result'] = __salt__['vmadm.delete'](name, key='hostname')
        else:
            ret['result'] = True

        # vmadm.delete returns True on success, or a dict with 'Error' on failure
        if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
            ret['result'] = False
            ret['comment'] = 'failed to delete vm {0}'.format(name)
        else:
            ret['comment'] = 'vm {0} deleted'.format(name)
            ret['changes'][name] = None

    return ret
def vm_running(name):
    '''
    Ensure vm is in the running state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # already running? then there is nothing to do
    if name in __salt__['vmadm.list'](order='hostname', search='state=running'):
        ret['result'] = True
        ret['comment'] = 'vm {0} already running'.format(name)
        return ret

    # start the vm (skipped under test=True)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.start'](name, key='hostname')

    # vmadm.start returns True on success, or a dict with 'Error' on failure
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to start {0}'.format(name)
    else:
        ret['changes'][name] = 'running'
        ret['comment'] = 'vm {0} started'.format(name)

    return ret
def vm_stopped(name):
    '''
    Ensure vm is in the stopped state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # already stopped? then there is nothing to do
    if name in __salt__['vmadm.list'](order='hostname', search='state=stopped'):
        ret['result'] = True
        ret['comment'] = 'vm {0} already stopped'.format(name)
        return ret

    # stop the vm (skipped under test=True)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.stop'](name, key='hostname')

    # vmadm.stop returns True on success, or a dict with 'Error' on failure
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to stop {0}'.format(name)
    else:
        ret['changes'][name] = 'stopped'
        ret['comment'] = 'vm {0} stopped'.format(name)

    return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
saltstack/salt
|
salt/states/smartos.py
|
source_present
|
python
|
def source_present(name, source_type='imgapi'):
'''
Ensure an image source is present on the computenode
name : string
source url
source_type : string
source type (imgapi or docker)
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if name in __salt__['imgadm.sources']():
# source is present
ret['result'] = True
ret['comment'] = 'image source {0} is present'.format(name)
else:
# add new source
if __opts__['test']:
res = {}
ret['result'] = True
else:
res = __salt__['imgadm.source_add'](name, source_type)
ret['result'] = (name in res)
if ret['result']:
ret['comment'] = 'image source {0} added'.format(name)
ret['changes'][name] = 'added'
else:
ret['comment'] = 'image source {0} not added'.format(name)
if 'Error' in res:
ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
return ret
|
Ensure an image source is present on the computenode
name : string
source url
source_type : string
source type (imgapi or docker)
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/smartos.py#L414-L449
| null |
# -*- coding: utf-8 -*-
'''
Management of SmartOS Standalone Compute Nodes
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: vmadm, imgadm
:platform: smartos
.. versionadded:: 2016.3.0
.. code-block:: yaml
vmtest.example.org:
smartos.vm_present:
- config:
reprovision: true
- vmconfig:
image_uuid: c02a2044-c1bd-11e4-bd8c-dfc1db8b0182
brand: joyent
alias: vmtest
quota: 5
max_physical_memory: 512
tags:
label: 'test vm'
owner: 'sjorge'
nics:
"82:1b:8e:49:e9:12":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.123/16
- 192.168.2.123/24
vlan_id: 10
"82:1b:8e:49:e9:13":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
filesystems:
"/bigdata":
source: "/bulk/data"
type: lofs
options:
- ro
- nodevices
kvmtest.example.org:
smartos.vm_present:
- vmconfig:
brand: kvm
alias: kvmtest
cpu_type: host
ram: 512
vnc_port: 9
tags:
label: 'test kvm'
owner: 'sjorge'
disks:
disk0
size: 2048
model: virtio
compression: lz4
boot: true
nics:
"82:1b:8e:49:e9:15":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
docker.example.org:
smartos.vm_present:
- config:
auto_import: true
reprovision: true
- vmconfig:
image_uuid: emby/embyserver:latest
brand: lx
alias: mydockervm
quota: 5
max_physical_memory: 1024
tags:
label: 'my emby docker'
owner: 'sjorge'
resolvers:
- 172.16.1.1
nics:
"82:1b:8e:49:e9:18":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.118/24
vlan_id: 10
filesystems:
        "/config":
source: "/vmdata/emby_config"
type: lofs
options:
- nodevices
cleanup_images:
smartos.image_vacuum
.. note::
Keep in mind that when removing properties from vmconfig they will not get
removed from the vm's current configuration, except for nics, disk, tags, ...
they get removed via add_*, set_*, update_*, and remove_*. Properties must
be manually reset to their default value.
The same behavior as when using 'vmadm update'.
'''
from __future__ import absolute_import, unicode_literals, print_function

# Import Python libs
import json
import logging
import os

# Import Salt libs
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.files
import salt.utils.odict
import salt.utils.stringutils

# Import 3rd party libs
from salt.ext import six
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = 'smartos'
def __virtual__():
    '''
    Provides smartos state provided for SmartOS
    '''
    # both execution modules must be available for this state module to load
    if 'vmadm.create' not in __salt__ or 'imgadm.list' not in __salt__:
        return (
            False,
            '{0} state module can only be loaded on SmartOS compute nodes'.format(
                __virtualname__
            )
        )
    return True
def _split_docker_uuid(uuid):
'''
Split a smartos docker uuid into repo and tag
'''
if uuid:
uuid = uuid.split(':')
if len(uuid) == 2:
tag = uuid[1]
repo = uuid[0]
return repo, tag
return None, None
def _is_uuid(uuid):
'''
Check if uuid is a valid smartos uuid
Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8
'''
if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:
return True
return False
def _is_docker_uuid(uuid):
'''
Check if uuid is a valid smartos docker uuid
Example plexinc/pms-docker:plexpass
'''
repo, tag = _split_docker_uuid(uuid)
return not (not repo and not tag)
def _load_config():
    '''
    Load and parse /usbkey/config.

    Returns a dict mapping lower-cased property names to their unquoted
    string values. Comment lines and lines without '=' are skipped; an
    empty dict is returned when /usbkey/config does not exist.
    '''
    config = {}

    if os.path.isfile('/usbkey/config'):
        with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:
            for optval in config_file:
                optval = salt.utils.stringutils.to_unicode(optval)
                # skip comment lines
                if optval[0] == '#':
                    continue
                # skip lines that are not key=value pairs
                if '=' not in optval:
                    continue
                # split on the first '=' only, so values that themselves
                # contain '=' (e.g. base64 password hashes) stay intact
                optval = optval.split('=', 1)
                config[optval[0].lower()] = optval[1].strip().strip('"')
    log.debug('smartos.config - read /usbkey/config: %s', config)
    return config
def _write_config(config):
    '''
    writes /usbkey/config

    Writes the given dict atomically (via salt.utils.atomicfile) so a crash
    cannot leave a half-written config. Properties are written sorted, one
    ``key=value`` per line; values containing spaces are double-quoted.
    Returns True on success, False on IOError.
    '''
    try:
        with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:
            config_file.write("#\n# This file was generated by salt\n#\n")
            for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
                # quote values containing whitespace (unless already quoted)
                if ' ' in six.text_type(config[prop]):
                    if not config[prop].startswith('"') or not config[prop].endswith('"'):
                        config[prop] = '"{0}"'.format(config[prop])
                config_file.write(
                    salt.utils.stringutils.to_str(
                        "{0}={1}\n".format(prop, config[prop])
                    )
                )
        log.debug('smartos.config - wrote /usbkey/config: %s', config)
    except IOError:
        return False

    return True
def _parse_vmconfig(config, instances):
    '''
    Parse vm_present vm config

    Converts the state's OrderedDict representation of instance groups
    (nics, disks, filesystems) from a mapping keyed by the instance id into
    a list of dicts, injecting the id back under the key named in
    ``instances`` (e.g. 'mac' for nics). Returns None if config is not an
    OrderedDict.
    '''
    vmconfig = None

    if isinstance(config, (salt.utils.odict.OrderedDict)):
        vmconfig = salt.utils.odict.OrderedDict()
        for prop in config:
            if prop not in instances:
                # plain property, copied through unchanged
                vmconfig[prop] = config[prop]
            else:
                if not isinstance(config[prop], (salt.utils.odict.OrderedDict)):
                    continue
                vmconfig[prop] = []
                for instance in config[prop]:
                    instance_config = config[prop][instance]
                    # re-attach the unique id under its property name
                    instance_config[instances[prop]] = instance
                    ## some property are lowercase
                    if 'mac' in instance_config:
                        instance_config['mac'] = instance_config['mac'].lower()
                    vmconfig[prop].append(instance_config)
    else:
        log.error('smartos.vm_present::parse_vmconfig - failed to parse')

    return vmconfig
def _get_instance_changes(current, state):
    '''
    get modified properties

    Returns the salt.utils.data.compare_dicts diff of ``current`` vs
    ``state``, with changes dropped when either the old or the new value is
    the empty string (those are not meaningful updates for vmadm configs).
    '''
    # compare configs
    changed = salt.utils.data.compare_dicts(current, state)

    # drop changes that only transition to or from an empty value
    # NOTE: iterate over a snapshot of the keys since we delete while looping
    for change in list(changed):
        if changed[change]['old'] == "" or changed[change]['new'] == "":
            del changed[change]

    return changed
def _copy_lx_vars(vmconfig):
    '''
    Copy kernel_version and docker:* variables from the image manifest
    into vmconfig for lx-branded zones. Returns the (mutated) vmconfig.
    '''
    # NOTE: documentation on dockerinit: https://github.com/joyent/smartos-live/blob/master/src/dockerinit/README.md
    if 'image_uuid' in vmconfig:
        # NOTE: retrieve tags and type from image
        imgconfig = __salt__['imgadm.get'](vmconfig['image_uuid']).get('manifest', {})
        imgtype = imgconfig.get('type', 'zone-dataset')
        imgtags = imgconfig.get('tags', {})

        # NOTE: copy kernel_version (if not specified in vmconfig)
        if 'kernel_version' not in vmconfig and 'kernel_version' in imgtags:
            vmconfig['kernel_version'] = imgtags['kernel_version']

        # NOTE: copy docker vars
        if imgtype == 'docker':
            vmconfig['docker'] = True
            vmconfig['kernel_version'] = vmconfig.get('kernel_version', '4.3.0')
            if 'internal_metadata' not in vmconfig:
                vmconfig['internal_metadata'] = {}

            for var in imgtags.get('docker:config', {}):
                val = imgtags['docker:config'][var]
                var = 'docker:{0}'.format(var.lower())

                # NOTE: skip empty values
                if not val:
                    continue

                # NOTE: skip or merge user values
                if var == 'docker:env':
                    # merge: user-provided env vars override the image's,
                    # matched by the part before '='
                    try:
                        val_config = json.loads(
                            vmconfig['internal_metadata'].get(var, "")
                        )
                    except ValueError as e:
                        val_config = []

                    for config_env_var in val_config if isinstance(val_config, list) else json.loads(val_config):
                        config_env_var = config_env_var.split('=')
                        for img_env_var in val:
                            if img_env_var.startswith('{0}='.format(config_env_var[0])):
                                val.remove(img_env_var)
                        val.append('='.join(config_env_var))
                elif var in vmconfig['internal_metadata']:
                    # user-provided value wins for non-env vars
                    continue

                if isinstance(val, list):
                    # NOTE: string-encoded JSON arrays
                    vmconfig['internal_metadata'][var] = json.dumps(val)
                else:
                    vmconfig['internal_metadata'][var] = val

    return vmconfig
def config_present(name, value):
    '''
    Ensure configuration property is set to value in /usbkey/config

    name : string
        name of property
    value : string
        value of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # load configuration
    config = _load_config()

    # handle bool and None value
    if isinstance(value, (bool)):
        value = 'true' if value else 'false'

    if not value:
        value = ""

    if name in config:
        # compare as text, the file only stores strings
        if six.text_type(config[name]) == six.text_type(value):
            # we're good
            ret['result'] = True
            ret['comment'] = 'property {0} already has value "{1}"'.format(name, value)
        else:
            # update property
            ret['result'] = True
            ret['comment'] = 'updated property {0} with value "{1}"'.format(name, value)
            ret['changes'][name] = value
            config[name] = value
    else:
        # add property
        ret['result'] = True
        ret['comment'] = 'added property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value

    # apply change if needed (skipped under test=True)
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)

    return ret
def config_absent(name):
    '''
    Ensure configuration property is absent in /usbkey/config

    name : string
        name of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # read the current configuration
    config = _load_config()

    if name not in config:
        # nothing to remove
        ret['result'] = True
        ret['comment'] = 'property {0} is absent'.format(name)
    else:
        # drop the property from the configuration
        ret['result'] = True
        ret['comment'] = 'property {0} deleted'.format(name)
        ret['changes'][name] = None
        del config[name]

    # persist the configuration when something changed (skipped under test=True)
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)

    return ret
def source_absent(name):
    '''
    Ensure an image source is absent on the computenode

    name : string
        source url
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name not in __salt__['imgadm.sources']():
        # source is absent
        ret['result'] = True
        ret['comment'] = 'image source {0} is absent'.format(name)
    else:
        # remove source
        if __opts__['test']:
            # dry-run: pretend success without touching imgadm
            res = {}
            ret['result'] = True
        else:
            # imgadm.source_delete returns the remaining sources; success
            # means the name no longer appears among them
            res = __salt__['imgadm.source_delete'](name)
            ret['result'] = (name not in res)

        if ret['result']:
            ret['comment'] = 'image source {0} deleted'.format(name)
            ret['changes'][name] = 'deleted'
        else:
            ret['comment'] = 'image source {0} not deleted'.format(name)
            # append the error imgadm reported, if any
            if 'Error' in res:
                ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])

    return ret
def image_present(name):
    '''
    Ensure image is present on the computenode
    name : string
        uuid of image (smartos or docker style)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if _is_docker_uuid(name) and __salt__['imgadm.docker_to_uuid'](name):
        # docker image was imported
        ret['result'] = True
        ret['comment'] = 'image {0} ({1}) is present'.format(
            name,
            __salt__['imgadm.docker_to_uuid'](name),
        )
    elif name in __salt__['imgadm.list']():
        # image was already imported
        ret['result'] = True
        ret['comment'] = 'image {0} is present'.format(name)
    else:
        # add image
        if _is_docker_uuid(name):
            # NOTE: we cannot query available docker images
            available_images = [name]
        else:
            available_images = __salt__['imgadm.avail']()
        if name in available_images:
            if __opts__['test']:
                # fabricate an import result so 'changes' looks plausible in test mode
                ret['result'] = True
                res = {}
                if _is_docker_uuid(name):
                    # placeholder uuid; the real uuid is only known after an actual import
                    res['00000000-0000-0000-0000-000000000000'] = name
                else:
                    res[name] = available_images[name]
            else:
                res = __salt__['imgadm.import'](name)
                if _is_uuid(name):
                    ret['result'] = (name in res)
                elif _is_docker_uuid(name):
                    # docker imports get a fresh uuid; success is visible via the mapping
                    ret['result'] = __salt__['imgadm.docker_to_uuid'](name) is not None
            if ret['result']:
                ret['comment'] = 'image {0} imported'.format(name)
                ret['changes'] = res
            else:
                ret['comment'] = 'image {0} was unable to be imported'.format(name)
        else:
            # image is not available from any configured source
            ret['result'] = False
            ret['comment'] = 'image {0} does not exists'.format(name)
    return ret
def image_absent(name):
    '''
    Ensure image is absent on the computenode
    name : string
        uuid of image (smartos or docker style)
    .. note::
        computenode.image_absent will only remove the image if it is not used
        by a vm.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # resolve to a smartos uuid (docker uuids need a lookup)
    uuid = None
    if _is_uuid(name):
        uuid = name
    if _is_docker_uuid(name):
        uuid = __salt__['imgadm.docker_to_uuid'](name)
    if not uuid or uuid not in __salt__['imgadm.list']():
        # image not imported, nothing to do
        ret['result'] = True
        ret['comment'] = 'image {0} is absent'.format(name)
    else:
        # refuse to delete an image that is still used by a vm
        if uuid in __salt__['vmadm.list'](order='image_uuid'):
            ret['result'] = False
            ret['comment'] = 'image {0} currently in use by a vm'.format(name)
        else:
            # delete image
            # BUGFIX: initialize the layer counter before the test-mode branch;
            # previously it was only assigned in the non-test branch, so test
            # mode raised UnboundLocalError when building the comment below.
            image_count = 0
            if __opts__['test']:
                ret['result'] = True
            else:
                image = __salt__['imgadm.get'](uuid)
                if image['manifest']['name'] == 'docker-layer':
                    # NOTE: docker images are made of multiple layers, loop over them
                    while image:
                        image_count += 1
                        __salt__['imgadm.delete'](image['manifest']['uuid'])
                        if 'origin' in image['manifest']:
                            image = __salt__['imgadm.get'](image['manifest']['origin'])
                        else:
                            image = None
                else:
                    # NOTE: normal images can just be deleted
                    __salt__['imgadm.delete'](uuid)
                ret['result'] = uuid not in __salt__['imgadm.list']()
            if image_count:
                ret['comment'] = 'image {0} and {1} children deleted'.format(name, image_count)
            else:
                ret['comment'] = 'image {0} deleted'.format(name)
            ret['changes'][name] = None
    return ret
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present
    name : string
        state id (only used for the return dict)
    .. warning::
        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # list of images to keep
    images = []
    # retrieve image_present state data for host
    for state in __salt__['state.show_lowstate']():
        # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue
        # skip if not from this state module
        if state['state'] != __virtualname__:
            continue
        # skip if not image_present
        if state['fun'] not in ['image_present']:
            continue
        # keep images installed via image_present
        if 'name' in state:
            if _is_uuid(state['name']):
                images.append(state['name'])
            elif _is_docker_uuid(state['name']):
                # resolve docker uuid to the imported smartos uuid
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                images.append(state['name'])
    # retrieve images in use by vms
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in images:
            images.append(image_uuid)
    # purge unused images
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in images:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':
            # NOTE: docker images are made of multiple layers, loop over them
            while image:
                image_uuid = image['manifest']['uuid']
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                # walk up the layer chain via the origin reference
                if 'origin' in image['manifest']:
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:
            # NOTE: normal images can just be delete
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'
    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'
    return ret
def vm_present(name, vmconfig, config=None):
    '''
    Ensure vm is present on the computenode
    name : string
        hostname of vm
    vmconfig : dict
        options to set for the vm
    config : dict
        fine grain control over vm_present
    .. note::
        The following configuration properties can be toggled in the config parameter.
          - kvm_reboot (true) - reboots of kvm zones if needed for a config update
          - auto_import (false) - automatic importing of missing images
          - auto_lx_vars (true) - copy kernel_version and docker:* variables from image
          - reprovision (false) - reprovision on image_uuid changes
          - enforce_tags (true) - false = add tags only, true = add, update, and remove tags
          - enforce_routes (true) - false = add tags only, true = add, update, and remove routes
          - enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
          - enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata
    .. note::
        State ID is used as hostname. Hostnames must be unique.
    .. note::
        If hostname is provided in vmconfig this will take precedence over the State ID.
        This allows multiple states to be applied to the same vm.
    .. note::
        The following instances should have a unique ID.
          - nic : mac
          - filesystem: target
          - disk : path or diskN for zvols
        e.g. disk0 will be the first disk added, disk1 the 2nd,...
    .. versionchanged:: 2019.2.0
        Added support for docker image uuids, added auto_lx_vars configuration, documented some missing configuration options.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # config defaults
    state_config = config if config else {}
    config = {
        'kvm_reboot': True,
        'auto_import': False,
        'auto_lx_vars': True,
        'reprovision': False,
        'enforce_tags': True,
        'enforce_routes': True,
        'enforce_internal_metadata': True,
        'enforce_customer_metadata': True,
    }
    config.update(state_config)
    log.debug('smartos.vm_present::%s::config - %s', name, config)
    # map special vmconfig parameters
    #  collections have set/remove handlers
    #  instances have add/update/remove handlers and a unique id
    vmconfig_type = {
        'collection': [
            'tags',
            'customer_metadata',
            'internal_metadata',
            'routes'
        ],
        'instance': {
            'nics': 'mac',
            'disks': 'path',
            'filesystems': 'target'
        },
        'create_only': [
            'filesystems'
        ]
    }
    # docker metadata keys that dockerinit owns and we must not clobber
    vmconfig_docker_keep = [
        'docker:id',
        'docker:restartcount',
    ]
    # docker metadata keys that are stored as string-encoded JSON arrays
    vmconfig_docker_array = [
        'docker:env',
        'docker:cmd',
        'docker:entrypoint',
    ]
    # parse vmconfig
    vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
    log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)
    # set hostname if needed
    if 'hostname' not in vmconfig:
        vmconfig['hostname'] = name
    # prepare image_uuid
    if 'image_uuid' in vmconfig:
        # NOTE: lookup uuid from docker uuid (normal uuid's are passed through unmodified)
        #       we must do this again if we end up importing a missing image later!
        docker_uuid = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
        vmconfig['image_uuid'] = docker_uuid if docker_uuid else vmconfig['image_uuid']
        # NOTE: import image (if missing and allowed)
        if vmconfig['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](vmconfig['image_uuid'])
                    vmconfig['image_uuid'] = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
                    if vmconfig['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])
    # prepare disk.*.image_uuid
    for disk in vmconfig['disks'] if 'disks' in vmconfig else []:
        if 'image_uuid' in disk and disk['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](disk['image_uuid'])
                    if disk['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(disk['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(disk['image_uuid'])
    # docker json-array handling
    if 'internal_metadata' in vmconfig:
        for var in vmconfig_docker_array:
            if var not in vmconfig['internal_metadata']:
                continue
            if isinstance(vmconfig['internal_metadata'][var], list):
                # vmadm expects these as string-encoded JSON arrays
                vmconfig['internal_metadata'][var] = json.dumps(
                    vmconfig['internal_metadata'][var]
                )
    # copy lx variables
    if vmconfig['brand'] == 'lx' and config['auto_lx_vars']:
        # NOTE: we can only copy the lx vars after the image has been imported
        vmconfig = _copy_lx_vars(vmconfig)
    # quick abort if things look wrong
    # NOTE: use explicit check for false, otherwise None also matches!
    if ret['result'] is False:
        return ret
    # check if vm exists
    if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
        # update vm
        ret['result'] = True
        # expand vmconfig
        vmconfig = {
            'state': vmconfig,
            'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
            'changed': {},
            'reprovision_uuid': None
        }
        # prepare reprovision
        # NOTE: stash the requested image_uuid and diff against the current one,
        #       so a changed image shows up via reprovision instead of vmadm.update
        if 'image_uuid' in vmconfig['state']:
            vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
            vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']
        # disks need some special care
        # NOTE: map the short disk path from the state onto the full zvol path
        #       vmadm reports, so the diff below compares like with like
        if 'disks' in vmconfig['state']:
            new_disks = []
            for disk in vmconfig['state']['disks']:
                path = False
                if 'disks' in vmconfig['current']:
                    for cdisk in vmconfig['current']['disks']:
                        if cdisk['path'].endswith(disk['path']):
                            path = cdisk['path']
                            break
                if not path:
                    del disk['path']
                else:
                    disk['path'] = path
                new_disks.append(disk)
            vmconfig['state']['disks'] = new_disks
        # process properties
        for prop in vmconfig['state']:
            # skip special vmconfig_types
            if prop in vmconfig_type['instance'] or \
               prop in vmconfig_type['collection'] or \
               prop in vmconfig_type['create_only']:
                continue
            # skip unchanged properties
            if prop in vmconfig['current']:
                if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
                    if vmconfig['current'][prop] == vmconfig['state'][prop]:
                        continue
                else:
                    # scalar values are compared as strings to ignore type drift
                    if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
                        continue
            # add property to changeset
            vmconfig['changed'][prop] = vmconfig['state'][prop]
        # process collections
        for collection in vmconfig_type['collection']:
            # skip create only collections
            if collection in vmconfig_type['create_only']:
                continue
            # enforcement
            enforce = config['enforce_{0}'.format(collection)]
            log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)
            # dockerinit handling
            if collection == 'internal_metadata' and vmconfig['state'].get('docker', False):
                if 'internal_metadata' not in vmconfig['state']:
                    vmconfig['state']['internal_metadata'] = {}
                # preserve some docker specific metadata (added and needed by dockerinit)
                for var in vmconfig_docker_keep:
                    val = vmconfig['current'].get(collection, {}).get(var, None)
                    if val is not None:
                        vmconfig['state']['internal_metadata'][var] = val
            # process add and update for collection
            if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                for prop in vmconfig['state'][collection]:
                    # skip unchanged properties
                    if prop in vmconfig['current'][collection] and \
                       vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
                        continue
                    # skip update if not enforcing
                    if not enforce and prop in vmconfig['current'][collection]:
                        continue
                    # create set_ dict
                    if 'set_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['set_{0}'.format(collection)] = {}
                    # add property to changeset
                    vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]
            # process remove for collection
            if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
                for prop in vmconfig['current'][collection]:
                    # skip if exists in state
                    if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                        if prop in vmconfig['state'][collection]:
                            continue
                    # create remove_ array
                    if 'remove_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['remove_{0}'.format(collection)] = []
                    # remove property
                    vmconfig['changed']['remove_{0}'.format(collection)].append(prop)
        # process instances
        for instance in vmconfig_type['instance']:
            # skip create only instances
            if instance in vmconfig_type['create_only']:
                continue
            # add or update instances
            if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                for state_cfg in vmconfig['state'][instance]:
                    add_instance = True
                    # find instance with matching ids
                    for current_cfg in vmconfig['current'][instance]:
                        if vmconfig_type['instance'][instance] not in state_cfg:
                            continue
                        if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                            # ids have matched, disable add instance
                            add_instance = False
                            changed = _get_instance_changes(current_cfg, state_cfg)
                            update_cfg = {}
                            # handle changes
                            for prop in changed:
                                update_cfg[prop] = state_cfg[prop]
                            # handle new properties
                            for prop in state_cfg:
                                # skip empty props like ips, options,..
                                if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
                                    continue
                                if prop not in current_cfg:
                                    update_cfg[prop] = state_cfg[prop]
                            # update instance
                            if update_cfg:
                                # create update_ array
                                if 'update_{0}'.format(instance) not in vmconfig['changed']:
                                    vmconfig['changed']['update_{0}'.format(instance)] = []
                                update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
                                vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)
                    if add_instance:
                        # create add_ array
                        if 'add_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['add_{0}'.format(instance)] = []
                        # add instance
                        vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)
            # remove instances
            if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
                for current_cfg in vmconfig['current'][instance]:
                    remove_instance = True
                    # find instance with matching ids
                    if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                        for state_cfg in vmconfig['state'][instance]:
                            if vmconfig_type['instance'][instance] not in state_cfg:
                                continue
                            if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                                # keep instance if matched
                                remove_instance = False
                    if remove_instance:
                        # create remove_ array
                        if 'remove_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['remove_{0}'.format(instance)] = []
                        # remove instance
                        vmconfig['changed']['remove_{0}'.format(instance)].append(
                            current_cfg[vmconfig_type['instance'][instance]]
                        )
        # update vm if we have pending changes
        kvm_needs_start = False
        if not __opts__['test'] and vmconfig['changed']:
            # stop kvm if disk updates and kvm_reboot
            if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
                if 'add_disks' in vmconfig['changed'] or \
                   'update_disks' in vmconfig['changed'] or \
                   'remove_disks' in vmconfig['changed']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        kvm_needs_start = True
                        __salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')
            # do update
            rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['result'] = False
                ret['comment'] = "{0}".format(rret['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
        if ret['result']:
            if __opts__['test']:
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
            if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
                ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
                if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        __salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
                    if kvm_needs_start:
                        __salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
            else:
                ret['changes'] = {}
                ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])
            # reprovision (if required and allowed)
            if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
                if config['reprovision']:
                    rret = __salt__['vmadm.reprovision'](
                        vm=vmconfig['state']['hostname'],
                        key='hostname',
                        image=vmconfig['reprovision_uuid']
                    )
                    if not isinstance(rret, (bool)) and 'Error' in rret:
                        ret['result'] = False
                        ret['comment'] = 'vm {0} updated, reprovision failed'.format(
                            vmconfig['state']['hostname']
                        )
                    else:
                        ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
                        if vmconfig['state']['hostname'] not in ret['changes']:
                            ret['changes'][vmconfig['state']['hostname']] = {}
                        ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
                else:
                    log.warning('smartos.vm_present::%s::reprovision - '
                                'image_uuid in state does not match current, '
                                'reprovision not allowed',
                                name)
        else:
            ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
            # NOTE(review): rret looks unbound when no update was attempted;
            #               this branch seems only reachable after a failed
            #               vmadm.update - confirm before refactoring
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['comment'] = "{0}".format(rret['Error'])
    else:
        # check required image installed
        ret['result'] = True
        # disks need some special care
        # NOTE: on create the disk path is chosen by vmadm, drop it from the config
        if 'disks' in vmconfig:
            new_disks = []
            for disk in vmconfig['disks']:
                if 'path' in disk:
                    del disk['path']
                new_disks.append(disk)
            vmconfig['disks'] = new_disks
        # create vm
        if ret['result']:
            uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True
            if not isinstance(uuid, (bool)) and 'Error' in uuid:
                ret['result'] = False
                ret['comment'] = "{0}".format(uuid['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['hostname']] = vmconfig
                ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])
    return ret
def vm_absent(name, archive=False):
    '''
    Ensure vm is absent on the computenode
    name : string
        hostname of vm
    archive : boolean
        toggle archiving of vm on removal
    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if name in __salt__['vmadm.list'](order='hostname'):
        # vm exists, remove it (honor test mode)
        if __opts__['test']:
            ret['result'] = True
        else:
            if archive:
                # flag the vm so vmadm archives it when deleting
                __salt__['vmadm.update'](vm=name, key='hostname', archive_on_delete=True)
            ret['result'] = __salt__['vmadm.delete'](name, key='hostname')
        if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
            ret['result'] = False
            ret['comment'] = 'failed to delete vm {0}'.format(name)
        else:
            ret['comment'] = 'vm {0} deleted'.format(name)
            ret['changes'][name] = None
    else:
        # nothing to do
        ret['result'] = True
        ret['comment'] = 'vm {0} is absent'.format(name)
    return ret
def vm_running(name):
    '''
    Ensure vm is in the running state on the computenode
    name : string
        hostname of vm
    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if name in __salt__['vmadm.list'](order='hostname', search='state=running'):
        # already running, nothing to do
        ret['result'] = True
        ret['comment'] = 'vm {0} already running'.format(name)
        return ret
    # start the vm (no-op in test mode)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.start'](name, key='hostname')
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to start {0}'.format(name)
    else:
        ret['changes'][name] = 'running'
        ret['comment'] = 'vm {0} started'.format(name)
    return ret
def vm_stopped(name):
    '''
    Ensure vm is in the stopped state on the computenode
    name : string
        hostname of vm
    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if name in __salt__['vmadm.list'](order='hostname', search='state=stopped'):
        # already stopped, nothing to do
        ret['result'] = True
        ret['comment'] = 'vm {0} already stopped'.format(name)
        return ret
    # halt the vm (no-op in test mode)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.stop'](name, key='hostname')
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to stop {0}'.format(name)
    else:
        ret['changes'][name] = 'stopped'
        ret['comment'] = 'vm {0} stopped'.format(name)
    return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
saltstack/salt
|
salt/states/smartos.py
|
source_absent
|
python
|
def source_absent(name):
    '''
    Ensure an image source is absent on the computenode
    name : string
        source url
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if name in __salt__['imgadm.sources']():
        # source is registered, drop it (skipped in test mode)
        if __opts__['test']:
            res = {}
            ret['result'] = True
        else:
            res = __salt__['imgadm.source_delete'](name)
            ret['result'] = name not in res
        if ret['result']:
            ret['comment'] = 'image source {0} deleted'.format(name)
            ret['changes'][name] = 'deleted'
        else:
            ret['comment'] = 'image source {0} not deleted'.format(name)
            if 'Error' in res:
                ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
    else:
        # already gone
        ret['result'] = True
        ret['comment'] = 'image source {0} is absent'.format(name)
    return ret
|
Ensure an image source is absent on the computenode
name : string
source url
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/smartos.py#L452-L485
| null |
# -*- coding: utf-8 -*-
'''
Management of SmartOS Standalone Compute Nodes
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: vmadm, imgadm
:platform: smartos
.. versionadded:: 2016.3.0
.. code-block:: yaml
vmtest.example.org:
smartos.vm_present:
- config:
reprovision: true
- vmconfig:
image_uuid: c02a2044-c1bd-11e4-bd8c-dfc1db8b0182
brand: joyent
alias: vmtest
quota: 5
max_physical_memory: 512
tags:
label: 'test vm'
owner: 'sjorge'
nics:
"82:1b:8e:49:e9:12":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.123/16
- 192.168.2.123/24
vlan_id: 10
"82:1b:8e:49:e9:13":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
filesystems:
"/bigdata":
source: "/bulk/data"
type: lofs
options:
- ro
- nodevices
kvmtest.example.org:
smartos.vm_present:
- vmconfig:
brand: kvm
alias: kvmtest
cpu_type: host
ram: 512
vnc_port: 9
tags:
label: 'test kvm'
owner: 'sjorge'
disks:
disk0
size: 2048
model: virtio
compression: lz4
boot: true
nics:
"82:1b:8e:49:e9:15":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
docker.example.org:
smartos.vm_present:
- config:
auto_import: true
reprovision: true
- vmconfig:
image_uuid: emby/embyserver:latest
brand: lx
alias: mydockervm
quota: 5
max_physical_memory: 1024
tags:
label: 'my emby docker'
owner: 'sjorge'
resolvers:
- 172.16.1.1
nics:
"82:1b:8e:49:e9:18":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.118/24
vlan_id: 10
filesystems:
          "/config":
source: "/vmdata/emby_config"
type: lofs
options:
- nodevices
cleanup_images:
smartos.image_vacuum
.. note::
Keep in mind that when removing properties from vmconfig they will not get
removed from the vm's current configuration, except for nics, disk, tags, ...
they get removed via add_*, set_*, update_*, and remove_*. Properties must
be manually reset to their default value.
The same behavior as when using 'vmadm update'.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import json
import logging
import os
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.files
import salt.utils.odict
import salt.utils.stringutils
# Import 3rd party libs
from salt.ext import six
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = 'smartos'
def __virtual__():
    '''
    Provides smartos state provided for SmartOS
    '''
    # both imgadm and vmadm execution modules must be available
    if 'vmadm.create' not in __salt__ or 'imgadm.list' not in __salt__:
        return (
            False,
            '{0} state module can only be loaded on SmartOS compute nodes'.format(
                __virtualname__
            )
        )
    return True
def _split_docker_uuid(uuid):
'''
Split a smartos docker uuid into repo and tag
'''
if uuid:
uuid = uuid.split(':')
if len(uuid) == 2:
tag = uuid[1]
repo = uuid[0]
return repo, tag
return None, None
def _is_uuid(uuid):
'''
Check if uuid is a valid smartos uuid
Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8
'''
if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:
return True
return False
def _is_docker_uuid(uuid):
    '''
    Check if uuid is a valid smartos docker uuid
    Example plexinc/pms-docker:plexpass
    '''
    # docker-style uuids split into a repo and a tag
    repo, tag = _split_docker_uuid(uuid)
    return bool(repo or tag)
def _load_config():
    '''
    Loads and parses /usbkey/config

    Returns a dict mapping lowercased property names to their values
    (surrounding double quotes stripped). Missing file yields an empty dict.
    '''
    config = {}
    if os.path.isfile('/usbkey/config'):
        with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:
            for optval in config_file:
                optval = salt.utils.stringutils.to_unicode(optval)
                # skip comment lines
                if optval[0] == '#':
                    continue
                # skip lines that are not key=value pairs
                if '=' not in optval:
                    continue
                # BUGFIX: split on the first '=' only, so values that
                # themselves contain '=' are no longer truncated
                optval = optval.split('=', 1)
                config[optval[0].lower()] = optval[1].strip().strip('"')
    log.debug('smartos.config - read /usbkey/config: %s', config)
    return config
def _write_config(config):
    '''
    writes /usbkey/config

    Returns True on success, False when the file could not be written (IOError).
    '''
    try:
        # atomic_open writes to a temp file and renames it into place
        with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:
            config_file.write("#\n# This file was generated by salt\n#\n")
            # emit properties sorted by name
            for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
                # quote values containing whitespace, unless already quoted
                if ' ' in six.text_type(config[prop]):
                    if not config[prop].startswith('"') or not config[prop].endswith('"'):
                        config[prop] = '"{0}"'.format(config[prop])
                config_file.write(
                    salt.utils.stringutils.to_str(
                        "{0}={1}\n".format(prop, config[prop])
                    )
                )
        log.debug('smartos.config - wrote /usbkey/config: %s', config)
    except IOError:
        return False
    return True
def _parse_vmconfig(config, instances):
    '''
    Parse vm_present vm config

    config : OrderedDict
        raw vmconfig as passed into the state
    instances : dict
        mapping of instance collection names (nics, disks, filesystems)
        to the key that uniquely identifies each entry
    '''
    vmconfig = None
    if isinstance(config, (salt.utils.odict.OrderedDict)):
        vmconfig = salt.utils.odict.OrderedDict()
        for prop in config:
            if prop not in instances:
                # plain property, copied verbatim
                vmconfig[prop] = config[prop]
            else:
                if not isinstance(config[prop], (salt.utils.odict.OrderedDict)):
                    continue
                # flatten {id: {..}} mappings into a list of dicts,
                # storing the mapping key under the instance's unique key
                vmconfig[prop] = []
                for instance in config[prop]:
                    instance_config = config[prop][instance]
                    instance_config[instances[prop]] = instance
                    ## some property are lowercase
                    if 'mac' in instance_config:
                        instance_config['mac'] = instance_config['mac'].lower()
                    vmconfig[prop].append(instance_config)
    else:
        log.error('smartos.vm_present::parse_vmconfig - failed to parse')
    return vmconfig
def _get_instance_changes(current, state):
    '''
    get modified properties

    Returns the ``compare_dicts`` result restricted to properties that were
    actually modified: entries whose old or new value is reported as an empty
    string (i.e. pure additions/removals) are filtered out.
    '''
    # compare configs
    changed = salt.utils.data.compare_dicts(current, state)
    # NOTE: the original called compare_dicts a second time just to iterate
    #       while deleting, and computed two unused key sets; iterate over a
    #       snapshot of the keys instead.
    for change in list(changed):
        if changed[change]['old'] == "" or changed[change]['new'] == "":
            del changed[change]
    return changed
def _copy_lx_vars(vmconfig):
    '''
    Copy kernel_version and docker:* image tags into vmconfig for lx zones.
    '''
    # NOTE: documentation on dockerinit: https://github.com/joyent/smartos-live/blob/master/src/dockerinit/README.md
    if 'image_uuid' in vmconfig:
        # NOTE: retrieve tags and type from image
        imgconfig = __salt__['imgadm.get'](vmconfig['image_uuid']).get('manifest', {})
        imgtype = imgconfig.get('type', 'zone-dataset')
        imgtags = imgconfig.get('tags', {})
        # NOTE: copy kernel_version (if not specified in vmconfig)
        if 'kernel_version' not in vmconfig and 'kernel_version' in imgtags:
            vmconfig['kernel_version'] = imgtags['kernel_version']
        # NOTE: copy docker vars
        if imgtype == 'docker':
            vmconfig['docker'] = True
            vmconfig['kernel_version'] = vmconfig.get('kernel_version', '4.3.0')
            if 'internal_metadata' not in vmconfig:
                vmconfig['internal_metadata'] = {}
            for var in imgtags.get('docker:config', {}):
                val = imgtags['docker:config'][var]
                var = 'docker:{0}'.format(var.lower())
                # NOTE: skip empty values
                if not val:
                    continue
                # NOTE: skip or merge user values
                if var == 'docker:env':
                    # merge user-provided env vars over the image defaults
                    try:
                        val_config = json.loads(
                            vmconfig['internal_metadata'].get(var, "")
                        )
                    except ValueError as e:
                        # no (valid) user-provided env, start from an empty list
                        val_config = []
                    for config_env_var in val_config if isinstance(val_config, list) else json.loads(val_config):
                        config_env_var = config_env_var.split('=')
                        # drop the image's value for a variable the user overrides
                        for img_env_var in val:
                            if img_env_var.startswith('{0}='.format(config_env_var[0])):
                                val.remove(img_env_var)
                        val.append('='.join(config_env_var))
                elif var in vmconfig['internal_metadata']:
                    continue
                if isinstance(val, list):
                    # NOTE: string-encoded JSON arrays
                    vmconfig['internal_metadata'][var] = json.dumps(val)
                else:
                    vmconfig['internal_metadata'][var] = val
    return vmconfig
def config_present(name, value):
    '''
    Ensure configuration property is set to value in /usbkey/config
    name : string
        name of property
    value : string
        value of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # read the current configuration
    config = _load_config()
    # normalize booleans and empty values to their string form
    if isinstance(value, bool):
        value = 'true' if value else 'false'
    if not value:
        value = ""
    if name not in config:
        # new property
        ret['result'] = True
        ret['comment'] = 'added property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value
    elif six.text_type(config[name]) == six.text_type(value):
        # nothing to change
        ret['result'] = True
        ret['comment'] = 'property {0} already has value "{1}"'.format(name, value)
    else:
        # value differs, update it
        ret['result'] = True
        ret['comment'] = 'updated property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value
    # write the file unless running in test mode
    if ret['changes'] and not __opts__['test']:
        ret['result'] = _write_config(config)
    return ret
def config_absent(name):
    '''
    Ensure configuration property is absent in /usbkey/config
    name : string
        name of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # read the current /usbkey/config contents
    config = _load_config()
    if name not in config:
        # property already missing, nothing to do
        ret['result'] = True
        ret['comment'] = 'property {0} is absent'.format(name)
    else:
        # drop the property and record the change
        del config[name]
        ret['result'] = True
        ret['comment'] = 'property {0} deleted'.format(name)
        ret['changes'][name] = None
    # persist only when something changed and we are not in test mode
    if ret['changes'] and not __opts__['test']:
        ret['result'] = _write_config(config)
    return ret
def source_present(name, source_type='imgapi'):
    '''
    Ensure an image source is present on the computenode
    name : string
        source url
    source_type : string
        source type (imgapi or docker)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if name not in __salt__['imgadm.sources']():
        # register the new source (skipped in test mode)
        if __opts__['test']:
            res = {}
            ret['result'] = True
        else:
            res = __salt__['imgadm.source_add'](name, source_type)
            ret['result'] = name in res
        if ret['result']:
            ret['comment'] = 'image source {0} added'.format(name)
            ret['changes'][name] = 'added'
        else:
            ret['comment'] = 'image source {0} not added'.format(name)
            if 'Error' in res:
                ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
    else:
        # already registered
        ret['result'] = True
        ret['comment'] = 'image source {0} is present'.format(name)
    return ret
def image_present(name):
    '''
    Ensure image is present on the computenode

    name : string
        uuid of image (smartos uuid or docker repo:tag)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if _is_docker_uuid(name) and __salt__['imgadm.docker_to_uuid'](name):
        # docker image was already imported (repo:tag resolves to a uuid)
        ret['result'] = True
        ret['comment'] = 'image {0} ({1}) is present'.format(
            name,
            __salt__['imgadm.docker_to_uuid'](name),
        )
    elif name in __salt__['imgadm.list']():
        # image was already imported
        ret['result'] = True
        ret['comment'] = 'image {0} is present'.format(name)
    else:
        # image missing, try to import it
        if _is_docker_uuid(name):
            # NOTE: we cannot query available docker images
            available_images = [name]
        else:
            available_images = __salt__['imgadm.avail']()

        if name in available_images:
            if __opts__['test']:
                # fabricate a plausible result for the dry-run changes dict
                ret['result'] = True
                res = {}
                if _is_docker_uuid(name):
                    res['00000000-0000-0000-0000-000000000000'] = name
                else:
                    res[name] = available_images[name]
            else:
                res = __salt__['imgadm.import'](name)
                if _is_uuid(name):
                    ret['result'] = (name in res)
                elif _is_docker_uuid(name):
                    # docker imports key the result on the resolved uuid,
                    # so success is "the repo:tag now resolves"
                    ret['result'] = __salt__['imgadm.docker_to_uuid'](name) is not None
            if ret['result']:
                ret['comment'] = 'image {0} imported'.format(name)
                ret['changes'] = res
            else:
                ret['comment'] = 'image {0} was unable to be imported'.format(name)
        else:
            ret['result'] = False
            # FIX: message grammar ('does not exists' -> 'does not exist')
            ret['comment'] = 'image {0} does not exist'.format(name)

    return ret
def image_absent(name):
    '''
    Ensure image is absent on the computenode

    name : string
        uuid of image

    .. note::
        computenode.image_absent will only remove the image if it is not used
        by a vm.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # resolve name to a smartos image uuid (docker repo:tag needs a lookup)
    uuid = None
    if _is_uuid(name):
        uuid = name
    if _is_docker_uuid(name):
        uuid = __salt__['imgadm.docker_to_uuid'](name)

    if not uuid or uuid not in __salt__['imgadm.list']():
        # image not imported, nothing to do
        ret['result'] = True
        ret['comment'] = 'image {0} is absent'.format(name)
    else:
        # refuse to delete an image that still backs a vm
        if uuid in __salt__['vmadm.list'](order='image_uuid'):
            ret['result'] = False
            ret['comment'] = 'image {0} currently in use by a vm'.format(name)
        else:
            # delete image
            # FIX: image_count was only assigned on the non-test path while
            # the summary below reads it, risking an UnboundLocalError and
            # leaving test runs without a comment; initialize it up front.
            image_count = 0
            if __opts__['test']:
                ret['result'] = True
            else:
                image = __salt__['imgadm.get'](uuid)
                if image['manifest']['name'] == 'docker-layer':
                    # NOTE: docker images are made of multiple layers, loop over them
                    while image:
                        image_count += 1
                        __salt__['imgadm.delete'](image['manifest']['uuid'])
                        if 'origin' in image['manifest']:
                            image = __salt__['imgadm.get'](image['manifest']['origin'])
                        else:
                            image = None
                else:
                    # NOTE: normal images can just be deleted
                    __salt__['imgadm.delete'](uuid)
                ret['result'] = uuid not in __salt__['imgadm.list']()
            if image_count:
                ret['comment'] = 'image {0} and {1} children deleted'.format(name, image_count)
            else:
                ret['comment'] = 'image {0} deleted'.format(name)
            ret['changes'][name] = None

    return ret
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present

    .. warning::
        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # list of image uuids to keep
    images = []

    # retrieve image_present state data for host, so explicitly managed
    # images survive the vacuum
    for state in __salt__['state.show_lowstate']():
        # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue
        # skip if not from this state module
        if state['state'] != __virtualname__:
            continue
        # skip if not image_present
        if state['fun'] not in ['image_present']:
            continue
        # keep images installed via image_present
        if 'name' in state:
            if _is_uuid(state['name']):
                images.append(state['name'])
            elif _is_docker_uuid(state['name']):
                # docker repo:tag must be resolved to a uuid first; skip
                # entries that do not resolve (image never imported)
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                images.append(state['name'])

    # also keep every image currently backing a vm
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in images:
            images.append(image_uuid)

    # purge all imported images that are not in the keep list
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in images:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':
            # NOTE: docker images are made of multiple layers, loop over them
            # (follows the 'origin' chain; the loop variable image_uuid is
            # deliberately rebound to each layer's uuid)
            while image:
                image_uuid = image['manifest']['uuid']
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                if 'origin' in image['manifest']:
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:
            # NOTE: normal images can just be deleted
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'

    # summarize when no individual delete failed
    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'

    return ret
def vm_present(name, vmconfig, config=None):
    '''
    Ensure vm is present on the computenode

    name : string
        hostname of vm
    vmconfig : dict
        options to set for the vm
    config : dict
        fine grain control over vm_present

    .. note::
        The following configuration properties can be toggled in the config parameter.
          - kvm_reboot (true) - reboots of kvm zones if needed for a config update
          - auto_import (false) - automatic importing of missing images
          - auto_lx_vars (true) - copy kernel_version and docker:* variables from image
          - reprovision (false) - reprovision on image_uuid changes
          - enforce_tags (true) - false = add tags only, true = add, update, and remove tags
          - enforce_routes (true) - false = add tags only, true = add, update, and remove routes
          - enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
          - enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata

    .. note::
        State ID is used as hostname. Hostnames must be unique.

    .. note::
        If hostname is provided in vmconfig this will take precedence over the State ID.
        This allows multiple states to be applied to the same vm.

    .. note::
        The following instances should have a unique ID.
          - nic : mac
          - filesystem: target
          - disk : path or diskN for zvols
        e.g. disk0 will be the first disk added, disk1 the 2nd,...

    .. versionchanged:: 2019.2.0
        Added support for docker image uuids, added auto_lx_vars configuration, documented some missing configuration options.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # config defaults (user supplied config overrides these)
    state_config = config if config else {}
    config = {
        'kvm_reboot': True,
        'auto_import': False,
        'auto_lx_vars': True,
        'reprovision': False,
        'enforce_tags': True,
        'enforce_routes': True,
        'enforce_internal_metadata': True,
        'enforce_customer_metadata': True,
    }
    config.update(state_config)
    log.debug('smartos.vm_present::%s::config - %s', name, config)

    # map special vmconfig parameters
    #   collections have set/remove handlers
    #   instances have add/update/remove handlers and a unique id
    vmconfig_type = {
        'collection': [
            'tags',
            'customer_metadata',
            'internal_metadata',
            'routes'
        ],
        'instance': {
            'nics': 'mac',
            'disks': 'path',
            'filesystems': 'target'
        },
        'create_only': [
            'filesystems'
        ]
    }
    # internal_metadata keys written by dockerinit that must be preserved
    vmconfig_docker_keep = [
        'docker:id',
        'docker:restartcount',
    ]
    # internal_metadata keys that vmadm expects as string-encoded JSON arrays
    vmconfig_docker_array = [
        'docker:env',
        'docker:cmd',
        'docker:entrypoint',
    ]

    # parse vmconfig
    vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
    log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)

    # set hostname if needed
    if 'hostname' not in vmconfig:
        vmconfig['hostname'] = name

    # prepare image_uuid
    if 'image_uuid' in vmconfig:
        # NOTE: lookup uuid from docker uuid (normal uuid's are passed through unmodified)
        #       we must do this again if we end up importing a missing image later!
        docker_uuid = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
        vmconfig['image_uuid'] = docker_uuid if docker_uuid else vmconfig['image_uuid']

        # NOTE: import image (if missing and allowed)
        if vmconfig['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](vmconfig['image_uuid'])
                    vmconfig['image_uuid'] = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
                    if vmconfig['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])

    # prepare disk.*.image_uuid (same import-if-allowed logic per disk)
    for disk in vmconfig['disks'] if 'disks' in vmconfig else []:
        if 'image_uuid' in disk and disk['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](disk['image_uuid'])
                    if disk['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(disk['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(disk['image_uuid'])

    # docker json-array handling: serialize list values to JSON strings
    if 'internal_metadata' in vmconfig:
        for var in vmconfig_docker_array:
            if var not in vmconfig['internal_metadata']:
                continue
            if isinstance(vmconfig['internal_metadata'][var], list):
                vmconfig['internal_metadata'][var] = json.dumps(
                    vmconfig['internal_metadata'][var]
                )

    # copy lx variables
    if vmconfig['brand'] == 'lx' and config['auto_lx_vars']:
        # NOTE: we can only copy the lx vars after the image has been imported
        vmconfig = _copy_lx_vars(vmconfig)

    # quick abort if things look wrong
    # NOTE: use explicit check for false, otherwise None also matches!
    if ret['result'] is False:
        return ret

    # check if vm exists
    if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
        # update vm
        ret['result'] = True

        # expand vmconfig into desired state vs current state plus changeset
        vmconfig = {
            'state': vmconfig,
            'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
            'changed': {},
            'reprovision_uuid': None
        }

        # prepare reprovision: remember the requested image and pretend the
        # current one is requested so it does not show up as a plain change
        if 'image_uuid' in vmconfig['state']:
            vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
            vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']

        # disks need some special care: map requested (relative) paths onto
        # the full zvol paths vmadm reports, or drop 'path' for new disks
        if 'disks' in vmconfig['state']:
            new_disks = []
            for disk in vmconfig['state']['disks']:
                path = False
                if 'disks' in vmconfig['current']:
                    for cdisk in vmconfig['current']['disks']:
                        if cdisk['path'].endswith(disk['path']):
                            path = cdisk['path']
                            break
                if not path:
                    del disk['path']
                else:
                    disk['path'] = path
                new_disks.append(disk)
            vmconfig['state']['disks'] = new_disks

        # process properties (plain scalar/list/dict vm properties)
        for prop in vmconfig['state']:
            # skip special vmconfig_types
            if prop in vmconfig_type['instance'] or \
               prop in vmconfig_type['collection'] or \
               prop in vmconfig_type['create_only']:
                continue

            # skip unchanged properties
            if prop in vmconfig['current']:
                if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
                    if vmconfig['current'][prop] == vmconfig['state'][prop]:
                        continue
                else:
                    # compare scalars through their string form so e.g.
                    # int 5 equals '5' from the state file
                    if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
                        continue

            # add property to changeset
            vmconfig['changed'][prop] = vmconfig['state'][prop]

        # process collections (tags, metadata, routes -> set_*/remove_*)
        for collection in vmconfig_type['collection']:
            # skip create only collections
            if collection in vmconfig_type['create_only']:
                continue

            # enforcement: when off, existing entries are never updated/removed
            enforce = config['enforce_{0}'.format(collection)]
            log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)

            # dockerinit handling
            if collection == 'internal_metadata' and vmconfig['state'].get('docker', False):
                if 'internal_metadata' not in vmconfig['state']:
                    vmconfig['state']['internal_metadata'] = {}

                # preserve some docker specific metadata (added and needed by dockerinit)
                for var in vmconfig_docker_keep:
                    val = vmconfig['current'].get(collection, {}).get(var, None)
                    if val is not None:
                        vmconfig['state']['internal_metadata'][var] = val

            # process add and update for collection
            if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                for prop in vmconfig['state'][collection]:
                    # skip unchanged properties
                    if prop in vmconfig['current'][collection] and \
                       vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
                        continue

                    # skip update if not enforcing
                    if not enforce and prop in vmconfig['current'][collection]:
                        continue

                    # create set_ dict
                    if 'set_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['set_{0}'.format(collection)] = {}

                    # add property to changeset
                    vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]

            # process remove for collection
            if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
                for prop in vmconfig['current'][collection]:
                    # skip if exists in state
                    if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                        if prop in vmconfig['state'][collection]:
                            continue

                    # create remove_ array
                    if 'remove_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['remove_{0}'.format(collection)] = []

                    # remove property
                    vmconfig['changed']['remove_{0}'.format(collection)].append(prop)

        # process instances (nics, disks -> add_*/update_*/remove_*)
        for instance in vmconfig_type['instance']:
            # skip create only instances
            if instance in vmconfig_type['create_only']:
                continue

            # add or update instances
            if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                for state_cfg in vmconfig['state'][instance]:
                    add_instance = True

                    # find instance with matching ids
                    for current_cfg in vmconfig['current'][instance]:
                        if vmconfig_type['instance'][instance] not in state_cfg:
                            continue

                        if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                            # ids have matched, disable add instance
                            add_instance = False

                            changed = _get_instance_changes(current_cfg, state_cfg)
                            update_cfg = {}

                            # handle changes
                            for prop in changed:
                                update_cfg[prop] = state_cfg[prop]

                            # handle new properties
                            for prop in state_cfg:
                                # skip empty props like ips, options,..
                                if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
                                    continue

                                if prop not in current_cfg:
                                    update_cfg[prop] = state_cfg[prop]

                            # update instance
                            if update_cfg:
                                # create update_ array
                                if 'update_{0}'.format(instance) not in vmconfig['changed']:
                                    vmconfig['changed']['update_{0}'.format(instance)] = []

                                update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
                                vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)

                    if add_instance:
                        # create add_ array
                        if 'add_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['add_{0}'.format(instance)] = []

                        # add instance
                        vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)

            # remove instances
            if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
                for current_cfg in vmconfig['current'][instance]:
                    remove_instance = True

                    # find instance with matching ids
                    if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                        for state_cfg in vmconfig['state'][instance]:
                            if vmconfig_type['instance'][instance] not in state_cfg:
                                continue

                            if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                                # keep instance if matched
                                remove_instance = False

                    if remove_instance:
                        # create remove_ array
                        if 'remove_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['remove_{0}'.format(instance)] = []

                        # remove instance (identified by its unique id property)
                        vmconfig['changed']['remove_{0}'.format(instance)].append(
                            current_cfg[vmconfig_type['instance'][instance]]
                        )

        # update vm if we have pending changes
        kvm_needs_start = False
        if not __opts__['test'] and vmconfig['changed']:
            # stop kvm if disk updates and kvm_reboot (disk changes on kvm
            # require the zone to be down)
            if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
                if 'add_disks' in vmconfig['changed'] or \
                   'update_disks' in vmconfig['changed'] or \
                   'remove_disks' in vmconfig['changed']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        kvm_needs_start = True
                        __salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')

            # do update
            rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['result'] = False
                ret['comment'] = "{0}".format(rret['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']

        if ret['result']:
            if __opts__['test']:
                # in test mode, report the computed changeset without applying
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']

            if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
                ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
                if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        __salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
                    if kvm_needs_start:
                        __salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
            else:
                ret['changes'] = {}
                ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])

            # reprovision (if required and allowed)
            if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
                if config['reprovision']:
                    rret = __salt__['vmadm.reprovision'](
                        vm=vmconfig['state']['hostname'],
                        key='hostname',
                        image=vmconfig['reprovision_uuid']
                    )
                    if not isinstance(rret, (bool)) and 'Error' in rret:
                        ret['result'] = False
                        ret['comment'] = 'vm {0} updated, reprovision failed'.format(
                            vmconfig['state']['hostname']
                        )
                    else:
                        ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
                        if vmconfig['state']['hostname'] not in ret['changes']:
                            ret['changes'][vmconfig['state']['hostname']] = {}
                        ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
                else:
                    log.warning('smartos.vm_present::%s::reprovision - '
                                'image_uuid in state does not match current, '
                                'reprovision not allowed',
                                name)
        else:
            ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['comment'] = "{0}".format(rret['Error'])
    else:
        # vm does not exist yet, create it
        ret['result'] = True

        # disks need some special care: 'path' must not be passed on create
        if 'disks' in vmconfig:
            new_disks = []
            for disk in vmconfig['disks']:
                if 'path' in disk:
                    del disk['path']
                new_disks.append(disk)
            vmconfig['disks'] = new_disks

        # create vm
        if ret['result']:
            uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True
            if not isinstance(uuid, (bool)) and 'Error' in uuid:
                ret['result'] = False
                ret['comment'] = "{0}".format(uuid['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['hostname']] = vmconfig
                ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])

    return ret
def vm_absent(name, archive=False):
    '''
    Ensure vm is absent on the computenode

    name : string
        hostname of vm
    archive : boolean
        toggle archiving of vm on removal

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    if name not in __salt__['vmadm.list'](order='hostname'):
        # already gone, nothing to do
        ret['result'] = True
        ret['comment'] = 'vm {0} is absent'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = True
    else:
        # flag the vm for archiving before deleting when requested
        if archive:
            __salt__['vmadm.update'](vm=name, key='hostname', archive_on_delete=True)
        ret['result'] = __salt__['vmadm.delete'](name, key='hostname')

    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to delete vm {0}'.format(name)
    else:
        ret['comment'] = 'vm {0} deleted'.format(name)
        ret['changes'][name] = None

    return ret
def vm_running(name):
    '''
    Ensure vm is in the running state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    if name in __salt__['vmadm.list'](order='hostname', search='state=running'):
        # already in the desired state
        ret['result'] = True
        ret['comment'] = 'vm {0} already running'.format(name)
        return ret

    # attempt to start the vm (no-op when testing)
    ret['result'] = True if __opts__['test'] else __salt__['vmadm.start'](name, key='hostname')
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to start {0}'.format(name)
    else:
        ret['changes'][name] = 'running'
        ret['comment'] = 'vm {0} started'.format(name)

    return ret
def vm_stopped(name):
    '''
    Ensure vm is in the stopped state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    if name in __salt__['vmadm.list'](order='hostname', search='state=stopped'):
        # already in the desired state
        ret['result'] = True
        ret['comment'] = 'vm {0} already stopped'.format(name)
        return ret

    # attempt to stop the vm (no-op when testing)
    ret['result'] = True if __opts__['test'] else __salt__['vmadm.stop'](name, key='hostname')
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to stop {0}'.format(name)
    else:
        ret['changes'][name] = 'stopped'
        ret['comment'] = 'vm {0} stopped'.format(name)

    return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
saltstack/salt
|
salt/states/smartos.py
|
image_present
|
python
|
def image_present(name):
'''
Ensure image is present on the computenode
name : string
uuid of image
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if _is_docker_uuid(name) and __salt__['imgadm.docker_to_uuid'](name):
# docker image was imported
ret['result'] = True
ret['comment'] = 'image {0} ({1}) is present'.format(
name,
__salt__['imgadm.docker_to_uuid'](name),
)
elif name in __salt__['imgadm.list']():
# image was already imported
ret['result'] = True
ret['comment'] = 'image {0} is present'.format(name)
else:
# add image
if _is_docker_uuid(name):
# NOTE: we cannot query available docker images
available_images = [name]
else:
available_images = __salt__['imgadm.avail']()
if name in available_images:
if __opts__['test']:
ret['result'] = True
res = {}
if _is_docker_uuid(name):
res['00000000-0000-0000-0000-000000000000'] = name
else:
res[name] = available_images[name]
else:
res = __salt__['imgadm.import'](name)
if _is_uuid(name):
ret['result'] = (name in res)
elif _is_docker_uuid(name):
ret['result'] = __salt__['imgadm.docker_to_uuid'](name) is not None
if ret['result']:
ret['comment'] = 'image {0} imported'.format(name)
ret['changes'] = res
else:
ret['comment'] = 'image {0} was unable to be imported'.format(name)
else:
ret['result'] = False
ret['comment'] = 'image {0} does not exists'.format(name)
return ret
|
Ensure image is present on the computenode
name : string
uuid of image
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/smartos.py#L488-L542
|
[
"def _is_uuid(uuid):\n '''\n Check if uuid is a valid smartos uuid\n\n Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8\n '''\n if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:\n return True\n return False\n",
"def _is_docker_uuid(uuid):\n '''\n Check if uuid is a valid smartos docker uuid\n\n Example plexinc/pms-docker:plexpass\n '''\n repo, tag = _split_docker_uuid(uuid)\n return not (not repo and not tag)\n"
] |
# -*- coding: utf-8 -*-
'''
Management of SmartOS Standalone Compute Nodes
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: vmadm, imgadm
:platform: smartos
.. versionadded:: 2016.3.0
.. code-block:: yaml
vmtest.example.org:
smartos.vm_present:
- config:
reprovision: true
- vmconfig:
image_uuid: c02a2044-c1bd-11e4-bd8c-dfc1db8b0182
brand: joyent
alias: vmtest
quota: 5
max_physical_memory: 512
tags:
label: 'test vm'
owner: 'sjorge'
nics:
"82:1b:8e:49:e9:12":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.123/16
- 192.168.2.123/24
vlan_id: 10
"82:1b:8e:49:e9:13":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
filesystems:
"/bigdata":
source: "/bulk/data"
type: lofs
options:
- ro
- nodevices
kvmtest.example.org:
smartos.vm_present:
- vmconfig:
brand: kvm
alias: kvmtest
cpu_type: host
ram: 512
vnc_port: 9
tags:
label: 'test kvm'
owner: 'sjorge'
disks:
disk0:
size: 2048
model: virtio
compression: lz4
boot: true
nics:
"82:1b:8e:49:e9:15":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
docker.example.org:
smartos.vm_present:
- config:
auto_import: true
reprovision: true
- vmconfig:
image_uuid: emby/embyserver:latest
brand: lx
alias: mydockervm
quota: 5
max_physical_memory: 1024
tags:
label: 'my emby docker'
owner: 'sjorge'
resolvers:
- 172.16.1.1
nics:
"82:1b:8e:49:e9:18":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.118/24
vlan_id: 10
filesystems:
"/config":
source: "/vmdata/emby_config"
type: lofs
options:
- nodevices
cleanup_images:
smartos.image_vacuum
.. note::
Keep in mind that when removing properties from vmconfig they will not get
removed from the vm's current configuration, except for nics, disk, tags, ...
they get removed via add_*, set_*, update_*, and remove_*. Properties must
be manually reset to their default value.
The same behavior as when using 'vmadm update'.
'''
from __future__ import absolute_import, print_function, unicode_literals

# Import Python libs
import json
import logging
import os

# Import Salt libs
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.files
import salt.utils.odict
import salt.utils.stringutils

# Import 3rd party libs
from salt.ext import six
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = 'smartos'
def __virtual__():
    '''
    Provides smartos state provided for SmartOS
    '''
    # both the vmadm and imgadm execution modules must be loadable
    if 'vmadm.create' not in __salt__ or 'imgadm.list' not in __salt__:
        return (
            False,
            '{0} state module can only be loaded on SmartOS compute nodes'.format(
                __virtualname__
            )
        )
    return True
def _split_docker_uuid(uuid):
'''
Split a smartos docker uuid into repo and tag
'''
if uuid:
uuid = uuid.split(':')
if len(uuid) == 2:
tag = uuid[1]
repo = uuid[0]
return repo, tag
return None, None
def _is_uuid(uuid):
'''
Check if uuid is a valid smartos uuid
Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8
'''
if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:
return True
return False
def _is_docker_uuid(uuid):
    '''
    Check if uuid is a valid smartos docker uuid

    Example plexinc/pms-docker:plexpass
    '''
    repo, tag = _split_docker_uuid(uuid)
    # valid when the value split into a (truthy) repo or tag
    return bool(repo or tag)
def _load_config():
    '''
    Load and parse /usbkey/config.

    Returns a dict mapping lower-cased property names to their (unquoted)
    string values; empty dict when the file does not exist.
    '''
    config = {}

    if os.path.isfile('/usbkey/config'):
        with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:
            for optval in config_file:
                optval = salt.utils.stringutils.to_unicode(optval)
                # skip comment lines
                if optval[0] == '#':
                    continue
                # skip anything that is not a key=value pair (incl. blank lines)
                if '=' not in optval:
                    continue
                optval = optval.split('=')
                # keys normalized to lowercase; surrounding quotes stripped.
                # NOTE(review): a value containing '=' is truncated at the
                # second '=' here -- presumably fine for /usbkey/config; verify.
                config[optval[0].lower()] = optval[1].strip().strip('"')
    log.debug('smartos.config - read /usbkey/config: %s', config)
    return config
def _write_config(config):
    '''
    Write the configuration dict to /usbkey/config.

    Properties are written sorted by name as ``key=value`` lines; values
    containing spaces are wrapped in double quotes. Returns True on
    success, False when the file could not be written (IOError).
    '''
    try:
        # NOTE(review): atomic_open presumably writes via a temp file and
        # renames into place, avoiding a half-written config -- confirm.
        with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:
            config_file.write("#\n# This file was generated by salt\n#\n")
            for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
                # quote values that contain whitespace (unless already quoted)
                if ' ' in six.text_type(config[prop]):
                    if not config[prop].startswith('"') or not config[prop].endswith('"'):
                        config[prop] = '"{0}"'.format(config[prop])
                config_file.write(
                    salt.utils.stringutils.to_str(
                        "{0}={1}\n".format(prop, config[prop])
                    )
                )
        log.debug('smartos.config - wrote /usbkey/config: %s', config)
    except IOError:
        return False

    return True
def _parse_vmconfig(config, instances):
    '''
    Parse vm_present vm config

    Flattens instance mappings (id -> config) into lists of dicts with the
    id stored under the instance's unique key property.
    '''
    if not isinstance(config, (salt.utils.odict.OrderedDict)):
        log.error('smartos.vm_present::parse_vmconfig - failed to parse')
        return None

    vmconfig = salt.utils.odict.OrderedDict()
    for prop, value in config.items():
        if prop not in instances:
            # plain property, copy through untouched
            vmconfig[prop] = value
            continue

        # instance property: must be a mapping of id -> instance config
        if not isinstance(value, (salt.utils.odict.OrderedDict)):
            continue

        vmconfig[prop] = []
        for instance_id, instance_config in value.items():
            # attach the unique id under the instance's key property
            instance_config[instances[prop]] = instance_id
            ## some property are lowercase
            if 'mac' in instance_config:
                instance_config['mac'] = instance_config['mac'].lower()
            vmconfig[prop].append(instance_config)

    return vmconfig
def _get_instance_changes(current, state):
    '''
    Return the modified properties between two instance configurations.

    current : dict
        currently active instance configuration
    state : dict
        instance configuration requested by the state

    Changes whose old or new value is the empty string are dropped.
    NOTE(review): presumably because absent properties are reported as
    empty strings and would otherwise show up as spurious changes.
    '''
    # compare configs once (original called compare_dicts twice and kept
    # two unused key-set locals)
    changed = salt.utils.data.compare_dicts(current, state)

    # drop changes that merely add or remove an empty value
    for change in list(changed):
        if changed[change]['old'] == "" or changed[change]['new'] == "":
            del changed[change]

    return changed
def _copy_lx_vars(vmconfig):
    '''
    Copy kernel_version and docker:* variables from the image manifest
    into the vm configuration (lx brand only; caller checks the brand).
    '''
    # NOTE: documentation on dockerinit: https://github.com/joyent/smartos-live/blob/master/src/dockerinit/README.md
    if 'image_uuid' in vmconfig:
        # NOTE: retrieve tags and type from image
        imgconfig = __salt__['imgadm.get'](vmconfig['image_uuid']).get('manifest', {})
        imgtype = imgconfig.get('type', 'zone-dataset')
        imgtags = imgconfig.get('tags', {})

        # NOTE: copy kernel_version (if not specified in vmconfig)
        if 'kernel_version' not in vmconfig and 'kernel_version' in imgtags:
            vmconfig['kernel_version'] = imgtags['kernel_version']

        # NOTE: copy docker vars
        if imgtype == 'docker':
            vmconfig['docker'] = True
            vmconfig['kernel_version'] = vmconfig.get('kernel_version', '4.3.0')
            if 'internal_metadata' not in vmconfig:
                vmconfig['internal_metadata'] = {}

            for var in imgtags.get('docker:config', {}):
                val = imgtags['docker:config'][var]
                var = 'docker:{0}'.format(var.lower())

                # NOTE: skip empty values
                if not val:
                    continue

                # NOTE: skip or merge user values
                if var == 'docker:env':
                    # user-specified env vars override the image's: drop the
                    # image entry for any NAME already set by the user, then
                    # append the user's NAME=VALUE
                    try:
                        val_config = json.loads(
                            vmconfig['internal_metadata'].get(var, "")
                        )
                    except ValueError as e:
                        # no (or invalid) user-specified env, nothing to merge
                        val_config = []

                    for config_env_var in val_config if isinstance(val_config, list) else json.loads(val_config):
                        config_env_var = config_env_var.split('=')
                        for img_env_var in val:
                            if img_env_var.startswith('{0}='.format(config_env_var[0])):
                                val.remove(img_env_var)
                        val.append('='.join(config_env_var))
                elif var in vmconfig['internal_metadata']:
                    # user value wins for all other docker:* vars
                    continue

                if isinstance(val, list):
                    # NOTE: string-encoded JSON arrays
                    vmconfig['internal_metadata'][var] = json.dumps(val)
                else:
                    vmconfig['internal_metadata'][var] = val

    return vmconfig
def config_present(name, value):
    '''
    Ensure configuration property is set to value in /usbkey/config

    name : string
        name of property
    value : string
        value of property
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    # read the current configuration
    config = _load_config()

    # normalize bool and falsy values to their string representation
    if isinstance(value, (bool)):
        value = 'true' if value else 'false'
    if not value:
        value = ""

    if name not in config:
        # property missing, add it
        ret['result'] = True
        ret['comment'] = 'added property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value
    elif six.text_type(config[name]) == six.text_type(value):
        # value already correct
        ret['result'] = True
        ret['comment'] = 'property {0} already has value "{1}"'.format(name, value)
    else:
        # property present with a different value, update it
        ret['result'] = True
        ret['comment'] = 'updated property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value

    # persist unless running in test mode
    if ret['changes'] and not __opts__['test']:
        ret['result'] = _write_config(config)

    return ret
def config_absent(name):
    '''
    Ensure configuration property is absent in /usbkey/config

    name : string
        name of property
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    # read the current configuration
    config = _load_config()

    if name in config:
        # remove the property
        del config[name]
        ret['result'] = True
        ret['comment'] = 'property {0} deleted'.format(name)
        ret['changes'][name] = None
    else:
        # already absent, nothing to do
        ret['result'] = True
        ret['comment'] = 'property {0} is absent'.format(name)

    # write the configuration back unless in test mode
    if ret['changes'] and not __opts__['test']:
        ret['result'] = _write_config(config)

    return ret
def source_present(name, source_type='imgapi'):
    '''
    Ensure an image source is present on the computenode

    name : string
        source url
    source_type : string
        source type (imgapi or docker)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name in __salt__['imgadm.sources']():
        # source is present
        ret['result'] = True
        ret['comment'] = 'image source {0} is present'.format(name)
    else:
        # add new source
        if __opts__['test']:
            # dry-run: report success without touching imgadm
            res = {}
            ret['result'] = True
        else:
            res = __salt__['imgadm.source_add'](name, source_type)
            ret['result'] = (name in res)

        if ret['result']:
            ret['comment'] = 'image source {0} added'.format(name)
            ret['changes'][name] = 'added'
        else:
            ret['comment'] = 'image source {0} not added'.format(name)
            if 'Error' in res:
                # surface the error message returned by imgadm
                ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])

    return ret
def source_absent(name):
    '''
    Ensure an image source is absent on the computenode

    name : string
        source url
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name not in __salt__['imgadm.sources']():
        # source is absent
        ret['result'] = True
        ret['comment'] = 'image source {0} is absent'.format(name)
    else:
        # remove source
        if __opts__['test']:
            # dry-run: report success without touching imgadm
            res = {}
            ret['result'] = True
        else:
            res = __salt__['imgadm.source_delete'](name)
            ret['result'] = (name not in res)

        if ret['result']:
            ret['comment'] = 'image source {0} deleted'.format(name)
            ret['changes'][name] = 'deleted'
        else:
            ret['comment'] = 'image source {0} not deleted'.format(name)
            if 'Error' in res:
                # surface the error message returned by imgadm
                ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])

    # BUGFIX: this copy of the function fell through without returning,
    # so the state system received None instead of the ret dict
    return ret
def image_absent(name):
    '''
    Ensure image is absent on the computenode

    name : string
        uuid of image

    .. note::
        computenode.image_absent will only remove the image if it is not used
        by a vm.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # resolve the image uuid; name may be a plain uuid or a docker repo:tag
    uuid = None
    if _is_uuid(name):
        uuid = name
    if _is_docker_uuid(name):
        uuid = __salt__['imgadm.docker_to_uuid'](name)

    if not uuid or uuid not in __salt__['imgadm.list']():
        # image not imported
        ret['result'] = True
        ret['comment'] = 'image {0} is absent'.format(name)
    else:
        # check if image in use by vm
        if uuid in __salt__['vmadm.list'](order='image_uuid'):
            ret['result'] = False
            ret['comment'] = 'image {0} currently in use by a vm'.format(name)
        else:
            # delete image
            if __opts__['test']:
                ret['result'] = True
            else:
                image = __salt__['imgadm.get'](uuid)
                image_count = 0
                if image['manifest']['name'] == 'docker-layer':
                    # NOTE: docker images are made of multiple layers, loop over them
                    while image:
                        image_count += 1
                        __salt__['imgadm.delete'](image['manifest']['uuid'])
                        if 'origin' in image['manifest']:
                            # walk up the layer chain via the origin uuid
                            image = __salt__['imgadm.get'](image['manifest']['origin'])
                        else:
                            image = None
                else:
                    # NOTE: normal images can just be deleted
                    __salt__['imgadm.delete'](uuid)
                ret['result'] = uuid not in __salt__['imgadm.list']()

            # NOTE(review): image_count is only bound in the non-test branch
            # above -- in test mode this would raise NameError; confirm
            # whether test mode is expected to reach this point
            if image_count:
                ret['comment'] = 'image {0} and {1} children deleted'.format(name, image_count)
            else:
                ret['comment'] = 'image {0} deleted'.format(name)
            ret['changes'][name] = None

    return ret
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present

    .. warning::
        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # list of images to keep
    images = []

    # retrieve image_present state data for host
    for state in __salt__['state.show_lowstate']():
        # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue
        # skip if not from this state module
        if state['state'] != __virtualname__:
            continue
        # skip if not image_present
        if state['fun'] not in ['image_present']:
            continue
        # keep images installed via image_present
        if 'name' in state:
            if _is_uuid(state['name']):
                images.append(state['name'])
            elif _is_docker_uuid(state['name']):
                # docker repo:tag must be resolved to an imported uuid first
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                images.append(state['name'])

    # retrieve images in use by vms
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in images:
            images.append(image_uuid)

    # purge unused images
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in images:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':
            # NOTE: docker images are made of multiple layers, loop over them
            while image:
                image_uuid = image['manifest']['uuid']
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                if 'origin' in image['manifest']:
                    # walk up the layer chain via the origin uuid
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:
            # NOTE: normal images can just be deleted
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'

    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'

    return ret
def vm_present(name, vmconfig, config=None):
    '''
    Ensure vm is present on the computenode

    name : string
        hostname of vm
    vmconfig : dict
        options to set for the vm
    config : dict
        fine grain control over vm_present

    .. note::
        The following configuration properties can be toggled in the config parameter.
          - kvm_reboot (true) - reboots of kvm zones if needed for a config update
          - auto_import (false) - automatic importing of missing images
          - auto_lx_vars (true) - copy kernel_version and docker:* variables from image
          - reprovision (false) - reprovision on image_uuid changes
          - enforce_tags (true) - false = add tags only, true = add, update, and remove tags
          - enforce_routes (true) - false = add tags only, true = add, update, and remove routes
          - enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
          - enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata

    .. note::
        State ID is used as hostname. Hostnames must be unique.

    .. note::
        If hostname is provided in vmconfig this will take precedence over the State ID.
        This allows multiple states to be applied to the same vm.

    .. note::
        The following instances should have a unique ID.
          - nic : mac
          - filesystem: target
          - disk : path or diskN for zvols

        e.g. disk0 will be the first disk added, disk1 the 2nd,...

    .. versionchanged:: 2019.2.0
        Added support for docker image uuids, added auto_lx_vars configuration, documented some missing configuration options.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # config defaults
    state_config = config if config else {}
    config = {
        'kvm_reboot': True,
        'auto_import': False,
        'auto_lx_vars': True,
        'reprovision': False,
        'enforce_tags': True,
        'enforce_routes': True,
        'enforce_internal_metadata': True,
        'enforce_customer_metadata': True,
    }
    config.update(state_config)
    log.debug('smartos.vm_present::%s::config - %s', name, config)

    # map special vmconfig parameters
    #   collections have set/remove handlers
    #   instances have add/update/remove handlers and a unique id
    vmconfig_type = {
        'collection': [
            'tags',
            'customer_metadata',
            'internal_metadata',
            'routes'
        ],
        'instance': {
            'nics': 'mac',
            'disks': 'path',
            'filesystems': 'target'
        },
        'create_only': [
            'filesystems'
        ]
    }
    # internal_metadata keys managed by dockerinit that must survive updates
    vmconfig_docker_keep = [
        'docker:id',
        'docker:restartcount',
    ]
    # internal_metadata keys that vmadm expects as string-encoded JSON arrays
    vmconfig_docker_array = [
        'docker:env',
        'docker:cmd',
        'docker:entrypoint',
    ]

    # parse vmconfig
    vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
    log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)

    # set hostname if needed
    if 'hostname' not in vmconfig:
        vmconfig['hostname'] = name

    # prepare image_uuid
    if 'image_uuid' in vmconfig:
        # NOTE: lookup uuid from docker uuid (normal uuid's are passed through unmodified)
        #       we must do this again if we end up importing a missing image later!
        docker_uuid = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
        vmconfig['image_uuid'] = docker_uuid if docker_uuid else vmconfig['image_uuid']

        # NOTE: import image (if missing and allowed)
        if vmconfig['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](vmconfig['image_uuid'])
                    vmconfig['image_uuid'] = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
                    if vmconfig['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])

    # prepare disk.*.image_uuid
    for disk in vmconfig['disks'] if 'disks' in vmconfig else []:
        if 'image_uuid' in disk and disk['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](disk['image_uuid'])
                    if disk['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(disk['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(disk['image_uuid'])

    # docker json-array handling: lists must be stored as JSON strings
    if 'internal_metadata' in vmconfig:
        for var in vmconfig_docker_array:
            if var not in vmconfig['internal_metadata']:
                continue
            if isinstance(vmconfig['internal_metadata'][var], list):
                vmconfig['internal_metadata'][var] = json.dumps(
                    vmconfig['internal_metadata'][var]
                )

    # copy lx variables
    if vmconfig['brand'] == 'lx' and config['auto_lx_vars']:
        # NOTE: we can only copy the lx vars after the image has been imported
        vmconfig = _copy_lx_vars(vmconfig)

    # quick abort if things look wrong
    # NOTE: use explicit check for false, otherwise None also matches!
    if ret['result'] is False:
        return ret

    # check if vm exists
    if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
        # update vm
        ret['result'] = True

        # expand vmconfig: desired state vs current config, plus a changeset
        vmconfig = {
            'state': vmconfig,
            'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
            'changed': {},
            'reprovision_uuid': None
        }

        # prepare reprovision: remember the requested image, compare
        # against the current one so image_uuid never shows as a change
        if 'image_uuid' in vmconfig['state']:
            vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
            vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']

        # disks need some special care: match requested disk paths against
        # the full zvol paths vmadm reports
        if 'disks' in vmconfig['state']:
            new_disks = []
            for disk in vmconfig['state']['disks']:
                path = False
                if 'disks' in vmconfig['current']:
                    for cdisk in vmconfig['current']['disks']:
                        if cdisk['path'].endswith(disk['path']):
                            path = cdisk['path']
                            break
                if not path:
                    del disk['path']
                else:
                    disk['path'] = path
                new_disks.append(disk)
            vmconfig['state']['disks'] = new_disks

        # process properties
        for prop in vmconfig['state']:
            # skip special vmconfig_types
            if prop in vmconfig_type['instance'] or \
                prop in vmconfig_type['collection'] or \
                prop in vmconfig_type['create_only']:
                continue

            # skip unchanged properties
            if prop in vmconfig['current']:
                if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
                    if vmconfig['current'][prop] == vmconfig['state'][prop]:
                        continue
                else:
                    # scalar values are compared as strings so e.g. 5 == '5'
                    if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
                        continue

            # add property to changeset
            vmconfig['changed'][prop] = vmconfig['state'][prop]

        # process collections
        for collection in vmconfig_type['collection']:
            # skip create only collections
            if collection in vmconfig_type['create_only']:
                continue

            # enforcement: when off, only additions are made, never
            # updates or removals of existing entries
            enforce = config['enforce_{0}'.format(collection)]
            log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)

            # dockerinit handling
            if collection == 'internal_metadata' and vmconfig['state'].get('docker', False):
                if 'internal_metadata' not in vmconfig['state']:
                    vmconfig['state']['internal_metadata'] = {}

                # preserve some docker specific metadata (added and needed by dockerinit)
                for var in vmconfig_docker_keep:
                    val = vmconfig['current'].get(collection, {}).get(var, None)
                    if val is not None:
                        vmconfig['state']['internal_metadata'][var] = val

            # process add and update for collection
            if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                for prop in vmconfig['state'][collection]:
                    # skip unchanged properties
                    if prop in vmconfig['current'][collection] and \
                        vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
                        continue

                    # skip update if not enforcing
                    if not enforce and prop in vmconfig['current'][collection]:
                        continue

                    # create set_ dict
                    if 'set_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['set_{0}'.format(collection)] = {}

                    # add property to changeset
                    vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]

            # process remove for collection
            if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
                for prop in vmconfig['current'][collection]:
                    # skip if exists in state
                    if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                        if prop in vmconfig['state'][collection]:
                            continue

                    # create remove_ array
                    if 'remove_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['remove_{0}'.format(collection)] = []

                    # remove property
                    vmconfig['changed']['remove_{0}'.format(collection)].append(prop)

        # process instances
        for instance in vmconfig_type['instance']:
            # skip create only instances
            if instance in vmconfig_type['create_only']:
                continue

            # add or update instances
            if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                for state_cfg in vmconfig['state'][instance]:
                    add_instance = True

                    # find instance with matching ids
                    for current_cfg in vmconfig['current'][instance]:
                        if vmconfig_type['instance'][instance] not in state_cfg:
                            continue

                        if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                            # ids have matched, disable add instance
                            add_instance = False

                            changed = _get_instance_changes(current_cfg, state_cfg)
                            update_cfg = {}

                            # handle changes
                            for prop in changed:
                                update_cfg[prop] = state_cfg[prop]

                            # handle new properties
                            for prop in state_cfg:
                                # skip empty props like ips, options,..
                                if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
                                    continue

                                if prop not in current_cfg:
                                    update_cfg[prop] = state_cfg[prop]

                            # update instance
                            if update_cfg:
                                # create update_ array
                                if 'update_{0}'.format(instance) not in vmconfig['changed']:
                                    vmconfig['changed']['update_{0}'.format(instance)] = []

                                update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
                                vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)

                    if add_instance:
                        # create add_ array
                        if 'add_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['add_{0}'.format(instance)] = []

                        # add instance
                        vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)

            # remove instances
            if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
                for current_cfg in vmconfig['current'][instance]:
                    remove_instance = True

                    # find instance with matching ids
                    if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                        for state_cfg in vmconfig['state'][instance]:
                            if vmconfig_type['instance'][instance] not in state_cfg:
                                continue

                            if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                                # keep instance if matched
                                remove_instance = False

                    if remove_instance:
                        # create remove_ array
                        if 'remove_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['remove_{0}'.format(instance)] = []

                        # remove instance
                        vmconfig['changed']['remove_{0}'.format(instance)].append(
                            current_cfg[vmconfig_type['instance'][instance]]
                        )

        # update vm if we have pending changes
        kvm_needs_start = False
        if not __opts__['test'] and vmconfig['changed']:
            # stop kvm if disk updates and kvm_reboot
            if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
                if 'add_disks' in vmconfig['changed'] or \
                    'update_disks' in vmconfig['changed'] or \
                    'remove_disks' in vmconfig['changed']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        kvm_needs_start = True
                        __salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')

            # do update
            rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['result'] = False
                ret['comment'] = "{0}".format(rret['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']

        if ret['result']:
            if __opts__['test']:
                # in test mode report the computed changeset without applying
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']

            if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
                ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
                if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        __salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
                    if kvm_needs_start:
                        __salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
            else:
                ret['changes'] = {}
                ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])

            # reprovision (if required and allowed)
            if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
                if config['reprovision']:
                    rret = __salt__['vmadm.reprovision'](
                        vm=vmconfig['state']['hostname'],
                        key='hostname',
                        image=vmconfig['reprovision_uuid']
                    )
                    if not isinstance(rret, (bool)) and 'Error' in rret:
                        ret['result'] = False
                        ret['comment'] = 'vm {0} updated, reprovision failed'.format(
                            vmconfig['state']['hostname']
                        )
                    else:
                        ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
                        if vmconfig['state']['hostname'] not in ret['changes']:
                            ret['changes'][vmconfig['state']['hostname']] = {}
                        ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
                else:
                    log.warning('smartos.vm_present::%s::reprovision - '
                                'image_uuid in state does not match current, '
                                'reprovision not allowed',
                                name)
        else:
            ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['comment'] = "{0}".format(rret['Error'])
    else:
        # check required image installed
        ret['result'] = True

        # disks need some special care: vmadm derives zvol paths on create,
        # so requested paths are dropped
        if 'disks' in vmconfig:
            new_disks = []
            for disk in vmconfig['disks']:
                if 'path' in disk:
                    del disk['path']
                new_disks.append(disk)
            vmconfig['disks'] = new_disks

        # create vm
        if ret['result']:
            uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True
            if not isinstance(uuid, (bool)) and 'Error' in uuid:
                ret['result'] = False
                ret['comment'] = "{0}".format(uuid['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['hostname']] = vmconfig
                ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])

    return ret
def vm_absent(name, archive=False):
    '''
    Ensure vm is absent on the computenode

    name : string
        hostname of vm
    archive : boolean
        toggle archiving of vm on removal

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {
        'name': name,
        'changes': {},
        'result': None,
        'comment': '',
    }

    if name not in __salt__['vmadm.list'](order='hostname'):
        # vm is already gone, nothing to do
        ret['result'] = True
        ret['comment'] = 'vm {0} is absent'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = True
    else:
        # flag the vm for archiving first if requested
        if archive:
            __salt__['vmadm.update'](vm=name, key='hostname', archive_on_delete=True)
        ret['result'] = __salt__['vmadm.delete'](name, key='hostname')

    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to delete vm {0}'.format(name)
    else:
        ret['comment'] = 'vm {0} deleted'.format(name)
        ret['changes'][name] = None

    return ret
def vm_running(name):
    '''
    Ensure vm is in the running state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {
        'name': name,
        'changes': {},
        'result': None,
        'comment': '',
    }

    if name in __salt__['vmadm.list'](order='hostname', search='state=running'):
        # already in the desired state
        ret['result'] = True
        ret['comment'] = 'vm {0} already running'.format(name)
        return ret

    # start the vm (no-op in test mode)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.start'](name, key='hostname')

    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to start {0}'.format(name)
    else:
        ret['changes'][name] = 'running'
        ret['comment'] = 'vm {0} started'.format(name)

    return ret
def vm_stopped(name):
    '''
    Ensure vm is in the stopped state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {
        'name': name,
        'changes': {},
        'result': None,
        'comment': '',
    }

    if name in __salt__['vmadm.list'](order='hostname', search='state=stopped'):
        # already in the desired state
        ret['result'] = True
        ret['comment'] = 'vm {0} already stopped'.format(name)
        return ret

    # stop the vm (no-op in test mode)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.stop'](name, key='hostname')

    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to stop {0}'.format(name)
    else:
        ret['changes'][name] = 'stopped'
        ret['comment'] = 'vm {0} stopped'.format(name)

    return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
saltstack/salt
|
salt/states/smartos.py
|
image_absent
|
python
|
def image_absent(name):
'''
Ensure image is absent on the computenode
name : string
uuid of image
.. note::
computenode.image_absent will only remove the image if it is not used
by a vm.
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
uuid = None
if _is_uuid(name):
uuid = name
if _is_docker_uuid(name):
uuid = __salt__['imgadm.docker_to_uuid'](name)
if not uuid or uuid not in __salt__['imgadm.list']():
# image not imported
ret['result'] = True
ret['comment'] = 'image {0} is absent'.format(name)
else:
# check if image in use by vm
if uuid in __salt__['vmadm.list'](order='image_uuid'):
ret['result'] = False
ret['comment'] = 'image {0} currently in use by a vm'.format(name)
else:
# delete image
if __opts__['test']:
ret['result'] = True
else:
image = __salt__['imgadm.get'](uuid)
image_count = 0
if image['manifest']['name'] == 'docker-layer':
# NOTE: docker images are made of multiple layers, loop over them
while image:
image_count += 1
__salt__['imgadm.delete'](image['manifest']['uuid'])
if 'origin' in image['manifest']:
image = __salt__['imgadm.get'](image['manifest']['origin'])
else:
image = None
else:
# NOTE: normal images can just be delete
__salt__['imgadm.delete'](uuid)
ret['result'] = uuid not in __salt__['imgadm.list']()
if image_count:
ret['comment'] = 'image {0} and {1} children deleted'.format(name, image_count)
else:
ret['comment'] = 'image {0} deleted'.format(name)
ret['changes'][name] = None
return ret
|
Ensure image is absent on the computenode
name : string
uuid of image
.. note::
computenode.image_absent will only remove the image if it is not used
by a vm.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/smartos.py#L545-L604
|
[
"def _is_uuid(uuid):\n '''\n Check if uuid is a valid smartos uuid\n\n Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8\n '''\n if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:\n return True\n return False\n",
"def _is_docker_uuid(uuid):\n '''\n Check if uuid is a valid smartos docker uuid\n\n Example plexinc/pms-docker:plexpass\n '''\n repo, tag = _split_docker_uuid(uuid)\n return not (not repo and not tag)\n"
] |
# -*- coding: utf-8 -*-
'''
Management of SmartOS Standalone Compute Nodes
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: vmadm, imgadm
:platform: smartos
.. versionadded:: 2016.3.0
.. code-block:: yaml
vmtest.example.org:
smartos.vm_present:
- config:
reprovision: true
- vmconfig:
image_uuid: c02a2044-c1bd-11e4-bd8c-dfc1db8b0182
brand: joyent
alias: vmtest
quota: 5
max_physical_memory: 512
tags:
label: 'test vm'
owner: 'sjorge'
nics:
"82:1b:8e:49:e9:12":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.123/16
- 192.168.2.123/24
vlan_id: 10
"82:1b:8e:49:e9:13":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
filesystems:
"/bigdata":
source: "/bulk/data"
type: lofs
options:
- ro
- nodevices
kvmtest.example.org:
smartos.vm_present:
- vmconfig:
brand: kvm
alias: kvmtest
cpu_type: host
ram: 512
vnc_port: 9
tags:
label: 'test kvm'
owner: 'sjorge'
disks:
disk0:
size: 2048
model: virtio
compression: lz4
boot: true
nics:
"82:1b:8e:49:e9:15":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
docker.example.org:
smartos.vm_present:
- config:
auto_import: true
reprovision: true
- vmconfig:
image_uuid: emby/embyserver:latest
brand: lx
alias: mydockervm
quota: 5
max_physical_memory: 1024
tags:
label: 'my emby docker'
owner: 'sjorge'
resolvers:
- 172.16.1.1
nics:
"82:1b:8e:49:e9:18":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.118/24
vlan_id: 10
filesystems:
"/config:
source: "/vmdata/emby_config"
type: lofs
options:
- nodevices
cleanup_images:
smartos.image_vacuum
.. note::
Keep in mind that when removing properties from vmconfig they will not get
removed from the vm's current configuration, except for nics, disk, tags, ...
they get removed via add_*, set_*, update_*, and remove_*. Properties must
be manually reset to their default value.
The same behavior as when using 'vmadm update'.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import logging
import json
import os
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.files
# Import 3rd party libs
from salt.ext import six
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = 'smartos'
def __virtual__():
    '''
    Provides smartos state provided for SmartOS
    '''
    # only load when the vmadm and imgadm execution modules are available
    if 'vmadm.create' not in __salt__ or 'imgadm.list' not in __salt__:
        return (
            False,
            '{0} state module can only be loaded on SmartOS compute nodes'.format(
                __virtualname__
            )
        )
    return True
def _split_docker_uuid(uuid):
'''
Split a smartos docker uuid into repo and tag
'''
if uuid:
uuid = uuid.split(':')
if len(uuid) == 2:
tag = uuid[1]
repo = uuid[0]
return repo, tag
return None, None
def _is_uuid(uuid):
'''
Check if uuid is a valid smartos uuid
Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8
'''
if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:
return True
return False
def _is_docker_uuid(uuid):
    '''
    Check if uuid is a valid smartos docker uuid

    Example plexinc/pms-docker:plexpass
    '''
    # valid when splitting yielded a non-empty repo or tag
    repo, tag = _split_docker_uuid(uuid)
    return bool(repo) or bool(tag)
def _load_config():
    '''
    Loads and parses /usbkey/config

    Returns a dict mapping lowercased property names to their
    (unquoted) string values; empty when the file does not exist.
    '''
    config = {}

    if os.path.isfile('/usbkey/config'):
        with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:
            for optval in config_file:
                optval = salt.utils.stringutils.to_unicode(optval)
                # skip comment lines
                if optval[0] == '#':
                    continue
                # skip lines that are not key=value pairs
                if '=' not in optval:
                    continue
                # BUGFIX: split on the first '=' only, so values that
                # themselves contain '=' are no longer truncated
                optval = optval.split('=', 1)
                config[optval[0].lower()] = optval[1].strip().strip('"')
    log.debug('smartos.config - read /usbkey/config: %s', config)
    return config
def _write_config(config):
    '''
    writes /usbkey/config

    Returns True on success, False when the file could not be written.
    '''
    try:
        # atomic_open writes to a temp file and renames into place, so a
        # failed write cannot leave a truncated /usbkey/config behind
        with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:
            config_file.write("#\n# This file was generated by salt\n#\n")
            # NOTE(review): relies on salt.utils.odict / salt.utils.stringutils
            # being reachable through the salt.utils imports above -- confirm
            # they are explicitly imported somewhere in this file
            for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
                # values containing whitespace must be quoted
                if ' ' in six.text_type(config[prop]):
                    if not config[prop].startswith('"') or not config[prop].endswith('"'):
                        config[prop] = '"{0}"'.format(config[prop])
                config_file.write(
                    salt.utils.stringutils.to_str(
                        "{0}={1}\n".format(prop, config[prop])
                    )
                )
        log.debug('smartos.config - wrote /usbkey/config: %s', config)
    except IOError:
        return False
    return True
def _parse_vmconfig(config, instances):
    '''
    Parse vm_present vm config

    Flattens the nested OrderedDict from the state file into the shape
    vmadm expects: each entry of an instance property (nics, disks,
    filesystems) becomes a dict carrying its unique id as a value.
    Returns None when config is not an OrderedDict.
    '''
    vmconfig = None

    if isinstance(config, (salt.utils.odict.OrderedDict)):
        vmconfig = salt.utils.odict.OrderedDict()
        for prop in config:
            if prop not in instances:
                # plain property, copy as-is
                vmconfig[prop] = config[prop]
            else:
                if not isinstance(config[prop], (salt.utils.odict.OrderedDict)):
                    continue
                vmconfig[prop] = []
                for instance in config[prop]:
                    instance_config = config[prop][instance]
                    # the mapping key is the instance's unique id
                    # (e.g. the mac for nics) -- store it in the config
                    instance_config[instances[prop]] = instance
                    ## some property are lowercase
                    if 'mac' in instance_config:
                        instance_config['mac'] = instance_config['mac'].lower()
                    vmconfig[prop].append(instance_config)
    else:
        log.error('smartos.vm_present::parse_vmconfig - failed to parse')

    return vmconfig
def _get_instance_changes(current, state):
    '''
    get modified properties

    Returns the compare_dicts changeset between current and state,
    minus entries where either side is the empty string (properties
    merely missing on one side are not real changes).
    '''
    # compare configs (the unused current_keys/state_keys locals and the
    # second compare_dicts call were dropped)
    changed = salt.utils.data.compare_dicts(current, state)

    # iterate over a snapshot of the keys so entries can be deleted
    # from changed while looping
    for change in list(changed):
        if changed[change]['old'] == "" or changed[change]['new'] == "":
            del changed[change]

    return changed
def _copy_lx_vars(vmconfig):
    '''
    Copy kernel_version and docker:* variables from the image manifest
    into vmconfig and return the updated vmconfig.
    '''
    # NOTE: documentation on dockerinit: https://github.com/joyent/smartos-live/blob/master/src/dockerinit/README.md
    if 'image_uuid' in vmconfig:
        # NOTE: retrieve tags and type from image
        imgconfig = __salt__['imgadm.get'](vmconfig['image_uuid']).get('manifest', {})
        imgtype = imgconfig.get('type', 'zone-dataset')
        imgtags = imgconfig.get('tags', {})

        # NOTE: copy kernel_version (if not specified in vmconfig)
        if 'kernel_version' not in vmconfig and 'kernel_version' in imgtags:
            vmconfig['kernel_version'] = imgtags['kernel_version']

        # NOTE: copy docker vars
        if imgtype == 'docker':
            vmconfig['docker'] = True
            vmconfig['kernel_version'] = vmconfig.get('kernel_version', '4.3.0')
            if 'internal_metadata' not in vmconfig:
                vmconfig['internal_metadata'] = {}

            for var in imgtags.get('docker:config', {}):
                val = imgtags['docker:config'][var]
                var = 'docker:{0}'.format(var.lower())

                # NOTE: skip empty values
                if not val:
                    continue

                # NOTE: skip or merge user values
                if var == 'docker:env':
                    try:
                        val_config = json.loads(
                            vmconfig['internal_metadata'].get(var, "")
                        )
                    except ValueError:
                        # no (valid) user supplied docker:env metadata
                        val_config = []

                    for config_env_var in val_config if isinstance(val_config, list) else json.loads(val_config):
                        config_env_var = config_env_var.split('=')
                        # BUGFIX: iterate over a copy -- removing from val
                        # while iterating it directly could skip entries
                        for img_env_var in list(val):
                            if img_env_var.startswith('{0}='.format(config_env_var[0])):
                                # user value overrides the image default
                                val.remove(img_env_var)
                                val.append('='.join(config_env_var))
                elif var in vmconfig['internal_metadata']:
                    continue

                if isinstance(val, list):
                    # NOTE: string-encoded JSON arrays
                    vmconfig['internal_metadata'][var] = json.dumps(val)
                else:
                    vmconfig['internal_metadata'][var] = val

    return vmconfig
def config_present(name, value):
    '''
    Ensure configuration property is set to value in /usbkey/config

    name : string
        name of property
    value : string
        value of property
    '''
    name = name.lower()
    ret = {
        'name': name,
        'changes': {},
        'result': None,
        'comment': '',
    }

    config = _load_config()

    # normalize the requested value the same way it is stored:
    # booleans become 'true'/'false', other falsy values become ''
    if isinstance(value, bool):
        value = 'true' if value else 'false'
    elif not value:
        value = ""

    if name not in config:
        # brand new property
        ret['result'] = True
        ret['comment'] = 'added property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value
    elif six.text_type(config[name]) == six.text_type(value):
        # nothing to do
        ret['result'] = True
        ret['comment'] = 'property {0} already has value "{1}"'.format(name, value)
    else:
        # existing property with a different value
        ret['result'] = True
        ret['comment'] = 'updated property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value

    # write out the configuration unless running in test mode
    if ret['changes'] and not __opts__['test']:
        ret['result'] = _write_config(config)

    return ret
def config_absent(name):
    '''
    Ensure configuration property is absent in /usbkey/config

    name : string
        name of property
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    # parse the current on-disk configuration
    config = _load_config()
    if name not in config:
        # nothing to do, property already missing
        ret['result'] = True
        ret['comment'] = 'property {0} is absent'.format(name)
    else:
        # drop the property from the parsed configuration
        ret['result'] = True
        ret['comment'] = 'property {0} deleted'.format(name)
        ret['changes'][name] = None
        config.pop(name)
    # persist the new configuration unless running with test=True
    if ret['changes'] and not __opts__['test']:
        ret['result'] = _write_config(config)
    return ret
def source_present(name, source_type='imgapi'):
    '''
    Ensure an image source is present on the computenode

    name : string
        source url
    source_type : string
        source type (imgapi or docker)
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    if name in __salt__['imgadm.sources']():
        # source already configured, nothing to do
        ret['result'] = True
        ret['comment'] = 'image source {0} is present'.format(name)
        return ret
    # source missing: add it (no-op under test=True)
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_add'](name, source_type)
        ret['result'] = (name in res)
    if ret['result']:
        ret['comment'] = 'image source {0} added'.format(name)
        ret['changes'][name] = 'added'
    else:
        ret['comment'] = 'image source {0} not added'.format(name)
        if 'Error' in res:
            # surface the error reported by imgadm
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
    return ret
def source_absent(name):
    '''
    Ensure an image source is absent on the computenode

    name : string
        source url
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    if name not in __salt__['imgadm.sources']():
        # source already gone, nothing to do
        ret['result'] = True
        ret['comment'] = 'image source {0} is absent'.format(name)
        return ret
    # source exists: remove it (no-op under test=True)
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_delete'](name)
        ret['result'] = (name not in res)
    if ret['result']:
        ret['comment'] = 'image source {0} deleted'.format(name)
        ret['changes'][name] = 'deleted'
    else:
        ret['comment'] = 'image source {0} not deleted'.format(name)
        if 'Error' in res:
            # surface the error reported by imgadm
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
    return ret
def image_present(name):
    '''
    Ensure image is present on the computenode

    name : string
        uuid of image (a plain uuid or a docker repo:tag)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if _is_docker_uuid(name) and __salt__['imgadm.docker_to_uuid'](name):
        # docker image was already imported (repo:tag resolves to a uuid)
        ret['result'] = True
        ret['comment'] = 'image {0} ({1}) is present'.format(
            name,
            __salt__['imgadm.docker_to_uuid'](name),
        )
    elif name in __salt__['imgadm.list']():
        # image was already imported
        ret['result'] = True
        ret['comment'] = 'image {0} is present'.format(name)
    else:
        # image missing: try to import it
        if _is_docker_uuid(name):
            # NOTE: we cannot query available docker images
            available_images = [name]
        else:
            available_images = __salt__['imgadm.avail']()
        if name in available_images:
            if __opts__['test']:
                ret['result'] = True
                # fabricate an import result so the checks below succeed
                res = {}
                if _is_docker_uuid(name):
                    res['00000000-0000-0000-0000-000000000000'] = name
                else:
                    res[name] = available_images[name]
            else:
                res = __salt__['imgadm.import'](name)
            # success criteria differ per uuid flavor
            if _is_uuid(name):
                ret['result'] = (name in res)
            elif _is_docker_uuid(name):
                ret['result'] = __salt__['imgadm.docker_to_uuid'](name) is not None
            if ret['result']:
                ret['comment'] = 'image {0} imported'.format(name)
                ret['changes'] = res
            else:
                ret['comment'] = 'image {0} was unable to be imported'.format(name)
        else:
            ret['result'] = False
            # fixed grammar: previously read 'does not exists'
            ret['comment'] = 'image {0} does not exist'.format(name)
    return ret
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present

    .. warning::
        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # list of images to keep
    images = []
    # retrieve image_present state data for host
    for state in __salt__['state.show_lowstate']():
        # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue
        # skip if not from this state module
        if state['state'] != __virtualname__:
            continue
        # skip if not image_present
        if state['fun'] not in ['image_present']:
            continue
        # keep images installed via image_present
        if 'name' in state:
            if _is_uuid(state['name']):
                images.append(state['name'])
            elif _is_docker_uuid(state['name']):
                # resolve docker repo:tag to the imported image uuid
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                images.append(state['name'])
    # retrieve images in use by vms
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in images:
            images.append(image_uuid)
    # purge unused images
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in images:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':
            # NOTE: docker images are made of multiple layers, loop over them
            while image:
                image_uuid = image['manifest']['uuid']
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                # walk up to the parent layer, if any
                if 'origin' in image['manifest']:
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:
            # NOTE: normal images can just be deleted
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'
    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'
    return ret
def vm_present(name, vmconfig, config=None):
    '''
    Ensure vm is present on the computenode

    name : string
        hostname of vm
    vmconfig : dict
        options to set for the vm
    config : dict
        fine grain control over vm_present

    .. note::
        The following configuration properties can be toggled in the config parameter.
          - kvm_reboot (true) - reboots of kvm zones if needed for a config update
          - auto_import (false) - automatic importing of missing images
          - auto_lx_vars (true) - copy kernel_version and docker:* variables from image
          - reprovision (false) - reprovision on image_uuid changes
          - enforce_tags (true) - false = add tags only, true = add, update, and remove tags
          - enforce_routes (true) - false = add tags only, true = add, update, and remove routes
          - enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
          - enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata

    .. note::
        State ID is used as hostname. Hostnames must be unique.

    .. note::
        If hostname is provided in vmconfig this will take precedence over the State ID.
        This allows multiple states to be applied to the same vm.

    .. note::
        The following instances should have a unique ID.
          - nic : mac
          - filesystem: target
          - disk : path or diskN for zvols

        e.g. disk0 will be the first disk added, disk1 the 2nd,...

    .. versionchanged:: 2019.2.0
        Added support for docker image uuids, added auto_lx_vars configuration, documented some missing configuration options.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # config defaults (overridden by the user-supplied config dict)
    state_config = config if config else {}
    config = {
        'kvm_reboot': True,
        'auto_import': False,
        'auto_lx_vars': True,
        'reprovision': False,
        'enforce_tags': True,
        'enforce_routes': True,
        'enforce_internal_metadata': True,
        'enforce_customer_metadata': True,
    }
    config.update(state_config)
    log.debug('smartos.vm_present::%s::config - %s', name, config)
    # map special vmconfig parameters
    #  collections have set/remove handlers
    #  instances have add/update/remove handlers and a unique id
    vmconfig_type = {
        'collection': [
            'tags',
            'customer_metadata',
            'internal_metadata',
            'routes'
        ],
        'instance': {
            'nics': 'mac',
            'disks': 'path',
            'filesystems': 'target'
        },
        'create_only': [
            'filesystems'
        ]
    }
    # internal_metadata keys preserved from the running vm (owned by dockerinit)
    vmconfig_docker_keep = [
        'docker:id',
        'docker:restartcount',
    ]
    # internal_metadata keys that must be stored as string-encoded JSON arrays
    vmconfig_docker_array = [
        'docker:env',
        'docker:cmd',
        'docker:entrypoint',
    ]
    # parse vmconfig
    vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
    log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)
    # set hostname if needed
    if 'hostname' not in vmconfig:
        vmconfig['hostname'] = name
    # prepare image_uuid
    if 'image_uuid' in vmconfig:
        # NOTE: lookup uuid from docker uuid (normal uuid's are passed through unmodified)
        #       we must do this again if we end up importing a missing image later!
        docker_uuid = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
        vmconfig['image_uuid'] = docker_uuid if docker_uuid else vmconfig['image_uuid']
        # NOTE: import image (if missing and allowed)
        if vmconfig['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](vmconfig['image_uuid'])
                    vmconfig['image_uuid'] = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
                    if vmconfig['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])
    # prepare disk.*.image_uuid
    for disk in vmconfig['disks'] if 'disks' in vmconfig else []:
        if 'image_uuid' in disk and disk['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](disk['image_uuid'])
                    if disk['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(disk['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(disk['image_uuid'])
    # docker json-array handling: serialize list values into JSON strings
    if 'internal_metadata' in vmconfig:
        for var in vmconfig_docker_array:
            if var not in vmconfig['internal_metadata']:
                continue
            if isinstance(vmconfig['internal_metadata'][var], list):
                vmconfig['internal_metadata'][var] = json.dumps(
                    vmconfig['internal_metadata'][var]
                )
    # copy lx variables
    if vmconfig['brand'] == 'lx' and config['auto_lx_vars']:
        # NOTE: we can only copy the lx vars after the image has been imported
        vmconfig = _copy_lx_vars(vmconfig)
    # quick abort if things look wrong
    # NOTE: use explicit check for false, otherwise None also matches!
    if ret['result'] is False:
        return ret
    # check if vm exists
    if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
        # update vm
        ret['result'] = True
        # expand vmconfig into desired state / current state / pending changes
        vmconfig = {
            'state': vmconfig,
            'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
            'changed': {},
            'reprovision_uuid': None
        }
        # prepare reprovision: stash the desired image and diff against current
        if 'image_uuid' in vmconfig['state']:
            vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
            vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']
        # disks need some special care: match desired paths against the full
        # device paths vmadm reports, drop 'path' for disks that are new
        if 'disks' in vmconfig['state']:
            new_disks = []
            for disk in vmconfig['state']['disks']:
                path = False
                if 'disks' in vmconfig['current']:
                    for cdisk in vmconfig['current']['disks']:
                        if cdisk['path'].endswith(disk['path']):
                            path = cdisk['path']
                            break
                if not path:
                    del disk['path']
                else:
                    disk['path'] = path
                new_disks.append(disk)
            vmconfig['state']['disks'] = new_disks
        # process properties
        for prop in vmconfig['state']:
            # skip special vmconfig_types
            if prop in vmconfig_type['instance'] or \
               prop in vmconfig_type['collection'] or \
               prop in vmconfig_type['create_only']:
                continue
            # skip unchanged properties (string-compare scalars)
            if prop in vmconfig['current']:
                if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
                    if vmconfig['current'][prop] == vmconfig['state'][prop]:
                        continue
                else:
                    if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
                        continue
            # add property to changeset
            vmconfig['changed'][prop] = vmconfig['state'][prop]
        # process collections
        for collection in vmconfig_type['collection']:
            # skip create only collections
            if collection in vmconfig_type['create_only']:
                continue
            # enforcement: when false only add, never update/remove
            enforce = config['enforce_{0}'.format(collection)]
            log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)
            # dockerinit handling
            if collection == 'internal_metadata' and vmconfig['state'].get('docker', False):
                if 'internal_metadata' not in vmconfig['state']:
                    vmconfig['state']['internal_metadata'] = {}
                # preserve some docker specific metadata (added and needed by dockerinit)
                for var in vmconfig_docker_keep:
                    val = vmconfig['current'].get(collection, {}).get(var, None)
                    if val is not None:
                        vmconfig['state']['internal_metadata'][var] = val
            # process add and update for collection
            if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                for prop in vmconfig['state'][collection]:
                    # skip unchanged properties
                    if prop in vmconfig['current'][collection] and \
                       vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
                        continue
                    # skip update if not enforcing
                    if not enforce and prop in vmconfig['current'][collection]:
                        continue
                    # create set_ dict
                    if 'set_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['set_{0}'.format(collection)] = {}
                    # add property to changeset
                    vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]
            # process remove for collection
            if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
                for prop in vmconfig['current'][collection]:
                    # skip if exists in state
                    if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                        if prop in vmconfig['state'][collection]:
                            continue
                    # create remove_ array
                    if 'remove_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['remove_{0}'.format(collection)] = []
                    # remove property
                    vmconfig['changed']['remove_{0}'.format(collection)].append(prop)
        # process instances (nics/disks keyed by their unique id)
        for instance in vmconfig_type['instance']:
            # skip create only instances
            if instance in vmconfig_type['create_only']:
                continue
            # add or update instances
            if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                for state_cfg in vmconfig['state'][instance]:
                    add_instance = True
                    # find instance with matching ids
                    for current_cfg in vmconfig['current'][instance]:
                        if vmconfig_type['instance'][instance] not in state_cfg:
                            continue
                        if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                            # ids have matched, disable add instance
                            add_instance = False
                            changed = _get_instance_changes(current_cfg, state_cfg)
                            update_cfg = {}
                            # handle changes
                            for prop in changed:
                                update_cfg[prop] = state_cfg[prop]
                            # handle new properties
                            for prop in state_cfg:
                                # skip empty props like ips, options,..
                                if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
                                    continue
                                if prop not in current_cfg:
                                    update_cfg[prop] = state_cfg[prop]
                            # update instance
                            if update_cfg:
                                # create update_ array
                                if 'update_{0}'.format(instance) not in vmconfig['changed']:
                                    vmconfig['changed']['update_{0}'.format(instance)] = []
                                update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
                                vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)
                    if add_instance:
                        # create add_ array
                        if 'add_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['add_{0}'.format(instance)] = []
                        # add instance
                        vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)
            # remove instances
            if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
                for current_cfg in vmconfig['current'][instance]:
                    remove_instance = True
                    # find instance with matching ids
                    if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                        for state_cfg in vmconfig['state'][instance]:
                            if vmconfig_type['instance'][instance] not in state_cfg:
                                continue
                            if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                                # keep instance if matched
                                remove_instance = False
                    if remove_instance:
                        # create remove_ array
                        if 'remove_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['remove_{0}'.format(instance)] = []
                        # remove instance
                        vmconfig['changed']['remove_{0}'.format(instance)].append(
                            current_cfg[vmconfig_type['instance'][instance]]
                        )
        # update vm if we have pending changes
        kvm_needs_start = False
        if not __opts__['test'] and vmconfig['changed']:
            # stop kvm if disk updates and kvm_reboot
            if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
                if 'add_disks' in vmconfig['changed'] or \
                   'update_disks' in vmconfig['changed'] or \
                   'remove_disks' in vmconfig['changed']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        kvm_needs_start = True
                        __salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')
            # do update
            rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['result'] = False
                ret['comment'] = "{0}".format(rret['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
        if ret['result']:
            if __opts__['test']:
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
            if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
                ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
                # reboot running kvm zones so config changes take effect
                if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        __salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
                    if kvm_needs_start:
                        __salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
            else:
                ret['changes'] = {}
                ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])
            # reprovision (if required and allowed)
            if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
                if config['reprovision']:
                    rret = __salt__['vmadm.reprovision'](
                        vm=vmconfig['state']['hostname'],
                        key='hostname',
                        image=vmconfig['reprovision_uuid']
                    )
                    if not isinstance(rret, (bool)) and 'Error' in rret:
                        ret['result'] = False
                        ret['comment'] = 'vm {0} updated, reprovision failed'.format(
                            vmconfig['state']['hostname']
                        )
                    else:
                        ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
                        if vmconfig['state']['hostname'] not in ret['changes']:
                            ret['changes'][vmconfig['state']['hostname']] = {}
                        ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
                else:
                    log.warning('smartos.vm_present::%s::reprovision - '
                                'image_uuid in state does not match current, '
                                'reprovision not allowed',
                                name)
        else:
            ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['comment'] = "{0}".format(rret['Error'])
    else:
        # check required image installed
        ret['result'] = True
        # disks need some special care: 'path' may not be set on creation
        if 'disks' in vmconfig:
            new_disks = []
            for disk in vmconfig['disks']:
                if 'path' in disk:
                    del disk['path']
                new_disks.append(disk)
            vmconfig['disks'] = new_disks
        # create vm
        if ret['result']:
            uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True
            if not isinstance(uuid, (bool)) and 'Error' in uuid:
                ret['result'] = False
                ret['comment'] = "{0}".format(uuid['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['hostname']] = vmconfig
                ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])
    return ret
def vm_absent(name, archive=False):
    '''
    Ensure vm is absent on the computenode

    name : string
        hostname of vm
    archive : boolean
        toggle archiving of vm on removal

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    if name not in __salt__['vmadm.list'](order='hostname'):
        # already gone, nothing to do
        ret['result'] = True
        ret['comment'] = 'vm {0} is absent'.format(name)
        return ret
    if __opts__['test']:
        ret['result'] = True
    else:
        if archive:
            # flag the vm so vmadm archives its data on deletion
            __salt__['vmadm.update'](vm=name, key='hostname', archive_on_delete=True)
        ret['result'] = __salt__['vmadm.delete'](name, key='hostname')
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        # vmadm returned an error dict instead of True
        ret['result'] = False
        ret['comment'] = 'failed to delete vm {0}'.format(name)
    else:
        ret['comment'] = 'vm {0} deleted'.format(name)
        ret['changes'][name] = None
    return ret
def vm_running(name):
    '''
    Ensure vm is in the running state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    if name in __salt__['vmadm.list'](order='hostname', search='state=running'):
        # already running, nothing to do
        ret['result'] = True
        ret['comment'] = 'vm {0} already running'.format(name)
        return ret
    # start the vm (no-op under test=True)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.start'](name, key='hostname')
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        # vmadm returned an error dict instead of True
        ret['result'] = False
        ret['comment'] = 'failed to start {0}'.format(name)
    else:
        ret['changes'][name] = 'running'
        ret['comment'] = 'vm {0} started'.format(name)
    return ret
def vm_stopped(name):
    '''
    Ensure vm is in the stopped state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    if name in __salt__['vmadm.list'](order='hostname', search='state=stopped'):
        # already stopped, nothing to do
        ret['result'] = True
        ret['comment'] = 'vm {0} already stopped'.format(name)
        return ret
    # stop the vm (no-op under test=True)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.stop'](name, key='hostname')
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        # vmadm returned an error dict instead of True
        ret['result'] = False
        ret['comment'] = 'failed to stop {0}'.format(name)
    else:
        ret['changes'][name] = 'stopped'
        ret['comment'] = 'vm {0} stopped'.format(name)
    return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
saltstack/salt
|
salt/states/smartos.py
|
image_vacuum
|
python
|
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present

    .. warning::
        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # list of images to keep
    images = []
    # retrieve image_present state data for host
    for state in __salt__['state.show_lowstate']():
        # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue
        # skip if not from this state module
        if state['state'] != __virtualname__:
            continue
        # skip if not image_present
        if state['fun'] not in ['image_present']:
            continue
        # keep images installed via image_present
        if 'name' in state:
            if _is_uuid(state['name']):
                images.append(state['name'])
            elif _is_docker_uuid(state['name']):
                # resolve docker repo:tag to the imported image uuid
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                images.append(state['name'])
    # retrieve images in use by vms
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in images:
            images.append(image_uuid)
    # purge unused images
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in images:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':
            # NOTE: docker images are made of multiple layers, loop over them
            while image:
                image_uuid = image['manifest']['uuid']
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                # walk up to the parent layer, if any
                if 'origin' in image['manifest']:
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:
            # NOTE: normal images can just be deleted
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'
    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'
    return ret
|
Delete images not in use or installed via image_present
.. warning::
Only image_present states that are included via the
top file will be detected.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/smartos.py#L607-L685
|
[
"def _is_uuid(uuid):\n '''\n Check if uuid is a valid smartos uuid\n\n Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8\n '''\n if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:\n return True\n return False\n",
"def _is_docker_uuid(uuid):\n '''\n Check if uuid is a valid smartos docker uuid\n\n Example plexinc/pms-docker:plexpass\n '''\n repo, tag = _split_docker_uuid(uuid)\n return not (not repo and not tag)\n"
] |
# -*- coding: utf-8 -*-
'''
Management of SmartOS Standalone Compute Nodes
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: vmadm, imgadm
:platform: smartos
.. versionadded:: 2016.3.0
.. code-block:: yaml
vmtest.example.org:
smartos.vm_present:
- config:
reprovision: true
- vmconfig:
image_uuid: c02a2044-c1bd-11e4-bd8c-dfc1db8b0182
brand: joyent
alias: vmtest
quota: 5
max_physical_memory: 512
tags:
label: 'test vm'
owner: 'sjorge'
nics:
"82:1b:8e:49:e9:12":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.123/16
- 192.168.2.123/24
vlan_id: 10
"82:1b:8e:49:e9:13":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
filesystems:
"/bigdata":
source: "/bulk/data"
type: lofs
options:
- ro
- nodevices
kvmtest.example.org:
smartos.vm_present:
- vmconfig:
brand: kvm
alias: kvmtest
cpu_type: host
ram: 512
vnc_port: 9
tags:
label: 'test kvm'
owner: 'sjorge'
disks:
disk0
size: 2048
model: virtio
compression: lz4
boot: true
nics:
"82:1b:8e:49:e9:15":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
docker.example.org:
smartos.vm_present:
- config:
auto_import: true
reprovision: true
- vmconfig:
image_uuid: emby/embyserver:latest
brand: lx
alias: mydockervm
quota: 5
max_physical_memory: 1024
tags:
label: 'my emby docker'
owner: 'sjorge'
resolvers:
- 172.16.1.1
nics:
"82:1b:8e:49:e9:18":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.118/24
vlan_id: 10
filesystems:
        "/config":
source: "/vmdata/emby_config"
type: lofs
options:
- nodevices
cleanup_images:
smartos.image_vacuum
.. note::
Keep in mind that when removing properties from vmconfig they will not get
removed from the vm's current configuration, except for nics, disk, tags, ...
they get removed via add_*, set_*, update_*, and remove_*. Properties must
be manually reset to their default value.
The same behavior as when using 'vmadm update'.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import logging
import json
import os
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.files
import salt.utils.odict
import salt.utils.stringutils
# Import 3rd party libs
from salt.ext import six
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = 'smartos'
def __virtual__():
    '''
    Provides smartos state provided for SmartOS
    '''
    # both the vmadm and imgadm execution modules must be loadable
    required = ('vmadm.create', 'imgadm.list')
    if all(fun in __salt__ for fun in required):
        return True
    return (
        False,
        '{0} state module can only be loaded on SmartOS compute nodes'.format(
            __virtualname__
        )
    )
def _split_docker_uuid(uuid):
'''
Split a smartos docker uuid into repo and tag
'''
if uuid:
uuid = uuid.split(':')
if len(uuid) == 2:
tag = uuid[1]
repo = uuid[0]
return repo, tag
return None, None
def _is_uuid(uuid):
'''
Check if uuid is a valid smartos uuid
Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8
'''
if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:
return True
return False
def _is_docker_uuid(uuid):
    '''
    Check if uuid is a valid smartos docker uuid

    Example plexinc/pms-docker:plexpass
    '''
    # valid when the uuid splits into a non-empty repo and/or tag
    repo, tag = _split_docker_uuid(uuid)
    return bool(repo) or bool(tag)
def _load_config():
    '''
    Loads and parses /usbkey/config

    Returns a dict of lowercased keys; surrounding double quotes are
    stripped from values. An empty dict is returned when the file is
    missing.
    '''
    config = {}
    if os.path.isfile('/usbkey/config'):
        with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:
            for optval in config_file:
                # NOTE(review): salt.utils.stringutils is used here but is not
                # explicitly imported at module level -- verify
                optval = salt.utils.stringutils.to_unicode(optval)
                # skip comment lines
                if optval[0] == '#':
                    continue
                # skip lines that are not key=value pairs
                if '=' not in optval:
                    continue
                # NOTE(review): values containing '=' are truncated at the
                # first separator -- confirm no such values exist in practice
                optval = optval.split('=')
                config[optval[0].lower()] = optval[1].strip().strip('"')
    log.debug('smartos.config - read /usbkey/config: %s', config)
    return config
def _write_config(config):
    '''
    writes /usbkey/config

    Properties are written sorted by name; values containing spaces are
    wrapped in double quotes. Returns True on success, False on IOError.
    '''
    try:
        # atomic_open prevents a partially written config on failure
        with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:
            config_file.write("#\n# This file was generated by salt\n#\n")
            # NOTE(review): salt.utils.odict and salt.utils.stringutils are
            # used here but not explicitly imported at module level -- verify
            for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
                # quote values containing whitespace (unless already quoted)
                if ' ' in six.text_type(config[prop]):
                    if not config[prop].startswith('"') or not config[prop].endswith('"'):
                        config[prop] = '"{0}"'.format(config[prop])
                config_file.write(
                    salt.utils.stringutils.to_str(
                        "{0}={1}\n".format(prop, config[prop])
                    )
                )
        log.debug('smartos.config - wrote /usbkey/config: %s', config)
    except IOError:
        return False
    return True
def _parse_vmconfig(config, instances):
    '''
    Parse vm_present vm config

    Flattens instance mappings (nics/disks/filesystems) from
    {id: {props}} into [{props + id_key: id}] lists, where ``instances``
    maps the property name to its unique id key (e.g. nics -> mac).
    Returns None if ``config`` is not an OrderedDict.
    '''
    vmconfig = None
    if isinstance(config, (salt.utils.odict.OrderedDict)):
        vmconfig = salt.utils.odict.OrderedDict()
        for prop in config:
            if prop not in instances:
                # plain property, copy as-is
                vmconfig[prop] = config[prop]
            else:
                # instance property: flatten mapping into a list of dicts
                if not isinstance(config[prop], (salt.utils.odict.OrderedDict)):
                    continue
                vmconfig[prop] = []
                for instance in config[prop]:
                    instance_config = config[prop][instance]
                    # inject the unique id into the instance config
                    instance_config[instances[prop]] = instance
                    ## some property are lowercase
                    if 'mac' in instance_config:
                        instance_config['mac'] = instance_config['mac'].lower()
                    vmconfig[prop].append(instance_config)
    else:
        log.error('smartos.vm_present::parse_vmconfig - failed to parse')
    return vmconfig
def _get_instance_changes(current, state):
    '''
    get modified properties

    Returns the compare_dicts() result for ``current`` vs ``state`` with
    changes dropped when either the old or the new value is an empty
    string (those represent unset values, not real changes).
    '''
    # compare configs once (previously computed twice, plus two unused
    # key sets that were never read)
    changed = salt.utils.data.compare_dicts(current, state)
    # iterate over a snapshot of the keys so entries can be deleted safely
    for change in list(changed):
        if changed[change]['old'] == "" or changed[change]['new'] == "":
            del changed[change]
    return changed
def _copy_lx_vars(vmconfig):
    '''
    Copy kernel_version and docker:* variables from the image manifest
    into ``vmconfig``; existing user values win (docker:env is merged
    per-variable with the user's value taking precedence).
    '''
    # NOTE: documentation on dockerinit: https://github.com/joyent/smartos-live/blob/master/src/dockerinit/README.md
    if 'image_uuid' in vmconfig:
        # NOTE: retrieve tags and type from image
        imgconfig = __salt__['imgadm.get'](vmconfig['image_uuid']).get('manifest', {})
        imgtype = imgconfig.get('type', 'zone-dataset')
        imgtags = imgconfig.get('tags', {})
        # NOTE: copy kernel_version (if not specified in vmconfig)
        if 'kernel_version' not in vmconfig and 'kernel_version' in imgtags:
            vmconfig['kernel_version'] = imgtags['kernel_version']
        # NOTE: copy docker vars
        if imgtype == 'docker':
            vmconfig['docker'] = True
            vmconfig['kernel_version'] = vmconfig.get('kernel_version', '4.3.0')
            if 'internal_metadata' not in vmconfig:
                vmconfig['internal_metadata'] = {}
            for var in imgtags.get('docker:config', {}):
                val = imgtags['docker:config'][var]
                var = 'docker:{0}'.format(var.lower())
                # NOTE: skip empty values
                if not val:
                    continue
                # NOTE: skip or merge user values
                if var == 'docker:env':
                    try:
                        val_config = json.loads(
                            vmconfig['internal_metadata'].get(var, "")
                        )
                    except ValueError as e:
                        # malformed or missing user value: treat as no overrides
                        val_config = []
                    for config_env_var in val_config if isinstance(val_config, list) else json.loads(val_config):
                        config_env_var = config_env_var.split('=')
                        # NOTE(review): val is mutated (remove) while being
                        # iterated below -- confirm this cannot skip entries
                        for img_env_var in val:
                            if img_env_var.startswith('{0}='.format(config_env_var[0])):
                                val.remove(img_env_var)
                        val.append('='.join(config_env_var))
                elif var in vmconfig['internal_metadata']:
                    continue
                if isinstance(val, list):
                    # NOTE: string-encoded JSON arrays
                    vmconfig['internal_metadata'][var] = json.dumps(val)
                else:
                    vmconfig['internal_metadata'][var] = val
    return vmconfig
def config_present(name, value):
    '''
    Ensure configuration property is set to value in /usbkey/config

    name : string
        name of property
    value : string
        value of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # load configuration
    config = _load_config()

    # normalize bool and None values to their on-disk representation
    if isinstance(value, bool):
        value = 'true' if value else 'false'
    if not value:
        value = ""

    if name not in config:
        # property missing entirely: add it
        ret['result'] = True
        ret['comment'] = 'added property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value
    elif six.text_type(config[name]) == six.text_type(value):
        # property already holds the desired value, nothing to do
        ret['result'] = True
        ret['comment'] = 'property {0} already has value "{1}"'.format(name, value)
    else:
        # property present with a different value: update it
        ret['result'] = True
        ret['comment'] = 'updated property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value

    # persist only outside of test mode and only when something changed
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)

    return ret
def config_absent(name):
    '''
    Ensure configuration property is absent in /usbkey/config

    name : string
        name of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # load configuration
    config = _load_config()

    if name not in config:
        # nothing to do, property already gone
        ret['result'] = True
        ret['comment'] = 'property {0} is absent'.format(name)
    else:
        # drop the property from the configuration
        ret['result'] = True
        ret['comment'] = 'property {0} deleted'.format(name)
        ret['changes'][name] = None
        del config[name]

    # persist only outside of test mode and only when something changed
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)

    return ret
def source_present(name, source_type='imgapi'):
    '''
    Ensure an image source is present on the computenode

    name : string
        source url
    source_type : string
        source type (imgapi or docker)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name in __salt__['imgadm.sources']():
        # nothing to do, source already configured
        ret['result'] = True
        ret['comment'] = 'image source {0} is present'.format(name)
        return ret

    # register the new source (test mode only pretends)
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_add'](name, source_type)
        ret['result'] = (name in res)

    if ret['result']:
        ret['comment'] = 'image source {0} added'.format(name)
        ret['changes'][name] = 'added'
    else:
        ret['comment'] = 'image source {0} not added'.format(name)
        if 'Error' in res:
            # surface the error reported by imgadm
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])

    return ret
def source_absent(name):
    '''
    Ensure an image source is absent on the computenode

    name : string
        source url
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name not in __salt__['imgadm.sources']():
        # nothing to do, source is not configured
        ret['result'] = True
        ret['comment'] = 'image source {0} is absent'.format(name)
        return ret

    # remove the source (test mode only pretends)
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_delete'](name)
        ret['result'] = (name not in res)

    if ret['result']:
        ret['comment'] = 'image source {0} deleted'.format(name)
        ret['changes'][name] = 'deleted'
    else:
        ret['comment'] = 'image source {0} not deleted'.format(name)
        if 'Error' in res:
            # surface the error reported by imgadm
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])

    return ret
def image_present(name):
    '''
    Ensure image is present on the computenode

    name : string
        uuid of image
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if _is_docker_uuid(name) and __salt__['imgadm.docker_to_uuid'](name):
        # docker image was imported (docker id resolves to an imported uuid)
        ret['result'] = True
        ret['comment'] = 'image {0} ({1}) is present'.format(
            name,
            __salt__['imgadm.docker_to_uuid'](name),
        )
    elif name in __salt__['imgadm.list']():
        # image was already imported
        ret['result'] = True
        ret['comment'] = 'image {0} is present'.format(name)
    else:
        # add image
        if _is_docker_uuid(name):
            # NOTE: we cannot query available docker images
            available_images = [name]
        else:
            available_images = __salt__['imgadm.avail']()
        if name in available_images:
            if __opts__['test']:
                # test mode: fabricate an import result for the changes dict
                ret['result'] = True
                res = {}
                if _is_docker_uuid(name):
                    # placeholder uuid, real one is only known after import
                    res['00000000-0000-0000-0000-000000000000'] = name
                else:
                    res[name] = available_images[name]
            else:
                res = __salt__['imgadm.import'](name)
                # success criteria differ by id style: plain uuids appear in
                # the import result, docker ids must resolve to a uuid
                if _is_uuid(name):
                    ret['result'] = (name in res)
                elif _is_docker_uuid(name):
                    ret['result'] = __salt__['imgadm.docker_to_uuid'](name) is not None
            if ret['result']:
                ret['comment'] = 'image {0} imported'.format(name)
                ret['changes'] = res
            else:
                ret['comment'] = 'image {0} was unable to be imported'.format(name)
        else:
            # image not offered by any configured source
            ret['result'] = False
            ret['comment'] = 'image {0} does not exists'.format(name)

    return ret
def image_absent(name):
    '''
    Ensure image is absent on the computenode

    name : string
        uuid of image

    .. note::
        computenode.image_absent will only remove the image if it is not used
        by a vm.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # resolve the image uuid (accepts plain uuids and docker image ids)
    uuid = None
    if _is_uuid(name):
        uuid = name
    if _is_docker_uuid(name):
        uuid = __salt__['imgadm.docker_to_uuid'](name)

    if not uuid or uuid not in __salt__['imgadm.list']():
        # image not imported
        ret['result'] = True
        ret['comment'] = 'image {0} is absent'.format(name)
    else:
        # refuse to delete an image that still backs a vm
        if uuid in __salt__['vmadm.list'](order='image_uuid'):
            ret['result'] = False
            ret['comment'] = 'image {0} currently in use by a vm'.format(name)
        else:
            # delete image
            # NOTE: bind the counter before the test-mode branch so the
            #       comment below can always read it (previously it was only
            #       assigned in the non-test branch, raising
            #       UnboundLocalError in test mode)
            image_count = 0
            if __opts__['test']:
                ret['result'] = True
            else:
                image = __salt__['imgadm.get'](uuid)
                if image['manifest']['name'] == 'docker-layer':
                    # NOTE: docker images are made of multiple layers, loop over them
                    while image:
                        image_count += 1
                        __salt__['imgadm.delete'](image['manifest']['uuid'])
                        if 'origin' in image['manifest']:
                            image = __salt__['imgadm.get'](image['manifest']['origin'])
                        else:
                            image = None
                else:
                    # NOTE: normal images can just be deleted
                    __salt__['imgadm.delete'](uuid)
                ret['result'] = uuid not in __salt__['imgadm.list']()

            if image_count:
                ret['comment'] = 'image {0} and {1} children deleted'.format(name, image_count)
            else:
                ret['comment'] = 'image {0} deleted'.format(name)
            ret['changes'][name] = None

    return ret
def vm_present(name, vmconfig, config=None):
    '''
    Ensure vm is present on the computenode

    name : string
        hostname of vm
    vmconfig : dict
        options to set for the vm
    config : dict
        fine grain control over vm_present

    .. note::
        The following configuration properties can be toggled in the config parameter.
          - kvm_reboot (true)        - reboots of kvm zones if needed for a config update
          - auto_import (false)      - automatic importing of missing images
          - auto_lx_vars (true)      - copy kernel_version and docker:* variables from image
          - reprovision (false)      - reprovision on image_uuid changes
          - enforce_tags (true)      - false = add tags only, true = add, update, and remove tags
          - enforce_routes (true)    - false = add tags only, true = add, update, and remove routes
          - enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
          - enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata

    .. note::
        State ID is used as hostname. Hostnames must be unique.

    .. note::
        If hostname is provided in vmconfig this will take precedence over the State ID.
        This allows multiple states to be applied to the same vm.

    .. note::
        The following instances should have a unique ID.
          - nic : mac
          - filesystem: target
          - disk : path or diskN for zvols

        e.g. disk0 will be the first disk added, disk1 the 2nd,...

    .. versionchanged:: 2019.2.0
        Added support for docker image uuids, added auto_lx_vars configuration, documented some missing configuration options.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # config defaults
    state_config = config if config else {}
    config = {
        'kvm_reboot': True,
        'auto_import': False,
        'auto_lx_vars': True,
        'reprovision': False,
        'enforce_tags': True,
        'enforce_routes': True,
        'enforce_internal_metadata': True,
        'enforce_customer_metadata': True,
    }
    config.update(state_config)
    log.debug('smartos.vm_present::%s::config - %s', name, config)

    # map special vmconfig parameters
    #   collections have set/remove handlers
    #   instances have add/update/remove handlers and a unique id
    vmconfig_type = {
        'collection': [
            'tags',
            'customer_metadata',
            'internal_metadata',
            'routes'
        ],
        'instance': {
            'nics': 'mac',
            'disks': 'path',
            'filesystems': 'target'
        },
        'create_only': [
            'filesystems'
        ]
    }
    # internal_metadata keys written by dockerinit that must survive updates
    vmconfig_docker_keep = [
        'docker:id',
        'docker:restartcount',
    ]
    # internal_metadata keys that vmadm expects as string-encoded JSON arrays
    vmconfig_docker_array = [
        'docker:env',
        'docker:cmd',
        'docker:entrypoint',
    ]

    # parse vmconfig
    vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
    log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)

    # set hostname if needed
    if 'hostname' not in vmconfig:
        vmconfig['hostname'] = name

    # prepare image_uuid
    if 'image_uuid' in vmconfig:
        # NOTE: lookup uuid from docker uuid (normal uuid's are passed through unmodified)
        #       we must do this again if we end up importing a missing image later!
        docker_uuid = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
        vmconfig['image_uuid'] = docker_uuid if docker_uuid else vmconfig['image_uuid']

        # NOTE: import image (if missing and allowed)
        if vmconfig['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](vmconfig['image_uuid'])
                    vmconfig['image_uuid'] = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
                    if vmconfig['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])

    # prepare disk.*.image_uuid
    for disk in vmconfig['disks'] if 'disks' in vmconfig else []:
        if 'image_uuid' in disk and disk['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](disk['image_uuid'])
                    if disk['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(disk['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(disk['image_uuid'])

    # docker json-array handling: encode list values as JSON strings
    if 'internal_metadata' in vmconfig:
        for var in vmconfig_docker_array:
            if var not in vmconfig['internal_metadata']:
                continue
            if isinstance(vmconfig['internal_metadata'][var], list):
                vmconfig['internal_metadata'][var] = json.dumps(
                    vmconfig['internal_metadata'][var]
                )

    # copy lx variables
    if vmconfig['brand'] == 'lx' and config['auto_lx_vars']:
        # NOTE: we can only copy the lx vars after the image has been imported
        vmconfig = _copy_lx_vars(vmconfig)

    # quick abort if things look wrong
    # NOTE: use explicit check for false, otherwise None also matches!
    if ret['result'] is False:
        return ret

    # check if vm exists
    if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
        # update vm
        ret['result'] = True

        # expand vmconfig: desired state, live config, pending changeset
        vmconfig = {
            'state': vmconfig,
            'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
            'changed': {},
            'reprovision_uuid': None
        }

        # prepare reprovision: remember the requested image, and compare
        # against the current one so image_uuid never shows up as a plain
        # property change (it can only change via reprovision)
        if 'image_uuid' in vmconfig['state']:
            vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
            vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']

        # disks need some special care: map the short path from the state
        # onto the full zvol path vmadm reports, or drop it for new disks
        if 'disks' in vmconfig['state']:
            new_disks = []
            for disk in vmconfig['state']['disks']:
                path = False
                if 'disks' in vmconfig['current']:
                    for cdisk in vmconfig['current']['disks']:
                        if cdisk['path'].endswith(disk['path']):
                            path = cdisk['path']
                            break
                if not path:
                    del disk['path']
                else:
                    disk['path'] = path
                new_disks.append(disk)
            vmconfig['state']['disks'] = new_disks

        # process properties
        for prop in vmconfig['state']:
            # skip special vmconfig_types
            if prop in vmconfig_type['instance'] or \
                prop in vmconfig_type['collection'] or \
                prop in vmconfig_type['create_only']:
                continue

            # skip unchanged properties
            # NOTE: scalars are compared as strings since vmadm may return
            #       e.g. ints for values the state file specifies as strings
            if prop in vmconfig['current']:
                if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
                    if vmconfig['current'][prop] == vmconfig['state'][prop]:
                        continue
                else:
                    if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
                        continue

            # add property to changeset
            vmconfig['changed'][prop] = vmconfig['state'][prop]

        # process collections
        for collection in vmconfig_type['collection']:
            # skip create only collections
            if collection in vmconfig_type['create_only']:
                continue

            # enforcement: when off, existing entries are never updated/removed
            enforce = config['enforce_{0}'.format(collection)]
            log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)

            # dockerinit handling
            if collection == 'internal_metadata' and vmconfig['state'].get('docker', False):
                if 'internal_metadata' not in vmconfig['state']:
                    vmconfig['state']['internal_metadata'] = {}

                # preserve some docker specific metadata (added and needed by dockerinit)
                for var in vmconfig_docker_keep:
                    val = vmconfig['current'].get(collection, {}).get(var, None)
                    if val is not None:
                        vmconfig['state']['internal_metadata'][var] = val

            # process add and update for collection
            if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                for prop in vmconfig['state'][collection]:
                    # skip unchanged properties
                    if prop in vmconfig['current'][collection] and \
                        vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
                        continue

                    # skip update if not enforcing
                    if not enforce and prop in vmconfig['current'][collection]:
                        continue

                    # create set_ dict
                    if 'set_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['set_{0}'.format(collection)] = {}

                    # add property to changeset
                    vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]

            # process remove for collection
            if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
                for prop in vmconfig['current'][collection]:
                    # skip if exists in state
                    if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                        if prop in vmconfig['state'][collection]:
                            continue

                    # create remove_ array
                    if 'remove_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['remove_{0}'.format(collection)] = []

                    # remove property
                    vmconfig['changed']['remove_{0}'.format(collection)].append(prop)

        # process instances (nics/disks), matched by their unique id field
        for instance in vmconfig_type['instance']:
            # skip create only instances
            if instance in vmconfig_type['create_only']:
                continue

            # add or update instances
            if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                for state_cfg in vmconfig['state'][instance]:
                    add_instance = True

                    # find instance with matching ids
                    for current_cfg in vmconfig['current'][instance]:
                        if vmconfig_type['instance'][instance] not in state_cfg:
                            continue

                        if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                            # ids have matched, disable add instance
                            add_instance = False

                            changed = _get_instance_changes(current_cfg, state_cfg)
                            update_cfg = {}

                            # handle changes
                            for prop in changed:
                                update_cfg[prop] = state_cfg[prop]

                            # handle new properties
                            for prop in state_cfg:
                                # skip empty props like ips, options,..
                                if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
                                    continue

                                if prop not in current_cfg:
                                    update_cfg[prop] = state_cfg[prop]

                            # update instance
                            if update_cfg:
                                # create update_ array
                                if 'update_{0}'.format(instance) not in vmconfig['changed']:
                                    vmconfig['changed']['update_{0}'.format(instance)] = []

                                update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
                                vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)

                    if add_instance:
                        # create add_ array
                        if 'add_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['add_{0}'.format(instance)] = []

                        # add instance
                        vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)

            # remove instances
            if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
                for current_cfg in vmconfig['current'][instance]:
                    remove_instance = True

                    # find instance with matching ids
                    if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                        for state_cfg in vmconfig['state'][instance]:
                            if vmconfig_type['instance'][instance] not in state_cfg:
                                continue

                            if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                                # keep instance if matched
                                remove_instance = False

                    if remove_instance:
                        # create remove_ array
                        if 'remove_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['remove_{0}'.format(instance)] = []

                        # remove instance
                        vmconfig['changed']['remove_{0}'.format(instance)].append(
                            current_cfg[vmconfig_type['instance'][instance]]
                        )

        # update vm if we have pending changes
        kvm_needs_start = False
        if not __opts__['test'] and vmconfig['changed']:
            # stop kvm if disk updates and kvm_reboot
            # (kvm disks cannot be changed while the zone is running)
            if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
                if 'add_disks' in vmconfig['changed'] or \
                    'update_disks' in vmconfig['changed'] or \
                    'remove_disks' in vmconfig['changed']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        kvm_needs_start = True
                        __salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')

            # do update
            rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['result'] = False
                ret['comment'] = "{0}".format(rret['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']

        if ret['result']:
            if __opts__['test']:
                # in test mode report the computed changeset without applying it
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']

            if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
                ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
                if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        __salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
                    if kvm_needs_start:
                        # vm was stopped above for the disk update, bring it back
                        __salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
            else:
                ret['changes'] = {}
                ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])

            # reprovision (if required and allowed)
            if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
                if config['reprovision']:
                    rret = __salt__['vmadm.reprovision'](
                        vm=vmconfig['state']['hostname'],
                        key='hostname',
                        image=vmconfig['reprovision_uuid']
                    )
                    if not isinstance(rret, (bool)) and 'Error' in rret:
                        ret['result'] = False
                        ret['comment'] = 'vm {0} updated, reprovision failed'.format(
                            vmconfig['state']['hostname']
                        )
                    else:
                        ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
                        if vmconfig['state']['hostname'] not in ret['changes']:
                            ret['changes'][vmconfig['state']['hostname']] = {}
                        ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
                else:
                    log.warning('smartos.vm_present::%s::reprovision - '
                                'image_uuid in state does not match current, '
                                'reprovision not allowed',
                                name)
        else:
            # NOTE(review): this branch is only reachable after a failed
            # vmadm.update above, so 'rret' is bound here — confirm
            ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['comment'] = "{0}".format(rret['Error'])
    else:
        # check required image installed
        ret['result'] = True

        # disks need some special care: 'path' must not be passed on create,
        # vmadm assigns the zvol path itself
        if 'disks' in vmconfig:
            new_disks = []
            for disk in vmconfig['disks']:
                if 'path' in disk:
                    del disk['path']
                new_disks.append(disk)
            vmconfig['disks'] = new_disks

        # create vm
        if ret['result']:
            uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True

            if not isinstance(uuid, (bool)) and 'Error' in uuid:
                ret['result'] = False
                ret['comment'] = "{0}".format(uuid['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['hostname']] = vmconfig
                ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])

    return ret
def vm_absent(name, archive=False):
    '''
    Ensure vm is absent on the computenode

    name : string
        hostname of vm
    archive : boolean
        toggle archiving of vm on removal

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name not in __salt__['vmadm.list'](order='hostname'):
        # nothing to do, the vm does not exist
        ret['result'] = True
        ret['comment'] = 'vm {0} is absent'.format(name)
        return ret

    # delete the vm (test mode only pretends)
    if __opts__['test']:
        ret['result'] = True
    else:
        if archive:
            # flag the vm so vmadm archives it on deletion
            __salt__['vmadm.update'](vm=name, key='hostname', archive_on_delete=True)
        ret['result'] = __salt__['vmadm.delete'](name, key='hostname')

    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to delete vm {0}'.format(name)
    else:
        ret['comment'] = 'vm {0} deleted'.format(name)
        ret['changes'][name] = None

    return ret
def vm_running(name):
    '''
    Ensure vm is in the running state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name in __salt__['vmadm.list'](order='hostname', search='state=running'):
        # nothing to do, the vm is already up
        ret['result'] = True
        ret['comment'] = 'vm {0} already running'.format(name)
        return ret

    # start the vm (test mode only pretends)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.start'](name, key='hostname')

    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to start {0}'.format(name)
    else:
        ret['changes'][name] = 'running'
        ret['comment'] = 'vm {0} started'.format(name)

    return ret
def vm_stopped(name):
    '''
    Ensure vm is in the stopped state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name in __salt__['vmadm.list'](order='hostname', search='state=stopped'):
        # nothing to do, the vm is already down
        ret['result'] = True
        ret['comment'] = 'vm {0} already stopped'.format(name)
        return ret

    # stop the vm (test mode only pretends)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.stop'](name, key='hostname')

    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to stop {0}'.format(name)
    else:
        ret['changes'][name] = 'stopped'
        ret['comment'] = 'vm {0} stopped'.format(name)

    return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
saltstack/salt
|
salt/states/smartos.py
|
vm_present
|
python
|
def vm_present(name, vmconfig, config=None):
'''
Ensure vm is present on the computenode
name : string
hostname of vm
vmconfig : dict
options to set for the vm
config : dict
fine grain control over vm_present
.. note::
The following configuration properties can be toggled in the config parameter.
- kvm_reboot (true) - reboots of kvm zones if needed for a config update
- auto_import (false) - automatic importing of missing images
- auto_lx_vars (true) - copy kernel_version and docker:* variables from image
- reprovision (false) - reprovision on image_uuid changes
- enforce_tags (true) - false = add tags only, true = add, update, and remove tags
- enforce_routes (true) - false = add tags only, true = add, update, and remove routes
- enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
- enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata
.. note::
State ID is used as hostname. Hostnames must be unique.
.. note::
If hostname is provided in vmconfig this will take president over the State ID.
This allows multiple states to be applied to the same vm.
.. note::
The following instances should have a unique ID.
- nic : mac
- filesystem: target
- disk : path or diskN for zvols
e.g. disk0 will be the first disk added, disk1 the 2nd,...
.. versionchanged:: 2019.2.0
Added support for docker image uuids, added auto_lx_vars configuration, documented some missing configuration options.
'''
name = name.lower()
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
# config defaults
state_config = config if config else {}
config = {
'kvm_reboot': True,
'auto_import': False,
'auto_lx_vars': True,
'reprovision': False,
'enforce_tags': True,
'enforce_routes': True,
'enforce_internal_metadata': True,
'enforce_customer_metadata': True,
}
config.update(state_config)
log.debug('smartos.vm_present::%s::config - %s', name, config)
# map special vmconfig parameters
# collections have set/remove handlers
# instances have add/update/remove handlers and a unique id
vmconfig_type = {
'collection': [
'tags',
'customer_metadata',
'internal_metadata',
'routes'
],
'instance': {
'nics': 'mac',
'disks': 'path',
'filesystems': 'target'
},
'create_only': [
'filesystems'
]
}
vmconfig_docker_keep = [
'docker:id',
'docker:restartcount',
]
vmconfig_docker_array = [
'docker:env',
'docker:cmd',
'docker:entrypoint',
]
# parse vmconfig
vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)
# set hostname if needed
if 'hostname' not in vmconfig:
vmconfig['hostname'] = name
# prepare image_uuid
if 'image_uuid' in vmconfig:
# NOTE: lookup uuid from docker uuid (normal uuid's are passed throuhg unmodified)
# we must do this again if we end up importing a missing image later!
docker_uuid = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
vmconfig['image_uuid'] = docker_uuid if docker_uuid else vmconfig['image_uuid']
# NOTE: import image (if missing and allowed)
if vmconfig['image_uuid'] not in __salt__['imgadm.list']():
if config['auto_import']:
if not __opts__['test']:
res = __salt__['imgadm.import'](vmconfig['image_uuid'])
vmconfig['image_uuid'] = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
if vmconfig['image_uuid'] not in res:
ret['result'] = False
ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
else:
ret['result'] = False
ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])
# prepare disk.*.image_uuid
for disk in vmconfig['disks'] if 'disks' in vmconfig else []:
if 'image_uuid' in disk and disk['image_uuid'] not in __salt__['imgadm.list']():
if config['auto_import']:
if not __opts__['test']:
res = __salt__['imgadm.import'](disk['image_uuid'])
if disk['image_uuid'] not in res:
ret['result'] = False
ret['comment'] = 'failed to import image {0}'.format(disk['image_uuid'])
else:
ret['result'] = False
ret['comment'] = 'image {0} not installed'.format(disk['image_uuid'])
# docker json-array handling
if 'internal_metadata' in vmconfig:
for var in vmconfig_docker_array:
if var not in vmconfig['internal_metadata']:
continue
if isinstance(vmconfig['internal_metadata'][var], list):
vmconfig['internal_metadata'][var] = json.dumps(
vmconfig['internal_metadata'][var]
)
# copy lx variables
if vmconfig['brand'] == 'lx' and config['auto_lx_vars']:
# NOTE: we can only copy the lx vars after the image has bene imported
vmconfig = _copy_lx_vars(vmconfig)
# quick abort if things look wrong
# NOTE: use explicit check for false, otherwise None also matches!
if ret['result'] is False:
return ret
# check if vm exists
if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
# update vm
ret['result'] = True
# expand vmconfig
vmconfig = {
'state': vmconfig,
'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
'changed': {},
'reprovision_uuid': None
}
# prepare reprovision
if 'image_uuid' in vmconfig['state']:
vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']
# disks need some special care
if 'disks' in vmconfig['state']:
new_disks = []
for disk in vmconfig['state']['disks']:
path = False
if 'disks' in vmconfig['current']:
for cdisk in vmconfig['current']['disks']:
if cdisk['path'].endswith(disk['path']):
path = cdisk['path']
break
if not path:
del disk['path']
else:
disk['path'] = path
new_disks.append(disk)
vmconfig['state']['disks'] = new_disks
# process properties
for prop in vmconfig['state']:
# skip special vmconfig_types
if prop in vmconfig_type['instance'] or \
prop in vmconfig_type['collection'] or \
prop in vmconfig_type['create_only']:
continue
# skip unchanged properties
if prop in vmconfig['current']:
if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
if vmconfig['current'][prop] == vmconfig['state'][prop]:
continue
else:
if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
continue
# add property to changeset
vmconfig['changed'][prop] = vmconfig['state'][prop]
# process collections
for collection in vmconfig_type['collection']:
# skip create only collections
if collection in vmconfig_type['create_only']:
continue
# enforcement
enforce = config['enforce_{0}'.format(collection)]
log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)
# dockerinit handling
if collection == 'internal_metadata' and vmconfig['state'].get('docker', False):
if 'internal_metadata' not in vmconfig['state']:
vmconfig['state']['internal_metadata'] = {}
# preserve some docker specific metadata (added and needed by dockerinit)
for var in vmconfig_docker_keep:
val = vmconfig['current'].get(collection, {}).get(var, None)
if val is not None:
vmconfig['state']['internal_metadata'][var] = val
# process add and update for collection
if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
for prop in vmconfig['state'][collection]:
# skip unchanged properties
if prop in vmconfig['current'][collection] and \
vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
continue
# skip update if not enforcing
if not enforce and prop in vmconfig['current'][collection]:
continue
# create set_ dict
if 'set_{0}'.format(collection) not in vmconfig['changed']:
vmconfig['changed']['set_{0}'.format(collection)] = {}
# add property to changeset
vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]
# process remove for collection
if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
for prop in vmconfig['current'][collection]:
# skip if exists in state
if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
if prop in vmconfig['state'][collection]:
continue
# create remove_ array
if 'remove_{0}'.format(collection) not in vmconfig['changed']:
vmconfig['changed']['remove_{0}'.format(collection)] = []
# remove property
vmconfig['changed']['remove_{0}'.format(collection)].append(prop)
# process instances
for instance in vmconfig_type['instance']:
# skip create only instances
if instance in vmconfig_type['create_only']:
continue
# add or update instances
if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
for state_cfg in vmconfig['state'][instance]:
add_instance = True
# find instance with matching ids
for current_cfg in vmconfig['current'][instance]:
if vmconfig_type['instance'][instance] not in state_cfg:
continue
if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
# ids have matched, disable add instance
add_instance = False
changed = _get_instance_changes(current_cfg, state_cfg)
update_cfg = {}
# handle changes
for prop in changed:
update_cfg[prop] = state_cfg[prop]
# handle new properties
for prop in state_cfg:
# skip empty props like ips, options,..
if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
continue
if prop not in current_cfg:
update_cfg[prop] = state_cfg[prop]
# update instance
if update_cfg:
# create update_ array
if 'update_{0}'.format(instance) not in vmconfig['changed']:
vmconfig['changed']['update_{0}'.format(instance)] = []
update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)
if add_instance:
# create add_ array
if 'add_{0}'.format(instance) not in vmconfig['changed']:
vmconfig['changed']['add_{0}'.format(instance)] = []
# add instance
vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)
# remove instances
if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
for current_cfg in vmconfig['current'][instance]:
remove_instance = True
# find instance with matching ids
if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
for state_cfg in vmconfig['state'][instance]:
if vmconfig_type['instance'][instance] not in state_cfg:
continue
if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
# keep instance if matched
remove_instance = False
if remove_instance:
# create remove_ array
if 'remove_{0}'.format(instance) not in vmconfig['changed']:
vmconfig['changed']['remove_{0}'.format(instance)] = []
# remove instance
vmconfig['changed']['remove_{0}'.format(instance)].append(
current_cfg[vmconfig_type['instance'][instance]]
)
# update vm if we have pending changes
kvm_needs_start = False
if not __opts__['test'] and vmconfig['changed']:
# stop kvm if disk updates and kvm_reboot
if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
if 'add_disks' in vmconfig['changed'] or \
'update_disks' in vmconfig['changed'] or \
'remove_disks' in vmconfig['changed']:
if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
kvm_needs_start = True
__salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')
# do update
rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
if not isinstance(rret, (bool)) and 'Error' in rret:
ret['result'] = False
ret['comment'] = "{0}".format(rret['Error'])
else:
ret['result'] = True
ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
if ret['result']:
if __opts__['test']:
ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
__salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
if kvm_needs_start:
__salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
else:
ret['changes'] = {}
ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])
# reprovision (if required and allowed)
if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
if config['reprovision']:
rret = __salt__['vmadm.reprovision'](
vm=vmconfig['state']['hostname'],
key='hostname',
image=vmconfig['reprovision_uuid']
)
if not isinstance(rret, (bool)) and 'Error' in rret:
ret['result'] = False
ret['comment'] = 'vm {0} updated, reprovision failed'.format(
vmconfig['state']['hostname']
)
else:
ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
if vmconfig['state']['hostname'] not in ret['changes']:
ret['changes'][vmconfig['state']['hostname']] = {}
ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
else:
log.warning('smartos.vm_present::%s::reprovision - '
'image_uuid in state does not match current, '
'reprovision not allowed',
name)
else:
ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
if not isinstance(rret, (bool)) and 'Error' in rret:
ret['comment'] = "{0}".format(rret['Error'])
else:
# check required image installed
ret['result'] = True
# disks need some special care
if 'disks' in vmconfig:
new_disks = []
for disk in vmconfig['disks']:
if 'path' in disk:
del disk['path']
new_disks.append(disk)
vmconfig['disks'] = new_disks
# create vm
if ret['result']:
uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True
if not isinstance(uuid, (bool)) and 'Error' in uuid:
ret['result'] = False
ret['comment'] = "{0}".format(uuid['Error'])
else:
ret['result'] = True
ret['changes'][vmconfig['hostname']] = vmconfig
ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])
return ret
|
Ensure vm is present on the computenode
name : string
hostname of vm
vmconfig : dict
options to set for the vm
config : dict
fine grain control over vm_present
.. note::
The following configuration properties can be toggled in the config parameter.
- kvm_reboot (true) - reboots of kvm zones if needed for a config update
- auto_import (false) - automatic importing of missing images
- auto_lx_vars (true) - copy kernel_version and docker:* variables from image
- reprovision (false) - reprovision on image_uuid changes
- enforce_tags (true) - false = add tags only, true = add, update, and remove tags
- enforce_routes (true) - false = add tags only, true = add, update, and remove routes
- enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
- enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata
.. note::
State ID is used as hostname. Hostnames must be unique.
.. note::
If hostname is provided in vmconfig this will take precedence over the State ID.
This allows multiple states to be applied to the same vm.
.. note::
The following instances should have a unique ID.
- nic : mac
- filesystem: target
- disk : path or diskN for zvols
e.g. disk0 will be the first disk added, disk1 the 2nd,...
.. versionchanged:: 2019.2.0
Added support for docker image uuids, added auto_lx_vars configuration, documented some missing configuration options.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/smartos.py#L688-L1120
|
[
"def _parse_vmconfig(config, instances):\n '''\n Parse vm_present vm config\n '''\n vmconfig = None\n\n if isinstance(config, (salt.utils.odict.OrderedDict)):\n vmconfig = salt.utils.odict.OrderedDict()\n for prop in config:\n if prop not in instances:\n vmconfig[prop] = config[prop]\n else:\n if not isinstance(config[prop], (salt.utils.odict.OrderedDict)):\n continue\n vmconfig[prop] = []\n for instance in config[prop]:\n instance_config = config[prop][instance]\n instance_config[instances[prop]] = instance\n ## some property are lowercase\n if 'mac' in instance_config:\n instance_config['mac'] = instance_config['mac'].lower()\n vmconfig[prop].append(instance_config)\n else:\n log.error('smartos.vm_present::parse_vmconfig - failed to parse')\n\n return vmconfig\n",
"def _get_instance_changes(current, state):\n '''\n get modified properties\n '''\n # get keys\n current_keys = set(current.keys())\n state_keys = set(state.keys())\n\n # compare configs\n changed = salt.utils.data.compare_dicts(current, state)\n for change in salt.utils.data.compare_dicts(current, state):\n if change in changed and changed[change]['old'] == \"\":\n del changed[change]\n if change in changed and changed[change]['new'] == \"\":\n del changed[change]\n\n return changed\n",
"def _copy_lx_vars(vmconfig):\n # NOTE: documentation on dockerinit: https://github.com/joyent/smartos-live/blob/master/src/dockerinit/README.md\n if 'image_uuid' in vmconfig:\n # NOTE: retrieve tags and type from image\n imgconfig = __salt__['imgadm.get'](vmconfig['image_uuid']).get('manifest', {})\n imgtype = imgconfig.get('type', 'zone-dataset')\n imgtags = imgconfig.get('tags', {})\n\n # NOTE: copy kernel_version (if not specified in vmconfig)\n if 'kernel_version' not in vmconfig and 'kernel_version' in imgtags:\n vmconfig['kernel_version'] = imgtags['kernel_version']\n\n # NOTE: copy docker vars\n if imgtype == 'docker':\n vmconfig['docker'] = True\n vmconfig['kernel_version'] = vmconfig.get('kernel_version', '4.3.0')\n if 'internal_metadata' not in vmconfig:\n vmconfig['internal_metadata'] = {}\n\n for var in imgtags.get('docker:config', {}):\n val = imgtags['docker:config'][var]\n var = 'docker:{0}'.format(var.lower())\n\n # NOTE: skip empty values\n if not val:\n continue\n\n # NOTE: skip or merge user values\n if var == 'docker:env':\n try:\n val_config = json.loads(\n vmconfig['internal_metadata'].get(var, \"\")\n )\n except ValueError as e:\n val_config = []\n\n for config_env_var in val_config if isinstance(val_config, list) else json.loads(val_config):\n config_env_var = config_env_var.split('=')\n for img_env_var in val:\n if img_env_var.startswith('{0}='.format(config_env_var[0])):\n val.remove(img_env_var)\n val.append('='.join(config_env_var))\n elif var in vmconfig['internal_metadata']:\n continue\n\n if isinstance(val, list):\n # NOTE: string-encoded JSON arrays\n vmconfig['internal_metadata'][var] = json.dumps(val)\n else:\n vmconfig['internal_metadata'][var] = val\n\n return vmconfig\n"
] |
# -*- coding: utf-8 -*-
'''
Management of SmartOS Standalone Compute Nodes
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: vmadm, imgadm
:platform: smartos
.. versionadded:: 2016.3.0
.. code-block:: yaml
vmtest.example.org:
smartos.vm_present:
- config:
reprovision: true
- vmconfig:
image_uuid: c02a2044-c1bd-11e4-bd8c-dfc1db8b0182
brand: joyent
alias: vmtest
quota: 5
max_physical_memory: 512
tags:
label: 'test vm'
owner: 'sjorge'
nics:
"82:1b:8e:49:e9:12":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.123/16
- 192.168.2.123/24
vlan_id: 10
"82:1b:8e:49:e9:13":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
filesystems:
"/bigdata":
source: "/bulk/data"
type: lofs
options:
- ro
- nodevices
kvmtest.example.org:
smartos.vm_present:
- vmconfig:
brand: kvm
alias: kvmtest
cpu_type: host
ram: 512
vnc_port: 9
tags:
label: 'test kvm'
owner: 'sjorge'
disks:
disk0:
size: 2048
model: virtio
compression: lz4
boot: true
nics:
"82:1b:8e:49:e9:15":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
docker.example.org:
smartos.vm_present:
- config:
auto_import: true
reprovision: true
- vmconfig:
image_uuid: emby/embyserver:latest
brand: lx
alias: mydockervm
quota: 5
max_physical_memory: 1024
tags:
label: 'my emby docker'
owner: 'sjorge'
resolvers:
- 172.16.1.1
nics:
"82:1b:8e:49:e9:18":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.118/24
vlan_id: 10
filesystems:
"/config":
source: "/vmdata/emby_config"
type: lofs
options:
- nodevices
cleanup_images:
smartos.image_vacuum
.. note::
Keep in mind that when removing properties from vmconfig they will not get
removed from the vm's current configuration, except for nics, disk, tags, ...
they get removed via add_*, set_*, update_*, and remove_*. Properties must
be manually reset to their default value.
The same behavior as when using 'vmadm update'.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import logging
import json
import os
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.files
# Import 3rd party libs
from salt.ext import six
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = 'smartos'
def __virtual__():
    '''
    Only load this state module on SmartOS compute nodes.

    Availability is detected through the presence of the vmadm and
    imgadm execution modules.
    '''
    if 'vmadm.create' not in __salt__ or 'imgadm.list' not in __salt__:
        return (
            False,
            '{0} state module can only be loaded on SmartOS compute nodes'.format(
                __virtualname__
            )
        )
    return True
def _split_docker_uuid(uuid):
'''
Split a smartos docker uuid into repo and tag
'''
if uuid:
uuid = uuid.split(':')
if len(uuid) == 2:
tag = uuid[1]
repo = uuid[0]
return repo, tag
return None, None
def _is_uuid(uuid):
'''
Check if uuid is a valid smartos uuid
Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8
'''
if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:
return True
return False
def _is_docker_uuid(uuid):
    '''
    Check if uuid is a valid smartos docker uuid

    Example plexinc/pms-docker:plexpass
    '''
    repo, tag = _split_docker_uuid(uuid)
    # valid when at least one non-empty component was extracted
    return bool(repo) or bool(tag)
def _load_config():
    '''
    Loads and parses /usbkey/config

    Returns a dict mapping each property name (lowercased) to its value
    with surrounding whitespace and double quotes stripped. Returns an
    empty dict when /usbkey/config does not exist.
    '''
    config = {}
    if os.path.isfile('/usbkey/config'):
        with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:
            for optval in config_file:
                optval = salt.utils.stringutils.to_unicode(optval)
                # skip comment lines and lines without a key=value pair
                if optval[0] == '#':
                    continue
                if '=' not in optval:
                    continue
                # NOTE(review): only the first two '='-separated fields are
                # used, so a value containing '=' would be truncated —
                # presumably fine for /usbkey/config; confirm if needed
                optval = optval.split('=')
                config[optval[0].lower()] = optval[1].strip().strip('"')
    log.debug('smartos.config - read /usbkey/config: %s', config)
    return config
def _write_config(config):
    '''
    writes /usbkey/config

    Serializes ``config`` sorted by property name, one ``prop=value``
    per line, double-quoting values that contain spaces. The file is
    written atomically. Returns True on success, False on IOError.
    '''
    try:
        with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:
            config_file.write("#\n# This file was generated by salt\n#\n")
            for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
                # quote values containing spaces unless already quoted
                if ' ' in six.text_type(config[prop]):
                    if not config[prop].startswith('"') or not config[prop].endswith('"'):
                        config[prop] = '"{0}"'.format(config[prop])
                config_file.write(
                    salt.utils.stringutils.to_str(
                        "{0}={1}\n".format(prop, config[prop])
                    )
                )
        log.debug('smartos.config - wrote /usbkey/config: %s', config)
    except IOError:
        return False
    return True
def _parse_vmconfig(config, instances):
    '''
    Parse vm_present vm config

    config : OrderedDict
        raw vmconfig as passed to the state
    instances : dict
        maps an instance collection (e.g. 'nics') to the property that
        uniquely identifies an entry (e.g. 'mac')

    Collections listed in ``instances`` are converted from a mapping of
    id -> properties into a list of property dicts, with the id stored
    under its identifying key; other properties are copied verbatim.
    Returns None (and logs an error) when config is not an OrderedDict.
    '''
    vmconfig = None
    if isinstance(config, (salt.utils.odict.OrderedDict)):
        vmconfig = salt.utils.odict.OrderedDict()
        for prop in config:
            if prop not in instances:
                vmconfig[prop] = config[prop]
            else:
                # non-OrderedDict collections are silently skipped
                if not isinstance(config[prop], (salt.utils.odict.OrderedDict)):
                    continue
                vmconfig[prop] = []
                for instance in config[prop]:
                    instance_config = config[prop][instance]
                    instance_config[instances[prop]] = instance
                    ## some properties are lowercase
                    if 'mac' in instance_config:
                        instance_config['mac'] = instance_config['mac'].lower()
                    vmconfig[prop].append(instance_config)
    else:
        log.error('smartos.vm_present::parse_vmconfig - failed to parse')
    return vmconfig
def _get_instance_changes(current, state):
    '''
    Return the properties that differ between the current and the
    desired (state) configuration.

    current : dict
        properties as currently configured
    state : dict
        properties as requested by the state

    The diff from ``salt.utils.data.compare_dicts`` is filtered so that
    entries whose old or new value is an empty string are dropped; those
    represent properties merely absent on one side.
    '''
    # NOTE: compute the diff once (the previous version called
    # compare_dicts twice and built two unused key sets)
    changed = salt.utils.data.compare_dicts(current, state)
    # iterate over a snapshot of the keys so entries can be deleted safely
    for change in list(changed):
        if changed[change]['old'] == "" or changed[change]['new'] == "":
            del changed[change]
    return changed
def _copy_lx_vars(vmconfig):
    '''
    Augment an lx-branded vmconfig with variables from its source image.

    Copies ``kernel_version`` from the image tags when not already set,
    and for docker images merges the image's ``docker:config`` tags into
    ``internal_metadata`` (user-supplied values take precedence; the
    ``docker:env`` entries are merged variable by variable).
    '''
    # NOTE: documentation on dockerinit: https://github.com/joyent/smartos-live/blob/master/src/dockerinit/README.md
    if 'image_uuid' in vmconfig:
        # NOTE: retrieve tags and type from image
        imgconfig = __salt__['imgadm.get'](vmconfig['image_uuid']).get('manifest', {})
        imgtype = imgconfig.get('type', 'zone-dataset')
        imgtags = imgconfig.get('tags', {})
        # NOTE: copy kernel_version (if not specified in vmconfig)
        if 'kernel_version' not in vmconfig and 'kernel_version' in imgtags:
            vmconfig['kernel_version'] = imgtags['kernel_version']
        # NOTE: copy docker vars
        if imgtype == 'docker':
            vmconfig['docker'] = True
            vmconfig['kernel_version'] = vmconfig.get('kernel_version', '4.3.0')
            if 'internal_metadata' not in vmconfig:
                vmconfig['internal_metadata'] = {}
            for var in imgtags.get('docker:config', {}):
                val = imgtags['docker:config'][var]
                var = 'docker:{0}'.format(var.lower())
                # NOTE: skip empty values
                if not val:
                    continue
                # NOTE: skip or merge user values
                if var == 'docker:env':
                    # merge user-supplied env vars over the image defaults
                    try:
                        val_config = json.loads(
                            vmconfig['internal_metadata'].get(var, "")
                        )
                    except ValueError as e:
                        val_config = []
                    for config_env_var in val_config if isinstance(val_config, list) else json.loads(val_config):
                        config_env_var = config_env_var.split('=')
                        # drop the image's value for this variable, if any
                        for img_env_var in val:
                            if img_env_var.startswith('{0}='.format(config_env_var[0])):
                                val.remove(img_env_var)
                        val.append('='.join(config_env_var))
                elif var in vmconfig['internal_metadata']:
                    continue
                if isinstance(val, list):
                    # NOTE: string-encoded JSON arrays
                    vmconfig['internal_metadata'][var] = json.dumps(val)
                else:
                    vmconfig['internal_metadata'][var] = val
    return vmconfig
def config_present(name, value):
    '''
    Ensure configuration property is set to value in /usbkey/config

    name : string
        name of property
    value : string
        value of property
    '''
    name = name.lower()
    # standard salt state return structure
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # load configuration
    config = _load_config()
    # handle bool and None value
    if isinstance(value, (bool)):
        value = 'true' if value else 'false'
    if not value:
        # NOTE(review): every falsy value (None, 0, '') is normalized to
        # an empty string here — confirm 0 is never a meaningful value
        value = ""
    if name in config:
        if six.text_type(config[name]) == six.text_type(value):
            # we're good
            ret['result'] = True
            ret['comment'] = 'property {0} already has value "{1}"'.format(name, value)
        else:
            # update property
            ret['result'] = True
            ret['comment'] = 'updated property {0} with value "{1}"'.format(name, value)
            ret['changes'][name] = value
            config[name] = value
    else:
        # add property
        ret['result'] = True
        ret['comment'] = 'added property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value
    # apply change if needed (skipped in test mode)
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)
    return ret
def config_absent(name):
    '''
    Ensure configuration property is absent in /usbkey/config

    name : string
        name of property
    '''
    name = name.lower()
    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': '',
    }
    config = _load_config()
    if name not in config:
        # nothing to do, the property is already gone
        ret['comment'] = 'property {0} is absent'.format(name)
    else:
        # drop the property and record the change
        del config[name]
        ret['changes'][name] = None
        ret['comment'] = 'property {0} deleted'.format(name)
    # persist the new configuration unless running in test mode
    if ret['changes'] and not __opts__['test']:
        ret['result'] = _write_config(config)
    return ret
def source_present(name, source_type='imgapi'):
    '''
    Ensure an image source is present on the computenode

    name : string
        source url
    source_type : string
        source type (imgapi or docker)
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': None,
        'comment': '',
    }
    if name in __salt__['imgadm.sources']():
        # already configured, nothing to do
        ret['result'] = True
        ret['comment'] = 'image source {0} is present'.format(name)
        return ret
    # source is missing, add it (skipped in test mode)
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_add'](name, source_type)
        ret['result'] = (name in res)
    if not ret['result']:
        ret['comment'] = 'image source {0} not added'.format(name)
        if 'Error' in res:
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
    else:
        ret['comment'] = 'image source {0} added'.format(name)
        ret['changes'][name] = 'added'
    return ret
def source_absent(name):
    '''
    Ensure an image source is absent on the computenode

    name : string
        source url
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': None,
        'comment': '',
    }
    if name not in __salt__['imgadm.sources']():
        # not configured, nothing to do
        ret['result'] = True
        ret['comment'] = 'image source {0} is absent'.format(name)
        return ret
    # source exists, remove it (skipped in test mode)
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_delete'](name)
        ret['result'] = (name not in res)
    if ret['result']:
        ret['comment'] = 'image source {0} deleted'.format(name)
        ret['changes'][name] = 'deleted'
    else:
        ret['comment'] = 'image source {0} not deleted'.format(name)
        if 'Error' in res:
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
    return ret
def image_present(name):
    '''
    Ensure image is present on the computenode

    name : string
        uuid of image (a smartos uuid or a docker repo:tag)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if _is_docker_uuid(name) and __salt__['imgadm.docker_to_uuid'](name):
        # docker image was imported
        ret['result'] = True
        ret['comment'] = 'image {0} ({1}) is present'.format(
            name,
            __salt__['imgadm.docker_to_uuid'](name),
        )
    elif name in __salt__['imgadm.list']():
        # image was already imported
        ret['result'] = True
        ret['comment'] = 'image {0} is present'.format(name)
    else:
        # add image
        if _is_docker_uuid(name):
            # NOTE: we cannot query available docker images
            available_images = [name]
        else:
            available_images = __salt__['imgadm.avail']()
        if name in available_images:
            if __opts__['test']:
                # fabricate a plausible result for test mode
                ret['result'] = True
                res = {}
                if _is_docker_uuid(name):
                    res['00000000-0000-0000-0000-000000000000'] = name
                else:
                    res[name] = available_images[name]
            else:
                res = __salt__['imgadm.import'](name)
                if _is_uuid(name):
                    ret['result'] = (name in res)
                elif _is_docker_uuid(name):
                    # docker imports are verified via the uuid mapping
                    ret['result'] = __salt__['imgadm.docker_to_uuid'](name) is not None
            if ret['result']:
                ret['comment'] = 'image {0} imported'.format(name)
                ret['changes'] = res
            else:
                ret['comment'] = 'image {0} was unable to be imported'.format(name)
        else:
            ret['result'] = False
            ret['comment'] = 'image {0} does not exists'.format(name)
    return ret
def image_absent(name):
    '''
    Ensure image is absent on the computenode

    name : string
        uuid of image (a smartos uuid or a docker repo:tag)

    .. note::
        computenode.image_absent will only remove the image if it is not used
        by a vm.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # resolve the name to a smartos uuid
    uuid = None
    if _is_uuid(name):
        uuid = name
    if _is_docker_uuid(name):
        uuid = __salt__['imgadm.docker_to_uuid'](name)
    if not uuid or uuid not in __salt__['imgadm.list']():
        # image not imported
        ret['result'] = True
        ret['comment'] = 'image {0} is absent'.format(name)
    else:
        # check if image in use by vm
        if uuid in __salt__['vmadm.list'](order='image_uuid'):
            ret['result'] = False
            ret['comment'] = 'image {0} currently in use by a vm'.format(name)
        else:
            # delete image
            if __opts__['test']:
                # NOTE(review): in test mode no comment/changes are set —
                # presumably acceptable; confirm
                ret['result'] = True
            else:
                image = __salt__['imgadm.get'](uuid)
                image_count = 0
                if image['manifest']['name'] == 'docker-layer':
                    # NOTE: docker images are made of multiple layers, loop over them
                    while image:
                        image_count += 1
                        __salt__['imgadm.delete'](image['manifest']['uuid'])
                        if 'origin' in image['manifest']:
                            image = __salt__['imgadm.get'](image['manifest']['origin'])
                        else:
                            image = None
                else:
                    # NOTE: normal images can just be deleted
                    __salt__['imgadm.delete'](uuid)
                ret['result'] = uuid not in __salt__['imgadm.list']()
                if image_count:
                    ret['comment'] = 'image {0} and {1} children deleted'.format(name, image_count)
                else:
                    ret['comment'] = 'image {0} deleted'.format(name)
                ret['changes'][name] = None
    return ret
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present

    .. warning::
        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # list of images to keep
    images = []
    # retrieve image_present state data for host
    for state in __salt__['state.show_lowstate']():
        # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue
        # skip if not from this state module
        if state['state'] != __virtualname__:
            continue
        # skip if not image_present
        if state['fun'] not in ['image_present']:
            continue
        # keep images installed via image_present
        if 'name' in state:
            if _is_uuid(state['name']):
                images.append(state['name'])
            elif _is_docker_uuid(state['name']):
                # docker names must be resolved to a smartos uuid first
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                images.append(state['name'])
    # retrieve images in use by vms
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in images:
            images.append(image_uuid)
    # purge unused images
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in images:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':
            # NOTE: docker images are made of multiple layers, loop over them
            while image:
                image_uuid = image['manifest']['uuid']
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                if 'origin' in image['manifest']:
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:
            # NOTE: normal images can just be deleted
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'
    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'
    return ret
def vm_absent(name, archive=False):
    '''
    Ensure vm is absent on the computenode

    name : string
        hostname of vm
    archive : boolean
        toggle archiving of vm on removal

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if name not in __salt__['vmadm.list'](order='hostname'):
        # we're good
        ret['result'] = True
        ret['comment'] = 'vm {0} is absent'.format(name)
    else:
        # delete vm
        if not __opts__['test']:
            # set archive to true if needed
            if archive:
                __salt__['vmadm.update'](vm=name, key='hostname', archive_on_delete=True)
            ret['result'] = __salt__['vmadm.delete'](name, key='hostname')
        else:
            ret['result'] = True
        # vmadm.delete returns either a bool or an error dict
        if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
            ret['result'] = False
            ret['comment'] = 'failed to delete vm {0}'.format(name)
        else:
            ret['comment'] = 'vm {0} deleted'.format(name)
            ret['changes'][name] = None
    return ret
def vm_running(name):
    '''
    Ensure vm is in the running state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if name in __salt__['vmadm.list'](order='hostname', search='state=running'):
        # already running, nothing to do
        ret['result'] = True
        ret['comment'] = 'vm {0} already running'.format(name)
        return ret
    # attempt to start the vm (no-op in test mode)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.start'](name, key='hostname')
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to start {0}'.format(name)
    else:
        ret['changes'][name] = 'running'
        ret['comment'] = 'vm {0} started'.format(name)
    return ret
def vm_stopped(name):
    '''
    Ensure vm is in the stopped state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if name in __salt__['vmadm.list'](order='hostname', search='state=stopped'):
        # already stopped, nothing to do
        ret['result'] = True
        ret['comment'] = 'vm {0} already stopped'.format(name)
        return ret
    # attempt to stop the vm (no-op in test mode)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.stop'](name, key='hostname')
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to stop {0}'.format(name)
    else:
        ret['changes'][name] = 'stopped'
        ret['comment'] = 'vm {0} stopped'.format(name)
    return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
saltstack/salt
|
salt/states/smartos.py
|
vm_absent
|
python
|
def vm_absent(name, archive=False):
'''
Ensure vm is absent on the computenode
name : string
hostname of vm
archive : boolean
toggle archiving of vm on removal
.. note::
State ID is used as hostname. Hostnames must be unique.
'''
name = name.lower()
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if name not in __salt__['vmadm.list'](order='hostname'):
# we're good
ret['result'] = True
ret['comment'] = 'vm {0} is absent'.format(name)
else:
# delete vm
if not __opts__['test']:
# set archive to true if needed
if archive:
__salt__['vmadm.update'](vm=name, key='hostname', archive_on_delete=True)
ret['result'] = __salt__['vmadm.delete'](name, key='hostname')
else:
ret['result'] = True
if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
ret['result'] = False
ret['comment'] = 'failed to delete vm {0}'.format(name)
else:
ret['comment'] = 'vm {0} deleted'.format(name)
ret['changes'][name] = None
return ret
|
Ensure vm is absent on the computenode
name : string
hostname of vm
archive : boolean
toggle archiving of vm on removal
.. note::
State ID is used as hostname. Hostnames must be unique.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/smartos.py#L1123-L1165
| null |
# -*- coding: utf-8 -*-
'''
Management of SmartOS Standalone Compute Nodes
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: vmadm, imgadm
:platform: smartos
.. versionadded:: 2016.3.0
.. code-block:: yaml
vmtest.example.org:
smartos.vm_present:
- config:
reprovision: true
- vmconfig:
image_uuid: c02a2044-c1bd-11e4-bd8c-dfc1db8b0182
brand: joyent
alias: vmtest
quota: 5
max_physical_memory: 512
tags:
label: 'test vm'
owner: 'sjorge'
nics:
"82:1b:8e:49:e9:12":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.123/16
- 192.168.2.123/24
vlan_id: 10
"82:1b:8e:49:e9:13":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
filesystems:
"/bigdata":
source: "/bulk/data"
type: lofs
options:
- ro
- nodevices
kvmtest.example.org:
smartos.vm_present:
- vmconfig:
brand: kvm
alias: kvmtest
cpu_type: host
ram: 512
vnc_port: 9
tags:
label: 'test kvm'
owner: 'sjorge'
disks:
disk0:
size: 2048
model: virtio
compression: lz4
boot: true
nics:
"82:1b:8e:49:e9:15":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
docker.example.org:
smartos.vm_present:
- config:
auto_import: true
reprovision: true
- vmconfig:
image_uuid: emby/embyserver:latest
brand: lx
alias: mydockervm
quota: 5
max_physical_memory: 1024
tags:
label: 'my emby docker'
owner: 'sjorge'
resolvers:
- 172.16.1.1
nics:
"82:1b:8e:49:e9:18":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.118/24
vlan_id: 10
filesystems:
"/config":
source: "/vmdata/emby_config"
type: lofs
options:
- nodevices
cleanup_images:
smartos.image_vacuum
.. note::
Keep in mind that when removing properties from vmconfig they will not get
removed from the vm's current configuration, except for nics, disk, tags, ...
they get removed via add_*, set_*, update_*, and remove_*. Properties must
be manually reset to their default value.
The same behavior as when using 'vmadm update'.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import logging
import json
import os
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.files
# Import 3rd party libs
from salt.ext import six
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = 'smartos'
def __virtual__():
    '''
    Only load this state module on SmartOS compute nodes.

    Availability is detected through the presence of the vmadm and
    imgadm execution modules.
    '''
    if 'vmadm.create' not in __salt__ or 'imgadm.list' not in __salt__:
        return (
            False,
            '{0} state module can only be loaded on SmartOS compute nodes'.format(
                __virtualname__
            )
        )
    return True
def _split_docker_uuid(uuid):
'''
Split a smartos docker uuid into repo and tag
'''
if uuid:
uuid = uuid.split(':')
if len(uuid) == 2:
tag = uuid[1]
repo = uuid[0]
return repo, tag
return None, None
def _is_uuid(uuid):
'''
Check if uuid is a valid smartos uuid
Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8
'''
if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:
return True
return False
def _is_docker_uuid(uuid):
    '''
    Check if uuid is a valid smartos docker uuid

    Example plexinc/pms-docker:plexpass
    '''
    repo, tag = _split_docker_uuid(uuid)
    # valid when at least one non-empty component was extracted
    return bool(repo) or bool(tag)
def _load_config():
    '''
    Loads and parses /usbkey/config

    Returns a dict mapping each property name (lowercased) to its value
    with surrounding whitespace and double quotes stripped. Returns an
    empty dict when /usbkey/config does not exist.
    '''
    config = {}
    if os.path.isfile('/usbkey/config'):
        with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:
            for optval in config_file:
                optval = salt.utils.stringutils.to_unicode(optval)
                # skip comment lines and lines without a key=value pair
                if optval[0] == '#':
                    continue
                if '=' not in optval:
                    continue
                # NOTE(review): only the first two '='-separated fields are
                # used, so a value containing '=' would be truncated —
                # presumably fine for /usbkey/config; confirm if needed
                optval = optval.split('=')
                config[optval[0].lower()] = optval[1].strip().strip('"')
    log.debug('smartos.config - read /usbkey/config: %s', config)
    return config
def _write_config(config):
    '''
    writes /usbkey/config

    Serializes ``config`` sorted by property name, one ``prop=value``
    per line, double-quoting values that contain spaces. The file is
    written atomically. Returns True on success, False on IOError.
    '''
    try:
        with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:
            config_file.write("#\n# This file was generated by salt\n#\n")
            for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
                # quote values containing spaces unless already quoted
                if ' ' in six.text_type(config[prop]):
                    if not config[prop].startswith('"') or not config[prop].endswith('"'):
                        config[prop] = '"{0}"'.format(config[prop])
                config_file.write(
                    salt.utils.stringutils.to_str(
                        "{0}={1}\n".format(prop, config[prop])
                    )
                )
        log.debug('smartos.config - wrote /usbkey/config: %s', config)
    except IOError:
        return False
    return True
def _parse_vmconfig(config, instances):
    '''
    Parse vm_present vm config

    Flattens the nested OrderedDict form of instance properties (nics,
    disks, filesystems) into the list-of-dicts form vmadm expects, using
    each state key (e.g. the mac address) as the instance's unique id.
    '''
    vmconfig = None

    if isinstance(config, (salt.utils.odict.OrderedDict)):
        vmconfig = salt.utils.odict.OrderedDict()
        for prop in config:
            if prop not in instances:
                # plain property, copied through unchanged
                vmconfig[prop] = config[prop]
            else:
                # instance property: each child mapping becomes one dict in a list
                if not isinstance(config[prop], (salt.utils.odict.OrderedDict)):
                    continue
                vmconfig[prop] = []
                for instance in config[prop]:
                    instance_config = config[prop][instance]
                    # inject the unique id (the mapping key) into the instance config
                    instance_config[instances[prop]] = instance
                    ## some property are lowercase
                    if 'mac' in instance_config:
                        instance_config['mac'] = instance_config['mac'].lower()
                    vmconfig[prop].append(instance_config)
    else:
        log.error('smartos.vm_present::parse_vmconfig - failed to parse')

    return vmconfig
def _get_instance_changes(current, state):
    '''
    Return the properties of ``state`` that differ from ``current``.

    Changes whose old or new value is an empty string are dropped, as
    those represent unset defaults rather than real modifications.
    '''
    # compare configs once (the previous implementation computed the diff
    # twice and kept two unused key sets around)
    changed = salt.utils.data.compare_dicts(current, state)
    for change in list(changed):
        if changed[change]['old'] == "" or changed[change]['new'] == "":
            del changed[change]

    return changed
def _copy_lx_vars(vmconfig):
    '''
    Copy kernel_version and docker:* metadata from the source image into
    vmconfig for lx-branded zones, merging with user-supplied values.
    '''
    # NOTE: documentation on dockerinit: https://github.com/joyent/smartos-live/blob/master/src/dockerinit/README.md
    if 'image_uuid' in vmconfig:
        # NOTE: retrieve tags and type from image
        imgconfig = __salt__['imgadm.get'](vmconfig['image_uuid']).get('manifest', {})
        imgtype = imgconfig.get('type', 'zone-dataset')
        imgtags = imgconfig.get('tags', {})

        # NOTE: copy kernel_version (if not specified in vmconfig)
        if 'kernel_version' not in vmconfig and 'kernel_version' in imgtags:
            vmconfig['kernel_version'] = imgtags['kernel_version']

        # NOTE: copy docker vars
        if imgtype == 'docker':
            vmconfig['docker'] = True
            vmconfig['kernel_version'] = vmconfig.get('kernel_version', '4.3.0')
            if 'internal_metadata' not in vmconfig:
                vmconfig['internal_metadata'] = {}

            for var in imgtags.get('docker:config', {}):
                val = imgtags['docker:config'][var]
                var = 'docker:{0}'.format(var.lower())

                # NOTE: skip empty values
                if not val:
                    continue

                # NOTE: skip or merge user values
                if var == 'docker:env':
                    try:
                        # user-supplied env is stored as a JSON-encoded string
                        val_config = json.loads(
                            vmconfig['internal_metadata'].get(var, "")
                        )
                    except ValueError as e:
                        # no (or invalid) user env; merge image env into an empty list
                        val_config = []

                    # user values override matching image env vars
                    for config_env_var in val_config if isinstance(val_config, list) else json.loads(val_config):
                        config_env_var = config_env_var.split('=')
                        for img_env_var in val:
                            if img_env_var.startswith('{0}='.format(config_env_var[0])):
                                val.remove(img_env_var)
                        val.append('='.join(config_env_var))
                elif var in vmconfig['internal_metadata']:
                    # user already set this metadata key; leave it alone
                    continue

                if isinstance(val, list):
                    # NOTE: string-encoded JSON arrays
                    vmconfig['internal_metadata'][var] = json.dumps(val)
                else:
                    vmconfig['internal_metadata'][var] = val

    return vmconfig
def config_present(name, value):
    '''
    Ensure configuration property is set to value in /usbkey/config

    name : string
        name of property
    value : string
        value of property
    '''
    name = name.lower()
    ret = {
        'name': name,
        'changes': {},
        'result': None,
        'comment': '',
    }

    # load current configuration
    config = _load_config()

    # normalize booleans and None-ish values to their on-disk string form
    if isinstance(value, bool):
        value = 'true' if value else 'false'
    if not value:
        value = ""

    ret['result'] = True
    if name in config and six.text_type(config[name]) == six.text_type(value):
        # value already matches, nothing to do
        ret['comment'] = 'property {0} already has value "{1}"'.format(name, value)
    else:
        # property is missing or holds a different value
        if name in config:
            ret['comment'] = 'updated property {0} with value "{1}"'.format(name, value)
        else:
            ret['comment'] = 'added property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value

    # persist the change unless running in test mode
    if ret['changes'] and not __opts__['test']:
        ret['result'] = _write_config(config)

    return ret
def config_absent(name):
    '''
    Ensure configuration property is absent in /usbkey/config

    name : string
        name of property
    '''
    name = name.lower()
    ret = {
        'name': name,
        'changes': {},
        'result': None,
        'comment': '',
    }

    # load current configuration
    config = _load_config()

    ret['result'] = True
    if name not in config:
        # already absent, nothing to do
        ret['comment'] = 'property {0} is absent'.format(name)
    else:
        # drop the property
        ret['comment'] = 'property {0} deleted'.format(name)
        ret['changes'][name] = None
        del config[name]

    # persist the change unless running in test mode
    if ret['changes'] and not __opts__['test']:
        ret['result'] = _write_config(config)

    return ret
def source_present(name, source_type='imgapi'):
    '''
    Ensure an image source is present on the computenode

    name : string
        source url
    source_type : string
        source type (imgapi or docker)
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': None,
        'comment': '',
    }

    if name in __salt__['imgadm.sources']():
        # source already configured, nothing to do
        ret['result'] = True
        ret['comment'] = 'image source {0} is present'.format(name)
        return ret

    # source missing: add it (no-op under test=True)
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_add'](name, source_type)
        ret['result'] = (name in res)

    if ret['result']:
        ret['comment'] = 'image source {0} added'.format(name)
        ret['changes'][name] = 'added'
    else:
        ret['comment'] = 'image source {0} not added'.format(name)
        if 'Error' in res:
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])

    return ret
def source_absent(name):
    '''
    Ensure an image source is absent on the computenode

    name : string
        source url
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': None,
        'comment': '',
    }

    if name not in __salt__['imgadm.sources']():
        # source not configured, nothing to do
        ret['result'] = True
        ret['comment'] = 'image source {0} is absent'.format(name)
        return ret

    # source configured: remove it (no-op under test=True)
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_delete'](name)
        ret['result'] = (name not in res)

    if ret['result']:
        ret['comment'] = 'image source {0} deleted'.format(name)
        ret['changes'][name] = 'deleted'
    else:
        ret['comment'] = 'image source {0} not deleted'.format(name)
        if 'Error' in res:
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])

    return ret
def image_present(name):
    '''
    Ensure image is present on the computenode

    name : string
        uuid of image (a smartos uuid or a docker repo:tag)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if _is_docker_uuid(name) and __salt__['imgadm.docker_to_uuid'](name):
        # docker image was imported
        ret['result'] = True
        ret['comment'] = 'image {0} ({1}) is present'.format(
            name,
            __salt__['imgadm.docker_to_uuid'](name),
        )
    elif name in __salt__['imgadm.list']():
        # image was already imported
        ret['result'] = True
        ret['comment'] = 'image {0} is present'.format(name)
    else:
        # add image
        if _is_docker_uuid(name):
            # NOTE: we cannot query available docker images
            available_images = [name]
        else:
            available_images = __salt__['imgadm.avail']()

        if name in available_images:
            if __opts__['test']:
                ret['result'] = True
                res = {}
                if _is_docker_uuid(name):
                    # placeholder uuid for the test-run summary
                    res['00000000-0000-0000-0000-000000000000'] = name
                else:
                    res[name] = available_images[name]
            else:
                res = __salt__['imgadm.import'](name)
                if _is_uuid(name):
                    ret['result'] = (name in res)
                elif _is_docker_uuid(name):
                    # docker imports key the result by the resolved uuid
                    ret['result'] = __salt__['imgadm.docker_to_uuid'](name) is not None
            if ret['result']:
                ret['comment'] = 'image {0} imported'.format(name)
                ret['changes'] = res
            else:
                ret['comment'] = 'image {0} was unable to be imported'.format(name)
        else:
            ret['result'] = False
            ret['comment'] = 'image {0} does not exists'.format(name)

    return ret
def image_absent(name):
    '''
    Ensure image is absent on the computenode

    name : string
        uuid of image

    .. note::
        computenode.image_absent will only remove the image if it is not used
        by a vm.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # resolve a docker repo:tag to its imported uuid, if any
    uuid = None
    if _is_uuid(name):
        uuid = name
    if _is_docker_uuid(name):
        uuid = __salt__['imgadm.docker_to_uuid'](name)

    if not uuid or uuid not in __salt__['imgadm.list']():
        # image not imported
        ret['result'] = True
        ret['comment'] = 'image {0} is absent'.format(name)
    else:
        # check if image in use by vm
        if uuid in __salt__['vmadm.list'](order='image_uuid'):
            ret['result'] = False
            ret['comment'] = 'image {0} currently in use by a vm'.format(name)
        else:
            # delete image
            if __opts__['test']:
                ret['result'] = True
            else:
                image = __salt__['imgadm.get'](uuid)
                image_count = 0
                if image['manifest']['name'] == 'docker-layer':
                    # NOTE: docker images are made of multiple layers, loop over them
                    # (walk the origin chain deleting each layer)
                    while image:
                        image_count += 1
                        __salt__['imgadm.delete'](image['manifest']['uuid'])
                        if 'origin' in image['manifest']:
                            image = __salt__['imgadm.get'](image['manifest']['origin'])
                        else:
                            image = None
                else:
                    # NOTE: normal images can just be delete
                    __salt__['imgadm.delete'](uuid)

                ret['result'] = uuid not in __salt__['imgadm.list']()
                if image_count:
                    ret['comment'] = 'image {0} and {1} children deleted'.format(name, image_count)
                else:
                    ret['comment'] = 'image {0} deleted'.format(name)
                ret['changes'][name] = None

    return ret
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present

    .. warning::
        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # list of images to keep
    images = []

    # retrieve image_present state data for host
    for state in __salt__['state.show_lowstate']():
        # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue
        # skip if not from this state module
        if state['state'] != __virtualname__:
            continue
        # skip if not image_present
        if state['fun'] not in ['image_present']:
            continue
        # keep images installed via image_present
        if 'name' in state:
            if _is_uuid(state['name']):
                images.append(state['name'])
            elif _is_docker_uuid(state['name']):
                # resolve docker repo:tag to an imported uuid
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                images.append(state['name'])

    # retrieve images in use by vms
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in images:
            images.append(image_uuid)

    # purge unused images
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in images:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':
            # NOTE: docker images are made of multiple layers, loop over them
            # (walk the origin chain deleting each layer)
            while image:
                image_uuid = image['manifest']['uuid']
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                if 'origin' in image['manifest']:
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:
            # NOTE: normal images can just be delete
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'

    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'

    return ret
def vm_present(name, vmconfig, config=None):
    '''
    Ensure vm is present on the computenode

    name : string
        hostname of vm
    vmconfig : dict
        options to set for the vm
    config : dict
        fine grain control over vm_present

    .. note::
        The following configuration properties can be toggled in the config parameter.
          - kvm_reboot (true)                - reboots of kvm zones if needed for a config update
          - auto_import (false)              - automatic importing of missing images
          - auto_lx_vars (true)              - copy kernel_version and docker:* variables from image
          - reprovision (false)              - reprovision on image_uuid changes
          - enforce_tags (true)              - false = add tags only, true = add, update, and remove tags
          - enforce_routes (true)            - false = add tags only, true = add, update, and remove routes
          - enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
          - enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata

    .. note::
        State ID is used as hostname. Hostnames must be unique.

    .. note::
        If hostname is provided in vmconfig this will take precedence over the State ID.
        This allows multiple states to be applied to the same vm.

    .. note::
        The following instances should have a unique ID.
          - nic : mac
          - filesystem: target
          - disk : path or diskN for zvols

        e.g. disk0 will be the first disk added, disk1 the 2nd,...

    .. versionchanged:: 2019.2.0
        Added support for docker image uuids, added auto_lx_vars configuration, documented some missing configuration options.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # config defaults
    state_config = config if config else {}
    config = {
        'kvm_reboot': True,
        'auto_import': False,
        'auto_lx_vars': True,
        'reprovision': False,
        'enforce_tags': True,
        'enforce_routes': True,
        'enforce_internal_metadata': True,
        'enforce_customer_metadata': True,
    }
    config.update(state_config)
    log.debug('smartos.vm_present::%s::config - %s', name, config)

    # map special vmconfig parameters
    #  collections have set/remove handlers
    #  instances have add/update/remove handlers and a unique id
    vmconfig_type = {
        'collection': [
            'tags',
            'customer_metadata',
            'internal_metadata',
            'routes'
        ],
        'instance': {
            'nics': 'mac',
            'disks': 'path',
            'filesystems': 'target'
        },
        'create_only': [
            'filesystems'
        ]
    }
    vmconfig_docker_keep = [
        'docker:id',
        'docker:restartcount',
    ]
    vmconfig_docker_array = [
        'docker:env',
        'docker:cmd',
        'docker:entrypoint',
    ]

    # parse vmconfig
    vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
    log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)

    # set hostname if needed
    if 'hostname' not in vmconfig:
        vmconfig['hostname'] = name

    # prepare image_uuid
    if 'image_uuid' in vmconfig:
        # NOTE: lookup uuid from docker uuid (normal uuid's are passed through unmodified)
        #       we must do this again if we end up importing a missing image later!
        docker_uuid = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
        vmconfig['image_uuid'] = docker_uuid if docker_uuid else vmconfig['image_uuid']

        # NOTE: import image (if missing and allowed)
        if vmconfig['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](vmconfig['image_uuid'])
                    vmconfig['image_uuid'] = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
                    if vmconfig['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])

    # prepare disk.*.image_uuid
    for disk in vmconfig['disks'] if 'disks' in vmconfig else []:
        if 'image_uuid' in disk and disk['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](disk['image_uuid'])
                    if disk['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(disk['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(disk['image_uuid'])

    # docker json-array handling
    if 'internal_metadata' in vmconfig:
        for var in vmconfig_docker_array:
            if var not in vmconfig['internal_metadata']:
                continue
            if isinstance(vmconfig['internal_metadata'][var], list):
                # vmadm expects these as string-encoded JSON arrays
                vmconfig['internal_metadata'][var] = json.dumps(
                    vmconfig['internal_metadata'][var]
                )

    # copy lx variables
    if vmconfig['brand'] == 'lx' and config['auto_lx_vars']:
        # NOTE: we can only copy the lx vars after the image has been imported
        vmconfig = _copy_lx_vars(vmconfig)

    # quick abort if things look wrong
    # NOTE: use explicit check for false, otherwise None also matches!
    if ret['result'] is False:
        return ret

    # check if vm exists
    if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
        # update vm
        ret['result'] = True

        # expand vmconfig
        vmconfig = {
            'state': vmconfig,
            'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
            'changed': {},
            'reprovision_uuid': None
        }

        # prepare reprovision
        if 'image_uuid' in vmconfig['state']:
            vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
            # keep the current image_uuid so it does not show up as a change
            vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']

        # disks need some special care
        if 'disks' in vmconfig['state']:
            new_disks = []
            for disk in vmconfig['state']['disks']:
                path = False
                if 'disks' in vmconfig['current']:
                    for cdisk in vmconfig['current']['disks']:
                        if cdisk['path'].endswith(disk['path']):
                            path = cdisk['path']
                            break
                if not path:
                    del disk['path']
                else:
                    disk['path'] = path
                new_disks.append(disk)
            vmconfig['state']['disks'] = new_disks

        # process properties
        for prop in vmconfig['state']:
            # skip special vmconfig_types
            if prop in vmconfig_type['instance'] or \
                prop in vmconfig_type['collection'] or \
                prop in vmconfig_type['create_only']:
                continue

            # skip unchanged properties
            if prop in vmconfig['current']:
                if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
                    if vmconfig['current'][prop] == vmconfig['state'][prop]:
                        continue
                else:
                    if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
                        continue

            # add property to changeset
            vmconfig['changed'][prop] = vmconfig['state'][prop]

        # process collections
        for collection in vmconfig_type['collection']:
            # skip create only collections
            if collection in vmconfig_type['create_only']:
                continue

            # enforcement
            enforce = config['enforce_{0}'.format(collection)]
            log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)

            # dockerinit handling
            if collection == 'internal_metadata' and vmconfig['state'].get('docker', False):
                if 'internal_metadata' not in vmconfig['state']:
                    vmconfig['state']['internal_metadata'] = {}

                # preserve some docker specific metadata (added and needed by dockerinit)
                for var in vmconfig_docker_keep:
                    val = vmconfig['current'].get(collection, {}).get(var, None)
                    if val is not None:
                        vmconfig['state']['internal_metadata'][var] = val

            # process add and update for collection
            if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                for prop in vmconfig['state'][collection]:
                    # skip unchanged properties
                    if prop in vmconfig['current'][collection] and \
                        vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
                        continue

                    # skip update if not enforcing
                    if not enforce and prop in vmconfig['current'][collection]:
                        continue

                    # create set_ dict
                    if 'set_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['set_{0}'.format(collection)] = {}

                    # add property to changeset
                    vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]

            # process remove for collection
            if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
                for prop in vmconfig['current'][collection]:
                    # skip if exists in state
                    if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                        if prop in vmconfig['state'][collection]:
                            continue

                    # create remove_ array
                    if 'remove_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['remove_{0}'.format(collection)] = []

                    # remove property
                    vmconfig['changed']['remove_{0}'.format(collection)].append(prop)

        # process instances
        for instance in vmconfig_type['instance']:
            # skip create only instances
            if instance in vmconfig_type['create_only']:
                continue

            # add or update instances
            if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                for state_cfg in vmconfig['state'][instance]:
                    add_instance = True

                    # find instance with matching ids
                    for current_cfg in vmconfig['current'][instance]:
                        if vmconfig_type['instance'][instance] not in state_cfg:
                            continue

                        if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                            # ids have matched, disable add instance
                            add_instance = False

                            changed = _get_instance_changes(current_cfg, state_cfg)
                            update_cfg = {}

                            # handle changes
                            for prop in changed:
                                update_cfg[prop] = state_cfg[prop]

                            # handle new properties
                            for prop in state_cfg:
                                # skip empty props like ips, options,..
                                if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
                                    continue

                                if prop not in current_cfg:
                                    update_cfg[prop] = state_cfg[prop]

                            # update instance
                            if update_cfg:
                                # create update_ array
                                if 'update_{0}'.format(instance) not in vmconfig['changed']:
                                    vmconfig['changed']['update_{0}'.format(instance)] = []

                                update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
                                vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)

                    if add_instance:
                        # create add_ array
                        if 'add_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['add_{0}'.format(instance)] = []

                        # add instance
                        vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)

            # remove instances
            if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
                for current_cfg in vmconfig['current'][instance]:
                    remove_instance = True

                    # find instance with matching ids
                    if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                        for state_cfg in vmconfig['state'][instance]:
                            if vmconfig_type['instance'][instance] not in state_cfg:
                                continue

                            if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                                # keep instance if matched
                                remove_instance = False

                    if remove_instance:
                        # create remove_ array
                        if 'remove_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['remove_{0}'.format(instance)] = []

                        # remove instance
                        vmconfig['changed']['remove_{0}'.format(instance)].append(
                            current_cfg[vmconfig_type['instance'][instance]]
                        )

        # update vm if we have pending changes
        kvm_needs_start = False
        if not __opts__['test'] and vmconfig['changed']:
            # stop kvm if disk updates and kvm_reboot
            if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
                if 'add_disks' in vmconfig['changed'] or \
                    'update_disks' in vmconfig['changed'] or \
                    'remove_disks' in vmconfig['changed']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        kvm_needs_start = True
                        __salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')

            # do update
            rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['result'] = False
                ret['comment'] = "{0}".format(rret['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']

        if ret['result']:
            if __opts__['test']:
                # surface the would-be changes in test mode
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']

            if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
                ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
                if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        __salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
                    if kvm_needs_start:
                        # vm was stopped above for the disk update; bring it back
                        __salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
            else:
                ret['changes'] = {}
                ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])

            # reprovision (if required and allowed)
            if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
                if config['reprovision']:
                    rret = __salt__['vmadm.reprovision'](
                        vm=vmconfig['state']['hostname'],
                        key='hostname',
                        image=vmconfig['reprovision_uuid']
                    )
                    if not isinstance(rret, (bool)) and 'Error' in rret:
                        ret['result'] = False
                        ret['comment'] = 'vm {0} updated, reprovision failed'.format(
                            vmconfig['state']['hostname']
                        )
                    else:
                        ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
                        if vmconfig['state']['hostname'] not in ret['changes']:
                            ret['changes'][vmconfig['state']['hostname']] = {}
                        ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
                else:
                    log.warning('smartos.vm_present::%s::reprovision - '
                                'image_uuid in state does not match current, '
                                'reprovision not allowed',
                                name)
        else:
            ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['comment'] = "{0}".format(rret['Error'])
    else:
        # check required image installed
        ret['result'] = True

        # disks need some special care
        if 'disks' in vmconfig:
            new_disks = []
            for disk in vmconfig['disks']:
                if 'path' in disk:
                    # path is generated by vmadm on create; drop it
                    del disk['path']
                new_disks.append(disk)
            vmconfig['disks'] = new_disks

        # create vm
        if ret['result']:
            uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True
            if not isinstance(uuid, (bool)) and 'Error' in uuid:
                ret['result'] = False
                ret['comment'] = "{0}".format(uuid['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['hostname']] = vmconfig
                ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])

    return ret
def vm_running(name):
    '''
    Ensure vm is in the running state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {
        'name': name,
        'changes': {},
        'result': None,
        'comment': '',
    }

    running = __salt__['vmadm.list'](order='hostname', search='state=running')
    if name in running:
        # already in the desired state, nothing to do
        ret['result'] = True
        ret['comment'] = 'vm {0} already running'.format(name)
        return ret

    # start the vm (no-op under test=True)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.start'](name, key='hostname')

    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to start {0}'.format(name)
    else:
        ret['changes'][name] = 'running'
        ret['comment'] = 'vm {0} started'.format(name)

    return ret
def vm_stopped(name):
    '''
    Ensure vm is in the stopped state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {
        'name': name,
        'changes': {},
        'result': None,
        'comment': '',
    }

    stopped = __salt__['vmadm.list'](order='hostname', search='state=stopped')
    if name in stopped:
        # already in the desired state, nothing to do
        ret['result'] = True
        ret['comment'] = 'vm {0} already stopped'.format(name)
        return ret

    # stop the vm (no-op under test=True)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.stop'](name, key='hostname')

    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to stop {0}'.format(name)
    else:
        ret['changes'][name] = 'stopped'
        ret['comment'] = 'vm {0} stopped'.format(name)

    return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
saltstack/salt
|
salt/states/smartos.py
|
vm_running
|
python
|
def vm_running(name):
    '''
    Ensure vm is in the running state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {
        'name': name,
        'changes': {},
        'result': None,
        'comment': '',
    }

    running = __salt__['vmadm.list'](order='hostname', search='state=running')
    if name in running:
        # already in the desired state, nothing to do
        ret['result'] = True
        ret['comment'] = 'vm {0} already running'.format(name)
        return ret

    # start the vm (no-op under test=True)
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.start'](name, key='hostname')

    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to start {0}'.format(name)
    else:
        ret['changes'][name] = 'running'
        ret['comment'] = 'vm {0} started'.format(name)

    return ret
|
Ensure vm is in the running state on the computenode
name : string
hostname of vm
.. note::
State ID is used as hostname. Hostnames must be unique.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/smartos.py#L1168-L1200
| null |
# -*- coding: utf-8 -*-
'''
Management of SmartOS Standalone Compute Nodes
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: vmadm, imgadm
:platform: smartos
.. versionadded:: 2016.3.0
.. code-block:: yaml
vmtest.example.org:
smartos.vm_present:
- config:
reprovision: true
- vmconfig:
image_uuid: c02a2044-c1bd-11e4-bd8c-dfc1db8b0182
brand: joyent
alias: vmtest
quota: 5
max_physical_memory: 512
tags:
label: 'test vm'
owner: 'sjorge'
nics:
"82:1b:8e:49:e9:12":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.123/16
- 192.168.2.123/24
vlan_id: 10
"82:1b:8e:49:e9:13":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
filesystems:
"/bigdata":
source: "/bulk/data"
type: lofs
options:
- ro
- nodevices
kvmtest.example.org:
smartos.vm_present:
- vmconfig:
brand: kvm
alias: kvmtest
cpu_type: host
ram: 512
vnc_port: 9
tags:
label: 'test kvm'
owner: 'sjorge'
disks:
disk0
size: 2048
model: virtio
compression: lz4
boot: true
nics:
"82:1b:8e:49:e9:15":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
docker.example.org:
smartos.vm_present:
- config:
auto_import: true
reprovision: true
- vmconfig:
image_uuid: emby/embyserver:latest
brand: lx
alias: mydockervm
quota: 5
max_physical_memory: 1024
tags:
label: 'my emby docker'
owner: 'sjorge'
resolvers:
- 172.16.1.1
nics:
"82:1b:8e:49:e9:18":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.118/24
vlan_id: 10
filesystems:
"/config":
source: "/vmdata/emby_config"
type: lofs
options:
- nodevices
cleanup_images:
smartos.image_vacuum
.. note::
Keep in mind that when removing properties from vmconfig they will not get
removed from the vm's current configuration, except for nics, disk, tags, ...
they get removed via add_*, set_*, update_*, and remove_*. Properties must
be manually reset to their default value.
The same behavior as when using 'vmadm update'.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import logging
import json
import os
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.files
# Import 3rd party libs
from salt.ext import six
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = 'smartos'
def __virtual__():
    '''
    Load this state module only when the vmadm and imgadm execution
    modules are available, i.e. on SmartOS compute nodes.
    '''
    # Guard clause: without both execution modules the states are unusable.
    if 'vmadm.create' not in __salt__ or 'imgadm.list' not in __salt__:
        reason = '{0} state module can only be loaded on SmartOS compute nodes'.format(
            __virtualname__
        )
        return (False, reason)
    return True
def _split_docker_uuid(uuid):
'''
Split a smartos docker uuid into repo and tag
'''
if uuid:
uuid = uuid.split(':')
if len(uuid) == 2:
tag = uuid[1]
repo = uuid[0]
return repo, tag
return None, None
def _is_uuid(uuid):
'''
Check if uuid is a valid smartos uuid
Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8
'''
if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:
return True
return False
def _is_docker_uuid(uuid):
    '''
    Check if uuid is a valid smartos docker uuid
    Example plexinc/pms-docker:plexpass
    '''
    # Valid when the value splits into a repo and a tag.
    repo, tag = _split_docker_uuid(uuid)
    return bool(repo or tag)
def _load_config():
    '''
    Load and parse /usbkey/config.

    Returns:
        dict: lowercased option names mapped to their unquoted values;
              empty dict if /usbkey/config does not exist.
    '''
    config = {}

    if os.path.isfile('/usbkey/config'):
        with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:
            for optval in config_file:
                optval = salt.utils.stringutils.to_unicode(optval)
                if optval[0] == '#':
                    # skip comment lines
                    continue
                if '=' not in optval:
                    # skip lines that are not key=value pairs
                    continue
                # split on the first '=' only, so values that themselves
                # contain '=' are not truncated
                optval = optval.split('=', 1)
                config[optval[0].lower()] = optval[1].strip().strip('"')
    log.debug('smartos.config - read /usbkey/config: %s', config)
    return config
def _write_config(config):
    '''
    Write the configuration dict out to /usbkey/config.

    Values containing whitespace are quoted on the way out. The file is
    written via an atomic open so a partial config is never left behind.

    Returns:
        bool: True on success, False on IOError.
    '''
    try:
        with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:
            config_file.write("#\n# This file was generated by salt\n#\n")
            for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
                value = config[prop]
                # quote values containing spaces; done on a local copy so the
                # caller's dict is no longer mutated as a side effect
                if ' ' in six.text_type(value):
                    if not value.startswith('"') or not value.endswith('"'):
                        value = '"{0}"'.format(value)
                config_file.write(
                    salt.utils.stringutils.to_str(
                        "{0}={1}\n".format(prop, value)
                    )
                )
        log.debug('smartos.config - wrote /usbkey/config: %s', config)
    except IOError:
        return False

    return True
def _parse_vmconfig(config, instances):
    '''
    Parse vm_present vm config

    ``instances`` maps instance-style properties (e.g. ``nics``) to the key
    that uniquely identifies each instance (e.g. ``mac``). Nested
    OrderedDicts for those properties are flattened into lists of dicts
    with the unique id injected into each entry.
    '''
    vmconfig = None

    if isinstance(config, (salt.utils.odict.OrderedDict)):
        vmconfig = salt.utils.odict.OrderedDict()
        for prop in config:
            if prop not in instances:
                # plain property, copied through unchanged
                vmconfig[prop] = config[prop]
            else:
                # instance property must itself be an OrderedDict keyed by id
                if not isinstance(config[prop], (salt.utils.odict.OrderedDict)):
                    continue
                vmconfig[prop] = []
                for instance in config[prop]:
                    instance_config = config[prop][instance]
                    # inject the unique id (e.g. the mac) into the entry
                    instance_config[instances[prop]] = instance
                    ## some property are lowercase
                    if 'mac' in instance_config:
                        instance_config['mac'] = instance_config['mac'].lower()
                    vmconfig[prop].append(instance_config)
    else:
        log.error('smartos.vm_present::parse_vmconfig - failed to parse')

    return vmconfig
def _get_instance_changes(current, state):
    '''
    Return the properties that differ between ``current`` and ``state``.

    Entries whose old or new value is the empty string are dropped, as an
    empty value is treated as "unset" rather than a real change.
    '''
    # single compare_dicts call (the original computed it twice and also
    # built two unused key sets)
    changed = salt.utils.data.compare_dicts(current, state)

    # iterate over a snapshot of the keys so we can delete while looping
    for change in list(changed):
        if changed[change]['old'] == "" or changed[change]['new'] == "":
            del changed[change]

    return changed
def _copy_lx_vars(vmconfig):
    '''
    Copy kernel_version and docker:* variables from the source image into
    an lx-branded vm configuration (modified in place) and return it.
    '''
    # NOTE: documentation on dockerinit: https://github.com/joyent/smartos-live/blob/master/src/dockerinit/README.md
    if 'image_uuid' in vmconfig:
        # NOTE: retrieve tags and type from image
        imgconfig = __salt__['imgadm.get'](vmconfig['image_uuid']).get('manifest', {})
        imgtype = imgconfig.get('type', 'zone-dataset')
        imgtags = imgconfig.get('tags', {})

        # NOTE: copy kernel_version (if not specified in vmconfig)
        if 'kernel_version' not in vmconfig and 'kernel_version' in imgtags:
            vmconfig['kernel_version'] = imgtags['kernel_version']

        # NOTE: copy docker vars
        if imgtype == 'docker':
            vmconfig['docker'] = True
            vmconfig['kernel_version'] = vmconfig.get('kernel_version', '4.3.0')
            if 'internal_metadata' not in vmconfig:
                vmconfig['internal_metadata'] = {}

            for var in imgtags.get('docker:config', {}):
                val = imgtags['docker:config'][var]
                var = 'docker:{0}'.format(var.lower())

                # NOTE: skip empty values
                if not val:
                    continue

                # NOTE: skip or merge user values
                if var == 'docker:env':
                    # merge the image's env list with any user-supplied
                    # docker:env (user values win over image defaults)
                    try:
                        val_config = json.loads(
                            vmconfig['internal_metadata'].get(var, "")
                        )
                    except ValueError as e:
                        val_config = []

                    for config_env_var in val_config if isinstance(val_config, list) else json.loads(val_config):
                        config_env_var = config_env_var.split('=')
                        for img_env_var in val:
                            if img_env_var.startswith('{0}='.format(config_env_var[0])):
                                val.remove(img_env_var)
                        val.append('='.join(config_env_var))
                elif var in vmconfig['internal_metadata']:
                    # user already set this metadata key, keep theirs
                    continue

                if isinstance(val, list):
                    # NOTE: string-encoded JSON arrays
                    vmconfig['internal_metadata'][var] = json.dumps(val)
                else:
                    vmconfig['internal_metadata'][var] = val

    return vmconfig
def config_present(name, value):
    '''
    Ensure configuration property is set to value in /usbkey/config

    name : string
        name of property
    value : string
        value of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # load configuration
    config = _load_config()

    # handle bool and None value
    if isinstance(value, (bool)):
        value = 'true' if value else 'false'

    if not value:
        value = ""

    if name in config:
        if six.text_type(config[name]) == six.text_type(value):
            # we're good
            ret['result'] = True
            ret['comment'] = 'property {0} already has value "{1}"'.format(name, value)
        else:
            # update property
            ret['result'] = True
            ret['comment'] = 'updated property {0} with value "{1}"'.format(name, value)
            ret['changes'][name] = value
            config[name] = value
    else:
        # add property
        ret['result'] = True
        ret['comment'] = 'added property {0} with value "{1}"'.format(name, value)
        ret['changes'][name] = value
        config[name] = value

    # apply change if needed (skipped in test mode)
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)

    return ret
def config_absent(name):
    '''
    Ensure configuration property is absent in /usbkey/config

    name : string
        name of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # load configuration
    config = _load_config()

    if name in config:
        # delete property
        ret['result'] = True
        ret['comment'] = 'property {0} deleted'.format(name)
        ret['changes'][name] = None
        del config[name]
    else:
        # we're good
        ret['result'] = True
        ret['comment'] = 'property {0} is absent'.format(name)

    # apply change if needed (skipped in test mode)
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)

    return ret
def source_present(name, source_type='imgapi'):
    '''
    Ensure an image source is present on the computenode

    name : string
        source url
    source_type : string
        source type (imgapi or docker)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name in __salt__['imgadm.sources']():
        # source is present
        ret['result'] = True
        ret['comment'] = 'image source {0} is present'.format(name)
    else:
        # add new source
        if __opts__['test']:
            res = {}
            ret['result'] = True
        else:
            res = __salt__['imgadm.source_add'](name, source_type)
            # source_add returns the updated source list on success
            ret['result'] = (name in res)

        if ret['result']:
            ret['comment'] = 'image source {0} added'.format(name)
            ret['changes'][name] = 'added'
        else:
            ret['comment'] = 'image source {0} not added'.format(name)
            if 'Error' in res:
                ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])

    return ret
def source_absent(name):
    '''
    Ensure an image source is absent on the computenode

    name : string
        source url
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name not in __salt__['imgadm.sources']():
        # source is absent
        ret['result'] = True
        ret['comment'] = 'image source {0} is absent'.format(name)
    else:
        # remove source
        if __opts__['test']:
            res = {}
            ret['result'] = True
        else:
            res = __salt__['imgadm.source_delete'](name)
            # source_delete returns the remaining source list on success
            ret['result'] = (name not in res)

        if ret['result']:
            ret['comment'] = 'image source {0} deleted'.format(name)
            ret['changes'][name] = 'deleted'
        else:
            ret['comment'] = 'image source {0} not deleted'.format(name)
            if 'Error' in res:
                ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])

    return ret
def image_present(name):
    '''
    Ensure image is present on the computenode

    name : string
        uuid of image
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if _is_docker_uuid(name) and __salt__['imgadm.docker_to_uuid'](name):
        # docker image was imported
        ret['result'] = True
        ret['comment'] = 'image {0} ({1}) is present'.format(
            name,
            __salt__['imgadm.docker_to_uuid'](name),
        )
    elif name in __salt__['imgadm.list']():
        # image was already imported
        ret['result'] = True
        ret['comment'] = 'image {0} is present'.format(name)
    else:
        # add image
        if _is_docker_uuid(name):
            # NOTE: we cannot query available docker images
            available_images = [name]
        else:
            available_images = __salt__['imgadm.avail']()

        if name in available_images:
            if __opts__['test']:
                ret['result'] = True
                res = {}
                if _is_docker_uuid(name):
                    # fake uuid for the dry-run changes output
                    res['00000000-0000-0000-0000-000000000000'] = name
                else:
                    res[name] = available_images[name]
            else:
                res = __salt__['imgadm.import'](name)
                if _is_uuid(name):
                    ret['result'] = (name in res)
                elif _is_docker_uuid(name):
                    # docker imports resolve to a real uuid after import
                    ret['result'] = __salt__['imgadm.docker_to_uuid'](name) is not None
            if ret['result']:
                ret['comment'] = 'image {0} imported'.format(name)
                ret['changes'] = res
            else:
                ret['comment'] = 'image {0} was unable to be imported'.format(name)
        else:
            ret['result'] = False
            ret['comment'] = 'image {0} does not exists'.format(name)

    return ret
def image_absent(name):
    '''
    Ensure image is absent on the computenode

    name : string
        uuid of image

    .. note::
        computenode.image_absent will only remove the image if it is not used
        by a vm.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # resolve both plain and docker uuids to an image uuid
    uuid = None
    if _is_uuid(name):
        uuid = name
    if _is_docker_uuid(name):
        uuid = __salt__['imgadm.docker_to_uuid'](name)

    if not uuid or uuid not in __salt__['imgadm.list']():
        # image not imported
        ret['result'] = True
        ret['comment'] = 'image {0} is absent'.format(name)
    else:
        # check if image in use by vm
        if uuid in __salt__['vmadm.list'](order='image_uuid'):
            ret['result'] = False
            ret['comment'] = 'image {0} currently in use by a vm'.format(name)
        else:
            # delete image
            if __opts__['test']:
                ret['result'] = True
            else:
                image = __salt__['imgadm.get'](uuid)
                image_count = 0
                if image['manifest']['name'] == 'docker-layer':
                    # NOTE: docker images are made of multiple layers, loop over them
                    while image:
                        image_count += 1
                        __salt__['imgadm.delete'](image['manifest']['uuid'])
                        if 'origin' in image['manifest']:
                            # walk down to the parent layer
                            image = __salt__['imgadm.get'](image['manifest']['origin'])
                        else:
                            image = None
                else:
                    # NOTE: normal images can just be deleted
                    __salt__['imgadm.delete'](uuid)

                ret['result'] = uuid not in __salt__['imgadm.list']()
                if image_count:
                    ret['comment'] = 'image {0} and {1} children deleted'.format(name, image_count)
                else:
                    ret['comment'] = 'image {0} deleted'.format(name)
                ret['changes'][name] = None

    return ret
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present

    .. warning::
        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # list of images to keep
    images = []

    # retrieve image_present state data for host
    for state in __salt__['state.show_lowstate']():
        # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue
        # skip if not from this state module
        if state['state'] != __virtualname__:
            continue
        # skip if not image_present
        if state['fun'] not in ['image_present']:
            continue
        # keep images installed via image_present
        if 'name' in state:
            if _is_uuid(state['name']):
                images.append(state['name'])
            elif _is_docker_uuid(state['name']):
                # resolve docker uuids; skip those that cannot be resolved
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                images.append(state['name'])

    # retrieve images in use by vms
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in images:
            images.append(image_uuid)

    # purge unused images
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in images:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':
            # NOTE: docker images are made of multiple layers, loop over them
            while image:
                image_uuid = image['manifest']['uuid']
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                if 'origin' in image['manifest']:
                    # walk down to the parent layer
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:
            # NOTE: normal images can just be deleted
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'

    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'

    return ret
def vm_present(name, vmconfig, config=None):
    '''
    Ensure vm is present on the computenode

    name : string
        hostname of vm
    vmconfig : dict
        options to set for the vm
    config : dict
        fine grain control over vm_present

    .. note::
        The following configuration properties can be toggled in the config parameter.
          - kvm_reboot (true)                - reboots of kvm zones if needed for a config update
          - auto_import (false)              - automatic importing of missing images
          - auto_lx_vars (true)              - copy kernel_version and docker:* variables from image
          - reprovision (false)              - reprovision on image_uuid changes
          - enforce_tags (true)              - false = add tags only, true = add, update, and remove tags
          - enforce_routes (true)            - false = add tags only, true = add, update, and remove routes
          - enforce_internal_metadata (true) - false = add metadata only, true = add, update, and remove metadata
          - enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata

    .. note::
        State ID is used as hostname. Hostnames must be unique.

    .. note::
        If hostname is provided in vmconfig this will take precedence over the State ID.
        This allows multiple states to be applied to the same vm.

    .. note::
        The following instances should have a unique ID.
          - nic : mac
          - filesystem: target
          - disk : path or diskN for zvols

        e.g. disk0 will be the first disk added, disk1 the 2nd,...

    .. versionchanged:: 2019.2.0
        Added support for docker image uuids, added auto_lx_vars configuration, documented some missing configuration options.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # config defaults
    state_config = config if config else {}
    config = {
        'kvm_reboot': True,
        'auto_import': False,
        'auto_lx_vars': True,
        'reprovision': False,
        'enforce_tags': True,
        'enforce_routes': True,
        'enforce_internal_metadata': True,
        'enforce_customer_metadata': True,
    }
    config.update(state_config)
    log.debug('smartos.vm_present::%s::config - %s', name, config)

    # map special vmconfig parameters
    #  collections have set/remove handlers
    #  instances have add/update/remove handlers and a unique id
    vmconfig_type = {
        'collection': [
            'tags',
            'customer_metadata',
            'internal_metadata',
            'routes'
        ],
        'instance': {
            'nics': 'mac',
            'disks': 'path',
            'filesystems': 'target'
        },
        'create_only': [
            'filesystems'
        ]
    }
    # internal_metadata keys owned by dockerinit that must survive updates
    vmconfig_docker_keep = [
        'docker:id',
        'docker:restartcount',
    ]
    # internal_metadata keys stored as string-encoded JSON arrays
    vmconfig_docker_array = [
        'docker:env',
        'docker:cmd',
        'docker:entrypoint',
    ]

    # parse vmconfig
    vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
    log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)

    # set hostname if needed
    if 'hostname' not in vmconfig:
        vmconfig['hostname'] = name

    # prepare image_uuid
    if 'image_uuid' in vmconfig:
        # NOTE: lookup uuid from docker uuid (normal uuid's are passed through unmodified)
        #       we must do this again if we end up importing a missing image later!
        docker_uuid = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
        vmconfig['image_uuid'] = docker_uuid if docker_uuid else vmconfig['image_uuid']

        # NOTE: import image (if missing and allowed)
        if vmconfig['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](vmconfig['image_uuid'])
                    vmconfig['image_uuid'] = __salt__['imgadm.docker_to_uuid'](vmconfig['image_uuid'])
                    if vmconfig['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])

    # prepare disk.*.image_uuid
    for disk in vmconfig['disks'] if 'disks' in vmconfig else []:
        if 'image_uuid' in disk and disk['image_uuid'] not in __salt__['imgadm.list']():
            if config['auto_import']:
                if not __opts__['test']:
                    res = __salt__['imgadm.import'](disk['image_uuid'])
                    if disk['image_uuid'] not in res:
                        ret['result'] = False
                        ret['comment'] = 'failed to import image {0}'.format(disk['image_uuid'])
            else:
                ret['result'] = False
                ret['comment'] = 'image {0} not installed'.format(disk['image_uuid'])

    # docker json-array handling
    if 'internal_metadata' in vmconfig:
        for var in vmconfig_docker_array:
            if var not in vmconfig['internal_metadata']:
                continue
            if isinstance(vmconfig['internal_metadata'][var], list):
                # vmadm expects these as string-encoded JSON
                vmconfig['internal_metadata'][var] = json.dumps(
                    vmconfig['internal_metadata'][var]
                )

    # copy lx variables
    if vmconfig['brand'] == 'lx' and config['auto_lx_vars']:
        # NOTE: we can only copy the lx vars after the image has been imported
        vmconfig = _copy_lx_vars(vmconfig)

    # quick abort if things look wrong
    # NOTE: use explicit check for false, otherwise None also matches!
    if ret['result'] is False:
        return ret

    # check if vm exists
    if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
        # update vm
        ret['result'] = True

        # expand vmconfig
        vmconfig = {
            'state': vmconfig,
            'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
            'changed': {},
            'reprovision_uuid': None
        }

        # prepare reprovision
        if 'image_uuid' in vmconfig['state']:
            vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
            vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']

        # disks need some special care
        if 'disks' in vmconfig['state']:
            new_disks = []
            for disk in vmconfig['state']['disks']:
                # match state disks to current disks by path suffix
                path = False
                if 'disks' in vmconfig['current']:
                    for cdisk in vmconfig['current']['disks']:
                        if cdisk['path'].endswith(disk['path']):
                            path = cdisk['path']
                            break
                if not path:
                    del disk['path']
                else:
                    disk['path'] = path
                new_disks.append(disk)
            vmconfig['state']['disks'] = new_disks

        # process properties
        for prop in vmconfig['state']:
            # skip special vmconfig_types
            if prop in vmconfig_type['instance'] or \
                prop in vmconfig_type['collection'] or \
                prop in vmconfig_type['create_only']:
                continue

            # skip unchanged properties
            if prop in vmconfig['current']:
                if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
                    if vmconfig['current'][prop] == vmconfig['state'][prop]:
                        continue
                else:
                    # compare string representations to tolerate type drift
                    if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
                        continue

            # add property to changeset
            vmconfig['changed'][prop] = vmconfig['state'][prop]

        # process collections
        for collection in vmconfig_type['collection']:
            # skip create only collections
            if collection in vmconfig_type['create_only']:
                continue

            # enforcement
            enforce = config['enforce_{0}'.format(collection)]
            log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)

            # dockerinit handling
            if collection == 'internal_metadata' and vmconfig['state'].get('docker', False):
                if 'internal_metadata' not in vmconfig['state']:
                    vmconfig['state']['internal_metadata'] = {}

                # preserve some docker specific metadata (added and needed by dockerinit)
                for var in vmconfig_docker_keep:
                    val = vmconfig['current'].get(collection, {}).get(var, None)
                    if val is not None:
                        vmconfig['state']['internal_metadata'][var] = val

            # process add and update for collection
            if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                for prop in vmconfig['state'][collection]:
                    # skip unchanged properties
                    if prop in vmconfig['current'][collection] and \
                        vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
                        continue

                    # skip update if not enforcing
                    if not enforce and prop in vmconfig['current'][collection]:
                        continue

                    # create set_ dict
                    if 'set_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['set_{0}'.format(collection)] = {}

                    # add property to changeset
                    vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]

            # process remove for collection
            if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
                for prop in vmconfig['current'][collection]:
                    # skip if exists in state
                    if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
                        if prop in vmconfig['state'][collection]:
                            continue

                    # create remove_ array
                    if 'remove_{0}'.format(collection) not in vmconfig['changed']:
                        vmconfig['changed']['remove_{0}'.format(collection)] = []

                    # remove property
                    vmconfig['changed']['remove_{0}'.format(collection)].append(prop)

        # process instances
        for instance in vmconfig_type['instance']:
            # skip create only instances
            if instance in vmconfig_type['create_only']:
                continue

            # add or update instances
            if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                for state_cfg in vmconfig['state'][instance]:
                    add_instance = True

                    # find instance with matching ids
                    for current_cfg in vmconfig['current'][instance]:
                        if vmconfig_type['instance'][instance] not in state_cfg:
                            continue

                        if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                            # ids have matched, disable add instance
                            add_instance = False

                            changed = _get_instance_changes(current_cfg, state_cfg)
                            update_cfg = {}

                            # handle changes
                            for prop in changed:
                                update_cfg[prop] = state_cfg[prop]

                            # handle new properties
                            for prop in state_cfg:
                                # skip empty props like ips, options,..
                                if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
                                    continue

                                if prop not in current_cfg:
                                    update_cfg[prop] = state_cfg[prop]

                            # update instance
                            if update_cfg:
                                # create update_ array
                                if 'update_{0}'.format(instance) not in vmconfig['changed']:
                                    vmconfig['changed']['update_{0}'.format(instance)] = []

                                update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
                                vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)

                    if add_instance:
                        # create add_ array
                        if 'add_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['add_{0}'.format(instance)] = []

                        # add instance
                        vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)

            # remove instances
            if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
                for current_cfg in vmconfig['current'][instance]:
                    remove_instance = True

                    # find instance with matching ids
                    if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
                        for state_cfg in vmconfig['state'][instance]:
                            if vmconfig_type['instance'][instance] not in state_cfg:
                                continue

                            if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
                                # keep instance if matched
                                remove_instance = False

                    if remove_instance:
                        # create remove_ array
                        if 'remove_{0}'.format(instance) not in vmconfig['changed']:
                            vmconfig['changed']['remove_{0}'.format(instance)] = []

                        # remove instance
                        vmconfig['changed']['remove_{0}'.format(instance)].append(
                            current_cfg[vmconfig_type['instance'][instance]]
                        )

        # update vm if we have pending changes
        kvm_needs_start = False
        if not __opts__['test'] and vmconfig['changed']:
            # stop kvm if disk updates and kvm_reboot
            if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
                if 'add_disks' in vmconfig['changed'] or \
                    'update_disks' in vmconfig['changed'] or \
                    'remove_disks' in vmconfig['changed']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        kvm_needs_start = True
                        __salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')

            # do update
            rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['result'] = False
                ret['comment'] = "{0}".format(rret['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']

        if ret['result']:
            if __opts__['test']:
                ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']

            if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
                ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
                if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
                    if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
                        __salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
                    if kvm_needs_start:
                        __salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
            else:
                ret['changes'] = {}
                ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])

            # reprovision (if required and allowed)
            if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
                if config['reprovision']:
                    rret = __salt__['vmadm.reprovision'](
                        vm=vmconfig['state']['hostname'],
                        key='hostname',
                        image=vmconfig['reprovision_uuid']
                    )
                    if not isinstance(rret, (bool)) and 'Error' in rret:
                        ret['result'] = False
                        ret['comment'] = 'vm {0} updated, reprovision failed'.format(
                            vmconfig['state']['hostname']
                        )
                    else:
                        ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
                        if vmconfig['state']['hostname'] not in ret['changes']:
                            ret['changes'][vmconfig['state']['hostname']] = {}
                        ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
                else:
                    log.warning('smartos.vm_present::%s::reprovision - '
                                'image_uuid in state does not match current, '
                                'reprovision not allowed',
                                name)
        else:
            ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
            if not isinstance(rret, (bool)) and 'Error' in rret:
                ret['comment'] = "{0}".format(rret['Error'])
    else:
        # check required image installed
        ret['result'] = True

        # disks need some special care
        if 'disks' in vmconfig:
            new_disks = []
            for disk in vmconfig['disks']:
                if 'path' in disk:
                    del disk['path']
                new_disks.append(disk)
            vmconfig['disks'] = new_disks

        # create vm
        if ret['result']:
            uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True
            if not isinstance(uuid, (bool)) and 'Error' in uuid:
                ret['result'] = False
                ret['comment'] = "{0}".format(uuid['Error'])
            else:
                ret['result'] = True
                ret['changes'][vmconfig['hostname']] = vmconfig
                ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])

    return ret
def vm_absent(name, archive=False):
    '''
    Ensure vm is absent on the computenode

    name : string
        hostname of vm
    archive : boolean
        toggle archiving of vm on removal

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name not in __salt__['vmadm.list'](order='hostname'):
        # we're good
        ret['result'] = True
        ret['comment'] = 'vm {0} is absent'.format(name)
    else:
        # delete vm
        if not __opts__['test']:
            # set archive to true if needed
            if archive:
                __salt__['vmadm.update'](vm=name, key='hostname', archive_on_delete=True)

            ret['result'] = __salt__['vmadm.delete'](name, key='hostname')
        else:
            ret['result'] = True

        # vmadm.delete returns a dict with 'Error' on failure
        if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
            ret['result'] = False
            ret['comment'] = 'failed to delete vm {0}'.format(name)
        else:
            ret['comment'] = 'vm {0} deleted'.format(name)
            ret['changes'][name] = None

    return ret
def vm_stopped(name):
    '''
    Ensure vm is in the stopped state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name in __salt__['vmadm.list'](order='hostname', search='state=stopped'):
        # we're good
        ret['result'] = True
        ret['comment'] = 'vm {0} already stopped'.format(name)
    else:
        # stop the vm
        ret['result'] = True if __opts__['test'] else __salt__['vmadm.stop'](name, key='hostname')
        # vmadm.stop returns a dict with 'Error' on failure
        if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
            ret['result'] = False
            ret['comment'] = 'failed to stop {0}'.format(name)
        else:
            ret['changes'][name] = 'stopped'
            ret['comment'] = 'vm {0} stopped'.format(name)

    return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
saltstack/salt
|
salt/states/ports.py
|
_repack_options
|
python
|
def _repack_options(options):
    '''
    Repack the options data

    Flattens the SLS list-of-single-key-dicts form via
    salt.utils.data.repack_dictlist and normalizes each value.
    '''
    return dict(
        [
            (six.text_type(x), _normalize(y))
            for x, y in six.iteritems(salt.utils.data.repack_dictlist(options))
        ]
    )
|
Repack the options data
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/ports.py#L42-L51
| null |
# -*- coding: utf-8 -*-
'''
Manage software from FreeBSD ports
.. versionadded:: 2014.1.0
.. note::
It may be helpful to use a higher timeout when running a
:mod:`ports.installed <salt.states.ports>` state, since compiling the port
may exceed Salt's timeout.
.. code-block:: bash
salt -t 1200 '*' state.highstate
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import copy
import logging
import sys
# Import salt libs
import salt.utils.data
from salt.exceptions import SaltInvocationError, CommandExecutionError
from salt.modules.freebsdports import _normalize, _options_file_exists
# Needed by imported function _options_file_exists
import os # pylint: disable=W0611
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load on FreeBSD when the ports execution module is available.
    '''
    if __grains__.get('os', '') != 'FreeBSD' or 'ports.install' not in __salt__:
        return False
    return 'ports'
def _get_option_list(options):
    '''
    Returns the key/value pairs in the passed dict in a commaspace-delimited
    list in the format "key=value".
    '''
    pairs = ('{0}={1}'.format(key, val) for key, val in six.iteritems(options))
    return ', '.join(pairs)
def _build_option_string(options):
    '''
    Common function to get a string to append to the end of the state comment
    '''
    # empty/None options fall back to the default-options wording
    if not options:
        return 'with the default build options'
    return 'with the following build options: {0}'.format(_get_option_list(options))
def installed(name, options=None):
    '''
    Verify that the desired port is installed, and that it was compiled with
    the desired options.

    options
        Make sure that the desired non-default options are set

        .. warning::
            Any build options not passed here assume the default values for the
            port, and are not just differences from the existing cached options
            from a previous ``make config``.

    Example usage:

    .. code-block:: yaml

        security/nmap:
          ports.installed:
            - options:
              - IPV6: off
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': '{0} is already installed'.format(name)}
    try:
        current_options = __salt__['ports.showconfig'](name, default=False,
                                                       dict_return=True)
        default_options = __salt__['ports.showconfig'](name, default=True,
                                                       dict_return=True)
        # unpack the options from the top-level return dict
        if current_options:
            current_options = current_options[next(iter(current_options))]
        if default_options:
            default_options = default_options[next(iter(default_options))]
    except (SaltInvocationError, CommandExecutionError) as exc:
        ret['result'] = False
        ret['comment'] = ('Unable to get configuration for {0}. Port name may '
                          'be invalid, or ports tree may need to be updated. '
                          'Error message: {1}'.format(name, exc))
        return ret

    # desired options = port defaults overlaid with the user's options
    options = _repack_options(options) if options is not None else {}
    desired_options = copy.deepcopy(default_options)
    desired_options.update(options)

    ports_pre = [
        x['origin'] for x in
        six.itervalues(__salt__['pkg.list_pkgs'](with_origin=True))
    ]

    if current_options == desired_options and name in ports_pre:
        # Port is installed as desired
        if options:
            ret['comment'] += ' ' + _build_option_string(options)
        return ret

    if not default_options:
        if options:
            # port has no build options at all, so passing any is an error
            ret['result'] = False
            ret['comment'] = ('{0} does not have any build options, yet '
                              'options were specified'.format(name))
            return ret
        else:
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] = '{0} will be installed'.format(name)
                return ret
    else:
        # reject options the port does not actually offer
        bad_opts = [x for x in options if x not in default_options]
        if bad_opts:
            ret['result'] = False
            ret['comment'] = ('The following options are not available for '
                              '{0}: {1}'.format(name, ', '.join(bad_opts)))
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = '{0} will be installed '.format(name)
            ret['comment'] += _build_option_string(options)
            return ret

        if options:
            if not __salt__['ports.config'](name, reset=True, **options):
                ret['result'] = False
                ret['comment'] = 'Unable to set options for {0}'.format(name)
                return ret
        else:
            # no options requested: wipe any cached "make config" answers
            __salt__['ports.rmconfig'](name)
            if _options_file_exists(name):
                ret['result'] = False
                ret['comment'] = 'Unable to clear options for {0}'.format(name)
                return ret

    ret['changes'] = __salt__['ports.install'](name)
    ports_post = [
        x['origin'] for x in
        six.itervalues(__salt__['pkg.list_pkgs'](with_origin=True))
    ]
    # ports.install stashes any failure in the loader __context__
    err = sys.modules[
        __salt__['test.ping'].__module__
    ].__context__.pop('ports.install_error', None)
    if err or name not in ports_post:
        ret['result'] = False
    if ret['result']:
        ret['comment'] = 'Successfully installed {0}'.format(name)
        if default_options:
            ret['comment'] += ' ' + _build_option_string(options)
    else:
        ret['comment'] = 'Failed to install {0}'.format(name)
        if err:
            ret['comment'] += '. Error message:\n{0}'.format(err)
    return ret
|
saltstack/salt
|
salt/states/ports.py
|
_get_option_list
|
python
|
def _get_option_list(options):
    '''
    Render the key/value pairs of the passed dict as a single
    commaspace-delimited string of "key=value" entries.
    '''
    pairs = ['{0}={1}'.format(key, val) for key, val in six.iteritems(options)]
    return ', '.join(pairs)
|
Returns the key/value pairs in the passed dict in a commaspace-delimited
list in the format "key=value".
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/ports.py#L54-L59
| null |
# -*- coding: utf-8 -*-
'''
Manage software from FreeBSD ports
.. versionadded:: 2014.1.0
.. note::
It may be helpful to use a higher timeout when running a
:mod:`ports.installed <salt.states.ports>` state, since compiling the port
may exceed Salt's timeout.
.. code-block:: bash
salt -t 1200 '*' state.highstate
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import copy
import logging
import sys
# Import salt libs
import salt.utils.data
from salt.exceptions import SaltInvocationError, CommandExecutionError
from salt.modules.freebsdports import _normalize, _options_file_exists
# Needed by imported function _options_file_exists
import os # pylint: disable=W0611
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
    # Only load on FreeBSD minions where the freebsdports execution
    # module is available; otherwise refuse to load this state module.
    if 'ports.install' not in __salt__:
        return False
    if __grains__.get('os', '') != 'FreeBSD':
        return False
    return 'ports'
def _repack_options(options):
    '''
    Repack the options data
    '''
    repacked = salt.utils.data.repack_dictlist(options)
    # Keys are coerced to text and values normalized (on/off handling)
    return {
        six.text_type(key): _normalize(val)
        for key, val in six.iteritems(repacked)
    }
def _build_option_string(options):
    '''
    Common function to get a string to append to the end of the state comment
    '''
    if not options:
        return 'with the default build options'
    return 'with the following build options: {0}'.format(
        _get_option_list(options))
def installed(name, options=None):
    '''
    Verify that the desired port is installed, and that it was compiled with
    the desired options.

    options
        Make sure that the desired non-default options are set

        .. warning::
            Any build options not passed here assume the default values for the
            port, and are not just differences from the existing cached options
            from a previous ``make config``.

    Example usage:

    .. code-block:: yaml

        security/nmap:
          ports.installed:
            - options:
              - IPV6: off
    '''
    # Standard state return dict; 'comment' is replaced on every failure path.
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': '{0} is already installed'.format(name)}
    try:
        # Fetch both the currently cached build options and the port defaults.
        current_options = __salt__['ports.showconfig'](name, default=False,
                                                       dict_return=True)
        default_options = __salt__['ports.showconfig'](name, default=True,
                                                       dict_return=True)
        # unpack the options from the top-level return dict
        if current_options:
            current_options = current_options[next(iter(current_options))]
        if default_options:
            default_options = default_options[next(iter(default_options))]
    except (SaltInvocationError, CommandExecutionError) as exc:
        ret['result'] = False
        ret['comment'] = ('Unable to get configuration for {0}. Port name may '
                          'be invalid, or ports tree may need to be updated. '
                          'Error message: {1}'.format(name, exc))
        return ret

    # Normalize the user-supplied options into a flat {opt: value} dict, then
    # overlay them on top of the port's defaults to get the desired config.
    options = _repack_options(options) if options is not None else {}
    desired_options = copy.deepcopy(default_options)
    desired_options.update(options)
    # Origins of all packages installed before we do anything.
    ports_pre = [
        x['origin'] for x in
        six.itervalues(__salt__['pkg.list_pkgs'](with_origin=True))
    ]

    if current_options == desired_options and name in ports_pre:
        # Port is installed as desired
        if options:
            ret['comment'] += ' ' + _build_option_string(options)
        return ret

    if not default_options:
        # Port exposes no build options at all.
        if options:
            ret['result'] = False
            ret['comment'] = ('{0} does not have any build options, yet '
                              'options were specified'.format(name))
            return ret
        else:
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] = '{0} will be installed'.format(name)
                return ret
    else:
        # Reject any requested options the port does not actually offer.
        bad_opts = [x for x in options if x not in default_options]
        if bad_opts:
            ret['result'] = False
            ret['comment'] = ('The following options are not available for '
                              '{0}: {1}'.format(name, ', '.join(bad_opts)))
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = '{0} will be installed '.format(name)
            ret['comment'] += _build_option_string(options)
            return ret

        if options:
            # Write the requested options (reset clears any cached config).
            if not __salt__['ports.config'](name, reset=True, **options):
                ret['result'] = False
                ret['comment'] = 'Unable to set options for {0}'.format(name)
                return ret
        else:
            # No options requested: remove any cached options file so the
            # port builds with its defaults.
            __salt__['ports.rmconfig'](name)
            if _options_file_exists(name):
                ret['result'] = False
                ret['comment'] = 'Unable to clear options for {0}'.format(name)
                return ret

    ret['changes'] = __salt__['ports.install'](name)
    # Origins of all packages installed after the install attempt.
    ports_post = [
        x['origin'] for x in
        six.itervalues(__salt__['pkg.list_pkgs'](with_origin=True))
    ]
    # ports.install stashes any build error in the loader __context__ shared
    # with the execution modules; pop it so it is not reported twice.
    err = sys.modules[
        __salt__['test.ping'].__module__
    ].__context__.pop('ports.install_error', None)
    if err or name not in ports_post:
        ret['result'] = False
    if ret['result']:
        ret['comment'] = 'Successfully installed {0}'.format(name)
        if default_options:
            ret['comment'] += ' ' + _build_option_string(options)
    else:
        ret['comment'] = 'Failed to install {0}'.format(name)
        if err:
            ret['comment'] += '. Error message:\n{0}'.format(err)
    return ret
|
saltstack/salt
|
salt/states/ports.py
|
installed
|
python
|
def installed(name, options=None):
'''
Verify that the desired port is installed, and that it was compiled with
the desired options.
options
Make sure that the desired non-default options are set
.. warning::
Any build options not passed here assume the default values for the
port, and are not just differences from the existing cached options
from a previous ``make config``.
Example usage:
.. code-block:: yaml
security/nmap:
ports.installed:
- options:
- IPV6: off
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': '{0} is already installed'.format(name)}
try:
current_options = __salt__['ports.showconfig'](name, default=False,
dict_return=True)
default_options = __salt__['ports.showconfig'](name, default=True,
dict_return=True)
# unpack the options from the top-level return dict
if current_options:
current_options = current_options[next(iter(current_options))]
if default_options:
default_options = default_options[next(iter(default_options))]
except (SaltInvocationError, CommandExecutionError) as exc:
ret['result'] = False
ret['comment'] = ('Unable to get configuration for {0}. Port name may '
'be invalid, or ports tree may need to be updated. '
'Error message: {1}'.format(name, exc))
return ret
options = _repack_options(options) if options is not None else {}
desired_options = copy.deepcopy(default_options)
desired_options.update(options)
ports_pre = [
x['origin'] for x in
six.itervalues(__salt__['pkg.list_pkgs'](with_origin=True))
]
if current_options == desired_options and name in ports_pre:
# Port is installed as desired
if options:
ret['comment'] += ' ' + _build_option_string(options)
return ret
if not default_options:
if options:
ret['result'] = False
ret['comment'] = ('{0} does not have any build options, yet '
'options were specified'.format(name))
return ret
else:
if __opts__['test']:
ret['result'] = None
ret['comment'] = '{0} will be installed'.format(name)
return ret
else:
bad_opts = [x for x in options if x not in default_options]
if bad_opts:
ret['result'] = False
ret['comment'] = ('The following options are not available for '
'{0}: {1}'.format(name, ', '.join(bad_opts)))
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = '{0} will be installed '.format(name)
ret['comment'] += _build_option_string(options)
return ret
if options:
if not __salt__['ports.config'](name, reset=True, **options):
ret['result'] = False
ret['comment'] = 'Unable to set options for {0}'.format(name)
return ret
else:
__salt__['ports.rmconfig'](name)
if _options_file_exists(name):
ret['result'] = False
ret['comment'] = 'Unable to clear options for {0}'.format(name)
return ret
ret['changes'] = __salt__['ports.install'](name)
ports_post = [
x['origin'] for x in
six.itervalues(__salt__['pkg.list_pkgs'](with_origin=True))
]
err = sys.modules[
__salt__['test.ping'].__module__
].__context__.pop('ports.install_error', None)
if err or name not in ports_post:
ret['result'] = False
if ret['result']:
ret['comment'] = 'Successfully installed {0}'.format(name)
if default_options:
ret['comment'] += ' ' + _build_option_string(options)
else:
ret['comment'] = 'Failed to install {0}'.format(name)
if err:
ret['comment'] += '. Error message:\n{0}'.format(err)
return ret
|
Verify that the desired port is installed, and that it was compiled with
the desired options.
options
Make sure that the desired non-default options are set
.. warning::
Any build options not passed here assume the default values for the
port, and are not just differences from the existing cached options
from a previous ``make config``.
Example usage:
.. code-block:: yaml
security/nmap:
ports.installed:
- options:
- IPV6: off
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/ports.py#L73-L186
|
[
"def itervalues(d, **kw):\n return d.itervalues(**kw)\n",
"def _options_file_exists(name):\n '''\n Returns True/False based on whether or not the options file for the\n specified port exists.\n '''\n return os.path.isfile(os.path.join(_options_dir(name), 'options'))\n",
"def _repack_options(options):\n '''\n Repack the options data\n '''\n return dict(\n [\n (six.text_type(x), _normalize(y))\n for x, y in six.iteritems(salt.utils.data.repack_dictlist(options))\n ]\n )\n",
"def _build_option_string(options):\n '''\n Common function to get a string to append to the end of the state comment\n '''\n if options:\n return ('with the following build options: {0}'\n .format(_get_option_list(options)))\n else:\n return 'with the default build options'\n"
] |
# -*- coding: utf-8 -*-
'''
Manage software from FreeBSD ports
.. versionadded:: 2014.1.0
.. note::
It may be helpful to use a higher timeout when running a
:mod:`ports.installed <salt.states.ports>` state, since compiling the port
may exceed Salt's timeout.
.. code-block:: bash
salt -t 1200 '*' state.highstate
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import copy
import logging
import sys
# Import salt libs
import salt.utils.data
from salt.exceptions import SaltInvocationError, CommandExecutionError
from salt.modules.freebsdports import _normalize, _options_file_exists
# Needed by imported function _options_file_exists
import os # pylint: disable=W0611
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
if __grains__.get('os', '') == 'FreeBSD' and 'ports.install' in __salt__:
return 'ports'
return False
def _repack_options(options):
'''
Repack the options data
'''
return dict(
[
(six.text_type(x), _normalize(y))
for x, y in six.iteritems(salt.utils.data.repack_dictlist(options))
]
)
def _get_option_list(options):
'''
Returns the key/value pairs in the passed dict in a commaspace-delimited
list in the format "key=value".
'''
return ', '.join(['{0}={1}'.format(x, y) for x, y in six.iteritems(options)])
def _build_option_string(options):
'''
Common function to get a string to append to the end of the state comment
'''
if options:
return ('with the following build options: {0}'
.format(_get_option_list(options)))
else:
return 'with the default build options'
|
saltstack/salt
|
salt/states/win_network.py
|
_validate
|
python
|
def _validate(dns_proto, dns_servers, ip_proto, ip_addrs, gateway):
'''
Ensure that the configuration passed is formatted correctly and contains
valid IP addresses, etc.
'''
errors = []
# Validate DNS configuration
if dns_proto == 'dhcp':
if dns_servers is not None:
errors.append(
'The dns_servers param cannot be set if unless dns_proto is '
'set to \'static\''
)
else:
if str(dns_servers).lower() in ['none', '[]']:
pass
elif not isinstance(dns_servers, list):
errors.append(
'The dns_servers param must be formatted as a list'
)
else:
bad_ips = [x for x in dns_servers
if not salt.utils.validate.net.ipv4_addr(x)]
if bad_ips:
errors.append('Invalid DNS server IPs: {0}'
.format(', '.join(bad_ips)))
# Validate IP configuration
if ip_proto == 'dhcp':
if ip_addrs is not None:
errors.append(
'The ip_addrs param cannot be set if unless ip_proto is set '
'to \'static\''
)
if gateway is not None:
errors.append(
'A gateway IP cannot be set if unless ip_proto is set to '
'\'static\''
)
else:
if not ip_addrs:
errors.append(
'The ip_addrs param is required to set static IPs'
)
elif not isinstance(ip_addrs, list):
errors.append(
'The ip_addrs param must be formatted as a list'
)
else:
bad_ips = [x for x in ip_addrs
if not salt.utils.validate.net.ipv4_addr(x)]
if bad_ips:
errors.append('The following static IPs are invalid: '
'{0}'.format(', '.join(bad_ips)))
# Validate default gateway
if gateway is not None:
if not salt.utils.validate.net.ipv4_addr(gateway):
errors.append('Gateway IP {0} is invalid'.format(gateway))
return errors
|
Ensure that the configuration passed is formatted correctly and contains
valid IP addresses, etc.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/win_network.py#L94-L154
| null |
# -*- coding: utf-8 -*-
'''
Configuration of network interfaces on Windows hosts
====================================================
.. versionadded:: 2014.1.0
This module provides the ``network`` state(s) on Windows hosts. DNS servers, IP
addresses and default gateways can currently be managed.
Below is an example of the configuration for an interface that uses DHCP for
both DNS servers and IP addresses:
.. code-block:: yaml
Local Area Connection #2:
network.managed:
- dns_proto: dhcp
- ip_proto: dhcp
.. note::
Both the ``dns_proto`` and ``ip_proto`` arguments are required.
Static DNS and IP addresses can be configured like so:
.. code-block:: yaml
Local Area Connection #2:
network.managed:
- dns_proto: static
- dns_servers:
- 8.8.8.8
- 8.8.4.4
- ip_proto: static
- ip_addrs:
- 10.2.3.4/24
.. note::
IP addresses are specified using the format
``<ip-address>/<subnet-length>``. Salt provides a convenience function
called :mod:`ip.get_subnet_length <salt.modules.win_ip.get_subnet_length>`
to calculate the subnet length from a netmask.
Optionally, if you are setting a static IP address, you can also specify the
default gateway using the ``gateway`` parameter:
.. code-block:: yaml
Local Area Connection #2:
network.managed:
- dns_proto: static
- dns_servers:
- 8.8.8.8
- 8.8.4.4
- ip_proto: static
- ip_addrs:
- 10.2.3.4/24
- gateway: 10.2.3.1
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import logging
# Import Salt libs
import salt.utils.data
import salt.utils.platform
import salt.utils.validate.net
from salt.ext.six.moves import range
from salt.exceptions import CommandExecutionError
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
__VALID_PROTO = ('static', 'dhcp')
# Define the module's virtual name
__virtualname__ = 'network'
def __virtual__():
    '''
    Confine this module to Windows systems with the required execution module
    available.
    '''
    if not salt.utils.platform.is_windows():
        return False
    if 'ip.get_interface' not in __salt__:
        return False
    return __virtualname__
def _addrdict_to_ip_addrs(addrs):
'''
Extracts a list of IP/CIDR expressions from a list of addrdicts, as
retrieved from ip.get_interface
'''
return [
'{0}/{1}'.format(x['IP Address'], x['Subnet'].rsplit('/', 1)[-1])
for x in addrs
]
def _changes(cur, dns_proto, dns_servers, ip_proto, ip_addrs, gateway):
    '''
    Compares the current interface against the desired configuration and
    returns a dictionary describing the changes that need to be made.
    '''
    changes = {}

    # DNS: the interface is considered statically configured when the
    # 'Statically Configured DNS Servers' key is present in the query output.
    if 'Statically Configured DNS Servers' in cur:
        cur_dns_proto = 'static'
    else:
        cur_dns_proto = 'dhcp'

    if cur_dns_proto == 'static':
        configured = cur['Statically Configured DNS Servers']
        if not isinstance(configured, list):
            configured = [configured]
        if set(dns_servers or ['None']) != set(configured):
            changes['dns_servers'] = dns_servers
    elif 'DNS servers configured through DHCP' in cur:
        dhcp_servers = cur['DNS servers configured through DHCP']
        if dns_proto == 'static':
            # Currently on DHCP but moving to static: record the new servers.
            if set(dns_servers or ['None']) != set(dhcp_servers):
                changes['dns_servers'] = dns_servers

    cur_ip_proto = 'static' if cur['DHCP enabled'] == 'No' else 'dhcp'
    cur_ip_addrs = _addrdict_to_ip_addrs(cur.get('ip_addrs', []))
    cur_gateway = cur.get('Default Gateway')

    if dns_proto != cur_dns_proto:
        changes['dns_proto'] = dns_proto
    if ip_proto != cur_ip_proto:
        changes['ip_proto'] = ip_proto
    # IP address / gateway changes only apply when the target is static.
    if set(ip_addrs or []) != set(cur_ip_addrs) and ip_proto == 'static':
        changes['ip_addrs'] = ip_addrs
    if gateway != cur_gateway and ip_proto == 'static':
        changes['gateway'] = gateway
    return changes
def managed(name,
            dns_proto=None,
            dns_servers=None,
            ip_proto=None,
            ip_addrs=None,
            gateway=None,
            enabled=True,
            **kwargs):
    '''
    Ensure that the named interface is configured properly.

    Args:

        name (str):
            The name of the interface to manage

        dns_proto (str): None
            Set to ``static`` and use the ``dns_servers`` parameter to provide a
            list of DNS nameservers. set to ``dhcp`` to use DHCP to get the DNS
            servers.

        dns_servers (list): None
            A list of static DNS servers. To clear the list of DNS servers pass
            an empty list (``[]``). ``None`` will make no changes.

        ip_proto (str): None
            Set to ``static`` and use the ``ip_addrs`` and (optionally)
            ``gateway`` parameters to provide a list of static IP addresses and
            the default gateway. Set to ``dhcp`` to use DHCP.

        ip_addrs (list): None
            A list of static IP addresses with netmask flag, ie: 192.168.0.11/24

        gateway (str): None
            The gateway to set for the interface

        enabled (bool): True
            Set to ``False`` to ensure that this interface is disabled.

    Returns:
        dict: A dictionary of old and new settings

    Example:

    .. code-block:: yaml

        Ethernet1:
          network.managed:
            - dns_proto: static
            - dns_servers:
              - 8.8.8.8
              - 8.8.8.4
            - ip_proto: static
            - ip_addrs:
              - 192.168.0.100/24

    Clear DNS entries example:

    .. code-block:: yaml

        Ethernet1:
          network.managed:
            - dns_proto: static
            - dns_servers: []
            - ip_proto: dhcp
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': 'Interface \'{0}\' is up to date'.format(name)
    }

    # Normalize protocols for comparison against __VALID_PROTO
    dns_proto = six.text_type(dns_proto).lower()
    ip_proto = six.text_type(ip_proto).lower()

    # Both protocols must be one of ('static', 'dhcp')
    errors = []
    if dns_proto not in __VALID_PROTO:
        ret['result'] = False
        errors.append('dns_proto must be one of the following: {0}'
                      .format(', '.join(__VALID_PROTO)))

    if ip_proto not in __VALID_PROTO:
        errors.append('ip_proto must be one of the following: {0}'
                      .format(', '.join(__VALID_PROTO)))

    if errors:
        ret['result'] = False
        ret['comment'] = '\n'.join(errors)
        return ret

    try:
        currently_enabled = __salt__['ip.is_enabled'](name)
    except CommandExecutionError:
        # Treat a query failure as "disabled"
        currently_enabled = False

    # Handle enable/disable of the interface before touching its config
    if not enabled:
        if currently_enabled:
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] = ('Interface \'{0}\' will be disabled'
                                  .format(name))
            else:
                ret['result'] = __salt__['ip.disable'](name)
                if not ret['result']:
                    ret['comment'] = ('Failed to disable interface \'{0}\''
                                      .format(name))
        else:
            ret['comment'] += ' (already disabled)'
        return ret
    else:
        if not currently_enabled:
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] = ('Interface \'{0}\' will be enabled'
                                  .format(name))
            else:
                if not __salt__['ip.enable'](name):
                    ret['result'] = False
                    ret['comment'] = ('Failed to enable interface \'{0}\' to '
                                      'make changes'.format(name))
                    return ret

        errors = _validate(dns_proto, dns_servers, ip_proto, ip_addrs, gateway)
        if errors:
            ret['result'] = False
            ret['comment'] = ('The following SLS configuration errors were '
                              'detected:\n- {0}'.format('\n- '.join(errors)))
            return ret

        old = __salt__['ip.get_interface'](name)
        if not old:
            ret['result'] = False
            ret['comment'] = ('Unable to get current configuration for '
                              'interface \'{0}\''.format(name))
            return ret

        # Diff current config against the desired config
        changes = _changes(old,
                           dns_proto,
                           dns_servers,
                           ip_proto,
                           ip_addrs,
                           gateway)

        # If dns_servers is the default `None` make no changes
        # To clear the list, pass an empty dict
        if str(dns_servers).lower() == 'none':
            changes.pop('dns_servers', None)

        if not changes:
            return ret

        if __opts__['test']:
            # Build a human-readable summary of pending changes
            comments = []
            if 'dns_proto' in changes:
                comments.append('DNS protocol will be changed to: {0}'
                                .format(changes['dns_proto']))
            if dns_proto == 'static' and 'dns_servers' in changes:
                if not changes['dns_servers']:
                    comments.append('The list of DNS servers will be cleared')
                else:
                    comments.append(
                        'DNS servers will be set to the following: {0}'
                        .format(', '.join(changes['dns_servers']))
                    )
            if 'ip_proto' in changes:
                comments.append('IP protocol will be changed to: {0}'
                                .format(changes['ip_proto']))
            if ip_proto == 'static':
                if 'ip_addrs' in changes:
                    comments.append(
                        'IP addresses will be set to the following: {0}'
                        .format(', '.join(changes['ip_addrs']))
                    )
                if 'gateway' in changes:
                    if changes['gateway'] is None:
                        comments.append('Default gateway will be removed')
                    else:
                        comments.append(
                            'Default gateway will be set to {0}'
                            .format(changes['gateway'])
                        )
            ret['result'] = None
            ret['comment'] = ('The following changes will be made to '
                              'interface \'{0}\':\n- {1}'
                              .format(name, '\n- '.join(comments)))
            return ret

        # Apply DNS changes
        if changes.get('dns_proto') == 'dhcp':
            __salt__['ip.set_dhcp_dns'](name)

        elif 'dns_servers' in changes:
            if not changes['dns_servers']:
                # To clear the list of DNS servers you have to pass []. Later
                # changes gets passed like *args and a single empty list is
                # converted to an empty tuple. So, you have to add [] here
                changes['dns_servers'] = [[]]

            __salt__['ip.set_static_dns'](name, *changes['dns_servers'])

        # Apply IP changes
        if changes.get('ip_proto') == 'dhcp':
            __salt__['ip.set_dhcp_ip'](name)
        elif changes.get('ip_addrs') or changes.get('gateway') or changes.get('ip_proto') == 'static':
            # Fall back to the requested ip_addrs when only the gateway or
            # protocol changed, so set_static_ip always has addresses to set
            if changes.get('gateway') and not changes.get('ip_addrs'):
                changes['ip_addrs'] = ip_addrs
            if changes.get('ip_proto') == 'static' and not changes.get('ip_addrs'):
                changes['ip_addrs'] = ip_addrs
            # First address replaces the existing config; the rest append
            for idx in range(len(changes['ip_addrs'])):
                if idx == 0:
                    __salt__['ip.set_static_ip'](
                        name,
                        changes['ip_addrs'][idx],
                        gateway=gateway,
                        append=False
                    )
                else:
                    __salt__['ip.set_static_ip'](
                        name,
                        changes['ip_addrs'][idx],
                        gateway=None,
                        append=True
                    )

        # Re-query and verify the interface now matches the desired config
        new = __salt__['ip.get_interface'](name)
        ret['changes'] = salt.utils.data.compare_dicts(old, new)
        if _changes(new, dns_proto, dns_servers, ip_proto, ip_addrs, gateway):
            ret['result'] = False
            ret['comment'] = ('Failed to set desired configuration settings '
                              'for interface \'{0}\''.format(name))
        else:
            ret['comment'] = ('Successfully updated configuration for '
                              'interface \'{0}\''.format(name))

        return ret
|
saltstack/salt
|
salt/states/win_network.py
|
_changes
|
python
|
def _changes(cur, dns_proto, dns_servers, ip_proto, ip_addrs, gateway):
'''
Compares the current interface against the desired configuration and
returns a dictionary describing the changes that need to be made.
'''
changes = {}
cur_dns_proto = (
'static' if 'Statically Configured DNS Servers' in cur
else 'dhcp'
)
if cur_dns_proto == 'static':
if isinstance(cur['Statically Configured DNS Servers'], list):
cur_dns_servers = cur['Statically Configured DNS Servers']
else:
cur_dns_servers = [cur['Statically Configured DNS Servers']]
if set(dns_servers or ['None']) != set(cur_dns_servers):
changes['dns_servers'] = dns_servers
elif 'DNS servers configured through DHCP' in cur:
cur_dns_servers = cur['DNS servers configured through DHCP']
if dns_proto == 'static':
# If we're currently set to 'dhcp' but moving to 'static', specify the changes.
if set(dns_servers or ['None']) != set(cur_dns_servers):
changes['dns_servers'] = dns_servers
cur_ip_proto = 'static' if cur['DHCP enabled'] == 'No' else 'dhcp'
cur_ip_addrs = _addrdict_to_ip_addrs(cur.get('ip_addrs', []))
cur_gateway = cur.get('Default Gateway')
if dns_proto != cur_dns_proto:
changes['dns_proto'] = dns_proto
if ip_proto != cur_ip_proto:
changes['ip_proto'] = ip_proto
if set(ip_addrs or []) != set(cur_ip_addrs):
if ip_proto == 'static':
changes['ip_addrs'] = ip_addrs
if gateway != cur_gateway:
if ip_proto == 'static':
changes['gateway'] = gateway
return changes
|
Compares the current interface against the desired configuration and
returns a dictionary describing the changes that need to be made.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/win_network.py#L168-L206
| null |
# -*- coding: utf-8 -*-
'''
Configuration of network interfaces on Windows hosts
====================================================
.. versionadded:: 2014.1.0
This module provides the ``network`` state(s) on Windows hosts. DNS servers, IP
addresses and default gateways can currently be managed.
Below is an example of the configuration for an interface that uses DHCP for
both DNS servers and IP addresses:
.. code-block:: yaml
Local Area Connection #2:
network.managed:
- dns_proto: dhcp
- ip_proto: dhcp
.. note::
Both the ``dns_proto`` and ``ip_proto`` arguments are required.
Static DNS and IP addresses can be configured like so:
.. code-block:: yaml
Local Area Connection #2:
network.managed:
- dns_proto: static
- dns_servers:
- 8.8.8.8
- 8.8.4.4
- ip_proto: static
- ip_addrs:
- 10.2.3.4/24
.. note::
IP addresses are specified using the format
``<ip-address>/<subnet-length>``. Salt provides a convenience function
called :mod:`ip.get_subnet_length <salt.modules.win_ip.get_subnet_length>`
to calculate the subnet length from a netmask.
Optionally, if you are setting a static IP address, you can also specify the
default gateway using the ``gateway`` parameter:
.. code-block:: yaml
Local Area Connection #2:
network.managed:
- dns_proto: static
- dns_servers:
- 8.8.8.8
- 8.8.4.4
- ip_proto: static
- ip_addrs:
- 10.2.3.4/24
- gateway: 10.2.3.1
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import logging
# Import Salt libs
import salt.utils.data
import salt.utils.platform
import salt.utils.validate.net
from salt.ext.six.moves import range
from salt.exceptions import CommandExecutionError
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
__VALID_PROTO = ('static', 'dhcp')
# Define the module's virtual name
__virtualname__ = 'network'
def __virtual__():
'''
Confine this module to Windows systems with the required execution module
available.
'''
if salt.utils.platform.is_windows() and 'ip.get_interface' in __salt__:
return __virtualname__
return False
def _validate(dns_proto, dns_servers, ip_proto, ip_addrs, gateway):
'''
Ensure that the configuration passed is formatted correctly and contains
valid IP addresses, etc.
'''
errors = []
# Validate DNS configuration
if dns_proto == 'dhcp':
if dns_servers is not None:
errors.append(
'The dns_servers param cannot be set if unless dns_proto is '
'set to \'static\''
)
else:
if str(dns_servers).lower() in ['none', '[]']:
pass
elif not isinstance(dns_servers, list):
errors.append(
'The dns_servers param must be formatted as a list'
)
else:
bad_ips = [x for x in dns_servers
if not salt.utils.validate.net.ipv4_addr(x)]
if bad_ips:
errors.append('Invalid DNS server IPs: {0}'
.format(', '.join(bad_ips)))
# Validate IP configuration
if ip_proto == 'dhcp':
if ip_addrs is not None:
errors.append(
'The ip_addrs param cannot be set if unless ip_proto is set '
'to \'static\''
)
if gateway is not None:
errors.append(
'A gateway IP cannot be set if unless ip_proto is set to '
'\'static\''
)
else:
if not ip_addrs:
errors.append(
'The ip_addrs param is required to set static IPs'
)
elif not isinstance(ip_addrs, list):
errors.append(
'The ip_addrs param must be formatted as a list'
)
else:
bad_ips = [x for x in ip_addrs
if not salt.utils.validate.net.ipv4_addr(x)]
if bad_ips:
errors.append('The following static IPs are invalid: '
'{0}'.format(', '.join(bad_ips)))
# Validate default gateway
if gateway is not None:
if not salt.utils.validate.net.ipv4_addr(gateway):
errors.append('Gateway IP {0} is invalid'.format(gateway))
return errors
def _addrdict_to_ip_addrs(addrs):
'''
Extracts a list of IP/CIDR expressions from a list of addrdicts, as
retrieved from ip.get_interface
'''
return [
'{0}/{1}'.format(x['IP Address'], x['Subnet'].rsplit('/', 1)[-1])
for x in addrs
]
def managed(name,
dns_proto=None,
dns_servers=None,
ip_proto=None,
ip_addrs=None,
gateway=None,
enabled=True,
**kwargs):
'''
Ensure that the named interface is configured properly.
Args:
name (str):
The name of the interface to manage
dns_proto (str): None
Set to ``static`` and use the ``dns_servers`` parameter to provide a
list of DNS nameservers. set to ``dhcp`` to use DHCP to get the DNS
servers.
dns_servers (list): None
A list of static DNS servers. To clear the list of DNS servers pass
an empty list (``[]``). ``None`` will make no changes.
ip_proto (str): None
Set to ``static`` and use the ``ip_addrs`` and (optionally)
``gateway`` parameters to provide a list of static IP addresses and
the default gateway. Set to ``dhcp`` to use DHCP.
ip_addrs (list): None
A list of static IP addresses with netmask flag, ie: 192.168.0.11/24
gateway (str): None
The gateway to set for the interface
enabled (bool): True
Set to ``False`` to ensure that this interface is disabled.
Returns:
dict: A dictionary of old and new settings
Example:
.. code-block:: yaml
Ethernet1:
network.managed:
- dns_proto: static
- dns_servers:
- 8.8.8.8
- 8.8.8.4
- ip_proto: static
- ip_addrs:
- 192.168.0.100/24
Clear DNS entries example:
.. code-block:: yaml
Ethernet1:
network.managed:
- dns_proto: static
- dns_servers: []
- ip_proto: dhcp
'''
ret = {
'name': name,
'changes': {},
'result': True,
'comment': 'Interface \'{0}\' is up to date'.format(name)
}
dns_proto = six.text_type(dns_proto).lower()
ip_proto = six.text_type(ip_proto).lower()
errors = []
if dns_proto not in __VALID_PROTO:
ret['result'] = False
errors.append('dns_proto must be one of the following: {0}'
.format(', '.join(__VALID_PROTO)))
if ip_proto not in __VALID_PROTO:
errors.append('ip_proto must be one of the following: {0}'
.format(', '.join(__VALID_PROTO)))
if errors:
ret['result'] = False
ret['comment'] = '\n'.join(errors)
return ret
try:
currently_enabled = __salt__['ip.is_enabled'](name)
except CommandExecutionError:
currently_enabled = False
if not enabled:
if currently_enabled:
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('Interface \'{0}\' will be disabled'
.format(name))
else:
ret['result'] = __salt__['ip.disable'](name)
if not ret['result']:
ret['comment'] = ('Failed to disable interface \'{0}\''
.format(name))
else:
ret['comment'] += ' (already disabled)'
return ret
else:
if not currently_enabled:
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('Interface \'{0}\' will be enabled'
.format(name))
else:
if not __salt__['ip.enable'](name):
ret['result'] = False
ret['comment'] = ('Failed to enable interface \'{0}\' to '
'make changes'.format(name))
return ret
errors = _validate(dns_proto, dns_servers, ip_proto, ip_addrs, gateway)
if errors:
ret['result'] = False
ret['comment'] = ('The following SLS configuration errors were '
'detected:\n- {0}'.format('\n- '.join(errors)))
return ret
old = __salt__['ip.get_interface'](name)
if not old:
ret['result'] = False
ret['comment'] = ('Unable to get current configuration for '
'interface \'{0}\''.format(name))
return ret
changes = _changes(old,
dns_proto,
dns_servers,
ip_proto,
ip_addrs,
gateway)
# If dns_servers is the default `None` make no changes
# To clear the list, pass an empty dict
if str(dns_servers).lower() == 'none':
changes.pop('dns_servers', None)
if not changes:
return ret
if __opts__['test']:
comments = []
if 'dns_proto' in changes:
comments.append('DNS protocol will be changed to: {0}'
.format(changes['dns_proto']))
if dns_proto == 'static' and 'dns_servers' in changes:
if not changes['dns_servers']:
comments.append('The list of DNS servers will be cleared')
else:
comments.append(
'DNS servers will be set to the following: {0}'
.format(', '.join(changes['dns_servers']))
)
if 'ip_proto' in changes:
comments.append('IP protocol will be changed to: {0}'
.format(changes['ip_proto']))
if ip_proto == 'static':
if 'ip_addrs' in changes:
comments.append(
'IP addresses will be set to the following: {0}'
.format(', '.join(changes['ip_addrs']))
)
if 'gateway' in changes:
if changes['gateway'] is None:
comments.append('Default gateway will be removed')
else:
comments.append(
'Default gateway will be set to {0}'
.format(changes['gateway'])
)
ret['result'] = None
ret['comment'] = ('The following changes will be made to '
'interface \'{0}\':\n- {1}'
.format(name, '\n- '.join(comments)))
return ret
if changes.get('dns_proto') == 'dhcp':
__salt__['ip.set_dhcp_dns'](name)
elif 'dns_servers' in changes:
if not changes['dns_servers']:
# To clear the list of DNS servers you have to pass []. Later
# changes gets passed like *args and a single empty list is
# converted to an empty tuple. So, you have to add [] here
changes['dns_servers'] = [[]]
__salt__['ip.set_static_dns'](name, *changes['dns_servers'])
if changes.get('ip_proto') == 'dhcp':
__salt__['ip.set_dhcp_ip'](name)
elif changes.get('ip_addrs') or changes.get('gateway') or changes.get('ip_proto') == 'static':
if changes.get('gateway') and not changes.get('ip_addrs'):
changes['ip_addrs'] = ip_addrs
if changes.get('ip_proto') == 'static' and not changes.get('ip_addrs'):
changes['ip_addrs'] = ip_addrs
for idx in range(len(changes['ip_addrs'])):
if idx == 0:
__salt__['ip.set_static_ip'](
name,
changes['ip_addrs'][idx],
gateway=gateway,
append=False
)
else:
__salt__['ip.set_static_ip'](
name,
changes['ip_addrs'][idx],
gateway=None,
append=True
)
new = __salt__['ip.get_interface'](name)
ret['changes'] = salt.utils.data.compare_dicts(old, new)
if _changes(new, dns_proto, dns_servers, ip_proto, ip_addrs, gateway):
ret['result'] = False
ret['comment'] = ('Failed to set desired configuration settings '
'for interface \'{0}\''.format(name))
else:
ret['comment'] = ('Successfully updated configuration for '
'interface \'{0}\''.format(name))
return ret
|
saltstack/salt
|
salt/states/win_network.py
|
managed
|
python
|
def managed(name,
            dns_proto=None,
            dns_servers=None,
            ip_proto=None,
            ip_addrs=None,
            gateway=None,
            enabled=True,
            **kwargs):
    '''
    Ensure that the named interface is configured properly.

    Args:

        name (str):
            The name of the interface to manage

        dns_proto (str): None
            Set to ``static`` and use the ``dns_servers`` parameter to provide a
            list of DNS nameservers. set to ``dhcp`` to use DHCP to get the DNS
            servers.

        dns_servers (list): None
            A list of static DNS servers. To clear the list of DNS servers pass
            an empty list (``[]``). ``None`` will make no changes.

        ip_proto (str): None
            Set to ``static`` and use the ``ip_addrs`` and (optionally)
            ``gateway`` parameters to provide a list of static IP addresses and
            the default gateway. Set to ``dhcp`` to use DHCP.

        ip_addrs (list): None
            A list of static IP addresses with netmask flag, ie: 192.168.0.11/24

        gateway (str): None
            The gateway to set for the interface

        enabled (bool): True
            Set to ``False`` to ensure that this interface is disabled.

    Returns:
        dict: A dictionary of old and new settings

    Example:

    .. code-block:: yaml

        Ethernet1:
          network.managed:
            - dns_proto: static
            - dns_servers:
              - 8.8.8.8
              - 8.8.8.4
            - ip_proto: static
            - ip_addrs:
              - 192.168.0.100/24

    Clear DNS entries example:

    .. code-block:: yaml

        Ethernet1:
          network.managed:
            - dns_proto: static
            - dns_servers: []
            - ip_proto: dhcp
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': 'Interface \'{0}\' is up to date'.format(name)
    }

    # Normalize so None/'Static'/'DHCP' all compare predictably against
    # __VALID_PROTO ('static', 'dhcp'); None becomes the string 'none' and
    # fails validation below.
    dns_proto = six.text_type(dns_proto).lower()
    ip_proto = six.text_type(ip_proto).lower()

    errors = []
    if dns_proto not in __VALID_PROTO:
        ret['result'] = False
        errors.append('dns_proto must be one of the following: {0}'
                      .format(', '.join(__VALID_PROTO)))

    if ip_proto not in __VALID_PROTO:
        errors.append('ip_proto must be one of the following: {0}'
                      .format(', '.join(__VALID_PROTO)))

    if errors:
        ret['result'] = False
        ret['comment'] = '\n'.join(errors)
        return ret

    # Treat a lookup failure as "interface not enabled" rather than aborting.
    try:
        currently_enabled = __salt__['ip.is_enabled'](name)
    except CommandExecutionError:
        currently_enabled = False

    if not enabled:
        # Desired state is "disabled": disable if needed, then return without
        # touching any DNS/IP configuration.
        if currently_enabled:
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] = ('Interface \'{0}\' will be disabled'
                                  .format(name))
            else:
                ret['result'] = __salt__['ip.disable'](name)
                if not ret['result']:
                    ret['comment'] = ('Failed to disable interface \'{0}\''
                                      .format(name))
        else:
            ret['comment'] += ' (already disabled)'
        return ret
    else:
        # Desired state is "enabled": the interface must be up before any
        # configuration can be applied.
        if not currently_enabled:
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] = ('Interface \'{0}\' will be enabled'
                                  .format(name))
            else:
                if not __salt__['ip.enable'](name):
                    ret['result'] = False
                    ret['comment'] = ('Failed to enable interface \'{0}\' to '
                                      'make changes'.format(name))
                    return ret

        errors = _validate(dns_proto, dns_servers, ip_proto, ip_addrs, gateway)
        if errors:
            ret['result'] = False
            ret['comment'] = ('The following SLS configuration errors were '
                              'detected:\n- {0}'.format('\n- '.join(errors)))
            return ret

        old = __salt__['ip.get_interface'](name)
        if not old:
            ret['result'] = False
            ret['comment'] = ('Unable to get current configuration for '
                              'interface \'{0}\''.format(name))
            return ret

        changes = _changes(old,
                           dns_proto,
                           dns_servers,
                           ip_proto,
                           ip_addrs,
                           gateway)

        # If dns_servers is the default `None` make no changes
        # To clear the list, pass an empty dict
        if str(dns_servers).lower() == 'none':
            changes.pop('dns_servers', None)

        if not changes:
            return ret

        if __opts__['test']:
            # Test mode: describe every pending change, apply nothing.
            comments = []
            if 'dns_proto' in changes:
                comments.append('DNS protocol will be changed to: {0}'
                                .format(changes['dns_proto']))
            if dns_proto == 'static' and 'dns_servers' in changes:
                if not changes['dns_servers']:
                    comments.append('The list of DNS servers will be cleared')
                else:
                    comments.append(
                        'DNS servers will be set to the following: {0}'
                        .format(', '.join(changes['dns_servers']))
                    )
            if 'ip_proto' in changes:
                comments.append('IP protocol will be changed to: {0}'
                                .format(changes['ip_proto']))
            if ip_proto == 'static':
                if 'ip_addrs' in changes:
                    comments.append(
                        'IP addresses will be set to the following: {0}'
                        .format(', '.join(changes['ip_addrs']))
                    )
                if 'gateway' in changes:
                    if changes['gateway'] is None:
                        comments.append('Default gateway will be removed')
                    else:
                        comments.append(
                            'Default gateway will be set to {0}'
                            .format(changes['gateway'])
                        )
            ret['result'] = None
            ret['comment'] = ('The following changes will be made to '
                              'interface \'{0}\':\n- {1}'
                              .format(name, '\n- '.join(comments)))
            return ret

        # Apply DNS changes first, then IP changes.
        if changes.get('dns_proto') == 'dhcp':
            __salt__['ip.set_dhcp_dns'](name)

        elif 'dns_servers' in changes:
            if not changes['dns_servers']:
                # To clear the list of DNS servers you have to pass []. Later
                # changes gets passed like *args and a single empty list is
                # converted to an empty tuple. So, you have to add [] here
                changes['dns_servers'] = [[]]

            __salt__['ip.set_static_dns'](name, *changes['dns_servers'])

        if changes.get('ip_proto') == 'dhcp':
            __salt__['ip.set_dhcp_ip'](name)
        elif changes.get('ip_addrs') or changes.get('gateway') or changes.get('ip_proto') == 'static':
            # Fall back to the requested ip_addrs when only the gateway or the
            # protocol changed, so the loop below always has addresses to set.
            if changes.get('gateway') and not changes.get('ip_addrs'):
                changes['ip_addrs'] = ip_addrs
            if changes.get('ip_proto') == 'static' and not changes.get('ip_addrs'):
                changes['ip_addrs'] = ip_addrs
            # The first address replaces the existing config (append=False) and
            # carries the gateway; subsequent addresses are appended.
            for idx in range(len(changes['ip_addrs'])):
                if idx == 0:
                    __salt__['ip.set_static_ip'](
                        name,
                        changes['ip_addrs'][idx],
                        gateway=gateway,
                        append=False
                    )
                else:
                    __salt__['ip.set_static_ip'](
                        name,
                        changes['ip_addrs'][idx],
                        gateway=None,
                        append=True
                    )

        # Re-read the interface and verify the desired state was reached.
        new = __salt__['ip.get_interface'](name)
        ret['changes'] = salt.utils.data.compare_dicts(old, new)
        if _changes(new, dns_proto, dns_servers, ip_proto, ip_addrs, gateway):
            ret['result'] = False
            ret['comment'] = ('Failed to set desired configuration settings '
                              'for interface \'{0}\''.format(name))
        else:
            ret['comment'] = ('Successfully updated configuration for '
                              'interface \'{0}\''.format(name))

    return ret
|
Ensure that the named interface is configured properly.
Args:
name (str):
The name of the interface to manage
dns_proto (str): None
Set to ``static`` and use the ``dns_servers`` parameter to provide a
list of DNS nameservers. set to ``dhcp`` to use DHCP to get the DNS
servers.
dns_servers (list): None
A list of static DNS servers. To clear the list of DNS servers pass
an empty list (``[]``). ``None`` will make no changes.
ip_proto (str): None
Set to ``static`` and use the ``ip_addrs`` and (optionally)
``gateway`` parameters to provide a list of static IP addresses and
the default gateway. Set to ``dhcp`` to use DHCP.
ip_addrs (list): None
A list of static IP addresses with netmask flag, ie: 192.168.0.11/24
gateway (str): None
The gateway to set for the interface
enabled (bool): True
Set to ``False`` to ensure that this interface is disabled.
Returns:
dict: A dictionary of old and new settings
Example:
.. code-block:: yaml
Ethernet1:
network.managed:
- dns_proto: static
- dns_servers:
- 8.8.8.8
- 8.8.8.4
- ip_proto: static
- ip_addrs:
- 192.168.0.100/24
Clear DNS entries example:
.. code-block:: yaml
Ethernet1:
network.managed:
- dns_proto: static
- dns_servers: []
- ip_proto: dhcp
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/win_network.py#L209-L442
|
[
"def _changes(cur, dns_proto, dns_servers, ip_proto, ip_addrs, gateway):\n '''\n Compares the current interface against the desired configuration and\n returns a dictionary describing the changes that need to be made.\n '''\n changes = {}\n cur_dns_proto = (\n 'static' if 'Statically Configured DNS Servers' in cur\n else 'dhcp'\n )\n if cur_dns_proto == 'static':\n if isinstance(cur['Statically Configured DNS Servers'], list):\n cur_dns_servers = cur['Statically Configured DNS Servers']\n else:\n cur_dns_servers = [cur['Statically Configured DNS Servers']]\n if set(dns_servers or ['None']) != set(cur_dns_servers):\n changes['dns_servers'] = dns_servers\n elif 'DNS servers configured through DHCP' in cur:\n cur_dns_servers = cur['DNS servers configured through DHCP']\n if dns_proto == 'static':\n # If we're currently set to 'dhcp' but moving to 'static', specify the changes.\n if set(dns_servers or ['None']) != set(cur_dns_servers):\n changes['dns_servers'] = dns_servers\n\n cur_ip_proto = 'static' if cur['DHCP enabled'] == 'No' else 'dhcp'\n cur_ip_addrs = _addrdict_to_ip_addrs(cur.get('ip_addrs', []))\n cur_gateway = cur.get('Default Gateway')\n\n if dns_proto != cur_dns_proto:\n changes['dns_proto'] = dns_proto\n if ip_proto != cur_ip_proto:\n changes['ip_proto'] = ip_proto\n if set(ip_addrs or []) != set(cur_ip_addrs):\n if ip_proto == 'static':\n changes['ip_addrs'] = ip_addrs\n if gateway != cur_gateway:\n if ip_proto == 'static':\n changes['gateway'] = gateway\n return changes\n",
"def _validate(dns_proto, dns_servers, ip_proto, ip_addrs, gateway):\n '''\n Ensure that the configuration passed is formatted correctly and contains\n valid IP addresses, etc.\n '''\n errors = []\n # Validate DNS configuration\n if dns_proto == 'dhcp':\n if dns_servers is not None:\n errors.append(\n 'The dns_servers param cannot be set if unless dns_proto is '\n 'set to \\'static\\''\n )\n else:\n if str(dns_servers).lower() in ['none', '[]']:\n pass\n elif not isinstance(dns_servers, list):\n errors.append(\n 'The dns_servers param must be formatted as a list'\n )\n else:\n bad_ips = [x for x in dns_servers\n if not salt.utils.validate.net.ipv4_addr(x)]\n if bad_ips:\n errors.append('Invalid DNS server IPs: {0}'\n .format(', '.join(bad_ips)))\n\n # Validate IP configuration\n if ip_proto == 'dhcp':\n if ip_addrs is not None:\n errors.append(\n 'The ip_addrs param cannot be set if unless ip_proto is set '\n 'to \\'static\\''\n )\n if gateway is not None:\n errors.append(\n 'A gateway IP cannot be set if unless ip_proto is set to '\n '\\'static\\''\n )\n else:\n if not ip_addrs:\n errors.append(\n 'The ip_addrs param is required to set static IPs'\n )\n elif not isinstance(ip_addrs, list):\n errors.append(\n 'The ip_addrs param must be formatted as a list'\n )\n else:\n bad_ips = [x for x in ip_addrs\n if not salt.utils.validate.net.ipv4_addr(x)]\n if bad_ips:\n errors.append('The following static IPs are invalid: '\n '{0}'.format(', '.join(bad_ips)))\n\n # Validate default gateway\n if gateway is not None:\n if not salt.utils.validate.net.ipv4_addr(gateway):\n errors.append('Gateway IP {0} is invalid'.format(gateway))\n\n return errors\n"
] |
# -*- coding: utf-8 -*-
'''
Configuration of network interfaces on Windows hosts
====================================================
.. versionadded:: 2014.1.0
This module provides the ``network`` state(s) on Windows hosts. DNS servers, IP
addresses and default gateways can currently be managed.
Below is an example of the configuration for an interface that uses DHCP for
both DNS servers and IP addresses:
.. code-block:: yaml
Local Area Connection #2:
network.managed:
- dns_proto: dhcp
- ip_proto: dhcp
.. note::
Both the ``dns_proto`` and ``ip_proto`` arguments are required.
Static DNS and IP addresses can be configured like so:
.. code-block:: yaml
Local Area Connection #2:
network.managed:
- dns_proto: static
- dns_servers:
- 8.8.8.8
- 8.8.4.4
- ip_proto: static
- ip_addrs:
- 10.2.3.4/24
.. note::
IP addresses are specified using the format
``<ip-address>/<subnet-length>``. Salt provides a convenience function
called :mod:`ip.get_subnet_length <salt.modules.win_ip.get_subnet_length>`
to calculate the subnet length from a netmask.
Optionally, if you are setting a static IP address, you can also specify the
default gateway using the ``gateway`` parameter:
.. code-block:: yaml
Local Area Connection #2:
network.managed:
- dns_proto: static
- dns_servers:
- 8.8.8.8
- 8.8.4.4
- ip_proto: static
- ip_addrs:
- 10.2.3.4/24
- gateway: 10.2.3.1
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import logging
# Import Salt libs
import salt.utils.data
import salt.utils.platform
import salt.utils.validate.net
from salt.ext.six.moves import range
from salt.exceptions import CommandExecutionError
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
__VALID_PROTO = ('static', 'dhcp')
# Define the module's virtual name
__virtualname__ = 'network'
def __virtual__():
'''
Confine this module to Windows systems with the required execution module
available.
'''
if salt.utils.platform.is_windows() and 'ip.get_interface' in __salt__:
return __virtualname__
return False
def _validate(dns_proto, dns_servers, ip_proto, ip_addrs, gateway):
'''
Ensure that the configuration passed is formatted correctly and contains
valid IP addresses, etc.
'''
errors = []
# Validate DNS configuration
if dns_proto == 'dhcp':
if dns_servers is not None:
errors.append(
'The dns_servers param cannot be set if unless dns_proto is '
'set to \'static\''
)
else:
if str(dns_servers).lower() in ['none', '[]']:
pass
elif not isinstance(dns_servers, list):
errors.append(
'The dns_servers param must be formatted as a list'
)
else:
bad_ips = [x for x in dns_servers
if not salt.utils.validate.net.ipv4_addr(x)]
if bad_ips:
errors.append('Invalid DNS server IPs: {0}'
.format(', '.join(bad_ips)))
# Validate IP configuration
if ip_proto == 'dhcp':
if ip_addrs is not None:
errors.append(
'The ip_addrs param cannot be set if unless ip_proto is set '
'to \'static\''
)
if gateway is not None:
errors.append(
'A gateway IP cannot be set if unless ip_proto is set to '
'\'static\''
)
else:
if not ip_addrs:
errors.append(
'The ip_addrs param is required to set static IPs'
)
elif not isinstance(ip_addrs, list):
errors.append(
'The ip_addrs param must be formatted as a list'
)
else:
bad_ips = [x for x in ip_addrs
if not salt.utils.validate.net.ipv4_addr(x)]
if bad_ips:
errors.append('The following static IPs are invalid: '
'{0}'.format(', '.join(bad_ips)))
# Validate default gateway
if gateway is not None:
if not salt.utils.validate.net.ipv4_addr(gateway):
errors.append('Gateway IP {0} is invalid'.format(gateway))
return errors
def _addrdict_to_ip_addrs(addrs):
'''
Extracts a list of IP/CIDR expressions from a list of addrdicts, as
retrieved from ip.get_interface
'''
return [
'{0}/{1}'.format(x['IP Address'], x['Subnet'].rsplit('/', 1)[-1])
for x in addrs
]
def _changes(cur, dns_proto, dns_servers, ip_proto, ip_addrs, gateway):
    '''
    Compares the current interface against the desired configuration and
    returns a dictionary describing the changes that need to be made.

    ``cur`` is the dict returned by ``ip.get_interface``; the remaining
    arguments are the desired settings. The returned dict may contain the
    keys ``dns_proto``, ``dns_servers``, ``ip_proto``, ``ip_addrs`` and
    ``gateway``; an empty dict means no changes are needed.
    '''
    changes = {}
    # The presence of this key in the interface data is what distinguishes
    # statically configured DNS from DHCP-assigned DNS.
    cur_dns_proto = (
        'static' if 'Statically Configured DNS Servers' in cur
        else 'dhcp'
    )
    if cur_dns_proto == 'static':
        # A single server may come back as a bare string; normalize to a list.
        if isinstance(cur['Statically Configured DNS Servers'], list):
            cur_dns_servers = cur['Statically Configured DNS Servers']
        else:
            cur_dns_servers = [cur['Statically Configured DNS Servers']]
        # ['None'] is a sentinel so that dns_servers=None never compares
        # equal to a real server list.
        if set(dns_servers or ['None']) != set(cur_dns_servers):
            changes['dns_servers'] = dns_servers
    elif 'DNS servers configured through DHCP' in cur:
        cur_dns_servers = cur['DNS servers configured through DHCP']
        if dns_proto == 'static':
            # If we're currently set to 'dhcp' but moving to 'static', specify the changes.
            if set(dns_servers or ['None']) != set(cur_dns_servers):
                changes['dns_servers'] = dns_servers

    cur_ip_proto = 'static' if cur['DHCP enabled'] == 'No' else 'dhcp'
    cur_ip_addrs = _addrdict_to_ip_addrs(cur.get('ip_addrs', []))
    cur_gateway = cur.get('Default Gateway')

    if dns_proto != cur_dns_proto:
        changes['dns_proto'] = dns_proto
    if ip_proto != cur_ip_proto:
        changes['ip_proto'] = ip_proto
    # IP address / gateway changes are only meaningful when the desired
    # protocol is static; under DHCP they are assigned automatically.
    if set(ip_addrs or []) != set(cur_ip_addrs):
        if ip_proto == 'static':
            changes['ip_addrs'] = ip_addrs
    if gateway != cur_gateway:
        if ip_proto == 'static':
            changes['gateway'] = gateway
    return changes
|
saltstack/salt
|
salt/utils/rsax931.py
|
_load_libcrypto
|
python
|
def _load_libcrypto():
    '''
    Load OpenSSL libcrypto

    Locates and loads the libcrypto shared library for the current platform
    and returns a ctypes CDLL handle.

    :raises OSError: if no libcrypto shared library can be located
    '''
    if sys.platform.startswith('win'):
        # cdll.LoadLibrary on windows requires an 'str' argument
        return cdll.LoadLibrary(str('libeay32')) # future lint: disable=blacklisted-function
    elif getattr(sys, 'frozen', False) and salt.utils.platform.is_smartos():
        # Frozen (bundled) executables on SmartOS ship libcrypto.so next to
        # the interpreter binary; pick the first match.
        return cdll.LoadLibrary(glob.glob(os.path.join(
            os.path.dirname(sys.executable),
            'libcrypto.so*'))[0])
    else:
        lib = find_library('crypto')
        if not lib and sys.platform.startswith('sunos5'):
            # ctypes.util.find_library defaults to 32 bit library path on sunos5, test for 64 bit python execution
            lib = find_library('crypto', sys.maxsize > 2**32)
        if not lib and salt.utils.platform.is_sunos():
            # Solaris-like distribution that use pkgsrc have
            # libraries in a non standard location.
            # (SmartOS, OmniOS, OpenIndiana, ...)
            # This could be /opt/tools/lib (Global Zone)
            # or /opt/local/lib (non-Global Zone), thus the
            # two checks below
            lib = glob.glob('/opt/local/lib/libcrypto.so*') + glob.glob('/opt/tools/lib/libcrypto.so*')
            lib = lib[0] if lib else None
        if lib:
            return cdll.LoadLibrary(lib)
        raise OSError('Cannot locate OpenSSL libcrypto')
|
Load OpenSSL libcrypto
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/rsax931.py#L26-L53
| null |
# -*- coding: utf-8 -*-
'''
Create and verify ANSI X9.31 RSA signatures using OpenSSL libcrypto
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import glob
import sys
import os
# Import Salt libs
import salt.utils.platform
import salt.utils.stringutils
# Import 3rd-party libs
from ctypes import cdll, c_char_p, c_int, c_void_p, pointer, create_string_buffer
from ctypes.util import find_library
# Constants taken from openssl-1.1.0c/include/openssl/crypto.h
OPENSSL_INIT_ADD_ALL_CIPHERS = 0x00000004
OPENSSL_INIT_ADD_ALL_DIGESTS = 0x00000008
OPENSSL_INIT_NO_LOAD_CONFIG = 0x00000080
def _init_libcrypto():
    '''
    Set up libcrypto argtypes and initialize the library

    Loads libcrypto, runs the appropriate OpenSSL initialization call, and
    declares the ctypes prototypes of every libcrypto function this module
    uses. Declaring ``argtypes``/``restype`` is required so ctypes marshals
    pointers correctly (on 64-bit platforms an undeclared pointer would be
    truncated to a C int).

    :return: the initialized ctypes CDLL handle
    '''
    libcrypto = _load_libcrypto()

    try:
        # OpenSSL >= 1.1 self-initializes; the default flags are sufficient.
        libcrypto.OPENSSL_init_crypto()
    except AttributeError:
        # Support for OpenSSL < 1.1 (OPENSSL_API_COMPAT < 0x10100000L)
        libcrypto.OPENSSL_no_config()
        libcrypto.OPENSSL_add_all_algorithms_noconf()

    libcrypto.RSA_new.argtypes = ()
    libcrypto.RSA_new.restype = c_void_p
    libcrypto.RSA_free.argtypes = (c_void_p, )
    # Bug fix: the ctypes attribute is ``argtypes`` (plural). The previous
    # ``RSA_size.argtype = (c_void_p)`` set a meaningless attribute and was
    # silently ignored, leaving the RSA pointer argument undeclared.
    libcrypto.RSA_size.argtypes = (c_void_p, )
    libcrypto.RSA_size.restype = c_int
    libcrypto.BIO_new_mem_buf.argtypes = (c_char_p, c_int)
    libcrypto.BIO_new_mem_buf.restype = c_void_p
    libcrypto.BIO_free.argtypes = (c_void_p, )
    libcrypto.PEM_read_bio_RSAPrivateKey.argtypes = (c_void_p, c_void_p, c_void_p, c_void_p)
    libcrypto.PEM_read_bio_RSAPrivateKey.restype = c_void_p
    libcrypto.PEM_read_bio_RSA_PUBKEY.argtypes = (c_void_p, c_void_p, c_void_p, c_void_p)
    libcrypto.PEM_read_bio_RSA_PUBKEY.restype = c_void_p
    libcrypto.RSA_private_encrypt.argtypes = (c_int, c_char_p, c_char_p, c_void_p, c_int)
    libcrypto.RSA_public_decrypt.argtypes = (c_int, c_char_p, c_char_p, c_void_p, c_int)
    return libcrypto
libcrypto = _init_libcrypto()
# openssl/rsa.h:#define RSA_X931_PADDING 5
RSA_X931_PADDING = 5
class RSAX931Signer(object):
    '''
    Create ANSI X9.31 RSA signatures using OpenSSL libcrypto
    '''
    def __init__(self, keydata):
        '''
        Init an RSAX931Signer instance

        :param str keydata: The RSA private key in PEM format
        :raises ValueError: if the data cannot be parsed as a PEM RSA
            private key
        '''
        keydata = salt.utils.stringutils.to_bytes(keydata, 'ascii')
        # Wrap the PEM bytes in an OpenSSL memory BIO so PEM_read_bio_* can
        # parse them without touching the filesystem.
        self._bio = libcrypto.BIO_new_mem_buf(keydata, len(keydata))
        self._rsa = c_void_p(libcrypto.RSA_new())
        if not libcrypto.PEM_read_bio_RSAPrivateKey(self._bio, pointer(self._rsa), None, None):
            raise ValueError('invalid RSA private key')

    def __del__(self):
        # Release the OpenSSL-allocated BIO and RSA structures; ctypes does
        # not free these automatically.
        libcrypto.BIO_free(self._bio)
        libcrypto.RSA_free(self._rsa)

    def sign(self, msg):
        '''
        Sign a message (digest) using the private key

        :param str msg: The message (digest) to sign
        :rtype: bytes
        :return: The X9.31-padded RSA signature
        :raises ValueError: if the RSA encrypt operation fails
        '''
        # Allocate a buffer large enough for the signature. Freed by ctypes.
        buf = create_string_buffer(libcrypto.RSA_size(self._rsa))
        msg = salt.utils.stringutils.to_bytes(msg)
        size = libcrypto.RSA_private_encrypt(len(msg), msg, buf, self._rsa, RSA_X931_PADDING)
        if size < 0:
            raise ValueError('Unable to encrypt message')
        return buf[0:size]
class RSAX931Verifier(object):
    '''
    Verify ANSI X9.31 RSA signatures using OpenSSL libcrypto
    '''
    def __init__(self, pubdata):
        '''
        Init an RSAX931Verifier instance

        :param str pubdata: The RSA public key in PEM format
        :raises ValueError: if the data cannot be parsed as a PEM RSA
            public key
        '''
        pubdata = salt.utils.stringutils.to_bytes(pubdata, 'ascii')
        # Strip 'RSA ' from the PEM armor (e.g. '-----BEGIN RSA PUBLIC
        # KEY-----') so the key parses via PEM_read_bio_RSA_PUBKEY below;
        # presumably this accepts keys written in PKCS#1 armor -- TODO
        # confirm against the key-generation side.
        pubdata = pubdata.replace(b'RSA ', b'')
        self._bio = libcrypto.BIO_new_mem_buf(pubdata, len(pubdata))
        self._rsa = c_void_p(libcrypto.RSA_new())
        if not libcrypto.PEM_read_bio_RSA_PUBKEY(self._bio, pointer(self._rsa), None, None):
            raise ValueError('invalid RSA public key')

    def __del__(self):
        # Release the OpenSSL-allocated BIO and RSA structures; ctypes does
        # not free these automatically.
        libcrypto.BIO_free(self._bio)
        libcrypto.RSA_free(self._rsa)

    def verify(self, signed):
        '''
        Recover the message (digest) from the signature using the public key

        :param str signed: The signature created with the private key
        :rtype: bytes
        :return: The message (digest) recovered from the signature
        :raises ValueError: if the RSA decrypt operation fails (e.g. the
            signature does not match the key)
        '''
        # Allocate a buffer large enough for the signature. Freed by ctypes.
        buf = create_string_buffer(libcrypto.RSA_size(self._rsa))
        signed = salt.utils.stringutils.to_bytes(signed)
        size = libcrypto.RSA_public_decrypt(len(signed), signed, buf, self._rsa, RSA_X931_PADDING)
        if size < 0:
            raise ValueError('Unable to decrypt message')
        return buf[0:size]
|
saltstack/salt
|
salt/utils/rsax931.py
|
_init_libcrypto
|
python
|
def _init_libcrypto():
'''
Set up libcrypto argtypes and initialize the library
'''
libcrypto = _load_libcrypto()
try:
libcrypto.OPENSSL_init_crypto()
except AttributeError:
# Support for OpenSSL < 1.1 (OPENSSL_API_COMPAT < 0x10100000L)
libcrypto.OPENSSL_no_config()
libcrypto.OPENSSL_add_all_algorithms_noconf()
libcrypto.RSA_new.argtypes = ()
libcrypto.RSA_new.restype = c_void_p
libcrypto.RSA_free.argtypes = (c_void_p, )
libcrypto.RSA_size.argtype = (c_void_p)
libcrypto.BIO_new_mem_buf.argtypes = (c_char_p, c_int)
libcrypto.BIO_new_mem_buf.restype = c_void_p
libcrypto.BIO_free.argtypes = (c_void_p, )
libcrypto.PEM_read_bio_RSAPrivateKey.argtypes = (c_void_p, c_void_p, c_void_p, c_void_p)
libcrypto.PEM_read_bio_RSAPrivateKey.restype = c_void_p
libcrypto.PEM_read_bio_RSA_PUBKEY.argtypes = (c_void_p, c_void_p, c_void_p, c_void_p)
libcrypto.PEM_read_bio_RSA_PUBKEY.restype = c_void_p
libcrypto.RSA_private_encrypt.argtypes = (c_int, c_char_p, c_char_p, c_void_p, c_int)
libcrypto.RSA_public_decrypt.argtypes = (c_int, c_char_p, c_char_p, c_void_p, c_int)
return libcrypto
|
Set up libcrypto argtypes and initialize the library
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/rsax931.py#L56-L83
|
[
"def _load_libcrypto():\n '''\n Load OpenSSL libcrypto\n '''\n if sys.platform.startswith('win'):\n # cdll.LoadLibrary on windows requires an 'str' argument\n return cdll.LoadLibrary(str('libeay32')) # future lint: disable=blacklisted-function\n elif getattr(sys, 'frozen', False) and salt.utils.platform.is_smartos():\n return cdll.LoadLibrary(glob.glob(os.path.join(\n os.path.dirname(sys.executable),\n 'libcrypto.so*'))[0])\n else:\n lib = find_library('crypto')\n if not lib and sys.platform.startswith('sunos5'):\n # ctypes.util.find_library defaults to 32 bit library path on sunos5, test for 64 bit python execution\n lib = find_library('crypto', sys.maxsize > 2**32)\n if not lib and salt.utils.platform.is_sunos():\n # Solaris-like distribution that use pkgsrc have\n # libraries in a non standard location.\n # (SmartOS, OmniOS, OpenIndiana, ...)\n # This could be /opt/tools/lib (Global Zone)\n # or /opt/local/lib (non-Global Zone), thus the\n # two checks below\n lib = glob.glob('/opt/local/lib/libcrypto.so*') + glob.glob('/opt/tools/lib/libcrypto.so*')\n lib = lib[0] if lib else None\n if lib:\n return cdll.LoadLibrary(lib)\n raise OSError('Cannot locate OpenSSL libcrypto')\n"
] |
# -*- coding: utf-8 -*-
'''
Create and verify ANSI X9.31 RSA signatures using OpenSSL libcrypto
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import glob
import sys
import os
# Import Salt libs
import salt.utils.platform
import salt.utils.stringutils
# Import 3rd-party libs
from ctypes import cdll, c_char_p, c_int, c_void_p, pointer, create_string_buffer
from ctypes.util import find_library
# Constants taken from openssl-1.1.0c/include/openssl/crypto.h
OPENSSL_INIT_ADD_ALL_CIPHERS = 0x00000004
OPENSSL_INIT_ADD_ALL_DIGESTS = 0x00000008
OPENSSL_INIT_NO_LOAD_CONFIG = 0x00000080
def _load_libcrypto():
'''
Load OpenSSL libcrypto
'''
if sys.platform.startswith('win'):
# cdll.LoadLibrary on windows requires an 'str' argument
return cdll.LoadLibrary(str('libeay32')) # future lint: disable=blacklisted-function
elif getattr(sys, 'frozen', False) and salt.utils.platform.is_smartos():
return cdll.LoadLibrary(glob.glob(os.path.join(
os.path.dirname(sys.executable),
'libcrypto.so*'))[0])
else:
lib = find_library('crypto')
if not lib and sys.platform.startswith('sunos5'):
# ctypes.util.find_library defaults to 32 bit library path on sunos5, test for 64 bit python execution
lib = find_library('crypto', sys.maxsize > 2**32)
if not lib and salt.utils.platform.is_sunos():
# Solaris-like distribution that use pkgsrc have
# libraries in a non standard location.
# (SmartOS, OmniOS, OpenIndiana, ...)
# This could be /opt/tools/lib (Global Zone)
# or /opt/local/lib (non-Global Zone), thus the
# two checks below
lib = glob.glob('/opt/local/lib/libcrypto.so*') + glob.glob('/opt/tools/lib/libcrypto.so*')
lib = lib[0] if lib else None
if lib:
return cdll.LoadLibrary(lib)
raise OSError('Cannot locate OpenSSL libcrypto')
libcrypto = _init_libcrypto()
# openssl/rsa.h:#define RSA_X931_PADDING 5
RSA_X931_PADDING = 5
class RSAX931Signer(object):
'''
Create ANSI X9.31 RSA signatures using OpenSSL libcrypto
'''
def __init__(self, keydata):
'''
Init an RSAX931Signer instance
:param str keydata: The RSA private key in PEM format
'''
keydata = salt.utils.stringutils.to_bytes(keydata, 'ascii')
self._bio = libcrypto.BIO_new_mem_buf(keydata, len(keydata))
self._rsa = c_void_p(libcrypto.RSA_new())
if not libcrypto.PEM_read_bio_RSAPrivateKey(self._bio, pointer(self._rsa), None, None):
raise ValueError('invalid RSA private key')
def __del__(self):
libcrypto.BIO_free(self._bio)
libcrypto.RSA_free(self._rsa)
def sign(self, msg):
'''
Sign a message (digest) using the private key
:param str msg: The message (digest) to sign
:rtype: str
:return: The signature, or an empty string if the encryption failed
'''
# Allocate a buffer large enough for the signature. Freed by ctypes.
buf = create_string_buffer(libcrypto.RSA_size(self._rsa))
msg = salt.utils.stringutils.to_bytes(msg)
size = libcrypto.RSA_private_encrypt(len(msg), msg, buf, self._rsa, RSA_X931_PADDING)
if size < 0:
raise ValueError('Unable to encrypt message')
return buf[0:size]
class RSAX931Verifier(object):
'''
Verify ANSI X9.31 RSA signatures using OpenSSL libcrypto
'''
def __init__(self, pubdata):
'''
Init an RSAX931Verifier instance
:param str pubdata: The RSA public key in PEM format
'''
pubdata = salt.utils.stringutils.to_bytes(pubdata, 'ascii')
pubdata = pubdata.replace(b'RSA ', b'')
self._bio = libcrypto.BIO_new_mem_buf(pubdata, len(pubdata))
self._rsa = c_void_p(libcrypto.RSA_new())
if not libcrypto.PEM_read_bio_RSA_PUBKEY(self._bio, pointer(self._rsa), None, None):
raise ValueError('invalid RSA public key')
def __del__(self):
libcrypto.BIO_free(self._bio)
libcrypto.RSA_free(self._rsa)
def verify(self, signed):
'''
Recover the message (digest) from the signature using the public key
:param str signed: The signature created with the private key
:rtype: str
:return: The message (digest) recovered from the signature, or an empty
string if the decryption failed
'''
# Allocate a buffer large enough for the signature. Freed by ctypes.
buf = create_string_buffer(libcrypto.RSA_size(self._rsa))
signed = salt.utils.stringutils.to_bytes(signed)
size = libcrypto.RSA_public_decrypt(len(signed), signed, buf, self._rsa, RSA_X931_PADDING)
if size < 0:
raise ValueError('Unable to decrypt message')
return buf[0:size]
|
saltstack/salt
|
salt/utils/rsax931.py
|
RSAX931Signer.sign
|
python
|
def sign(self, msg):
'''
Sign a message (digest) using the private key
:param str msg: The message (digest) to sign
:rtype: str
:return: The signature, or an empty string if the encryption failed
'''
# Allocate a buffer large enough for the signature. Freed by ctypes.
buf = create_string_buffer(libcrypto.RSA_size(self._rsa))
msg = salt.utils.stringutils.to_bytes(msg)
size = libcrypto.RSA_private_encrypt(len(msg), msg, buf, self._rsa, RSA_X931_PADDING)
if size < 0:
raise ValueError('Unable to encrypt message')
return buf[0:size]
|
Sign a message (digest) using the private key
:param str msg: The message (digest) to sign
:rtype: str
:return: The signature, or an empty string if the encryption failed
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/rsax931.py#L112-L126
| null |
class RSAX931Signer(object):
'''
Create ANSI X9.31 RSA signatures using OpenSSL libcrypto
'''
def __init__(self, keydata):
'''
Init an RSAX931Signer instance
:param str keydata: The RSA private key in PEM format
'''
keydata = salt.utils.stringutils.to_bytes(keydata, 'ascii')
self._bio = libcrypto.BIO_new_mem_buf(keydata, len(keydata))
self._rsa = c_void_p(libcrypto.RSA_new())
if not libcrypto.PEM_read_bio_RSAPrivateKey(self._bio, pointer(self._rsa), None, None):
raise ValueError('invalid RSA private key')
def __del__(self):
libcrypto.BIO_free(self._bio)
libcrypto.RSA_free(self._rsa)
|
saltstack/salt
|
salt/utils/rsax931.py
|
RSAX931Verifier.verify
|
python
|
def verify(self, signed):
'''
Recover the message (digest) from the signature using the public key
:param str signed: The signature created with the private key
:rtype: str
:return: The message (digest) recovered from the signature, or an empty
string if the decryption failed
'''
# Allocate a buffer large enough for the signature. Freed by ctypes.
buf = create_string_buffer(libcrypto.RSA_size(self._rsa))
signed = salt.utils.stringutils.to_bytes(signed)
size = libcrypto.RSA_public_decrypt(len(signed), signed, buf, self._rsa, RSA_X931_PADDING)
if size < 0:
raise ValueError('Unable to decrypt message')
return buf[0:size]
|
Recover the message (digest) from the signature using the public key
:param str signed: The signature created with the private key
:rtype: str
:return: The message (digest) recovered from the signature, or an empty
string if the decryption failed
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/rsax931.py#L150-L165
| null |
class RSAX931Verifier(object):
'''
Verify ANSI X9.31 RSA signatures using OpenSSL libcrypto
'''
def __init__(self, pubdata):
'''
Init an RSAX931Verifier instance
:param str pubdata: The RSA public key in PEM format
'''
pubdata = salt.utils.stringutils.to_bytes(pubdata, 'ascii')
pubdata = pubdata.replace(b'RSA ', b'')
self._bio = libcrypto.BIO_new_mem_buf(pubdata, len(pubdata))
self._rsa = c_void_p(libcrypto.RSA_new())
if not libcrypto.PEM_read_bio_RSA_PUBKEY(self._bio, pointer(self._rsa), None, None):
raise ValueError('invalid RSA public key')
def __del__(self):
libcrypto.BIO_free(self._bio)
libcrypto.RSA_free(self._rsa)
|
saltstack/salt
|
salt/modules/cabal.py
|
install
|
python
|
def install(pkg=None,
            pkgs=None,
            user=None,
            install_global=False,
            env=None):
    '''
    Install a cabal package.

    pkg
        A package name in format accepted by cabal-install. See:
        https://wiki.haskell.org/Cabal-Install

    pkgs
        A list of packages names in same format as ``pkg``

    user
        The user to run cabal install with

    install_global
        Install package globally instead of locally

    env
        Environment variables to set when invoking cabal. Uses the
        same ``env`` format as the :py:func:`cmd.run
        <salt.modules.cmdmod.run>` execution function

    CLI Example:

    .. code-block:: bash

        salt '*' cabal.install shellcheck
        salt '*' cabal.install shellcheck-0.3.5
    '''
    cmd_parts = ['cabal install']
    if install_global:
        cmd_parts.append('--global')

    # A single package wins over a list; a list is rendered as one
    # space-separated, individually-quoted argument string.
    target = pkg if pkg else ('" "'.join(pkgs) if pkgs else None)
    if target is not None:
        cmd_parts.append('"{0}"'.format(target))

    result = __salt__['cmd.run_all'](' '.join(cmd_parts), runas=user, env=env)

    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    return result
|
Install a cabal package.
pkg
A package name in format accepted by cabal-install. See:
https://wiki.haskell.org/Cabal-Install
pkgs
A list of packages names in same format as ``pkg``
user
The user to run cabal install with
install_global
Install package globally instead of locally
env
Environment variables to set when invoking cabal. Uses the
same ``env`` format as the :py:func:`cmd.run
<salt.modules.cmdmod.run>` execution function
CLI Example:
.. code-block:: bash
salt '*' cabal.install shellcheck
salt '*' cabal.install shellcheck-0.3.5
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cabal.py#L54-L103
| null |
# -*- coding: utf-8 -*-
'''
Manage and query Cabal packages
===============================
.. versionadded:: 2015.8.0
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import salt.utils.path
from salt.exceptions import CommandExecutionError
logger = logging.getLogger(__name__)
# Function alias to make sure not to shadow built-in's
__func_alias__ = {
'list_': 'list'
}
def __virtual__():
'''
Only work when cabal-install is installed.
'''
return (salt.utils.path.which('cabal') is not None) and \
(salt.utils.path.which('ghc-pkg') is not None)
def update(user=None, env=None):
'''
Updates list of known packages.
user
The user to run cabal update with
env
Environment variables to set when invoking cabal. Uses the
same ``env`` format as the :py:func:`cmd.run
<salt.modules.cmdmod.run>` execution function.
CLI Example:
.. code-block:: bash
salt '*' cabal.update
'''
return __salt__['cmd.run_all']('cabal update', runas=user, env=env)
def list_(
pkg=None,
user=None,
installed=False,
env=None):
'''
List packages matching a search string.
pkg
Search string for matching package names
user
The user to run cabal list with
installed
If True, only return installed packages.
env
Environment variables to set when invoking cabal. Uses the
same ``env`` format as the :py:func:`cmd.run
<salt.modules.cmdmod.run>` execution function
CLI example:
.. code-block:: bash
salt '*' cabal.list
salt '*' cabal.list ShellCheck
'''
cmd = ['cabal list --simple-output']
if installed:
cmd.append('--installed')
if pkg:
cmd.append('"{0}"'.format(pkg))
result = __salt__['cmd.run_all'](' '.join(cmd), runas=user, env=env)
packages = {}
for line in result['stdout'].splitlines():
data = line.split()
package_name = data[0]
package_version = data[1]
packages[package_name] = package_version
return packages
def uninstall(pkg,
user=None,
env=None):
'''
Uninstall a cabal package.
pkg
The package to uninstall
user
The user to run ghc-pkg unregister with
env
Environment variables to set when invoking cabal. Uses the
same ``env`` format as the :py:func:`cmd.run
<salt.modules.cmdmod.run>` execution function
CLI Example:
.. code-block:: bash
salt '*' cabal.uninstall ShellCheck
'''
cmd = ['ghc-pkg unregister']
cmd.append('"{0}"'.format(pkg))
result = __salt__['cmd.run_all'](' '.join(cmd), runas=user, env=env)
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr'])
return result
|
saltstack/salt
|
salt/modules/cabal.py
|
list_
|
python
|
def list_(
        pkg=None,
        user=None,
        installed=False,
        env=None):
    '''
    List packages matching a search string.

    pkg
        Search string for matching package names

    user
        The user to run cabal list with

    installed
        If True, only return installed packages.

    env
        Environment variables to set when invoking cabal. Uses the
        same ``env`` format as the :py:func:`cmd.run
        <salt.modules.cmdmod.run>` execution function

    CLI example:

    .. code-block:: bash

        salt '*' cabal.list
        salt '*' cabal.list ShellCheck
    '''
    cmd = ['cabal list --simple-output']
    if installed:
        cmd.append('--installed')
    if pkg:
        cmd.append('"{0}"'.format(pkg))

    result = __salt__['cmd.run_all'](' '.join(cmd), runas=user, env=env)

    packages = {}
    for line in result['stdout'].splitlines():
        data = line.split()
        # '--simple-output' emits '<name> <version>' pairs; skip blank or
        # malformed lines instead of raising IndexError and aborting the
        # whole listing.
        if len(data) < 2:
            continue
        packages[data[0]] = data[1]
    return packages
|
List packages matching a search string.
pkg
Search string for matching package names
user
The user to run cabal list with
installed
If True, only return installed packages.
env
Environment variables to set when invoking cabal. Uses the
same ``env`` format as the :py:func:`cmd.run
<salt.modules.cmdmod.run>` execution function
CLI example:
.. code-block:: bash
salt '*' cabal.list
salt '*' cabal.list ShellCheck
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cabal.py#L106-L149
| null |
# -*- coding: utf-8 -*-
'''
Manage and query Cabal packages
===============================
.. versionadded:: 2015.8.0
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import salt.utils.path
from salt.exceptions import CommandExecutionError
logger = logging.getLogger(__name__)
# Function alias to make sure not to shadow built-in's
__func_alias__ = {
'list_': 'list'
}
def __virtual__():
'''
Only work when cabal-install is installed.
'''
return (salt.utils.path.which('cabal') is not None) and \
(salt.utils.path.which('ghc-pkg') is not None)
def update(user=None, env=None):
'''
Updates list of known packages.
user
The user to run cabal update with
env
Environment variables to set when invoking cabal. Uses the
same ``env`` format as the :py:func:`cmd.run
<salt.modules.cmdmod.run>` execution function.
CLI Example:
.. code-block:: bash
salt '*' cabal.update
'''
return __salt__['cmd.run_all']('cabal update', runas=user, env=env)
def install(pkg=None,
pkgs=None,
user=None,
install_global=False,
env=None):
'''
Install a cabal package.
pkg
A package name in format accepted by cabal-install. See:
https://wiki.haskell.org/Cabal-Install
pkgs
A list of packages names in same format as ``pkg``
user
The user to run cabal install with
install_global
Install package globally instead of locally
env
Environment variables to set when invoking cabal. Uses the
same ``env`` format as the :py:func:`cmd.run
<salt.modules.cmdmod.run>` execution function
CLI Example:
.. code-block:: bash
salt '*' cabal.install shellcheck
salt '*' cabal.install shellcheck-0.3.5
'''
cmd = ['cabal install']
if install_global:
cmd.append('--global')
if pkg:
cmd.append('"{0}"'.format(pkg))
elif pkgs:
cmd.append('"{0}"'.format('" "'.join(pkgs)))
result = __salt__['cmd.run_all'](' '.join(cmd), runas=user, env=env)
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr'])
return result
def uninstall(pkg,
user=None,
env=None):
'''
Uninstall a cabal package.
pkg
The package to uninstall
user
The user to run ghc-pkg unregister with
env
Environment variables to set when invoking cabal. Uses the
same ``env`` format as the :py:func:`cmd.run
<salt.modules.cmdmod.run>` execution function
CLI Example:
.. code-block:: bash
salt '*' cabal.uninstall ShellCheck
'''
cmd = ['ghc-pkg unregister']
cmd.append('"{0}"'.format(pkg))
result = __salt__['cmd.run_all'](' '.join(cmd), runas=user, env=env)
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr'])
return result
|
saltstack/salt
|
salt/modules/cabal.py
|
uninstall
|
python
|
def uninstall(pkg,
              user=None,
              env=None):
    '''
    Uninstall a cabal package.

    pkg
        The package to uninstall

    user
        The user to run ghc-pkg unregister with

    env
        Environment variables to set when invoking cabal. Uses the
        same ``env`` format as the :py:func:`cmd.run
        <salt.modules.cmdmod.run>` execution function

    CLI Example:

    .. code-block:: bash

        salt '*' cabal.uninstall ShellCheck
    '''
    command = 'ghc-pkg unregister "{0}"'.format(pkg)
    result = __salt__['cmd.run_all'](command, runas=user, env=env)

    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    return result
|
Uninstall a cabal package.
pkg
The package to uninstall
user
The user to run ghc-pkg unregister with
env
Environment variables to set when invoking cabal. Uses the
same ``env`` format as the :py:func:`cmd.run
<salt.modules.cmdmod.run>` execution function
CLI Example:
.. code-block:: bash
salt '*' cabal.uninstall ShellCheck
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cabal.py#L152-L182
| null |
# -*- coding: utf-8 -*-
'''
Manage and query Cabal packages
===============================
.. versionadded:: 2015.8.0
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import salt.utils.path
from salt.exceptions import CommandExecutionError
logger = logging.getLogger(__name__)
# Function alias to make sure not to shadow built-in's
__func_alias__ = {
'list_': 'list'
}
def __virtual__():
'''
Only work when cabal-install is installed.
'''
return (salt.utils.path.which('cabal') is not None) and \
(salt.utils.path.which('ghc-pkg') is not None)
def update(user=None, env=None):
'''
Updates list of known packages.
user
The user to run cabal update with
env
Environment variables to set when invoking cabal. Uses the
same ``env`` format as the :py:func:`cmd.run
<salt.modules.cmdmod.run>` execution function.
CLI Example:
.. code-block:: bash
salt '*' cabal.update
'''
return __salt__['cmd.run_all']('cabal update', runas=user, env=env)
def install(pkg=None,
pkgs=None,
user=None,
install_global=False,
env=None):
'''
Install a cabal package.
pkg
A package name in format accepted by cabal-install. See:
https://wiki.haskell.org/Cabal-Install
pkgs
A list of packages names in same format as ``pkg``
user
The user to run cabal install with
install_global
Install package globally instead of locally
env
Environment variables to set when invoking cabal. Uses the
same ``env`` format as the :py:func:`cmd.run
<salt.modules.cmdmod.run>` execution function
CLI Example:
.. code-block:: bash
salt '*' cabal.install shellcheck
salt '*' cabal.install shellcheck-0.3.5
'''
cmd = ['cabal install']
if install_global:
cmd.append('--global')
if pkg:
cmd.append('"{0}"'.format(pkg))
elif pkgs:
cmd.append('"{0}"'.format('" "'.join(pkgs)))
result = __salt__['cmd.run_all'](' '.join(cmd), runas=user, env=env)
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr'])
return result
def list_(
pkg=None,
user=None,
installed=False,
env=None):
'''
List packages matching a search string.
pkg
Search string for matching package names
user
The user to run cabal list with
installed
If True, only return installed packages.
env
Environment variables to set when invoking cabal. Uses the
same ``env`` format as the :py:func:`cmd.run
<salt.modules.cmdmod.run>` execution function
CLI example:
.. code-block:: bash
salt '*' cabal.list
salt '*' cabal.list ShellCheck
'''
cmd = ['cabal list --simple-output']
if installed:
cmd.append('--installed')
if pkg:
cmd.append('"{0}"'.format(pkg))
result = __salt__['cmd.run_all'](' '.join(cmd), runas=user, env=env)
packages = {}
for line in result['stdout'].splitlines():
data = line.split()
package_name = data[0]
package_version = data[1]
packages[package_name] = package_version
return packages
|
saltstack/salt
|
salt/renderers/yamlex.py
|
render
|
python
|
def render(sls_data, saltenv='base', sls='', **kws):
    '''
    Accepts YAML_EX as a string or as a file object and runs it through the YAML_EX
    parser.

    :rtype: A Python data structure
    '''
    # Record (rather than print) any warnings raised during deserialization
    # so they can be routed through the logger with SLS context attached.
    with warnings.catch_warnings(record=True) as caught:
        data = deserialize(sls_data) or {}

    for warning in caught:
        log.warning(
            '%s found in %s saltenv=%s',
            warning.message, salt.utils.url.create(sls), saltenv
        )

    log.debug('Results of SLS rendering: \n%s', data)
    return data
|
Accepts YAML_EX as a string or as a file object and runs it through the YAML_EX
parser.
:rtype: A Python data structure
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/renderers/yamlex.py#L15-L33
|
[
"def create(path, saltenv=None):\n '''\n join `path` and `saltenv` into a 'salt://' URL.\n '''\n if salt.utils.platform.is_windows():\n path = salt.utils.path.sanitize_win_path(path)\n path = salt.utils.data.decode(path)\n\n query = 'saltenv={0}'.format(saltenv) if saltenv else ''\n url = salt.utils.data.decode(urlunparse(('file', '', path, '', query, '')))\n return 'salt://{0}'.format(url[len('file:///'):])\n",
"def deserialize(stream_or_string, **options):\n '''\n Deserialize any string of stream like object into a Python data structure.\n\n :param stream_or_string: stream or string to deserialize.\n :param options: options given to lower yaml module.\n '''\n\n options.setdefault('Loader', Loader)\n try:\n return yaml.load(stream_or_string, **options)\n except ScannerError as error:\n log.exception('Error encountered while deserializing')\n err_type = ERROR_MAP.get(error.problem, 'Unknown yaml render error')\n line_num = error.problem_mark.line + 1\n raise DeserializationError(err_type,\n line_num,\n error.problem_mark.buffer)\n except ConstructorError as error:\n log.exception('Error encountered while deserializing')\n raise DeserializationError(error)\n except Exception as error:\n log.exception('Error encountered while deserializing')\n raise DeserializationError(error)\n"
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
import warnings
# Import salt libs
import salt.utils.url
from salt.serializers.yamlex import deserialize
log = logging.getLogger(__name__)
|
saltstack/salt
|
salt/returners/django_return.py
|
returner
|
python
|
def returner(ret):
    '''
    Signal a Django server that a return is available
    '''
    responses = dispatch.Signal(providing_args=['ret']).send(
        sender='returner', ret=ret)
    # send() yields (receiver, response) pairs; log each for debugging.
    for receiver, response in responses:
        log.debug(
            'Django returner function \'returner\' signaled %s '
            'which responded with %s', receiver, response
        )
|
Signal a Django server that a return is available
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/django_return.py#L57-L67
| null |
# -*- coding: utf-8 -*-
'''
A returner that will inform a Django system that
returns are available using Django's signal system.
https://docs.djangoproject.com/en/dev/topics/signals/
It is up to the Django developer to register necessary
handlers with the signals provided by this returner
and process returns as necessary.
The easiest way to use signals is to import them from
this returner directly and then use a decorator to register
them.
An example Django module that registers a function called
'returner_callback' with this module's 'returner' function:
.. code-block:: python
import salt.returners.django_return
from django.dispatch import receiver
@receiver(salt.returners.django_return, sender=returner)
def returner_callback(sender, ret):
print('I received {0} from {1}'.format(ret, sender))
'''
# Import Python libraries
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libraries
import salt.returners
import salt.utils.jid
log = logging.getLogger(__name__)
HAS_DJANGO = False
try:
from django import dispatch
HAS_DJANGO = True
except ImportError:
HAS_DJANGO = False
# Define this module's virtual name
__virtualname__ = 'django'
def __virtual__():
if not HAS_DJANGO:
return False, 'Could not import django returner; django is not installed.'
return True
def save_load(jid, load, minions=None):
'''
Save the load to the specified jid
'''
signaled = dispatch.Signal(
providing_args=['jid', 'load']).send(
sender='save_load', jid=jid, load=load)
for signal in signaled:
log.debug(
'Django returner function \'save_load\' signaled %s '
'which responded with %s', signal[0], signal[1]
)
def prep_jid(nocache=False, passed_jid=None):
'''
Do any work necessary to prepare a JID, including sending a custom ID
'''
return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__)
|
saltstack/salt
|
salt/returners/django_return.py
|
save_load
|
python
|
def save_load(jid, load, minions=None):
    '''
    Save the load to the specified jid
    '''
    responses = dispatch.Signal(providing_args=['jid', 'load']).send(
        sender='save_load', jid=jid, load=load)
    # send() yields (receiver, response) pairs; log each for debugging.
    for receiver, response in responses:
        log.debug(
            'Django returner function \'save_load\' signaled %s '
            'which responded with %s', receiver, response
        )
|
Save the load to the specified jid
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/django_return.py#L70-L82
| null |
# -*- coding: utf-8 -*-
'''
A returner that will inform a Django system that
returns are available using Django's signal system.
https://docs.djangoproject.com/en/dev/topics/signals/
It is up to the Django developer to register necessary
handlers with the signals provided by this returner
and process returns as necessary.
The easiest way to use signals is to import them from
this returner directly and then use a decorator to register
them.
An example Django module that registers a function called
'returner_callback' with this module's 'returner' function:
.. code-block:: python
import salt.returners.django_return
from django.dispatch import receiver
@receiver(salt.returners.django_return, sender=returner)
def returner_callback(sender, ret):
print('I received {0} from {1}'.format(ret, sender))
'''
# Import Python libraries
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libraries
import salt.returners
import salt.utils.jid
log = logging.getLogger(__name__)
HAS_DJANGO = False
try:
from django import dispatch
HAS_DJANGO = True
except ImportError:
HAS_DJANGO = False
# Define this module's virtual name
__virtualname__ = 'django'
def __virtual__():
if not HAS_DJANGO:
return False, 'Could not import django returner; django is not installed.'
return True
def returner(ret):
'''
Signal a Django server that a return is available
'''
signaled = dispatch.Signal(providing_args=['ret']).send(sender='returner', ret=ret)
for signal in signaled:
log.debug(
'Django returner function \'returner\' signaled %s '
'which responded with %s', signal[0], signal[1]
)
def prep_jid(nocache=False, passed_jid=None):
'''
Do any work necessary to prepare a JID, including sending a custom ID
'''
return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__)
|
saltstack/salt
|
salt/returners/django_return.py
|
prep_jid
|
python
|
def prep_jid(nocache=False, passed_jid=None):
    '''
    Do any work necessary to prepare a JID, including sending a custom ID
    '''
    # A caller-supplied jid always wins; otherwise generate a fresh one.
    if passed_jid is not None:
        return passed_jid
    return salt.utils.jid.gen_jid(__opts__)
|
Do any work necessary to prepare a JID, including sending a custom ID
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/django_return.py#L85-L89
|
[
"def gen_jid(opts=None):\n '''\n Generate a jid\n '''\n if opts is None:\n salt.utils.versions.warn_until(\n 'Sodium',\n 'The `opts` argument was not passed into salt.utils.jid.gen_jid(). '\n 'This will be required starting in {version}.'\n )\n opts = {}\n global LAST_JID_DATETIME # pylint: disable=global-statement\n\n if opts.get('utc_jid', False):\n jid_dt = datetime.datetime.utcnow()\n else:\n jid_dt = datetime.datetime.now()\n if not opts.get('unique_jid', False):\n return '{0:%Y%m%d%H%M%S%f}'.format(jid_dt)\n if LAST_JID_DATETIME and LAST_JID_DATETIME >= jid_dt:\n jid_dt = LAST_JID_DATETIME + datetime.timedelta(microseconds=1)\n LAST_JID_DATETIME = jid_dt\n return '{0:%Y%m%d%H%M%S%f}_{1}'.format(jid_dt, os.getpid())\n"
] |
# -*- coding: utf-8 -*-
'''
A returner that will inform a Django system that
returns are available using Django's signal system.
https://docs.djangoproject.com/en/dev/topics/signals/
It is up to the Django developer to register necessary
handlers with the signals provided by this returner
and process returns as necessary.
The easiest way to use signals is to import them from
this returner directly and then use a decorator to register
them.
An example Django module that registers a function called
'returner_callback' with this module's 'returner' function:
.. code-block:: python
import salt.returners.django_return
from django.dispatch import receiver
@receiver(salt.returners.django_return, sender=returner)
def returner_callback(sender, ret):
print('I received {0} from {1}'.format(ret, sender))
'''
# Import Python libraries
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libraries
import salt.returners
import salt.utils.jid
log = logging.getLogger(__name__)
HAS_DJANGO = False
try:
from django import dispatch
HAS_DJANGO = True
except ImportError:
HAS_DJANGO = False
# Define this module's virtual name
__virtualname__ = 'django'
def __virtual__():
if not HAS_DJANGO:
return False, 'Could not import django returner; django is not installed.'
return True
def returner(ret):
'''
Signal a Django server that a return is available
'''
signaled = dispatch.Signal(providing_args=['ret']).send(sender='returner', ret=ret)
for signal in signaled:
log.debug(
'Django returner function \'returner\' signaled %s '
'which responded with %s', signal[0], signal[1]
)
def save_load(jid, load, minions=None):
'''
Save the load to the specified jid
'''
signaled = dispatch.Signal(
providing_args=['jid', 'load']).send(
sender='save_load', jid=jid, load=load)
for signal in signaled:
log.debug(
'Django returner function \'save_load\' signaled %s '
'which responded with %s', signal[0], signal[1]
)
|
saltstack/salt
|
salt/client/netapi.py
|
NetapiClient.run
|
python
|
def run(self):
    '''
    Load and start all available api modules
    '''
    # Nothing was loaded from the config: log it, but continue — the
    # process manager below still runs (and handles signals) regardless.
    if not len(self.netapi):
        log.error("Did not find any netapi configurations, nothing to start")

    kwargs = {}
    if salt.utils.platform.is_windows():
        # Windows child processes do not inherit the logging setup, so the
        # multiprocessing logging queue and level are passed explicitly.
        kwargs['log_queue'] = salt.log.setup.get_multiprocessing_logging_queue()
        kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()

    # Spawn one RunNetapi child per loaded module exposing a '.start'
    # entry point.
    for fun in self.netapi:
        if fun.endswith('.start'):
            log.info('Starting %s netapi module', fun)
            self.process_manager.add_process(
                RunNetapi,
                args=(self.opts, fun),
                kwargs=kwargs,
                name='RunNetapi'
            )

    # Install the SIGINT/SIGTERM handlers if not done so far
    if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
        # No custom signal handling was added, install our own
        signal.signal(signal.SIGINT, self._handle_signals)
    if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
        # No custom signal handling was added, install our own
        signal.signal(signal.SIGTERM, self._handle_signals)

    # Blocks, supervising the spawned netapi processes.
    self.process_manager.run()
|
Load and start all available api modules
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/netapi.py#L61-L92
| null |
class NetapiClient(object):
'''
Start each netapi module that is configured to run
'''
def __init__(self, opts):
self.opts = opts
self.process_manager = salt.utils.process.ProcessManager(name='NetAPIProcessManager')
self.netapi = salt.loader.netapi(self.opts)
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
|
saltstack/salt
|
salt/tops/varstack_top.py
|
top
|
python
|
def top(**kwargs):
    '''
    Query |varstack| for the top data (states of the minions).
    '''
    # The varstack config path is taken from the master_tops setting.
    config_path = __opts__['master_tops']['varstack']
    evaluated = varstack.Varstack(config_filename=config_path).evaluate(kwargs['grains'])
    return {'base': evaluated['states']}
|
Query |varstack| for the top data (states of the minions).
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/tops/varstack_top.py#L61-L71
| null |
# -*- coding: utf-8 -*-
'''
Use `Varstack <https://github.com/conversis/varstack>`_ to provide tops data
.. |varstack| replace:: **varstack**
This :ref:`master_tops <master-tops-system>` plugin provides access to
the |varstack| hierarchical yaml files, so you can user |varstack| as a full
:mod:`external node classifier <salt.tops.ext_nodes>` and
store state information (top data) in it.
Configuring Varstack
====================
To use varstack as a master top external node classifier, install varstack
as documented. Then, add to your master's configuration:
.. code-block:: yaml
master_tops:
varstack: /path/to/the/config/file/varstack.yaml
Varstack will then use /path/to/the/config/file/varstack.yaml (usually
/etc/varstack.yaml) to determine which configuration
data to return as adapter information. From there you can take a look at the
`README <https://github.com/conversis/varstack/blob/master/README.md>`_ of
varstack to learn how this file is evaluated. The ENC part will just return
the 'states' dictionary for the node.
Ie, if my.fqdn.yaml file contains:
.. code-block:: yaml
---
states:
- sudo
- openssh
- apache
- salt.minion
these will be returned as {'base': ['sudo', 'openssh', 'apache', 'salt.minion']} and
managed by salt as if given from a top.sls file.
'''
from __future__ import absolute_import, print_function, unicode_literals
try:
import varstack
except ImportError:
varstack = None
# Define the module's virtual name
__virtualname__ = 'varstack'
def __virtual__():
return (False, 'varstack not installed') if varstack is None else __virtualname__
|
saltstack/salt
|
salt/auth/keystone.py
|
auth
|
python
|
def auth(username, password):
    '''
    Try and authenticate
    '''
    # Any authentication/authorization failure simply means "not allowed".
    try:
        return client.Client(username=username, password=password,
                             auth_url=get_auth_url()).authenticate()
    except (AuthorizationFailure, Unauthorized):
        return False
|
Try and authenticate
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/auth/keystone.py#L26-L35
|
[
"def get_auth_url():\n '''\n Try and get the URL from the config, else return localhost\n '''\n try:\n return __opts__['keystone.auth_url']\n except KeyError:\n return 'http://localhost:35357/v2.0'\n"
] |
# -*- coding: utf-8 -*-
'''
Provide authentication using OpenStack Keystone
:depends: - keystoneclient Python module
'''
from __future__ import absolute_import, print_function, unicode_literals
try:
from keystoneclient.v2_0 import client
from keystoneclient.exceptions import AuthorizationFailure, Unauthorized
except ImportError:
pass
def get_auth_url():
'''
Try and get the URL from the config, else return localhost
'''
try:
return __opts__['keystone.auth_url']
except KeyError:
return 'http://localhost:35357/v2.0'
if __name__ == '__main__':
__opts__ = {}
if auth('test', 'test'):
print("Authenticated")
else:
print("Failed to authenticate")
|
saltstack/salt
|
salt/modules/xml.py
|
get_value
|
python
|
def get_value(file, element):
    '''
    Returns the value of the matched xpath element

    CLI Example:

    .. code-block:: bash

        salt '*' xml.get_value /tmp/test.xml ".//element"
    '''
    try:
        root = ET.parse(file)
        # Use a separate name for the match so the error message below can
        # still report the requested xpath (the previous code rebound
        # ``element`` and ended up logging ``None`` on a failed lookup).
        node = root.find(element)
        return node.text
    except AttributeError:
        log.error("Unable to find element matching %s", element)
        return False
|
Returns the value of the matched xpath element
CLI Example:
.. code-block:: bash
salt '*' xml.get_value /tmp/test.xml ".//element"
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xml.py#L26-L42
| null |
# -*- coding: utf-8 -*-
'''
XML file mangler
.. versionadded:: Neon
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import xml.etree.ElementTree as ET
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'xml'
def __virtual__():
'''
Only load the module if all modules are imported correctly.
'''
return __virtualname__
def set_value(file, element, value):
'''
Sets the value of the matched xpath element
CLI Example:
.. code-block:: bash
salt '*' xml.set_value /tmp/test.xml ".//element" "new value"
'''
try:
root = ET.parse(file)
relement = root.find(element)
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
relement.text = str(value)
root.write(file)
return True
def get_attribute(file, element):
'''
Return the attributes of the matched xpath element.
CLI Example:
.. code-block:: bash
salt '*' xml.get_attribute /tmp/test.xml ".//element[@id='3']"
'''
try:
root = ET.parse(file)
element = root.find(element)
return element.attrib
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
def set_attribute(file, element, key, value):
'''
Set the requested attribute key and value for matched xpath element.
CLI Example:
.. code-block:: bash
salt '*' xml.set_attribute /tmp/test.xml ".//element[@id='3']" editedby "gal"
'''
try:
root = ET.parse(file)
element = root.find(element)
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
element.set(key, str(value))
root.write(file)
return True
|
saltstack/salt
|
salt/modules/xml.py
|
set_value
|
python
|
def set_value(file, element, value):
    '''
    Sets the value of the matched xpath element

    CLI Example:

    .. code-block:: bash

        salt '*' xml.set_value /tmp/test.xml ".//element" "new value"
    '''
    try:
        root = ET.parse(file)
        relement = root.find(element)
        # root.find() returns None (it does NOT raise) when nothing matches,
        # so the assignment must live inside the try: it is what actually
        # raises AttributeError on a miss. Previously the assignment sat
        # outside the try and the error escaped unhandled.
        relement.text = str(value)
    except AttributeError:
        log.error("Unable to find element matching %s", element)
        return False
    root.write(file)
    return True
|
Sets the value of the matched xpath element
CLI Example:
.. code-block:: bash
salt '*' xml.set_value /tmp/test.xml ".//element" "new value"
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xml.py#L45-L63
| null |
# -*- coding: utf-8 -*-
'''
XML file mangler
.. versionadded:: Neon
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import xml.etree.ElementTree as ET
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'xml'
def __virtual__():
'''
Only load the module if all modules are imported correctly.
'''
return __virtualname__
def get_value(file, element):
'''
Returns the value of the matched xpath element
CLI Example:
.. code-block:: bash
salt '*' xml.get_value /tmp/test.xml ".//element"
'''
try:
root = ET.parse(file)
element = root.find(element)
return element.text
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
def get_attribute(file, element):
'''
Return the attributes of the matched xpath element.
CLI Example:
.. code-block:: bash
salt '*' xml.get_attribute /tmp/test.xml ".//element[@id='3']"
'''
try:
root = ET.parse(file)
element = root.find(element)
return element.attrib
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
def set_attribute(file, element, key, value):
'''
Set the requested attribute key and value for matched xpath element.
CLI Example:
.. code-block:: bash
salt '*' xml.set_attribute /tmp/test.xml ".//element[@id='3']" editedby "gal"
'''
try:
root = ET.parse(file)
element = root.find(element)
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
element.set(key, str(value))
root.write(file)
return True
|
saltstack/salt
|
salt/modules/xml.py
|
get_attribute
|
python
|
def get_attribute(file, element):
'''
Return the attributes of the matched xpath element.
CLI Example:
.. code-block:: bash
salt '*' xml.get_attribute /tmp/test.xml ".//element[@id='3']"
'''
try:
root = ET.parse(file)
element = root.find(element)
return element.attrib
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
|
Return the attributes of the matched xpath element.
CLI Example:
.. code-block:: bash
salt '*' xml.get_attribute /tmp/test.xml ".//element[@id='3']"
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xml.py#L66-L82
| null |
# -*- coding: utf-8 -*-
'''
XML file mangler
.. versionadded:: Neon
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import xml.etree.ElementTree as ET
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'xml'
def __virtual__():
'''
Only load the module if all modules are imported correctly.
'''
return __virtualname__
def get_value(file, element):
'''
Returns the value of the matched xpath element
CLI Example:
.. code-block:: bash
salt '*' xml.get_value /tmp/test.xml ".//element"
'''
try:
root = ET.parse(file)
element = root.find(element)
return element.text
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
def set_value(file, element, value):
    '''
    Sets the value of the matched xpath element

    file
        Path to the XML file to modify (rewritten in place).

    element
        An xpath expression identifying the element.

    value
        New text content (converted to ``str``).

    Returns ``True`` on success, ``False`` when parsing fails or no
    element matches.

    CLI Example:

    .. code-block:: bash

        salt '*' xml.set_value /tmp/test.xml ".//element" "new value"
    '''
    try:
        root = ET.parse(file)
        relement = root.find(element)
    except AttributeError:
        log.error("Unable to find element matching %s", element)
        return False
    # find() returns None (it does not raise) when nothing matches;
    # assigning ``relement.text`` would then crash with an uncaught
    # AttributeError outside the try block.
    if relement is None:
        log.error("Unable to find element matching %s", element)
        return False
    relement.text = str(value)
    root.write(file)
    return True
def set_attribute(file, element, key, value):
'''
Set the requested attribute key and value for matched xpath element.
CLI Example:
.. code-block:: bash
salt '*' xml.set_attribute /tmp/test.xml ".//element[@id='3']" editedby "gal"
'''
try:
root = ET.parse(file)
element = root.find(element)
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
element.set(key, str(value))
root.write(file)
return True
|
saltstack/salt
|
salt/modules/xml.py
|
set_attribute
|
python
|
def set_attribute(file, element, key, value):
'''
Set the requested attribute key and value for matched xpath element.
CLI Example:
.. code-block:: bash
salt '*' xml.set_attribute /tmp/test.xml ".//element[@id='3']" editedby "gal"
'''
try:
root = ET.parse(file)
element = root.find(element)
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
element.set(key, str(value))
root.write(file)
return True
|
Set the requested attribute key and value for matched xpath element.
CLI Example:
.. code-block:: bash
salt '*' xml.set_attribute /tmp/test.xml ".//element[@id='3']" editedby "gal"
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xml.py#L85-L103
| null |
# -*- coding: utf-8 -*-
'''
XML file mangler
.. versionadded:: Neon
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import xml.etree.ElementTree as ET
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'xml'
def __virtual__():
'''
Only load the module if all modules are imported correctly.
'''
return __virtualname__
def get_value(file, element):
'''
Returns the value of the matched xpath element
CLI Example:
.. code-block:: bash
salt '*' xml.get_value /tmp/test.xml ".//element"
'''
try:
root = ET.parse(file)
element = root.find(element)
return element.text
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
def set_value(file, element, value):
'''
Sets the value of the matched xpath element
CLI Example:
.. code-block:: bash
salt '*' xml.set_value /tmp/test.xml ".//element" "new value"
'''
try:
root = ET.parse(file)
relement = root.find(element)
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
relement.text = str(value)
root.write(file)
return True
def get_attribute(file, element):
'''
Return the attributes of the matched xpath element.
CLI Example:
.. code-block:: bash
salt '*' xml.get_attribute /tmp/test.xml ".//element[@id='3']"
'''
try:
root = ET.parse(file)
element = root.find(element)
return element.attrib
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
|
saltstack/salt
|
salt/utils/openstack/swift.py
|
SaltSwift.get_account
|
python
|
def get_account(self):
'''
List Swift containers
'''
try:
listing = self.conn.get_account()
return listing
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
|
List Swift containers
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/swift.py#L97-L109
| null |
class SaltSwift(object):
    '''
    Class for all swiftclient functions

    Thin wrapper around ``swiftclient.client.Connection``: each method
    delegates to the connection and converts any exception into a logged
    error plus a ``False`` return value.
    '''

    def __init__(
        self,
        user,
        tenant_name,
        auth_url,
        password=None,
        auth_version=2,
        **kwargs
    ):
        '''
        Set up openstack credentials
        '''
        if not HAS_SWIFT:
            log.error('Error:: unable to find swiftclient. Try installing it from the appropriate repository.')
            # NOTE(review): returning from __init__ does not abort object
            # construction — the instance still exists, just without
            # ``self.conn``. Confirm callers check HAS_SWIFT themselves.
            return None

        self.kwargs = kwargs.copy()
        self.kwargs['user'] = user
        self.kwargs['password'] = password
        self.kwargs['tenant_name'] = tenant_name
        self.kwargs['authurl'] = auth_url
        self.kwargs['auth_version'] = auth_version
        # swiftclient expects the secret under 'key'; fall back to password.
        if 'key' not in self.kwargs:
            self.kwargs['key'] = password

        # presumably _sanitize strips options swiftclient does not accept —
        # defined elsewhere in this file, verify there.
        self.kwargs = _sanitize(self.kwargs)

        self.conn = client.Connection(**self.kwargs)

    def get_container(self, cont):
        '''
        List files in a Swift container

        Returns the swiftclient listing on success, ``False`` on error.
        '''
        try:
            listing = self.conn.get_container(cont)
            return listing
        except Exception as exc:
            # Log the HTTP code/message when the exception carries them;
            # fall back to the stringified exception for the body.
            log.error('There was an error::')
            if hasattr(exc, 'code') and hasattr(exc, 'msg'):
                log.error(' Code: %s: %s', exc.code, exc.msg)
            log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
            return False

    def put_container(self, cont):
        '''
        Create a new Swift container

        Returns ``True`` on success, ``False`` on error.
        '''
        try:
            self.conn.put_container(cont)
            return True
        except Exception as exc:
            log.error('There was an error::')
            if hasattr(exc, 'code') and hasattr(exc, 'msg'):
                log.error(' Code: %s: %s', exc.code, exc.msg)
            log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
            return False

    def delete_container(self, cont):
        '''
        Delete a Swift container

        Returns ``True`` on success, ``False`` on error.
        '''
        try:
            self.conn.delete_container(cont)
            return True
        except Exception as exc:
            log.error('There was an error::')
            if hasattr(exc, 'code') and hasattr(exc, 'msg'):
                log.error(' Code: %s: %s', exc.code, exc.msg)
            log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
            return False

    def post_container(self, cont, metadata=None):
        '''
        Update container metadata
        '''
        # Not implemented.
        pass

    def head_container(self, cont):
        '''
        Get container metadata
        '''
        # Not implemented.
        pass

    def get_object(self, cont, obj, local_file=None, return_bin=False):
        '''
        Retrieve a file from Swift

        Writes the object to ``local_file``, or streams it to stdout when
        ``return_bin`` is True. Returns ``True`` on success; ``False`` on
        error or when neither output target is given.
        '''
        try:
            # One of the two output targets must be selected.
            if local_file is None and return_bin is False:
                return False

            # Stream the object body in 64 KiB chunks.
            headers, body = self.conn.get_object(cont, obj, resp_chunk_size=65536)

            if return_bin is True:
                fp = sys.stdout
            else:
                # Create the destination directory tree on demand.
                dirpath = dirname(local_file)
                if dirpath and not isdir(dirpath):
                    mkdirs(dirpath)
                fp = salt.utils.files.fopen(local_file, 'wb')  # pylint: disable=resource-leakage

            read_length = 0
            for chunk in body:
                read_length += len(chunk)
                fp.write(chunk)
            # NOTE(review): this also closes sys.stdout in the return_bin
            # case — confirm that is intended.
            fp.close()
            return True

        # ClientException
        # file/dir exceptions
        except Exception as exc:
            log.error('There was an error::')
            if hasattr(exc, 'code') and hasattr(exc, 'msg'):
                log.error(' Code: %s: %s', exc.code, exc.msg)
            log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
            return False

    def put_object(self, cont, obj, local_file):
        '''
        Upload a file to Swift

        Returns ``True`` on success, ``False`` on error.
        '''
        try:
            with salt.utils.files.fopen(local_file, 'rb') as fp_:
                self.conn.put_object(cont, obj, fp_)
            return True
        except Exception as exc:
            log.error('There was an error::')
            if hasattr(exc, 'code') and hasattr(exc, 'msg'):
                log.error(' Code: %s: %s', exc.code, exc.msg)
            log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
            return False

    def delete_object(self, cont, obj):
        '''
        Delete a file from Swift

        Returns ``True`` on success, ``False`` on error.
        '''
        try:
            self.conn.delete_object(cont, obj)
            return True
        except Exception as exc:
            log.error('There was an error::')
            if hasattr(exc, 'code') and hasattr(exc, 'msg'):
                log.error(' Code: %s: %s', exc.code, exc.msg)
            log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
            return False

    def head_object(self, cont, obj):
        '''
        Get object metadata
        '''
        # Not implemented.
        pass

    def post_object(self, cont, obj, metadata):
        '''
        Update object metadata
        '''
        # Not implemented.
        pass
saltstack/salt
|
salt/utils/openstack/swift.py
|
SaltSwift.get_object
|
python
|
def get_object(self, cont, obj, local_file=None, return_bin=False):
'''
Retrieve a file from Swift
'''
try:
if local_file is None and return_bin is False:
return False
headers, body = self.conn.get_object(cont, obj, resp_chunk_size=65536)
if return_bin is True:
fp = sys.stdout
else:
dirpath = dirname(local_file)
if dirpath and not isdir(dirpath):
mkdirs(dirpath)
fp = salt.utils.files.fopen(local_file, 'wb') # pylint: disable=resource-leakage
read_length = 0
for chunk in body:
read_length += len(chunk)
fp.write(chunk)
fp.close()
return True
# ClientException
# file/dir exceptions
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
|
Retrieve a file from Swift
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/swift.py#L165-L197
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def mkdirs(path):\n try:\n makedirs(path)\n except OSError as err:\n if err.errno != EEXIST:\n raise\n"
] |
class SaltSwift(object):
'''
Class for all swiftclient functions
'''
def __init__(
self,
user,
tenant_name,
auth_url,
password=None,
auth_version=2,
**kwargs
):
'''
Set up openstack credentials
'''
if not HAS_SWIFT:
log.error('Error:: unable to find swiftclient. Try installing it from the appropriate repository.')
return None
self.kwargs = kwargs.copy()
self.kwargs['user'] = user
self.kwargs['password'] = password
self.kwargs['tenant_name'] = tenant_name
self.kwargs['authurl'] = auth_url
self.kwargs['auth_version'] = auth_version
if 'key' not in self.kwargs:
self.kwargs['key'] = password
self.kwargs = _sanitize(self.kwargs)
self.conn = client.Connection(**self.kwargs)
def get_account(self):
'''
List Swift containers
'''
try:
listing = self.conn.get_account()
return listing
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def get_container(self, cont):
'''
List files in a Swift container
'''
try:
listing = self.conn.get_container(cont)
return listing
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def put_container(self, cont):
'''
Create a new Swift container
'''
try:
self.conn.put_container(cont)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def delete_container(self, cont):
'''
Delete a Swift container
'''
try:
self.conn.delete_container(cont)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def post_container(self, cont, metadata=None):
'''
Update container metadata
'''
pass
def head_container(self, cont):
'''
Get container metadata
'''
pass
def put_object(self, cont, obj, local_file):
'''
Upload a file to Swift
'''
try:
with salt.utils.files.fopen(local_file, 'rb') as fp_:
self.conn.put_object(cont, obj, fp_)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def delete_object(self, cont, obj):
'''
Delete a file from Swift
'''
try:
self.conn.delete_object(cont, obj)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def head_object(self, cont, obj):
'''
Get object metadata
'''
pass
def post_object(self, cont, obj, metadata):
'''
Update object metadata
'''
pass
|
saltstack/salt
|
salt/utils/openstack/swift.py
|
SaltSwift.put_object
|
python
|
def put_object(self, cont, obj, local_file):
'''
Upload a file to Swift
'''
try:
with salt.utils.files.fopen(local_file, 'rb') as fp_:
self.conn.put_object(cont, obj, fp_)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
|
Upload a file to Swift
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/swift.py#L199-L212
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n"
] |
class SaltSwift(object):
'''
Class for all swiftclient functions
'''
def __init__(
self,
user,
tenant_name,
auth_url,
password=None,
auth_version=2,
**kwargs
):
'''
Set up openstack credentials
'''
if not HAS_SWIFT:
log.error('Error:: unable to find swiftclient. Try installing it from the appropriate repository.')
return None
self.kwargs = kwargs.copy()
self.kwargs['user'] = user
self.kwargs['password'] = password
self.kwargs['tenant_name'] = tenant_name
self.kwargs['authurl'] = auth_url
self.kwargs['auth_version'] = auth_version
if 'key' not in self.kwargs:
self.kwargs['key'] = password
self.kwargs = _sanitize(self.kwargs)
self.conn = client.Connection(**self.kwargs)
def get_account(self):
'''
List Swift containers
'''
try:
listing = self.conn.get_account()
return listing
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def get_container(self, cont):
'''
List files in a Swift container
'''
try:
listing = self.conn.get_container(cont)
return listing
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def put_container(self, cont):
'''
Create a new Swift container
'''
try:
self.conn.put_container(cont)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def delete_container(self, cont):
'''
Delete a Swift container
'''
try:
self.conn.delete_container(cont)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def post_container(self, cont, metadata=None):
'''
Update container metadata
'''
pass
def head_container(self, cont):
'''
Get container metadata
'''
pass
def get_object(self, cont, obj, local_file=None, return_bin=False):
'''
Retrieve a file from Swift
'''
try:
if local_file is None and return_bin is False:
return False
headers, body = self.conn.get_object(cont, obj, resp_chunk_size=65536)
if return_bin is True:
fp = sys.stdout
else:
dirpath = dirname(local_file)
if dirpath and not isdir(dirpath):
mkdirs(dirpath)
fp = salt.utils.files.fopen(local_file, 'wb') # pylint: disable=resource-leakage
read_length = 0
for chunk in body:
read_length += len(chunk)
fp.write(chunk)
fp.close()
return True
# ClientException
# file/dir exceptions
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def delete_object(self, cont, obj):
'''
Delete a file from Swift
'''
try:
self.conn.delete_object(cont, obj)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def head_object(self, cont, obj):
'''
Get object metadata
'''
pass
def post_object(self, cont, obj, metadata):
'''
Update object metadata
'''
pass
|
saltstack/salt
|
salt/utils/openstack/swift.py
|
SaltSwift.delete_object
|
python
|
def delete_object(self, cont, obj):
'''
Delete a file from Swift
'''
try:
self.conn.delete_object(cont, obj)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
|
Delete a file from Swift
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/swift.py#L214-L226
| null |
class SaltSwift(object):
'''
Class for all swiftclient functions
'''
def __init__(
self,
user,
tenant_name,
auth_url,
password=None,
auth_version=2,
**kwargs
):
'''
Set up openstack credentials
'''
if not HAS_SWIFT:
log.error('Error:: unable to find swiftclient. Try installing it from the appropriate repository.')
return None
self.kwargs = kwargs.copy()
self.kwargs['user'] = user
self.kwargs['password'] = password
self.kwargs['tenant_name'] = tenant_name
self.kwargs['authurl'] = auth_url
self.kwargs['auth_version'] = auth_version
if 'key' not in self.kwargs:
self.kwargs['key'] = password
self.kwargs = _sanitize(self.kwargs)
self.conn = client.Connection(**self.kwargs)
def get_account(self):
'''
List Swift containers
'''
try:
listing = self.conn.get_account()
return listing
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def get_container(self, cont):
'''
List files in a Swift container
'''
try:
listing = self.conn.get_container(cont)
return listing
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def put_container(self, cont):
'''
Create a new Swift container
'''
try:
self.conn.put_container(cont)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def delete_container(self, cont):
'''
Delete a Swift container
'''
try:
self.conn.delete_container(cont)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def post_container(self, cont, metadata=None):
'''
Update container metadata
'''
pass
def head_container(self, cont):
'''
Get container metadata
'''
pass
def get_object(self, cont, obj, local_file=None, return_bin=False):
'''
Retrieve a file from Swift
'''
try:
if local_file is None and return_bin is False:
return False
headers, body = self.conn.get_object(cont, obj, resp_chunk_size=65536)
if return_bin is True:
fp = sys.stdout
else:
dirpath = dirname(local_file)
if dirpath and not isdir(dirpath):
mkdirs(dirpath)
fp = salt.utils.files.fopen(local_file, 'wb') # pylint: disable=resource-leakage
read_length = 0
for chunk in body:
read_length += len(chunk)
fp.write(chunk)
fp.close()
return True
# ClientException
# file/dir exceptions
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def put_object(self, cont, obj, local_file):
'''
Upload a file to Swift
'''
try:
with salt.utils.files.fopen(local_file, 'rb') as fp_:
self.conn.put_object(cont, obj, fp_)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False
def head_object(self, cont, obj):
'''
Get object metadata
'''
pass
def post_object(self, cont, obj, metadata):
'''
Update object metadata
'''
pass
|
saltstack/salt
|
salt/modules/xfs.py
|
_xfs_info_get_kv
|
python
|
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
|
Parse one line of the XFS info output.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L71-L90
| null |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2014 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
'''
Module for managing XFS file systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import time
import logging
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.data
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only work on POSIX-like systems
    '''
    # XFS tooling is Linux-specific: refuse to load on Windows and on
    # non-Linux kernels (e.g. *BSD, macOS).
    return not salt.utils.platform.is_windows() \
        and __grains__.get('kernel') == 'Linux'
def _verify_run(out, cmd=None):
    '''
    Crash to the log if command execution was not successful.
    '''
    failed = out.get("retcode", 0) and out['stderr']
    if not failed:
        return
    if cmd:
        log.debug('Command: "%s"', cmd)
    log.debug('Return code: %s', out.get('retcode'))
    log.debug('Error output:\n%s', out.get('stderr', "N/A"))
    raise CommandExecutionError(out['stderr'])
def _parse_xfs_info(data):
    '''
    Parse output from "xfs_info" or "xfs_growfs -n" into a nested dict,
    keyed by section name.
    '''
    whitespace = re.compile(r'\s+')
    ret = {}
    entry = None
    for raw in data.split("\n"):
        # Collapse whitespace runs and drop comma separators.
        line = whitespace.sub(" ", raw).strip().replace(", ", " ")
        if not line:
            continue
        pairs = _xfs_info_get_kv(line)
        if not line.startswith("="):
            # Non-continuation lines open a new section; "***" marks a
            # keyword without a value, in which case the key doubles as
            # the section label.
            entry = pairs.pop(0)
            section = entry[1] if entry[1] != '***' else entry[0]
            ret[entry[0]] = {'section': section}
        ret[entry[0]].update(dict(pairs))
    return ret
def info(device):
    '''
    Get filesystem geometry information.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.info /dev/sda1
    '''
    result = __salt__['cmd.run_all']("xfs_info {0}".format(device))
    error = result.get('stderr')
    if error:
        # Strip the tool's own prefix before surfacing the message.
        raise CommandExecutionError(error.replace("xfs_info:", "").strip())
    return _parse_xfs_info(result['stdout'])
def _xfsdump_output(data):
'''
Parse CLI output of the xfsdump utility.
'''
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out['Session ID'] = line.split(" ")[-1]
elif line.startswith("session label:"):
out['Session label'] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out['Media size'] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out['Summary'] = ' '.join(summary)
return out
def dump(device, destination, level=0, label=None, noerase=None):
    '''
    Dump filesystem device to the media (file, tape etc).

    Required parameters:

    * **device**: XFS device, content of which is to be dumped.
    * **destination**: Specifies a dump destination.

    Valid options are:

    * **label**: Label of the dump. Otherwise automatically generated label is used.
    * **level**: Specifies a dump level of 0 to 9.
    * **noerase**: Pre-erase media.

    Other options are not used in order to let ``xfsdump`` use its default
    values, as they are most optimal. See the ``xfsdump(8)`` manpage for
    a more complete description of these options.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.dump /dev/sda1 /destination/on/the/client
        salt '*' xfs.dump /dev/sda1 /destination/on/the/client label='Company accountancy'
        salt '*' xfs.dump /dev/sda1 /destination/on/the/client noerase=True
    '''
    if not salt.utils.path.which("xfsdump"):
        # NOTE(review): this error message reads oddly ("has to be
        # installed or missing") — consider rewording in a behavior change.
        raise CommandExecutionError("Utility \"xfsdump\" has to be installed or missing.")

    # Default label is timestamped; single quotes are swapped for double
    # quotes so the label survives the single-quoted -L argument below.
    label = label and label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device),
                                             time.localtime()).replace("'", '"')

    # Build the command as a single shell string.
    # NOTE(review): label/destination are interpolated unquoted apart from
    # the -L single quotes; shell metacharacters in either could break or
    # inject into the command — confirm inputs are trusted.
    cmd = ["xfsdump"]
    cmd.append("-F")  # Force
    if not noerase:
        cmd.append("-E")  # pre-erase
    cmd.append("-L '{0}'".format(label))  # Label
    cmd.append("-l {0}".format(level))  # Dump level
    cmd.append("-f {0}".format(destination))  # Media destination
    cmd.append(device)  # Device

    cmd = ' '.join(cmd)
    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)

    return _xfsdump_output(out['stdout'])
def _xr_to_keyset(line):
'''
Parse xfsrestore output keyset elements.
'''
tkns = [elm for elm in line.strip().split(":", 1) if elm]
if len(tkns) == 1:
return "'{0}': ".format(tkns[0])
else:
key, val = tkns
return "'{0}': '{1}',".format(key.strip(), val.strip())
def _xfs_inventory_output(out):
    '''
    Transform xfsrestore inventory data output to a Python dict source and
    evaluate it.

    :param out: raw ``xfsrestore -I`` stdout.
    :return: nested dict of the inventory, plus a ``restore_status`` key
        taken from the last output line.
    '''
    import ast

    data = []
    out = [line for line in out.split("\n") if line.strip()]

    # No inventory yet
    if len(out) == 1 and 'restore status' in out[0].lower():
        return {'restore_status': out[0]}

    ident = 0
    data.append("{")
    for line in out[:-1]:
        if len([elm for elm in line.strip().split(":") if elm]) == 1:
            # Key-only line opens a nested section; the nesting depth is
            # the number of leading tab characters.
            n_ident = len(re.sub(r"[^\t]", "", line))
            if ident > n_ident:
                # De-indenting: close the currently open sections.
                for step in range(ident):
                    data.append("},")
                ident = n_ident
            data.append(_xr_to_keyset(line))
            data.append("{")
        else:
            data.append(_xr_to_keyset(line))
    for step in range(ident + 1):
        data.append("},")
    data.append("},")

    # The generated source consists solely of string literals and dict/tuple
    # punctuation, so it can be evaluated safely with ast.literal_eval
    # instead of eval(). The trailing comma makes the whole expression a
    # one-element tuple, hence the [0].
    data = ast.literal_eval('\n'.join(data))[0]
    data['restore_status'] = out[-1]

    return data
def inventory():
    '''
    Display XFS dump inventory without restoration.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.inventory
    '''
    # ``xfsrestore -I`` only prints the on-disk dump inventory; nothing is
    # actually restored.
    out = __salt__['cmd.run_all']("xfsrestore -I")
    _verify_run(out)  # raises CommandExecutionError on command failure
    return _xfs_inventory_output(out['stdout'])
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {}
def prune_dump(sessionid):
    '''
    Prunes the dump session identified by the given session id.

    sessionid
        UUID of the xfsdump session to prune.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
    '''
    # -s selects the session by UUID, -F skips the interactive confirmation.
    out = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
    _verify_run(out)

    # _xfs_prune_output returns {} when the output UUID does not match.
    data = _xfs_prune_output(out['stdout'], sessionid)
    if data:
        return data
    raise CommandExecutionError("Session UUID \"{0}\" was not found.".format(sessionid))
def _blkid_output(out):
    '''
    Parse ``blkid -o export`` output into a dict keyed by device node.

    Only devices whose TYPE is ``xfs`` are returned; mount information
    from /proc/mounts is merged in for devices that are currently mounted.
    '''
    flt = lambda data: [el for el in data if el.strip()]
    data = {}
    # blkid prints one KEY=VALUE block per device, separated by blank lines.
    for dev_meta in flt(out.split("\n\n")):
        dev = {}
        for items in flt(dev_meta.strip().split("\n")):
            key, val = items.split("=", 1)
            dev[key.lower()] = val
        # Robustness fix: devices without a TYPE entry (e.g. raw or
        # unformatted partitions) previously raised KeyError here.
        if dev.pop("type", None) == "xfs":
            dev['label'] = dev.get('label')  # normalize: always present, may be None
            data[dev.pop("devname")] = dev

    mounts = _get_mounts()
    for device in six.iterkeys(mounts):
        if data.get(device):
            data[device].update(mounts[device])

    return data
def devices():
    '''
    Get known XFS formatted devices on the system.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.devices
    '''
    # ``blkid -o export`` emits KEY=VALUE blocks per device; _blkid_output
    # filters them down to XFS devices and merges mount info.
    out = __salt__['cmd.run_all']("blkid -o export")
    _verify_run(out)

    return _blkid_output(out['stdout'])
def _xfs_estimate_output(out):
'''
Parse xfs_estimate output.
'''
spc = re.compile(r"\s+")
data = {}
for line in [l for l in out.split("\n") if l.strip()][1:]:
directory, bsize, blocks, megabytes, logsize = spc.sub(" ", line).split(" ")
data[directory] = {
'block _size': bsize,
'blocks': blocks,
'megabytes': megabytes,
'logsize': logsize,
}
return data
def estimate(path):
    '''
    Estimate the space that an XFS filesystem will take.
    For each directory estimate the space that directory would take
    if it were copied to an XFS filesystem.
    Estimation does not cross mount points.

    path
        File or directory (shell glob accepted) to estimate.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.estimate /path/to/file
        salt '*' xfs.estimate /path/to/dir/*
    '''
    if not os.path.exists(path):
        raise CommandExecutionError("Path \"{0}\" was not found.".format(path))

    # -v prints one line per directory; parsed by _xfs_estimate_output.
    out = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
    _verify_run(out)

    return _xfs_estimate_output(out["stdout"])
def mkfs(device, label=None, ssize=None, noforce=None,
         bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
    '''
    Create a file system on the specified device. By default wipes out with force.

    General options:

    * **label**: Specify volume label.
    * **ssize**: Specify the fundamental sector size of the filesystem.
    * **noforce**: Do not force create filesystem, if disk is already formatted.

    Filesystem geometry options:

    * **bso**: Block size options.
    * **gmo**: Global metadata options.
    * **dso**: Data section options. These options specify the location, size,
      and other parameters of the data section of the filesystem.
    * **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
    * **lso**: Log section options.
    * **nmo**: Naming options.
    * **rso**: Realtime section options.

    See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.mkfs /dev/sda1
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
    '''
    # Parse "k1=v1,k2=v2" option strings into a dict; malformed input makes
    # dict() raise, which is converted below into a CommandExecutionError.
    getopts = lambda args: dict(((args and ("=" in args)
                                  and args or None)) and [kw.split("=") for kw in args.split(",")] or [])
    cmd = ["mkfs.xfs"]
    if label:
        cmd.append("-L")
        cmd.append("'{0}'".format(label))
    if ssize:
        cmd.append("-s")
        cmd.append(ssize)

    # Each geometry option group maps to its mkfs.xfs switch.
    for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
                         ("-d", dso), ("-l", lso), ("-r", rso)]:
        try:
            if getopts(opts):
                cmd.append(switch)
                cmd.append(opts)
        except Exception:
            raise CommandExecutionError("Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))
    if not noforce:
        cmd.append("-f")  # force creation even if the device is already formatted
    cmd.append(device)
    cmd = ' '.join(cmd)
    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)

    return _parse_xfs_info(out['stdout'])
def modify(device, label=None, lazy_counting=None, uuid=None):
    '''
    Modify parameters of an XFS filesystem.

    device
        Device node of the XFS filesystem.
    label
        New volume label.
    lazy_counting
        Enable (True) or disable (False) lazy superblock counters.
    uuid
        True generates a new UUID; False clears the UUID ("nil").

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
        salt '*' xfs.modify /dev/sda1 uuid=False
        salt '*' xfs.modify /dev/sda1 uuid=True
    '''
    if not label and lazy_counting is None and uuid is None:
        raise CommandExecutionError("Nothing specified for modification for \"{0}\" device".format(device))

    cmd = ['xfs_admin']
    if label:
        cmd.append("-L")
        cmd.append("'{0}'".format(label))

    # -c toggles lazy superblock counters: 0 = off, 1 = on.
    if lazy_counting is False:
        cmd.append("-c")
        cmd.append("0")
    elif lazy_counting:
        cmd.append("-c")
        cmd.append("1")

    # -U nil clears the UUID; -U generate creates a fresh one.
    if uuid is False:
        cmd.append("-U")
        cmd.append("nil")
    elif uuid:
        cmd.append("-U")
        cmd.append("generate")
    cmd.append(device)
    cmd = ' '.join(cmd)
    _verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)

    # Re-read the device metadata so the caller sees the updated state.
    out = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
    _verify_run(out)

    return _blkid_output(out['stdout'])
def _get_mounts():
    '''
    Return currently mounted XFS filesystems as a dict keyed by device node.
    '''
    mounts = {}
    with salt.utils.files.fopen("/proc/mounts") as fhr:
        for line in salt.utils.data.decode(fhr.readlines()):
            # /proc/mounts format: device mountpoint fstype options freq passno
            device, mntpnt, fstype, options, fs_freq, fs_passno = line.strip().split(" ")
            if fstype != 'xfs':
                continue
            mounts[device] = {
                'mount_point': mntpnt,
                'options': options.split(","),
            }

    return mounts
def defragment(device):
    '''
    Defragment mounted XFS filesystem.
    In order to defragment a filesystem, the device should be properly
    mounted and writable.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.defragment /dev/sda1
    '''
    if device == '/':
        raise CommandExecutionError("Root is not a device.")

    # xfs_fsr only operates on mounted filesystems; fail early otherwise.
    if not _get_mounts().get(device):
        raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))

    out = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
    _verify_run(out)

    return {
        'log': out['stdout']
    }
|
saltstack/salt
|
salt/modules/xfs.py
|
_parse_xfs_info
|
python
|
def _parse_xfs_info(data):
'''
Parse output from "xfs_info" or "xfs_growfs -n".
'''
ret = {}
spr = re.compile(r'\s+')
entry = None
for line in [spr.sub(" ", l).strip().replace(", ", " ") for l in data.split("\n")]:
if not line:
continue
nfo = _xfs_info_get_kv(line)
if not line.startswith("="):
entry = nfo.pop(0)
ret[entry[0]] = {'section': entry[(entry[1] != '***' and 1 or 0)]}
ret[entry[0]].update(dict(nfo))
return ret
|
Parse output from "xfs_info" or "xfs_growfs -n".
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L93-L109
|
[
"def _xfs_info_get_kv(serialized):\n '''\n Parse one line of the XFS info output.\n '''\n # No need to know sub-elements here\n if serialized.startswith(\"=\"):\n serialized = serialized[1:].strip()\n\n serialized = serialized.replace(\" = \", \"=*** \").replace(\" =\", \"=\")\n\n # Keywords has no spaces, values do\n opt = []\n for tkn in serialized.split(\" \"):\n if not opt or \"=\" in tkn:\n opt.append(tkn)\n else:\n opt[len(opt) - 1] = opt[len(opt) - 1] + \" \" + tkn\n\n # Preserve ordering\n return [tuple(items.split(\"=\")) for items in opt]\n"
] |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2014 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
'''
Module for managing XFS file systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import time
import logging
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.data
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only work on POSIX-like systems
    '''
    # XFS tooling is Linux-only; __grains__ is injected by the salt loader.
    return not salt.utils.platform.is_windows() \
        and __grains__.get('kernel') == 'Linux'
def _verify_run(out, cmd=None):
'''
Crash to the log if command execution was not successful.
'''
if out.get("retcode", 0) and out['stderr']:
if cmd:
log.debug('Command: "%s"', cmd)
log.debug('Return code: %s', out.get('retcode'))
log.debug('Error output:\n%s', out.get('stderr', "N/A"))
raise CommandExecutionError(out['stderr'])
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
def info(device):
    '''
    Get filesystem geometry information.

    device
        Device node backing the XFS filesystem (e.g. ``/dev/sda1``).

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.info /dev/sda1
    '''
    out = __salt__['cmd.run_all']("xfs_info {0}".format(device))
    if out.get('stderr'):
        # Strip the "xfs_info:" prefix so the raised message reads cleanly.
        raise CommandExecutionError(out['stderr'].replace("xfs_info:", "").strip())
    return _parse_xfs_info(out['stdout'])
def _xfsdump_output(data):
'''
Parse CLI output of the xfsdump utility.
'''
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out['Session ID'] = line.split(" ")[-1]
elif line.startswith("session label:"):
out['Session label'] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out['Media size'] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out['Summary'] = ' '.join(summary)
return out
def dump(device, destination, level=0, label=None, noerase=None):
'''
Dump filesystem device to the media (file, tape etc).
Required parameters:
* **device**: XFS device, content of which to be dumped.
* **destination**: Specifies a dump destination.
Valid options are:
* **label**: Label of the dump. Otherwise automatically generated label is used.
* **level**: Specifies a dump level of 0 to 9.
* **noerase**: Pre-erase media.
Other options are not used in order to let ``xfsdump`` use its default
values, as they are most optimal. See the ``xfsdump(8)`` manpage for
a more complete description of these options.
CLI Example:
.. code-block:: bash
salt '*' xfs.dump /dev/sda1 /detination/on/the/client
salt '*' xfs.dump /dev/sda1 /detination/on/the/client label='Company accountancy'
salt '*' xfs.dump /dev/sda1 /detination/on/the/client noerase=True
'''
if not salt.utils.path.which("xfsdump"):
raise CommandExecutionError("Utility \"xfsdump\" has to be installed or missing.")
label = label and label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device),
time.localtime()).replace("'", '"')
cmd = ["xfsdump"]
cmd.append("-F") # Force
if not noerase:
cmd.append("-E") # pre-erase
cmd.append("-L '{0}'".format(label)) # Label
cmd.append("-l {0}".format(level)) # Dump level
cmd.append("-f {0}".format(destination)) # Media destination
cmd.append(device) # Device
cmd = ' '.join(cmd)
out = __salt__['cmd.run_all'](cmd)
_verify_run(out, cmd=cmd)
return _xfsdump_output(out['stdout'])
def _xr_to_keyset(line):
'''
Parse xfsrestore output keyset elements.
'''
tkns = [elm for elm in line.strip().split(":", 1) if elm]
if len(tkns) == 1:
return "'{0}': ".format(tkns[0])
else:
key, val = tkns
return "'{0}': '{1}',".format(key.strip(), val.strip())
def _xfs_inventory_output(out):
'''
Transform xfsrestore inventory data output to a Python dict source and evaluate it.
'''
data = []
out = [line for line in out.split("\n") if line.strip()]
# No inventory yet
if len(out) == 1 and 'restore status' in out[0].lower():
return {'restore_status': out[0]}
ident = 0
data.append("{")
for line in out[:-1]:
if len([elm for elm in line.strip().split(":") if elm]) == 1:
n_ident = len(re.sub("[^\t]", "", line))
if ident > n_ident:
for step in range(ident):
data.append("},")
ident = n_ident
data.append(_xr_to_keyset(line))
data.append("{")
else:
data.append(_xr_to_keyset(line))
for step in range(ident + 1):
data.append("},")
data.append("},")
# We are evaling into a python dict, a json load
# would be safer
data = eval('\n'.join(data))[0] # pylint: disable=W0123
data['restore_status'] = out[-1]
return data
def inventory():
'''
Display XFS dump inventory without restoration.
CLI Example:
.. code-block:: bash
salt '*' xfs.inventory
'''
out = __salt__['cmd.run_all']("xfsrestore -I")
_verify_run(out)
return _xfs_inventory_output(out['stdout'])
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {}
def prune_dump(sessionid):
'''
Prunes the dump session identified by the given session id.
CLI Example:
.. code-block:: bash
salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
'''
out = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
_verify_run(out)
data = _xfs_prune_output(out['stdout'], sessionid)
if data:
return data
raise CommandExecutionError("Session UUID \"{0}\" was not found.".format(sessionid))
def _blkid_output(out):
'''
Parse blkid output.
'''
flt = lambda data: [el for el in data if el.strip()]
data = {}
for dev_meta in flt(out.split("\n\n")):
dev = {}
for items in flt(dev_meta.strip().split("\n")):
key, val = items.split("=", 1)
dev[key.lower()] = val
if dev.pop("type") == "xfs":
dev['label'] = dev.get('label')
data[dev.pop("devname")] = dev
mounts = _get_mounts()
for device in six.iterkeys(mounts):
if data.get(device):
data[device].update(mounts[device])
return data
def devices():
'''
Get known XFS formatted devices on the system.
CLI Example:
.. code-block:: bash
salt '*' xfs.devices
'''
out = __salt__['cmd.run_all']("blkid -o export")
_verify_run(out)
return _blkid_output(out['stdout'])
def _xfs_estimate_output(out):
'''
Parse xfs_estimate output.
'''
spc = re.compile(r"\s+")
data = {}
for line in [l for l in out.split("\n") if l.strip()][1:]:
directory, bsize, blocks, megabytes, logsize = spc.sub(" ", line).split(" ")
data[directory] = {
'block _size': bsize,
'blocks': blocks,
'megabytes': megabytes,
'logsize': logsize,
}
return data
def estimate(path):
'''
Estimate the space that an XFS filesystem will take.
For each directory estimate the space that directory would take
if it were copied to an XFS filesystem.
Estimation does not cross mount points.
CLI Example:
.. code-block:: bash
salt '*' xfs.estimate /path/to/file
salt '*' xfs.estimate /path/to/dir/*
'''
if not os.path.exists(path):
raise CommandExecutionError("Path \"{0}\" was not found.".format(path))
out = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
_verify_run(out)
return _xfs_estimate_output(out["stdout"])
def mkfs(device, label=None, ssize=None, noforce=None,
bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
'''
Create a file system on the specified device. By default wipes out with force.
General options:
* **label**: Specify volume label.
* **ssize**: Specify the fundamental sector size of the filesystem.
* **noforce**: Do not force create filesystem, if disk is already formatted.
Filesystem geometry options:
* **bso**: Block size options.
* **gmo**: Global metadata options.
* **dso**: Data section options. These options specify the location, size,
and other parameters of the data section of the filesystem.
* **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
* **lso**: Log section options.
* **nmo**: Naming options.
* **rso**: Realtime section options.
See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.
CLI Example:
.. code-block:: bash
salt '*' xfs.mkfs /dev/sda1
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
'''
getopts = lambda args: dict(((args and ("=" in args)
and args or None)) and [kw.split("=") for kw in args.split(",")] or [])
cmd = ["mkfs.xfs"]
if label:
cmd.append("-L")
cmd.append("'{0}'".format(label))
if ssize:
cmd.append("-s")
cmd.append(ssize)
for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
("-d", dso), ("-l", lso), ("-r", rso)]:
try:
if getopts(opts):
cmd.append(switch)
cmd.append(opts)
except Exception:
raise CommandExecutionError("Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))
if not noforce:
cmd.append("-f")
cmd.append(device)
cmd = ' '.join(cmd)
out = __salt__['cmd.run_all'](cmd)
_verify_run(out, cmd=cmd)
return _parse_xfs_info(out['stdout'])
def modify(device, label=None, lazy_counting=None, uuid=None):
'''
Modify parameters of an XFS filesystem.
CLI Example:
.. code-block:: bash
salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
salt '*' xfs.modify /dev/sda1 uuid=False
salt '*' xfs.modify /dev/sda1 uuid=True
'''
if not label and lazy_counting is None and uuid is None:
raise CommandExecutionError("Nothing specified for modification for \"{0}\" device".format(device))
cmd = ['xfs_admin']
if label:
cmd.append("-L")
cmd.append("'{0}'".format(label))
if lazy_counting is False:
cmd.append("-c")
cmd.append("0")
elif lazy_counting:
cmd.append("-c")
cmd.append("1")
if uuid is False:
cmd.append("-U")
cmd.append("nil")
elif uuid:
cmd.append("-U")
cmd.append("generate")
cmd.append(device)
cmd = ' '.join(cmd)
_verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)
out = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
_verify_run(out)
return _blkid_output(out['stdout'])
def _get_mounts():
'''
List mounted filesystems.
'''
mounts = {}
with salt.utils.files.fopen("/proc/mounts") as fhr:
for line in salt.utils.data.decode(fhr.readlines()):
device, mntpnt, fstype, options, fs_freq, fs_passno = line.strip().split(" ")
if fstype != 'xfs':
continue
mounts[device] = {
'mount_point': mntpnt,
'options': options.split(","),
}
return mounts
def defragment(device):
'''
Defragment mounted XFS filesystem.
In order to mount a filesystem, device should be properly mounted and writable.
CLI Example:
.. code-block:: bash
salt '*' xfs.defragment /dev/sda1
'''
if device == '/':
raise CommandExecutionError("Root is not a device.")
if not _get_mounts().get(device):
raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))
out = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
_verify_run(out)
return {
'log': out['stdout']
}
|
saltstack/salt
|
salt/modules/xfs.py
|
info
|
python
|
def info(device):
'''
Get filesystem geometry information.
CLI Example:
.. code-block:: bash
salt '*' xfs.info /dev/sda1
'''
out = __salt__['cmd.run_all']("xfs_info {0}".format(device))
if out.get('stderr'):
raise CommandExecutionError(out['stderr'].replace("xfs_info:", "").strip())
return _parse_xfs_info(out['stdout'])
|
Get filesystem geometry information.
CLI Example:
.. code-block:: bash
salt '*' xfs.info /dev/sda1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L112-L125
|
[
"def _parse_xfs_info(data):\n '''\n Parse output from \"xfs_info\" or \"xfs_growfs -n\".\n '''\n ret = {}\n spr = re.compile(r'\\s+')\n entry = None\n for line in [spr.sub(\" \", l).strip().replace(\", \", \" \") for l in data.split(\"\\n\")]:\n if not line:\n continue\n nfo = _xfs_info_get_kv(line)\n if not line.startswith(\"=\"):\n entry = nfo.pop(0)\n ret[entry[0]] = {'section': entry[(entry[1] != '***' and 1 or 0)]}\n ret[entry[0]].update(dict(nfo))\n\n return ret\n"
] |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2014 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
'''
Module for managing XFS file systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import time
import logging
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.data
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on POSIX-like systems
'''
return not salt.utils.platform.is_windows() \
and __grains__.get('kernel') == 'Linux'
def _verify_run(out, cmd=None):
'''
Crash to the log if command execution was not successful.
'''
if out.get("retcode", 0) and out['stderr']:
if cmd:
log.debug('Command: "%s"', cmd)
log.debug('Return code: %s', out.get('retcode'))
log.debug('Error output:\n%s', out.get('stderr', "N/A"))
raise CommandExecutionError(out['stderr'])
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
def _parse_xfs_info(data):
    '''
    Parse output from "xfs_info" or "xfs_growfs -n".
    '''
    ret = {}
    spr = re.compile(r'\s+')
    entry = None
    # Collapse runs of whitespace and drop comma separators before tokenizing.
    for line in [spr.sub(" ", l).strip().replace(", ", " ") for l in data.split("\n")]:
        if not line:
            continue
        nfo = _xfs_info_get_kv(line)
        if not line.startswith("="):
            # A non-continuation line starts a new section; its first
            # (key, value) pair names the section. '***' is the sentinel
            # _xfs_info_get_kv emits for header-only entries, in which case
            # the key itself is used as the section name.
            entry = nfo.pop(0)
            ret[entry[0]] = {'section': entry[(entry[1] != '***' and 1 or 0)]}
        # Continuation lines ("=...") merge into the current section.
        ret[entry[0]].update(dict(nfo))

    return ret
def _xfsdump_output(data):
'''
Parse CLI output of the xfsdump utility.
'''
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out['Session ID'] = line.split(" ")[-1]
elif line.startswith("session label:"):
out['Session label'] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out['Media size'] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out['Summary'] = ' '.join(summary)
return out
def dump(device, destination, level=0, label=None, noerase=None):
'''
Dump filesystem device to the media (file, tape etc).
Required parameters:
* **device**: XFS device, content of which to be dumped.
* **destination**: Specifies a dump destination.
Valid options are:
* **label**: Label of the dump. Otherwise automatically generated label is used.
* **level**: Specifies a dump level of 0 to 9.
* **noerase**: Pre-erase media.
Other options are not used in order to let ``xfsdump`` use its default
values, as they are most optimal. See the ``xfsdump(8)`` manpage for
a more complete description of these options.
CLI Example:
.. code-block:: bash
salt '*' xfs.dump /dev/sda1 /detination/on/the/client
salt '*' xfs.dump /dev/sda1 /detination/on/the/client label='Company accountancy'
salt '*' xfs.dump /dev/sda1 /detination/on/the/client noerase=True
'''
if not salt.utils.path.which("xfsdump"):
raise CommandExecutionError("Utility \"xfsdump\" has to be installed or missing.")
label = label and label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device),
time.localtime()).replace("'", '"')
cmd = ["xfsdump"]
cmd.append("-F") # Force
if not noerase:
cmd.append("-E") # pre-erase
cmd.append("-L '{0}'".format(label)) # Label
cmd.append("-l {0}".format(level)) # Dump level
cmd.append("-f {0}".format(destination)) # Media destination
cmd.append(device) # Device
cmd = ' '.join(cmd)
out = __salt__['cmd.run_all'](cmd)
_verify_run(out, cmd=cmd)
return _xfsdump_output(out['stdout'])
def _xr_to_keyset(line):
'''
Parse xfsrestore output keyset elements.
'''
tkns = [elm for elm in line.strip().split(":", 1) if elm]
if len(tkns) == 1:
return "'{0}': ".format(tkns[0])
else:
key, val = tkns
return "'{0}': '{1}',".format(key.strip(), val.strip())
def _xfs_inventory_output(out):
'''
Transform xfsrestore inventory data output to a Python dict source and evaluate it.
'''
data = []
out = [line for line in out.split("\n") if line.strip()]
# No inventory yet
if len(out) == 1 and 'restore status' in out[0].lower():
return {'restore_status': out[0]}
ident = 0
data.append("{")
for line in out[:-1]:
if len([elm for elm in line.strip().split(":") if elm]) == 1:
n_ident = len(re.sub("[^\t]", "", line))
if ident > n_ident:
for step in range(ident):
data.append("},")
ident = n_ident
data.append(_xr_to_keyset(line))
data.append("{")
else:
data.append(_xr_to_keyset(line))
for step in range(ident + 1):
data.append("},")
data.append("},")
# We are evaling into a python dict, a json load
# would be safer
data = eval('\n'.join(data))[0] # pylint: disable=W0123
data['restore_status'] = out[-1]
return data
def inventory():
'''
Display XFS dump inventory without restoration.
CLI Example:
.. code-block:: bash
salt '*' xfs.inventory
'''
out = __salt__['cmd.run_all']("xfsrestore -I")
_verify_run(out)
return _xfs_inventory_output(out['stdout'])
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {}
def prune_dump(sessionid):
'''
Prunes the dump session identified by the given session id.
CLI Example:
.. code-block:: bash
salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
'''
out = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
_verify_run(out)
data = _xfs_prune_output(out['stdout'], sessionid)
if data:
return data
raise CommandExecutionError("Session UUID \"{0}\" was not found.".format(sessionid))
def _blkid_output(out):
'''
Parse blkid output.
'''
flt = lambda data: [el for el in data if el.strip()]
data = {}
for dev_meta in flt(out.split("\n\n")):
dev = {}
for items in flt(dev_meta.strip().split("\n")):
key, val = items.split("=", 1)
dev[key.lower()] = val
if dev.pop("type") == "xfs":
dev['label'] = dev.get('label')
data[dev.pop("devname")] = dev
mounts = _get_mounts()
for device in six.iterkeys(mounts):
if data.get(device):
data[device].update(mounts[device])
return data
def devices():
'''
Get known XFS formatted devices on the system.
CLI Example:
.. code-block:: bash
salt '*' xfs.devices
'''
out = __salt__['cmd.run_all']("blkid -o export")
_verify_run(out)
return _blkid_output(out['stdout'])
def _xfs_estimate_output(out):
'''
Parse xfs_estimate output.
'''
spc = re.compile(r"\s+")
data = {}
for line in [l for l in out.split("\n") if l.strip()][1:]:
directory, bsize, blocks, megabytes, logsize = spc.sub(" ", line).split(" ")
data[directory] = {
'block _size': bsize,
'blocks': blocks,
'megabytes': megabytes,
'logsize': logsize,
}
return data
def estimate(path):
'''
Estimate the space that an XFS filesystem will take.
For each directory estimate the space that directory would take
if it were copied to an XFS filesystem.
Estimation does not cross mount points.
CLI Example:
.. code-block:: bash
salt '*' xfs.estimate /path/to/file
salt '*' xfs.estimate /path/to/dir/*
'''
if not os.path.exists(path):
raise CommandExecutionError("Path \"{0}\" was not found.".format(path))
out = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
_verify_run(out)
return _xfs_estimate_output(out["stdout"])
def mkfs(device, label=None, ssize=None, noforce=None,
bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
'''
Create a file system on the specified device. By default wipes out with force.
General options:
* **label**: Specify volume label.
* **ssize**: Specify the fundamental sector size of the filesystem.
* **noforce**: Do not force create filesystem, if disk is already formatted.
Filesystem geometry options:
* **bso**: Block size options.
* **gmo**: Global metadata options.
* **dso**: Data section options. These options specify the location, size,
and other parameters of the data section of the filesystem.
* **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
* **lso**: Log section options.
* **nmo**: Naming options.
* **rso**: Realtime section options.
See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.
CLI Example:
.. code-block:: bash
salt '*' xfs.mkfs /dev/sda1
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
'''
getopts = lambda args: dict(((args and ("=" in args)
and args or None)) and [kw.split("=") for kw in args.split(",")] or [])
cmd = ["mkfs.xfs"]
if label:
cmd.append("-L")
cmd.append("'{0}'".format(label))
if ssize:
cmd.append("-s")
cmd.append(ssize)
for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
("-d", dso), ("-l", lso), ("-r", rso)]:
try:
if getopts(opts):
cmd.append(switch)
cmd.append(opts)
except Exception:
raise CommandExecutionError("Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))
if not noforce:
cmd.append("-f")
cmd.append(device)
cmd = ' '.join(cmd)
out = __salt__['cmd.run_all'](cmd)
_verify_run(out, cmd=cmd)
return _parse_xfs_info(out['stdout'])
def modify(device, label=None, lazy_counting=None, uuid=None):
'''
Modify parameters of an XFS filesystem.
CLI Example:
.. code-block:: bash
salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
salt '*' xfs.modify /dev/sda1 uuid=False
salt '*' xfs.modify /dev/sda1 uuid=True
'''
if not label and lazy_counting is None and uuid is None:
raise CommandExecutionError("Nothing specified for modification for \"{0}\" device".format(device))
cmd = ['xfs_admin']
if label:
cmd.append("-L")
cmd.append("'{0}'".format(label))
if lazy_counting is False:
cmd.append("-c")
cmd.append("0")
elif lazy_counting:
cmd.append("-c")
cmd.append("1")
if uuid is False:
cmd.append("-U")
cmd.append("nil")
elif uuid:
cmd.append("-U")
cmd.append("generate")
cmd.append(device)
cmd = ' '.join(cmd)
_verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)
out = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
_verify_run(out)
return _blkid_output(out['stdout'])
def _get_mounts():
'''
List mounted filesystems.
'''
mounts = {}
with salt.utils.files.fopen("/proc/mounts") as fhr:
for line in salt.utils.data.decode(fhr.readlines()):
device, mntpnt, fstype, options, fs_freq, fs_passno = line.strip().split(" ")
if fstype != 'xfs':
continue
mounts[device] = {
'mount_point': mntpnt,
'options': options.split(","),
}
return mounts
def defragment(device):
'''
Defragment mounted XFS filesystem.
In order to mount a filesystem, device should be properly mounted and writable.
CLI Example:
.. code-block:: bash
salt '*' xfs.defragment /dev/sda1
'''
if device == '/':
raise CommandExecutionError("Root is not a device.")
if not _get_mounts().get(device):
raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))
out = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
_verify_run(out)
return {
'log': out['stdout']
}
|
saltstack/salt
|
salt/modules/xfs.py
|
_xfsdump_output
|
python
|
def _xfsdump_output(data):
'''
Parse CLI output of the xfsdump utility.
'''
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out['Session ID'] = line.split(" ")[-1]
elif line.startswith("session label:"):
out['Session label'] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out['Media size'] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out['Summary'] = ' '.join(summary)
return out
|
Parse CLI output of the xfsdump utility.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L128-L160
| null |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2014 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
'''
Module for managing XFS file systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import time
import logging
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.data
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Load this module only on Linux, POSIX-like systems (never on Windows).
    '''
    if salt.utils.platform.is_windows():
        return False
    return __grains__.get('kernel') == 'Linux'
def _verify_run(out, cmd=None):
    '''
    Raise CommandExecutionError (after logging the details) when the
    executed command exited non-zero and produced error output.
    '''
    failed = out.get("retcode", 0) and out['stderr']
    if not failed:
        return
    if cmd:
        log.debug('Command: "%s"', cmd)
    log.debug('Return code: %s', out.get('retcode'))
    log.debug('Error output:\n%s', out.get('stderr', "N/A"))
    raise CommandExecutionError(out['stderr'])
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
def _parse_xfs_info(data):
    '''
    Parse output of "xfs_info" or "xfs_growfs -n" into a nested dict,
    keyed by section name.
    '''
    ret = {}
    ws_re = re.compile(r'\s+')
    entry = None
    for raw in data.split("\n"):
        line = ws_re.sub(" ", raw).strip().replace(", ", " ")
        if not line:
            continue
        pairs = _xfs_info_get_kv(line)
        # Lines not starting with "=" open a new section
        if not line.startswith("="):
            entry = pairs.pop(0)
            section_name = entry[1] if entry[1] != '***' else entry[0]
            ret[entry[0]] = {'section': section_name}
        ret[entry[0]].update(dict(pairs))
    return ret
def info(device):
    '''
    Get filesystem geometry information.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.info /dev/sda1
    '''
    result = __salt__['cmd.run_all']("xfs_info {0}".format(device))
    stderr = result.get('stderr')
    if stderr:
        # Strip the "xfs_info:" prefix before surfacing the error
        raise CommandExecutionError(stderr.replace("xfs_info:", "").strip())
    return _parse_xfs_info(result['stdout'])
def dump(device, destination, level=0, label=None, noerase=None):
    '''
    Dump filesystem device to the media (file, tape etc).

    Required parameters:

    * **device**: XFS device, content of which to be dumped.
    * **destination**: Specifies a dump destination.

    Valid options are:

    * **label**: Label of the dump. Otherwise automatically generated label is used.
    * **level**: Specifies a dump level of 0 to 9.
    * **noerase**: Pre-erase media.

    Other options are not used in order to let ``xfsdump`` use its default
    values, as they are most optimal. See the ``xfsdump(8)`` manpage for
    a more complete description of these options.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.dump /dev/sda1 /detination/on/the/client
        salt '*' xfs.dump /dev/sda1 /detination/on/the/client label='Company accountancy'
        salt '*' xfs.dump /dev/sda1 /detination/on/the/client noerase=True
    '''
    if not salt.utils.path.which("xfsdump"):
        # Fixed the previously contradictory wording
        # ("has to be installed or missing")
        raise CommandExecutionError("Utility \"xfsdump\" is missing and has to be installed.")

    # Fall back to an auto-generated label; single quotes are replaced
    # because the label ends up single-quoted on the command line below
    label = label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device),
                                   time.localtime()).replace("'", '"')

    cmd = ["xfsdump"]
    cmd.append("-F")                          # Force
    if not noerase:
        cmd.append("-E")                      # pre-erase
    cmd.append("-L '{0}'".format(label))      # Label
    cmd.append("-l {0}".format(level))        # Dump level
    cmd.append("-f {0}".format(destination))  # Media destination
    cmd.append(device)                        # Device

    cmd = ' '.join(cmd)
    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)

    return _xfsdump_output(out['stdout'])
def _xr_to_keyset(line):
'''
Parse xfsrestore output keyset elements.
'''
tkns = [elm for elm in line.strip().split(":", 1) if elm]
if len(tkns) == 1:
return "'{0}': ".format(tkns[0])
else:
key, val = tkns
return "'{0}': '{1}',".format(key.strip(), val.strip())
def _xfs_inventory_output(out):
    '''
    Transform xfsrestore inventory data output to a Python dict source and
    evaluate it.

    Returns the parsed inventory dict with an extra ``restore_status`` key
    taken from the last output line.
    '''
    import ast

    data = []
    out = [line for line in out.split("\n") if line.strip()]

    # No inventory yet
    if len(out) == 1 and 'restore status' in out[0].lower():
        return {'restore_status': out[0]}

    ident = 0
    data.append("{")
    for line in out[:-1]:
        if len([elm for elm in line.strip().split(":") if elm]) == 1:
            # Tab depth encodes the nesting level
            n_ident = len(re.sub("[^\t]", "", line))
            if ident > n_ident:
                for step in range(ident):
                    data.append("},")
                ident = n_ident
            data.append(_xr_to_keyset(line))
            data.append("{")
        else:
            data.append(_xr_to_keyset(line))
    for step in range(ident + 1):
        data.append("},")
    data.append("},")

    # ast.literal_eval only accepts Python literals, unlike the bare
    # eval() used previously, so the (external) xfsrestore output cannot
    # execute arbitrary code. The generated source is a dict literal with
    # a trailing comma, i.e. a 1-tuple -- hence the [0].
    data = ast.literal_eval('\n'.join(data))[0]
    data['restore_status'] = out[-1]

    return data
def inventory():
    '''
    Display XFS dump inventory without restoration.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.inventory
    '''
    result = __salt__['cmd.run_all']("xfsrestore -I")
    _verify_run(result)
    return _xfs_inventory_output(result['stdout'])
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {}
def prune_dump(sessionid):
    '''
    Prunes the dump session identified by the given session id.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
    '''
    result = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
    _verify_run(result)

    parsed = _xfs_prune_output(result['stdout'], sessionid)
    if not parsed:
        raise CommandExecutionError("Session UUID \"{0}\" was not found.".format(sessionid))
    return parsed
def _blkid_output(out):
    '''
    Parse "blkid -o export" output into {devname: info} for XFS devices,
    enriched with mount information for mounted devices.
    '''
    def _non_blank(items):
        return [item for item in items if item.strip()]

    data = {}
    for block in _non_blank(out.split("\n\n")):
        dev = {}
        for pair in _non_blank(block.strip().split("\n")):
            key, value = pair.split("=", 1)
            dev[key.lower()] = value
        if dev.pop("type") != "xfs":
            continue
        # Ensure the key exists even for unlabeled devices
        dev['label'] = dev.get('label')
        data[dev.pop("devname")] = dev

    mounts = _get_mounts()
    for device in mounts:
        if data.get(device):
            data[device].update(mounts[device])

    return data
def devices():
    '''
    Get known XFS formatted devices on the system.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.devices
    '''
    result = __salt__['cmd.run_all']("blkid -o export")
    _verify_run(result)
    return _blkid_output(result['stdout'])
def _xfs_estimate_output(out):
'''
Parse xfs_estimate output.
'''
spc = re.compile(r"\s+")
data = {}
for line in [l for l in out.split("\n") if l.strip()][1:]:
directory, bsize, blocks, megabytes, logsize = spc.sub(" ", line).split(" ")
data[directory] = {
'block _size': bsize,
'blocks': blocks,
'megabytes': megabytes,
'logsize': logsize,
}
return data
def estimate(path):
    '''
    Estimate the space that an XFS filesystem will take.
    For each directory estimate the space that directory would take
    if it were copied to an XFS filesystem.
    Estimation does not cross mount points.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.estimate /path/to/file
        salt '*' xfs.estimate /path/to/dir/*
    '''
    if not os.path.exists(path):
        raise CommandExecutionError("Path \"{0}\" was not found.".format(path))

    result = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
    _verify_run(result)

    return _xfs_estimate_output(result["stdout"])
def mkfs(device, label=None, ssize=None, noforce=None,
         bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
    '''
    Create a file system on the specified device. By default wipes out with force.

    General options:

    * **label**: Specify volume label.
    * **ssize**: Specify the fundamental sector size of the filesystem.
    * **noforce**: Do not force create filesystem, if disk is already formatted.

    Filesystem geometry options:

    * **bso**: Block size options.
    * **gmo**: Global metadata options.
    * **dso**: Data section options. These options specify the location, size,
      and other parameters of the data section of the filesystem.
    * **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
    * **lso**: Log section options.
    * **nmo**: Naming options.
    * **rso**: Realtime section options.

    See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.mkfs /dev/sda1
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
    '''
    # Sanity-check a "k1=v1,k2=v2" option string by parsing it into a dict;
    # any malformed string makes this raise, which is caught below.
    getopts = lambda args: dict(((args and ("=" in args)
                                  and args or None)) and [kw.split("=") for kw in args.split(",")] or [])
    cmd = ["mkfs.xfs"]
    if label:
        cmd.append("-L")
        cmd.append("'{0}'".format(label))
    if ssize:
        cmd.append("-s")
        cmd.append(ssize)
    # Each geometry option group maps to its mkfs.xfs switch; the option
    # value itself is passed through verbatim after the syntax check.
    for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
                         ("-d", dso), ("-l", lso), ("-r", rso)]:
        try:
            if getopts(opts):
                cmd.append(switch)
                cmd.append(opts)
        except Exception:
            raise CommandExecutionError("Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))
    if not noforce:
        # Wipe an existing filesystem unless noforce was requested
        cmd.append("-f")
    cmd.append(device)

    cmd = ' '.join(cmd)
    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)

    return _parse_xfs_info(out['stdout'])
def modify(device, label=None, lazy_counting=None, uuid=None):
    '''
    Modify parameters of an XFS filesystem.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
        salt '*' xfs.modify /dev/sda1 uuid=False
        salt '*' xfs.modify /dev/sda1 uuid=True
    '''
    if not label and lazy_counting is None and uuid is None:
        raise CommandExecutionError("Nothing specified for modification for \"{0}\" device".format(device))

    cmd = ['xfs_admin']
    if label:
        cmd.extend(["-L", "'{0}'".format(label)])
    # Explicit False disables lazy counting; any other truthy value enables it
    if lazy_counting is False:
        cmd.extend(["-c", "0"])
    elif lazy_counting:
        cmd.extend(["-c", "1"])
    # Explicit False clears the UUID; any other truthy value regenerates it
    if uuid is False:
        cmd.extend(["-U", "nil"])
    elif uuid:
        cmd.extend(["-U", "generate"])
    cmd.append(device)

    cmd = ' '.join(cmd)
    _verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)

    out = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
    _verify_run(out)

    return _blkid_output(out['stdout'])
def _get_mounts():
    '''
    Return {device: {'mount_point': ..., 'options': [...]}} for every
    currently mounted XFS filesystem, read from /proc/mounts.
    '''
    mounts = {}
    with salt.utils.files.fopen("/proc/mounts") as fhr:
        for line in salt.utils.data.decode(fhr.readlines()):
            device, mntpnt, fstype, options, fs_freq, fs_passno = line.strip().split(" ")
            if fstype == 'xfs':
                mounts[device] = {
                    'mount_point': mntpnt,
                    'options': options.split(","),
                }
    return mounts
def defragment(device):
    '''
    Defragment mounted XFS filesystem.
    In order to mount a filesystem, device should be properly mounted and writable.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.defragment /dev/sda1
    '''
    if device == '/':
        raise CommandExecutionError("Root is not a device.")
    if not _get_mounts().get(device):
        raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))

    result = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
    _verify_run(result)

    return {'log': result['stdout']}
|
saltstack/salt
|
salt/modules/xfs.py
|
dump
|
python
|
def dump(device, destination, level=0, label=None, noerase=None):
'''
Dump filesystem device to the media (file, tape etc).
Required parameters:
* **device**: XFS device, content of which to be dumped.
* **destination**: Specifies a dump destination.
Valid options are:
* **label**: Label of the dump. Otherwise automatically generated label is used.
* **level**: Specifies a dump level of 0 to 9.
* **noerase**: Pre-erase media.
Other options are not used in order to let ``xfsdump`` use its default
values, as they are most optimal. See the ``xfsdump(8)`` manpage for
a more complete description of these options.
CLI Example:
.. code-block:: bash
salt '*' xfs.dump /dev/sda1 /detination/on/the/client
salt '*' xfs.dump /dev/sda1 /detination/on/the/client label='Company accountancy'
salt '*' xfs.dump /dev/sda1 /detination/on/the/client noerase=True
'''
if not salt.utils.path.which("xfsdump"):
raise CommandExecutionError("Utility \"xfsdump\" has to be installed or missing.")
label = label and label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device),
time.localtime()).replace("'", '"')
cmd = ["xfsdump"]
cmd.append("-F") # Force
if not noerase:
cmd.append("-E") # pre-erase
cmd.append("-L '{0}'".format(label)) # Label
cmd.append("-l {0}".format(level)) # Dump level
cmd.append("-f {0}".format(destination)) # Media destination
cmd.append(device) # Device
cmd = ' '.join(cmd)
out = __salt__['cmd.run_all'](cmd)
_verify_run(out, cmd=cmd)
return _xfsdump_output(out['stdout'])
|
Dump filesystem device to the media (file, tape etc).
Required parameters:
* **device**: XFS device, content of which to be dumped.
* **destination**: Specifies a dump destination.
Valid options are:
* **label**: Label of the dump. Otherwise automatically generated label is used.
* **level**: Specifies a dump level of 0 to 9.
* **noerase**: Pre-erase media.
Other options are not used in order to let ``xfsdump`` use its default
values, as they are most optimal. See the ``xfsdump(8)`` manpage for
a more complete description of these options.
CLI Example:
.. code-block:: bash
salt '*' xfs.dump /dev/sda1 /detination/on/the/client
salt '*' xfs.dump /dev/sda1 /detination/on/the/client label='Company accountancy'
salt '*' xfs.dump /dev/sda1 /detination/on/the/client noerase=True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L163-L208
|
[
"def _verify_run(out, cmd=None):\n '''\n Crash to the log if command execution was not successful.\n '''\n if out.get(\"retcode\", 0) and out['stderr']:\n if cmd:\n log.debug('Command: \"%s\"', cmd)\n\n log.debug('Return code: %s', out.get('retcode'))\n log.debug('Error output:\\n%s', out.get('stderr', \"N/A\"))\n\n raise CommandExecutionError(out['stderr'])\n",
"def _xfsdump_output(data):\n '''\n Parse CLI output of the xfsdump utility.\n '''\n out = {}\n summary = []\n summary_block = False\n\n for line in [l.strip() for l in data.split(\"\\n\") if l.strip()]:\n line = re.sub(\"^xfsdump: \", \"\", line)\n if line.startswith(\"session id:\"):\n out['Session ID'] = line.split(\" \")[-1]\n elif line.startswith(\"session label:\"):\n out['Session label'] = re.sub(\"^session label: \", \"\", line)\n elif line.startswith(\"media file size\"):\n out['Media size'] = re.sub(r\"^media file size\\s+\", \"\", line)\n elif line.startswith(\"dump complete:\"):\n out['Dump complete'] = re.sub(r\"^dump complete:\\s+\", \"\", line)\n elif line.startswith(\"Dump Status:\"):\n out['Status'] = re.sub(r\"^Dump Status:\\s+\", \"\", line)\n elif line.startswith(\"Dump Summary:\"):\n summary_block = True\n continue\n\n if line.startswith(\" \") and summary_block:\n summary.append(line.strip())\n elif not line.startswith(\" \") and summary_block:\n summary_block = False\n\n if summary:\n out['Summary'] = ' '.join(summary)\n\n return out\n"
] |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2014 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
'''
Module for managing XFS file systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import time
import logging
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.data
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on POSIX-like systems
'''
return not salt.utils.platform.is_windows() \
and __grains__.get('kernel') == 'Linux'
def _verify_run(out, cmd=None):
'''
Crash to the log if command execution was not successful.
'''
if out.get("retcode", 0) and out['stderr']:
if cmd:
log.debug('Command: "%s"', cmd)
log.debug('Return code: %s', out.get('retcode'))
log.debug('Error output:\n%s', out.get('stderr', "N/A"))
raise CommandExecutionError(out['stderr'])
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
def _parse_xfs_info(data):
'''
Parse output from "xfs_info" or "xfs_growfs -n".
'''
ret = {}
spr = re.compile(r'\s+')
entry = None
for line in [spr.sub(" ", l).strip().replace(", ", " ") for l in data.split("\n")]:
if not line:
continue
nfo = _xfs_info_get_kv(line)
if not line.startswith("="):
entry = nfo.pop(0)
ret[entry[0]] = {'section': entry[(entry[1] != '***' and 1 or 0)]}
ret[entry[0]].update(dict(nfo))
return ret
def info(device):
'''
Get filesystem geometry information.
CLI Example:
.. code-block:: bash
salt '*' xfs.info /dev/sda1
'''
out = __salt__['cmd.run_all']("xfs_info {0}".format(device))
if out.get('stderr'):
raise CommandExecutionError(out['stderr'].replace("xfs_info:", "").strip())
return _parse_xfs_info(out['stdout'])
def _xfsdump_output(data):
    '''
    Parse CLI output of the xfsdump utility.

    Returns a dict with keys such as 'Session ID', 'Session label',
    'Media size', 'Dump complete' and 'Status', extracted from the
    matching "xfsdump: ..." lines.
    '''
    out = {}
    summary = []
    summary_block = False

    for line in [l.strip() for l in data.split("\n") if l.strip()]:
        # Drop the "xfsdump: " prefix that every output line carries
        line = re.sub("^xfsdump: ", "", line)
        if line.startswith("session id:"):
            out['Session ID'] = line.split(" ")[-1]
        elif line.startswith("session label:"):
            out['Session label'] = re.sub("^session label: ", "", line)
        elif line.startswith("media file size"):
            out['Media size'] = re.sub(r"^media file size\s+", "", line)
        elif line.startswith("dump complete:"):
            out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
        elif line.startswith("Dump Status:"):
            out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
        elif line.startswith("Dump Summary:"):
            summary_block = True
            continue

        # NOTE(review): every line was already .strip()-ed above, so the
        # startswith(" ") test below can never be true and the summary is
        # never actually collected -- looks like a latent bug; confirm
        # against real xfsdump output before changing behavior.
        if line.startswith(" ") and summary_block:
            summary.append(line.strip())
        elif not line.startswith(" ") and summary_block:
            summary_block = False

    if summary:
        out['Summary'] = ' '.join(summary)

    return out
def _xr_to_keyset(line):
'''
Parse xfsrestore output keyset elements.
'''
tkns = [elm for elm in line.strip().split(":", 1) if elm]
if len(tkns) == 1:
return "'{0}': ".format(tkns[0])
else:
key, val = tkns
return "'{0}': '{1}',".format(key.strip(), val.strip())
def _xfs_inventory_output(out):
'''
Transform xfsrestore inventory data output to a Python dict source and evaluate it.
'''
data = []
out = [line for line in out.split("\n") if line.strip()]
# No inventory yet
if len(out) == 1 and 'restore status' in out[0].lower():
return {'restore_status': out[0]}
ident = 0
data.append("{")
for line in out[:-1]:
if len([elm for elm in line.strip().split(":") if elm]) == 1:
n_ident = len(re.sub("[^\t]", "", line))
if ident > n_ident:
for step in range(ident):
data.append("},")
ident = n_ident
data.append(_xr_to_keyset(line))
data.append("{")
else:
data.append(_xr_to_keyset(line))
for step in range(ident + 1):
data.append("},")
data.append("},")
# We are evaling into a python dict, a json load
# would be safer
data = eval('\n'.join(data))[0] # pylint: disable=W0123
data['restore_status'] = out[-1]
return data
def inventory():
'''
Display XFS dump inventory without restoration.
CLI Example:
.. code-block:: bash
salt '*' xfs.inventory
'''
out = __salt__['cmd.run_all']("xfsrestore -I")
_verify_run(out)
return _xfs_inventory_output(out['stdout'])
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {}
def prune_dump(sessionid):
'''
Prunes the dump session identified by the given session id.
CLI Example:
.. code-block:: bash
salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
'''
out = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
_verify_run(out)
data = _xfs_prune_output(out['stdout'], sessionid)
if data:
return data
raise CommandExecutionError("Session UUID \"{0}\" was not found.".format(sessionid))
def _blkid_output(out):
'''
Parse blkid output.
'''
flt = lambda data: [el for el in data if el.strip()]
data = {}
for dev_meta in flt(out.split("\n\n")):
dev = {}
for items in flt(dev_meta.strip().split("\n")):
key, val = items.split("=", 1)
dev[key.lower()] = val
if dev.pop("type") == "xfs":
dev['label'] = dev.get('label')
data[dev.pop("devname")] = dev
mounts = _get_mounts()
for device in six.iterkeys(mounts):
if data.get(device):
data[device].update(mounts[device])
return data
def devices():
'''
Get known XFS formatted devices on the system.
CLI Example:
.. code-block:: bash
salt '*' xfs.devices
'''
out = __salt__['cmd.run_all']("blkid -o export")
_verify_run(out)
return _blkid_output(out['stdout'])
def _xfs_estimate_output(out):
'''
Parse xfs_estimate output.
'''
spc = re.compile(r"\s+")
data = {}
for line in [l for l in out.split("\n") if l.strip()][1:]:
directory, bsize, blocks, megabytes, logsize = spc.sub(" ", line).split(" ")
data[directory] = {
'block _size': bsize,
'blocks': blocks,
'megabytes': megabytes,
'logsize': logsize,
}
return data
def estimate(path):
'''
Estimate the space that an XFS filesystem will take.
For each directory estimate the space that directory would take
if it were copied to an XFS filesystem.
Estimation does not cross mount points.
CLI Example:
.. code-block:: bash
salt '*' xfs.estimate /path/to/file
salt '*' xfs.estimate /path/to/dir/*
'''
if not os.path.exists(path):
raise CommandExecutionError("Path \"{0}\" was not found.".format(path))
out = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
_verify_run(out)
return _xfs_estimate_output(out["stdout"])
def mkfs(device, label=None, ssize=None, noforce=None,
bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
'''
Create a file system on the specified device. By default wipes out with force.
General options:
* **label**: Specify volume label.
* **ssize**: Specify the fundamental sector size of the filesystem.
* **noforce**: Do not force create filesystem, if disk is already formatted.
Filesystem geometry options:
* **bso**: Block size options.
* **gmo**: Global metadata options.
* **dso**: Data section options. These options specify the location, size,
and other parameters of the data section of the filesystem.
* **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
* **lso**: Log section options.
* **nmo**: Naming options.
* **rso**: Realtime section options.
See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.
CLI Example:
.. code-block:: bash
salt '*' xfs.mkfs /dev/sda1
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
'''
getopts = lambda args: dict(((args and ("=" in args)
and args or None)) and [kw.split("=") for kw in args.split(",")] or [])
cmd = ["mkfs.xfs"]
if label:
cmd.append("-L")
cmd.append("'{0}'".format(label))
if ssize:
cmd.append("-s")
cmd.append(ssize)
for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
("-d", dso), ("-l", lso), ("-r", rso)]:
try:
if getopts(opts):
cmd.append(switch)
cmd.append(opts)
except Exception:
raise CommandExecutionError("Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))
if not noforce:
cmd.append("-f")
cmd.append(device)
cmd = ' '.join(cmd)
out = __salt__['cmd.run_all'](cmd)
_verify_run(out, cmd=cmd)
return _parse_xfs_info(out['stdout'])
def modify(device, label=None, lazy_counting=None, uuid=None):
'''
Modify parameters of an XFS filesystem.
CLI Example:
.. code-block:: bash
salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
salt '*' xfs.modify /dev/sda1 uuid=False
salt '*' xfs.modify /dev/sda1 uuid=True
'''
if not label and lazy_counting is None and uuid is None:
raise CommandExecutionError("Nothing specified for modification for \"{0}\" device".format(device))
cmd = ['xfs_admin']
if label:
cmd.append("-L")
cmd.append("'{0}'".format(label))
if lazy_counting is False:
cmd.append("-c")
cmd.append("0")
elif lazy_counting:
cmd.append("-c")
cmd.append("1")
if uuid is False:
cmd.append("-U")
cmd.append("nil")
elif uuid:
cmd.append("-U")
cmd.append("generate")
cmd.append(device)
cmd = ' '.join(cmd)
_verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)
out = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
_verify_run(out)
return _blkid_output(out['stdout'])
def _get_mounts():
'''
List mounted filesystems.
'''
mounts = {}
with salt.utils.files.fopen("/proc/mounts") as fhr:
for line in salt.utils.data.decode(fhr.readlines()):
device, mntpnt, fstype, options, fs_freq, fs_passno = line.strip().split(" ")
if fstype != 'xfs':
continue
mounts[device] = {
'mount_point': mntpnt,
'options': options.split(","),
}
return mounts
def defragment(device):
'''
Defragment mounted XFS filesystem.
In order to mount a filesystem, device should be properly mounted and writable.
CLI Example:
.. code-block:: bash
salt '*' xfs.defragment /dev/sda1
'''
if device == '/':
raise CommandExecutionError("Root is not a device.")
if not _get_mounts().get(device):
raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))
out = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
_verify_run(out)
return {
'log': out['stdout']
}
|
saltstack/salt
|
salt/modules/xfs.py
|
_xr_to_keyset
|
python
|
def _xr_to_keyset(line):
'''
Parse xfsrestore output keyset elements.
'''
tkns = [elm for elm in line.strip().split(":", 1) if elm]
if len(tkns) == 1:
return "'{0}': ".format(tkns[0])
else:
key, val = tkns
return "'{0}': '{1}',".format(key.strip(), val.strip())
|
Parse xfsrestore output keyset elements.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L211-L220
| null |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2014 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
'''
Module for managing XFS file systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import time
import logging
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.data
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Load only on Linux; the XFS tooling is not available on Windows.
    '''
    if salt.utils.platform.is_windows():
        return False
    return __grains__.get('kernel') == 'Linux'
def _verify_run(out, cmd=None):
    '''
    Raise a ``CommandExecutionError`` (after logging the details) when the
    command result in ``out`` indicates failure.

    out
        Result dict from ``cmd.run_all`` (uses the ``retcode`` and
        ``stderr`` keys).
    cmd
        Optional command string, logged for context on failure.

    NOTE(review): failure is raised only when BOTH the return code is
    non-zero AND stderr is non-empty -- a failing command that prints
    nothing to stderr passes silently. Presumably deliberate (some tools
    emit stderr warnings with retcode 0 and vice versa), but confirm.
    '''
    if out.get("retcode", 0) and out['stderr']:
        if cmd:
            log.debug('Command: "%s"', cmd)
        log.debug('Return code: %s', out.get('retcode'))
        log.debug('Error output:\n%s', out.get('stderr', "N/A"))
        raise CommandExecutionError(out['stderr'])
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
def _parse_xfs_info(data):
    '''
    Parse the output of "xfs_info" or "xfs_growfs -n" into a dict of
    sections, each mapping option names to string values.
    '''
    ret = {}
    whitespace = re.compile(r'\s+')
    entry = None
    normalized = [whitespace.sub(" ", raw).strip().replace(", ", " ")
                  for raw in data.split("\n")]
    for line in normalized:
        if not line:
            continue
        pairs = _xfs_info_get_kv(line)
        if not line.startswith("="):
            # A fresh section begins; "***" means it has no value of its own,
            # so fall back to the section name itself.
            entry = pairs.pop(0)
            ret[entry[0]] = {'section': entry[1 if entry[1] != '***' else 0]}
        ret[entry[0]].update(dict(pairs))
    return ret
def info(device):
    '''
    Get filesystem geometry information.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.info /dev/sda1
    '''
    result = __salt__['cmd.run_all']("xfs_info {0}".format(device))
    err = result.get('stderr')
    if err:
        raise CommandExecutionError(err.replace("xfs_info:", "").strip())
    return _parse_xfs_info(result['stdout'])
def _xfsdump_output(data):
'''
Parse CLI output of the xfsdump utility.
'''
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out['Session ID'] = line.split(" ")[-1]
elif line.startswith("session label:"):
out['Session label'] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out['Media size'] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out['Summary'] = ' '.join(summary)
return out
def dump(device, destination, level=0, label=None, noerase=None):
    '''
    Dump filesystem device to the media (file, tape etc).

    Required parameters:

    * **device**: XFS device, content of which to be dumped.
    * **destination**: Specifies a dump destination.

    Valid options are:

    * **label**: Label of the dump. Otherwise automatically generated label is used.
    * **level**: Specifies a dump level of 0 to 9.
    * **noerase**: Pre-erase media.

    Other options are not used in order to let ``xfsdump`` use its default
    values, as they are most optimal. See the ``xfsdump(8)`` manpage for
    a more complete description of these options.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.dump /dev/sda1 /destination/on/the/client
        salt '*' xfs.dump /dev/sda1 /destination/on/the/client label='Company accountancy'
        salt '*' xfs.dump /dev/sda1 /destination/on/the/client noerase=True
    '''
    if not salt.utils.path.which("xfsdump"):
        # Message fixed: previous wording ("has to be installed or missing")
        # was self-contradictory.
        raise CommandExecutionError("Utility \"xfsdump\" has to be installed.")

    # Auto-generate a timestamped label when none is given; single quotes are
    # swapped for double quotes so the label survives shell quoting below.
    label = label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device),
                                   time.localtime()).replace("'", '"')
    cmd = ["xfsdump"]
    cmd.append("-F")                              # Do not prompt the operator
    if not noerase:
        cmd.append("-E")                          # Pre-erase media
    cmd.append("-L '{0}'".format(label))          # Session label
    cmd.append("-l {0}".format(level))            # Dump level (0-9)
    cmd.append("-f {0}".format(destination))      # Media destination
    cmd.append(device)                            # Source device

    cmd = ' '.join(cmd)
    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)

    return _xfsdump_output(out['stdout'])
def _xfs_inventory_output(out):
    '''
    Transform xfsrestore inventory output into Python dict-literal source
    and evaluate it into a nested dict.

    The indentation (tab depth) of each "key:" line in the output encodes
    nesting; "key: value" lines become plain entries.
    '''
    import ast

    data = []
    out = [line for line in out.split("\n") if line.strip()]

    # No inventory yet
    if len(out) == 1 and 'restore status' in out[0].lower():
        return {'restore_status': out[0]}

    ident = 0
    data.append("{")
    for line in out[:-1]:
        if len([elm for elm in line.strip().split(":") if elm]) == 1:
            # Bare "key:" line opens a nested dict; tab count is the depth.
            n_ident = len(re.sub(r"[^\t]", "", line))
            if ident > n_ident:
                for step in range(ident):
                    data.append("},")
            ident = n_ident
            data.append(_xr_to_keyset(line))
            data.append("{")
        else:
            data.append(_xr_to_keyset(line))
    for step in range(ident + 1):
        data.append("},")
    data.append("},")

    # ast.literal_eval only accepts Python literals, unlike eval() which
    # would execute arbitrary expressions embedded in command output.
    data = ast.literal_eval('\n'.join(data))[0]
    data['restore_status'] = out[-1]
    return data
def inventory():
    '''
    Display XFS dump inventory without restoration.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.inventory
    '''
    result = __salt__['cmd.run_all']("xfsrestore -I")
    _verify_run(result)
    return _xfs_inventory_output(result['stdout'])
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {}
def prune_dump(sessionid):
    '''
    Prunes the dump session identified by the given session id.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
    '''
    result = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
    _verify_run(result)

    parsed = _xfs_prune_output(result['stdout'], sessionid)
    if not parsed:
        raise CommandExecutionError("Session UUID \"{0}\" was not found.".format(sessionid))
    return parsed
def _blkid_output(out):
    '''
    Parse "blkid -o export" output into a {device: metadata} dict for XFS
    devices, merging in mount info for currently mounted devices.
    '''
    data = {}
    # Stanzas are separated by blank lines; each stanza describes one device.
    for stanza in out.split("\n\n"):
        if not stanza.strip():
            continue
        dev = {}
        for row in stanza.strip().split("\n"):
            if not row.strip():
                continue
            key, val = row.split("=", 1)
            dev[key.lower()] = val
        if dev.pop("type") == "xfs":
            # Guarantee a 'label' key even when blkid reported none.
            dev['label'] = dev.get('label')
            data[dev.pop("devname")] = dev

    mounts = _get_mounts()
    for device in six.iterkeys(mounts):
        if data.get(device):
            data[device].update(mounts[device])

    return data
def devices():
    '''
    Get known XFS formatted devices on the system.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.devices
    '''
    result = __salt__['cmd.run_all']("blkid -o export")
    _verify_run(result)
    return _blkid_output(result['stdout'])
def _xfs_estimate_output(out):
'''
Parse xfs_estimate output.
'''
spc = re.compile(r"\s+")
data = {}
for line in [l for l in out.split("\n") if l.strip()][1:]:
directory, bsize, blocks, megabytes, logsize = spc.sub(" ", line).split(" ")
data[directory] = {
'block _size': bsize,
'blocks': blocks,
'megabytes': megabytes,
'logsize': logsize,
}
return data
def estimate(path):
    '''
    Estimate the space that an XFS filesystem will take.
    For each directory estimate the space that directory would take
    if it were copied to an XFS filesystem.
    Estimation does not cross mount points.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.estimate /path/to/file
        salt '*' xfs.estimate /path/to/dir/*
    '''
    if not os.path.exists(path):
        raise CommandExecutionError("Path \"{0}\" was not found.".format(path))

    result = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
    _verify_run(result)
    return _xfs_estimate_output(result["stdout"])
def mkfs(device, label=None, ssize=None, noforce=None,
         bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
    '''
    Create a file system on the specified device. By default wipes out with force.

    General options:

    * **label**: Specify volume label.
    * **ssize**: Specify the fundamental sector size of the filesystem.
    * **noforce**: Do not force create filesystem, if disk is already formatted.

    Filesystem geometry options:

    * **bso**: Block size options.
    * **gmo**: Global metadata options.
    * **dso**: Data section options. These options specify the location, size,
               and other parameters of the data section of the filesystem.
    * **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
    * **lso**: Log section options.
    * **nmo**: Naming options.
    * **rso**: Realtime section options.

    See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.mkfs /dev/sda1
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
    '''
    def _option_dict(args):
        # Empty dict for empty/None input; raises (caught below) when any
        # comma-separated element is not a "key=value" pair.
        if not (args and "=" in args):
            return dict()
        return dict(kw.split("=") for kw in args.split(","))

    cmd = ["mkfs.xfs"]
    if label:
        cmd.extend(["-L", "'{0}'".format(label)])
    if ssize:
        cmd.extend(["-s", ssize])

    for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
                         ("-d", dso), ("-l", lso), ("-r", rso)]:
        try:
            if _option_dict(opts):
                cmd.extend([switch, opts])
        except Exception:
            raise CommandExecutionError("Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))

    if not noforce:
        cmd.append("-f")
    cmd.append(device)

    cmd = ' '.join(cmd)
    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)

    return _parse_xfs_info(out['stdout'])
def modify(device, label=None, lazy_counting=None, uuid=None):
    '''
    Modify parameters of an XFS filesystem.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
        salt '*' xfs.modify /dev/sda1 uuid=False
        salt '*' xfs.modify /dev/sda1 uuid=True
    '''
    if not label and lazy_counting is None and uuid is None:
        raise CommandExecutionError("Nothing specified for modification for \"{0}\" device".format(device))

    cmd = ['xfs_admin']
    if label:
        cmd.extend(["-L", "'{0}'".format(label)])

    # Only an explicit False emits "-c 0"; other falsy values (None, 0)
    # deliberately emit nothing, matching the original ladder.
    if lazy_counting is False:
        cmd.extend(["-c", "0"])
    elif lazy_counting:
        cmd.extend(["-c", "1"])

    if uuid is False:
        cmd.extend(["-U", "nil"])
    elif uuid:
        cmd.extend(["-U", "generate"])

    cmd.append(device)
    cmd = ' '.join(cmd)
    _verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)

    # Re-read the device metadata so callers see the applied changes.
    out = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
    _verify_run(out)

    return _blkid_output(out['stdout'])
def _get_mounts():
    '''
    List mounted XFS filesystems, keyed by device, from /proc/mounts.
    '''
    mounts = {}
    with salt.utils.files.fopen("/proc/mounts") as fhr:
        for line in salt.utils.data.decode(fhr.readlines()):
            device, mntpnt, fstype, options, fs_freq, fs_passno = line.strip().split(" ")
            if fstype == 'xfs':
                mounts[device] = {
                    'mount_point': mntpnt,
                    'options': options.split(","),
                }
    return mounts
def defragment(device):
    '''
    Defragment mounted XFS filesystem.
    In order to mount a filesystem, device should be properly mounted and writable.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.defragment /dev/sda1
    '''
    if device == '/':
        raise CommandExecutionError("Root is not a device.")
    if not _get_mounts().get(device):
        raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))

    result = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
    _verify_run(result)

    return {'log': result['stdout']}
|
saltstack/salt
|
salt/modules/xfs.py
|
_xfs_inventory_output
|
python
|
def _xfs_inventory_output(out):
'''
Transform xfsrestore inventory data output to a Python dict source and evaluate it.
'''
data = []
out = [line for line in out.split("\n") if line.strip()]
# No inventory yet
if len(out) == 1 and 'restore status' in out[0].lower():
return {'restore_status': out[0]}
ident = 0
data.append("{")
for line in out[:-1]:
if len([elm for elm in line.strip().split(":") if elm]) == 1:
n_ident = len(re.sub("[^\t]", "", line))
if ident > n_ident:
for step in range(ident):
data.append("},")
ident = n_ident
data.append(_xr_to_keyset(line))
data.append("{")
else:
data.append(_xr_to_keyset(line))
for step in range(ident + 1):
data.append("},")
data.append("},")
# We are evaling into a python dict, a json load
# would be safer
data = eval('\n'.join(data))[0] # pylint: disable=W0123
data['restore_status'] = out[-1]
return data
|
Transform xfsrestore inventory data output to a Python dict source and evaluate it.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L223-L256
|
[
"def _xr_to_keyset(line):\n '''\n Parse xfsrestore output keyset elements.\n '''\n tkns = [elm for elm in line.strip().split(\":\", 1) if elm]\n if len(tkns) == 1:\n return \"'{0}': \".format(tkns[0])\n else:\n key, val = tkns\n return \"'{0}': '{1}',\".format(key.strip(), val.strip())\n"
] |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2014 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
'''
Module for managing XFS file systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import time
import logging
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.data
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on POSIX-like systems
'''
return not salt.utils.platform.is_windows() \
and __grains__.get('kernel') == 'Linux'
def _verify_run(out, cmd=None):
'''
Crash to the log if command execution was not successful.
'''
if out.get("retcode", 0) and out['stderr']:
if cmd:
log.debug('Command: "%s"', cmd)
log.debug('Return code: %s', out.get('retcode'))
log.debug('Error output:\n%s', out.get('stderr', "N/A"))
raise CommandExecutionError(out['stderr'])
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
def _parse_xfs_info(data):
'''
Parse output from "xfs_info" or "xfs_growfs -n".
'''
ret = {}
spr = re.compile(r'\s+')
entry = None
for line in [spr.sub(" ", l).strip().replace(", ", " ") for l in data.split("\n")]:
if not line:
continue
nfo = _xfs_info_get_kv(line)
if not line.startswith("="):
entry = nfo.pop(0)
ret[entry[0]] = {'section': entry[(entry[1] != '***' and 1 or 0)]}
ret[entry[0]].update(dict(nfo))
return ret
def info(device):
'''
Get filesystem geometry information.
CLI Example:
.. code-block:: bash
salt '*' xfs.info /dev/sda1
'''
out = __salt__['cmd.run_all']("xfs_info {0}".format(device))
if out.get('stderr'):
raise CommandExecutionError(out['stderr'].replace("xfs_info:", "").strip())
return _parse_xfs_info(out['stdout'])
def _xfsdump_output(data):
'''
Parse CLI output of the xfsdump utility.
'''
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out['Session ID'] = line.split(" ")[-1]
elif line.startswith("session label:"):
out['Session label'] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out['Media size'] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out['Summary'] = ' '.join(summary)
return out
def dump(device, destination, level=0, label=None, noerase=None):
'''
Dump filesystem device to the media (file, tape etc).
Required parameters:
* **device**: XFS device, content of which to be dumped.
* **destination**: Specifies a dump destination.
Valid options are:
* **label**: Label of the dump. Otherwise automatically generated label is used.
* **level**: Specifies a dump level of 0 to 9.
* **noerase**: Pre-erase media.
Other options are not used in order to let ``xfsdump`` use its default
values, as they are most optimal. See the ``xfsdump(8)`` manpage for
a more complete description of these options.
CLI Example:
.. code-block:: bash
salt '*' xfs.dump /dev/sda1 /detination/on/the/client
salt '*' xfs.dump /dev/sda1 /detination/on/the/client label='Company accountancy'
salt '*' xfs.dump /dev/sda1 /detination/on/the/client noerase=True
'''
if not salt.utils.path.which("xfsdump"):
raise CommandExecutionError("Utility \"xfsdump\" has to be installed or missing.")
label = label and label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device),
time.localtime()).replace("'", '"')
cmd = ["xfsdump"]
cmd.append("-F") # Force
if not noerase:
cmd.append("-E") # pre-erase
cmd.append("-L '{0}'".format(label)) # Label
cmd.append("-l {0}".format(level)) # Dump level
cmd.append("-f {0}".format(destination)) # Media destination
cmd.append(device) # Device
cmd = ' '.join(cmd)
out = __salt__['cmd.run_all'](cmd)
_verify_run(out, cmd=cmd)
return _xfsdump_output(out['stdout'])
def _xr_to_keyset(line):
'''
Parse xfsrestore output keyset elements.
'''
tkns = [elm for elm in line.strip().split(":", 1) if elm]
if len(tkns) == 1:
return "'{0}': ".format(tkns[0])
else:
key, val = tkns
return "'{0}': '{1}',".format(key.strip(), val.strip())
def inventory():
'''
Display XFS dump inventory without restoration.
CLI Example:
.. code-block:: bash
salt '*' xfs.inventory
'''
out = __salt__['cmd.run_all']("xfsrestore -I")
_verify_run(out)
return _xfs_inventory_output(out['stdout'])
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {}
def prune_dump(sessionid):
'''
Prunes the dump session identified by the given session id.
CLI Example:
.. code-block:: bash
salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
'''
out = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
_verify_run(out)
data = _xfs_prune_output(out['stdout'], sessionid)
if data:
return data
raise CommandExecutionError("Session UUID \"{0}\" was not found.".format(sessionid))
def _blkid_output(out):
'''
Parse blkid output.
'''
flt = lambda data: [el for el in data if el.strip()]
data = {}
for dev_meta in flt(out.split("\n\n")):
dev = {}
for items in flt(dev_meta.strip().split("\n")):
key, val = items.split("=", 1)
dev[key.lower()] = val
if dev.pop("type") == "xfs":
dev['label'] = dev.get('label')
data[dev.pop("devname")] = dev
mounts = _get_mounts()
for device in six.iterkeys(mounts):
if data.get(device):
data[device].update(mounts[device])
return data
def devices():
'''
Get known XFS formatted devices on the system.
CLI Example:
.. code-block:: bash
salt '*' xfs.devices
'''
out = __salt__['cmd.run_all']("blkid -o export")
_verify_run(out)
return _blkid_output(out['stdout'])
def _xfs_estimate_output(out):
'''
Parse xfs_estimate output.
'''
spc = re.compile(r"\s+")
data = {}
for line in [l for l in out.split("\n") if l.strip()][1:]:
directory, bsize, blocks, megabytes, logsize = spc.sub(" ", line).split(" ")
data[directory] = {
'block _size': bsize,
'blocks': blocks,
'megabytes': megabytes,
'logsize': logsize,
}
return data
def estimate(path):
'''
Estimate the space that an XFS filesystem will take.
For each directory estimate the space that directory would take
if it were copied to an XFS filesystem.
Estimation does not cross mount points.
CLI Example:
.. code-block:: bash
salt '*' xfs.estimate /path/to/file
salt '*' xfs.estimate /path/to/dir/*
'''
if not os.path.exists(path):
raise CommandExecutionError("Path \"{0}\" was not found.".format(path))
out = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
_verify_run(out)
return _xfs_estimate_output(out["stdout"])
def mkfs(device, label=None, ssize=None, noforce=None,
bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
'''
Create a file system on the specified device. By default wipes out with force.
General options:
* **label**: Specify volume label.
* **ssize**: Specify the fundamental sector size of the filesystem.
* **noforce**: Do not force create filesystem, if disk is already formatted.
Filesystem geometry options:
* **bso**: Block size options.
* **gmo**: Global metadata options.
* **dso**: Data section options. These options specify the location, size,
and other parameters of the data section of the filesystem.
* **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
* **lso**: Log section options.
* **nmo**: Naming options.
* **rso**: Realtime section options.
See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.
CLI Example:
.. code-block:: bash
salt '*' xfs.mkfs /dev/sda1
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
'''
getopts = lambda args: dict(((args and ("=" in args)
and args or None)) and [kw.split("=") for kw in args.split(",")] or [])
cmd = ["mkfs.xfs"]
if label:
cmd.append("-L")
cmd.append("'{0}'".format(label))
if ssize:
cmd.append("-s")
cmd.append(ssize)
for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
("-d", dso), ("-l", lso), ("-r", rso)]:
try:
if getopts(opts):
cmd.append(switch)
cmd.append(opts)
except Exception:
raise CommandExecutionError("Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))
if not noforce:
cmd.append("-f")
cmd.append(device)
cmd = ' '.join(cmd)
out = __salt__['cmd.run_all'](cmd)
_verify_run(out, cmd=cmd)
return _parse_xfs_info(out['stdout'])
def modify(device, label=None, lazy_counting=None, uuid=None):
'''
Modify parameters of an XFS filesystem.
CLI Example:
.. code-block:: bash
salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
salt '*' xfs.modify /dev/sda1 uuid=False
salt '*' xfs.modify /dev/sda1 uuid=True
'''
if not label and lazy_counting is None and uuid is None:
raise CommandExecutionError("Nothing specified for modification for \"{0}\" device".format(device))
cmd = ['xfs_admin']
if label:
cmd.append("-L")
cmd.append("'{0}'".format(label))
if lazy_counting is False:
cmd.append("-c")
cmd.append("0")
elif lazy_counting:
cmd.append("-c")
cmd.append("1")
if uuid is False:
cmd.append("-U")
cmd.append("nil")
elif uuid:
cmd.append("-U")
cmd.append("generate")
cmd.append(device)
cmd = ' '.join(cmd)
_verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)
out = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
_verify_run(out)
return _blkid_output(out['stdout'])
def _get_mounts():
'''
List mounted filesystems.
'''
mounts = {}
with salt.utils.files.fopen("/proc/mounts") as fhr:
for line in salt.utils.data.decode(fhr.readlines()):
device, mntpnt, fstype, options, fs_freq, fs_passno = line.strip().split(" ")
if fstype != 'xfs':
continue
mounts[device] = {
'mount_point': mntpnt,
'options': options.split(","),
}
return mounts
def defragment(device):
'''
Defragment mounted XFS filesystem.
In order to mount a filesystem, device should be properly mounted and writable.
CLI Example:
.. code-block:: bash
salt '*' xfs.defragment /dev/sda1
'''
if device == '/':
raise CommandExecutionError("Root is not a device.")
if not _get_mounts().get(device):
raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))
out = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
_verify_run(out)
return {
'log': out['stdout']
}
|
saltstack/salt
|
salt/modules/xfs.py
|
_xfs_prune_output
|
python
|
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {}
|
Parse prune output.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L275-L297
| null |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2014 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
'''
Module for managing XFS file systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import time
import logging
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.data
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on POSIX-like systems
'''
return not salt.utils.platform.is_windows() \
and __grains__.get('kernel') == 'Linux'
def _verify_run(out, cmd=None):
'''
Crash to the log if command execution was not successful.
'''
if out.get("retcode", 0) and out['stderr']:
if cmd:
log.debug('Command: "%s"', cmd)
log.debug('Return code: %s', out.get('retcode'))
log.debug('Error output:\n%s', out.get('stderr', "N/A"))
raise CommandExecutionError(out['stderr'])
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
def _parse_xfs_info(data):
    '''
    Turn the textual output of ``xfs_info`` (or ``xfs_growfs -n``) into a
    nested dict keyed by section name (``meta-data``, ``naming``, ...).
    '''
    ret = {}
    whitespace = re.compile(r'\s+')
    section = None
    for raw in data.split("\n"):
        line = whitespace.sub(" ", raw).strip().replace(", ", " ")
        if not line:
            continue
        pairs = _xfs_info_get_kv(line)
        if not line.startswith("="):
            # A new section begins; "***" flags a keyword with no value,
            # in which case the keyword itself names the section.
            section = pairs.pop(0)
            ret[section[0]] = {
                'section': section[1] if section[1] != '***' else section[0],
            }
        # Continuation lines (starting with "=") extend the last section.
        ret[section[0]].update(dict(pairs))
    return ret
def info(device):
    '''
    Get filesystem geometry information.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.info /dev/sda1
    '''
    result = __salt__['cmd.run_all']("xfs_info {0}".format(device))
    stderr = result.get('stderr')
    if stderr:
        # Strip the "xfs_info:" tool prefix from the error text.
        raise CommandExecutionError(stderr.replace("xfs_info:", "").strip())
    return _parse_xfs_info(result['stdout'])
def _xfsdump_output(data):
'''
Parse CLI output of the xfsdump utility.
'''
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out['Session ID'] = line.split(" ")[-1]
elif line.startswith("session label:"):
out['Session label'] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out['Media size'] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out['Summary'] = ' '.join(summary)
return out
def dump(device, destination, level=0, label=None, noerase=None):
    '''
    Dump filesystem device to the media (file, tape etc).

    Required parameters:

    * **device**: XFS device, content of which to be dumped.
    * **destination**: Specifies a dump destination.

    Valid options are:

    * **label**: Label of the dump. Otherwise automatically generated label is used.
    * **level**: Specifies a dump level of 0 to 9.
    * **noerase**: Pre-erase media.

    Other options are not used in order to let ``xfsdump`` use its default
    values, as they are most optimal. See the ``xfsdump(8)`` manpage for
    a more complete description of these options.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.dump /dev/sda1 /destination/on/the/client
        salt '*' xfs.dump /dev/sda1 /destination/on/the/client label='Company accountancy'
        salt '*' xfs.dump /dev/sda1 /destination/on/the/client noerase=True
    '''
    if not salt.utils.path.which("xfsdump"):
        # Original message read "has to be installed or missing", which
        # was self-contradictory.
        raise CommandExecutionError("Utility \"xfsdump\" has to be installed.")

    # Default label: a timestamped description of the device. Single
    # quotes are swapped for double quotes so the label survives the
    # shell quoting applied below.
    label = label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device),
                                   time.localtime()).replace("'", '"')

    cmd = ["xfsdump"]
    cmd.append("-F")                          # do not prompt the operator
    if not noerase:
        cmd.append("-E")                      # pre-erase the media
    cmd.append("-L '{0}'".format(label))      # session label
    cmd.append("-l {0}".format(level))        # dump level (0-9)
    cmd.append("-f {0}".format(destination))  # media destination
    cmd.append(device)                        # device to dump
    cmd = ' '.join(cmd)

    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)

    return _xfsdump_output(out['stdout'])
def _xr_to_keyset(line):
'''
Parse xfsrestore output keyset elements.
'''
tkns = [elm for elm in line.strip().split(":", 1) if elm]
if len(tkns) == 1:
return "'{0}': ".format(tkns[0])
else:
key, val = tkns
return "'{0}': '{1}',".format(key.strip(), val.strip())
def _xfs_inventory_output(out):
    '''
    Transform xfsrestore inventory data output to a Python dict source and evaluate it.

    The inventory uses TAB-based indentation to express nesting: lines
    with a lone "key:" open a nested block, "key: value" lines become
    entries, and a drop in tab depth closes the open blocks. The last
    output line is kept verbatim under ``restore_status``.
    '''
    data = []
    out = [line for line in out.split("\n") if line.strip()]
    # No inventory yet
    if len(out) == 1 and 'restore status' in out[0].lower():
        return {'restore_status': out[0]}
    ident = 0
    data.append("{")
    # All lines except the trailing status line are translated into
    # dict-literal source fragments.
    for line in out[:-1]:
        if len([elm for elm in line.strip().split(":") if elm]) == 1:
            # Header line ("key:"): nesting depth is the number of TABs.
            n_ident = len(re.sub("[^\t]", "", line))
            if ident > n_ident:
                # Depth decreased: close every block opened so far.
                for step in range(ident):
                    data.append("},")
                ident = n_ident
            data.append(_xr_to_keyset(line))
            data.append("{")
        else:
            # Plain "key: value" entry inside the current block.
            data.append(_xr_to_keyset(line))
    # Close all blocks still open, plus the outermost dict.
    for step in range(ident + 1):
        data.append("},")
    data.append("},")
    # We are evaling into a python dict, a json load
    # would be safer
    # NOTE(review): eval() on tool output is risky; the input comes from
    # the local xfsrestore binary, not the network, but a structured
    # parse (ast.literal_eval at minimum) would be safer.
    data = eval('\n'.join(data))[0]  # pylint: disable=W0123
    data['restore_status'] = out[-1]
    return data
def inventory():
    '''
    Display XFS dump inventory without restoration.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.inventory
    '''
    result = __salt__['cmd.run_all']("xfsrestore -I")
    _verify_run(result)
    return _xfs_inventory_output(result['stdout'])
def prune_dump(sessionid):
    '''
    Prunes the dump session identified by the given session id.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
    '''
    result = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
    _verify_run(result)

    pruned = _xfs_prune_output(result['stdout'], sessionid)
    if not pruned:
        # An empty parse means the UUID did not match any session.
        raise CommandExecutionError("Session UUID \"{0}\" was not found.".format(sessionid))
    return pruned
def _blkid_output(out):
    '''
    Parse ``blkid -o export`` output into a dict of XFS devices keyed by
    device path, merging in mount information for mounted devices.
    '''
    data = {}
    # Records are separated by blank lines; each record is KEY=VALUE lines.
    for record in out.split("\n\n"):
        if not record.strip():
            continue
        dev = {}
        for pair in record.strip().split("\n"):
            if not pair.strip():
                continue
            key, value = pair.split("=", 1)
            dev[key.lower()] = value
        # Only keep XFS devices; "type" and "devname" are folded away.
        # "label" is normalised to an explicit None when absent.
        if dev.pop("type") == "xfs":
            dev['label'] = dev.get('label')
            data[dev.pop("devname")] = dev

    mounts = _get_mounts()
    for device in mounts:
        if data.get(device):
            data[device].update(mounts[device])

    return data
def devices():
    '''
    Get known XFS formatted devices on the system.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.devices
    '''
    result = __salt__['cmd.run_all']("blkid -o export")
    _verify_run(result)
    return _blkid_output(result['stdout'])
def _xfs_estimate_output(out):
'''
Parse xfs_estimate output.
'''
spc = re.compile(r"\s+")
data = {}
for line in [l for l in out.split("\n") if l.strip()][1:]:
directory, bsize, blocks, megabytes, logsize = spc.sub(" ", line).split(" ")
data[directory] = {
'block _size': bsize,
'blocks': blocks,
'megabytes': megabytes,
'logsize': logsize,
}
return data
def estimate(path):
    '''
    Estimate the space that an XFS filesystem will take.
    For each directory estimate the space that directory would take
    if it were copied to an XFS filesystem.
    Estimation does not cross mount points.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.estimate /path/to/file
        salt '*' xfs.estimate /path/to/dir/*
    '''
    if not os.path.exists(path):
        raise CommandExecutionError("Path \"{0}\" was not found.".format(path))

    result = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
    _verify_run(result)
    return _xfs_estimate_output(result["stdout"])
def mkfs(device, label=None, ssize=None, noforce=None,
         bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
    '''
    Create a file system on the specified device. By default wipes out with force.

    General options:

    * **label**: Specify volume label.
    * **ssize**: Specify the fundamental sector size of the filesystem.
    * **noforce**: Do not force create filesystem, if disk is already formatted.

    Filesystem geometry options:

    * **bso**: Block size options.
    * **gmo**: Global metadata options.
    * **dso**: Data section options. These options specify the location, size,
      and other parameters of the data section of the filesystem.
    * **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
    * **lso**: Log section options.
    * **nmo**: Naming options.
    * **rso**: Realtime section options.

    See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.mkfs /dev/sda1
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
    '''
    # Validate that an option string is a well-formed comma-separated
    # "key=value" list; malformed input makes the dict() call raise,
    # which is caught below and reported per switch.
    getopts = lambda args: dict(((args and ("=" in args)
                                  and args or None)) and [kw.split("=") for kw in args.split(",")] or [])
    cmd = ["mkfs.xfs"]
    if label:
        cmd.append("-L")
        cmd.append("'{0}'".format(label))
    if ssize:
        cmd.append("-s")
        cmd.append(ssize)
    # Map every geometry option group onto its mkfs.xfs switch; options
    # are passed through verbatim once they validate.
    for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
                         ("-d", dso), ("-l", lso), ("-r", rso)]:
        try:
            if getopts(opts):
                cmd.append(switch)
                cmd.append(opts)
        except Exception:
            # Any parse failure (e.g. a token without "=") ends up here.
            raise CommandExecutionError("Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))
    if not noforce:
        # Default behaviour: overwrite an existing filesystem (-f).
        cmd.append("-f")
    cmd.append(device)
    cmd = ' '.join(cmd)
    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)
    # mkfs.xfs prints the resulting geometry in xfs_info format.
    return _parse_xfs_info(out['stdout'])
def modify(device, label=None, lazy_counting=None, uuid=None):
    '''
    Modify parameters of an XFS filesystem.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
        salt '*' xfs.modify /dev/sda1 uuid=False
        salt '*' xfs.modify /dev/sda1 uuid=True
    '''
    if not label and lazy_counting is None and uuid is None:
        raise CommandExecutionError(
            "Nothing specified for modification for \"{0}\" device".format(device))

    cmd = ['xfs_admin']
    if label:
        cmd.extend(["-L", "'{0}'".format(label)])
    # "is False" is distinct from merely falsy: only an explicit False
    # disables lazy counting / requests a nil UUID.
    if lazy_counting is False:
        cmd.extend(["-c", "0"])
    elif lazy_counting:
        cmd.extend(["-c", "1"])
    if uuid is False:
        cmd.extend(["-U", "nil"])
    elif uuid:
        cmd.extend(["-U", "generate"])
    cmd.append(device)
    cmd = ' '.join(cmd)

    _verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)

    # Re-read the device metadata so the caller sees the applied state.
    out = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
    _verify_run(out)
    return _blkid_output(out['stdout'])
def _get_mounts():
    '''
    Return currently mounted XFS filesystems keyed by device, each with
    its mount point and mount options as read from ``/proc/mounts``.
    '''
    mounts = {}
    with salt.utils.files.fopen("/proc/mounts") as fhr:
        for line in salt.utils.data.decode(fhr.readlines()):
            # /proc/mounts: device mountpoint fstype options freq passno
            device, mntpnt, fstype, options, _freq, _passno = line.strip().split(" ")
            if fstype == 'xfs':
                mounts[device] = {
                    'mount_point': mntpnt,
                    'options': options.split(","),
                }
    return mounts
def defragment(device):
    '''
    Defragment mounted XFS filesystem.
    In order to mount a filesystem, device should be properly mounted and writable.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.defragment /dev/sda1
    '''
    if device == '/':
        raise CommandExecutionError("Root is not a device.")
    if not _get_mounts().get(device):
        raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))

    result = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
    _verify_run(result)
    return {'log': result['stdout']}
|
saltstack/salt
|
salt/modules/xfs.py
|
prune_dump
|
python
|
def prune_dump(sessionid):
'''
Prunes the dump session identified by the given session id.
CLI Example:
.. code-block:: bash
salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
'''
out = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
_verify_run(out)
data = _xfs_prune_output(out['stdout'], sessionid)
if data:
return data
raise CommandExecutionError("Session UUID \"{0}\" was not found.".format(sessionid))
|
Prunes the dump session identified by the given session id.
CLI Example:
.. code-block:: bash
salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L300-L318
|
[
"def _verify_run(out, cmd=None):\n '''\n Crash to the log if command execution was not successful.\n '''\n if out.get(\"retcode\", 0) and out['stderr']:\n if cmd:\n log.debug('Command: \"%s\"', cmd)\n\n log.debug('Return code: %s', out.get('retcode'))\n log.debug('Error output:\\n%s', out.get('stderr', \"N/A\"))\n\n raise CommandExecutionError(out['stderr'])\n",
"def _xfs_prune_output(out, uuid):\n '''\n Parse prune output.\n '''\n data = {}\n cnt = []\n cutpoint = False\n for line in [l.strip() for l in out.split(\"\\n\") if l]:\n if line.startswith(\"-\"):\n if cutpoint:\n break\n else:\n cutpoint = True\n continue\n\n if cutpoint:\n cnt.append(line)\n\n for kset in [e for e in cnt[1:] if ':' in e]:\n key, val = [t.strip() for t in kset.split(\":\", 1)]\n data[key.lower().replace(\" \", \"_\")] = val\n\n return data.get('uuid') == uuid and data or {}\n"
] |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2014 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
'''
Module for managing XFS file systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import time
import logging
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.data
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on POSIX-like systems
'''
return not salt.utils.platform.is_windows() \
and __grains__.get('kernel') == 'Linux'
def _verify_run(out, cmd=None):
'''
Crash to the log if command execution was not successful.
'''
if out.get("retcode", 0) and out['stderr']:
if cmd:
log.debug('Command: "%s"', cmd)
log.debug('Return code: %s', out.get('retcode'))
log.debug('Error output:\n%s', out.get('stderr', "N/A"))
raise CommandExecutionError(out['stderr'])
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
def _parse_xfs_info(data):
'''
Parse output from "xfs_info" or "xfs_growfs -n".
'''
ret = {}
spr = re.compile(r'\s+')
entry = None
for line in [spr.sub(" ", l).strip().replace(", ", " ") for l in data.split("\n")]:
if not line:
continue
nfo = _xfs_info_get_kv(line)
if not line.startswith("="):
entry = nfo.pop(0)
ret[entry[0]] = {'section': entry[(entry[1] != '***' and 1 or 0)]}
ret[entry[0]].update(dict(nfo))
return ret
def info(device):
'''
Get filesystem geometry information.
CLI Example:
.. code-block:: bash
salt '*' xfs.info /dev/sda1
'''
out = __salt__['cmd.run_all']("xfs_info {0}".format(device))
if out.get('stderr'):
raise CommandExecutionError(out['stderr'].replace("xfs_info:", "").strip())
return _parse_xfs_info(out['stdout'])
def _xfsdump_output(data):
'''
Parse CLI output of the xfsdump utility.
'''
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out['Session ID'] = line.split(" ")[-1]
elif line.startswith("session label:"):
out['Session label'] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out['Media size'] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out['Summary'] = ' '.join(summary)
return out
def dump(device, destination, level=0, label=None, noerase=None):
'''
Dump filesystem device to the media (file, tape etc).
Required parameters:
* **device**: XFS device, content of which to be dumped.
* **destination**: Specifies a dump destination.
Valid options are:
* **label**: Label of the dump. Otherwise automatically generated label is used.
* **level**: Specifies a dump level of 0 to 9.
* **noerase**: Pre-erase media.
Other options are not used in order to let ``xfsdump`` use its default
values, as they are most optimal. See the ``xfsdump(8)`` manpage for
a more complete description of these options.
CLI Example:
.. code-block:: bash
salt '*' xfs.dump /dev/sda1 /detination/on/the/client
salt '*' xfs.dump /dev/sda1 /detination/on/the/client label='Company accountancy'
salt '*' xfs.dump /dev/sda1 /detination/on/the/client noerase=True
'''
if not salt.utils.path.which("xfsdump"):
raise CommandExecutionError("Utility \"xfsdump\" has to be installed or missing.")
label = label and label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device),
time.localtime()).replace("'", '"')
cmd = ["xfsdump"]
cmd.append("-F") # Force
if not noerase:
cmd.append("-E") # pre-erase
cmd.append("-L '{0}'".format(label)) # Label
cmd.append("-l {0}".format(level)) # Dump level
cmd.append("-f {0}".format(destination)) # Media destination
cmd.append(device) # Device
cmd = ' '.join(cmd)
out = __salt__['cmd.run_all'](cmd)
_verify_run(out, cmd=cmd)
return _xfsdump_output(out['stdout'])
def _xr_to_keyset(line):
'''
Parse xfsrestore output keyset elements.
'''
tkns = [elm for elm in line.strip().split(":", 1) if elm]
if len(tkns) == 1:
return "'{0}': ".format(tkns[0])
else:
key, val = tkns
return "'{0}': '{1}',".format(key.strip(), val.strip())
def _xfs_inventory_output(out):
'''
Transform xfsrestore inventory data output to a Python dict source and evaluate it.
'''
data = []
out = [line for line in out.split("\n") if line.strip()]
# No inventory yet
if len(out) == 1 and 'restore status' in out[0].lower():
return {'restore_status': out[0]}
ident = 0
data.append("{")
for line in out[:-1]:
if len([elm for elm in line.strip().split(":") if elm]) == 1:
n_ident = len(re.sub("[^\t]", "", line))
if ident > n_ident:
for step in range(ident):
data.append("},")
ident = n_ident
data.append(_xr_to_keyset(line))
data.append("{")
else:
data.append(_xr_to_keyset(line))
for step in range(ident + 1):
data.append("},")
data.append("},")
# We are evaling into a python dict, a json load
# would be safer
data = eval('\n'.join(data))[0] # pylint: disable=W0123
data['restore_status'] = out[-1]
return data
def inventory():
'''
Display XFS dump inventory without restoration.
CLI Example:
.. code-block:: bash
salt '*' xfs.inventory
'''
out = __salt__['cmd.run_all']("xfsrestore -I")
_verify_run(out)
return _xfs_inventory_output(out['stdout'])
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {}
def _blkid_output(out):
'''
Parse blkid output.
'''
flt = lambda data: [el for el in data if el.strip()]
data = {}
for dev_meta in flt(out.split("\n\n")):
dev = {}
for items in flt(dev_meta.strip().split("\n")):
key, val = items.split("=", 1)
dev[key.lower()] = val
if dev.pop("type") == "xfs":
dev['label'] = dev.get('label')
data[dev.pop("devname")] = dev
mounts = _get_mounts()
for device in six.iterkeys(mounts):
if data.get(device):
data[device].update(mounts[device])
return data
def devices():
'''
Get known XFS formatted devices on the system.
CLI Example:
.. code-block:: bash
salt '*' xfs.devices
'''
out = __salt__['cmd.run_all']("blkid -o export")
_verify_run(out)
return _blkid_output(out['stdout'])
def _xfs_estimate_output(out):
'''
Parse xfs_estimate output.
'''
spc = re.compile(r"\s+")
data = {}
for line in [l for l in out.split("\n") if l.strip()][1:]:
directory, bsize, blocks, megabytes, logsize = spc.sub(" ", line).split(" ")
data[directory] = {
'block _size': bsize,
'blocks': blocks,
'megabytes': megabytes,
'logsize': logsize,
}
return data
def estimate(path):
'''
Estimate the space that an XFS filesystem will take.
For each directory estimate the space that directory would take
if it were copied to an XFS filesystem.
Estimation does not cross mount points.
CLI Example:
.. code-block:: bash
salt '*' xfs.estimate /path/to/file
salt '*' xfs.estimate /path/to/dir/*
'''
if not os.path.exists(path):
raise CommandExecutionError("Path \"{0}\" was not found.".format(path))
out = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
_verify_run(out)
return _xfs_estimate_output(out["stdout"])
def mkfs(device, label=None, ssize=None, noforce=None,
bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
'''
Create a file system on the specified device. By default wipes out with force.
General options:
* **label**: Specify volume label.
* **ssize**: Specify the fundamental sector size of the filesystem.
* **noforce**: Do not force create filesystem, if disk is already formatted.
Filesystem geometry options:
* **bso**: Block size options.
* **gmo**: Global metadata options.
* **dso**: Data section options. These options specify the location, size,
and other parameters of the data section of the filesystem.
* **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
* **lso**: Log section options.
* **nmo**: Naming options.
* **rso**: Realtime section options.
See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.
CLI Example:
.. code-block:: bash
salt '*' xfs.mkfs /dev/sda1
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
'''
getopts = lambda args: dict(((args and ("=" in args)
and args or None)) and [kw.split("=") for kw in args.split(",")] or [])
cmd = ["mkfs.xfs"]
if label:
cmd.append("-L")
cmd.append("'{0}'".format(label))
if ssize:
cmd.append("-s")
cmd.append(ssize)
for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
("-d", dso), ("-l", lso), ("-r", rso)]:
try:
if getopts(opts):
cmd.append(switch)
cmd.append(opts)
except Exception:
raise CommandExecutionError("Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))
if not noforce:
cmd.append("-f")
cmd.append(device)
cmd = ' '.join(cmd)
out = __salt__['cmd.run_all'](cmd)
_verify_run(out, cmd=cmd)
return _parse_xfs_info(out['stdout'])
def modify(device, label=None, lazy_counting=None, uuid=None):
'''
Modify parameters of an XFS filesystem.
CLI Example:
.. code-block:: bash
salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
salt '*' xfs.modify /dev/sda1 uuid=False
salt '*' xfs.modify /dev/sda1 uuid=True
'''
if not label and lazy_counting is None and uuid is None:
raise CommandExecutionError("Nothing specified for modification for \"{0}\" device".format(device))
cmd = ['xfs_admin']
if label:
cmd.append("-L")
cmd.append("'{0}'".format(label))
if lazy_counting is False:
cmd.append("-c")
cmd.append("0")
elif lazy_counting:
cmd.append("-c")
cmd.append("1")
if uuid is False:
cmd.append("-U")
cmd.append("nil")
elif uuid:
cmd.append("-U")
cmd.append("generate")
cmd.append(device)
cmd = ' '.join(cmd)
_verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)
out = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
_verify_run(out)
return _blkid_output(out['stdout'])
def _get_mounts():
'''
List mounted filesystems.
'''
mounts = {}
with salt.utils.files.fopen("/proc/mounts") as fhr:
for line in salt.utils.data.decode(fhr.readlines()):
device, mntpnt, fstype, options, fs_freq, fs_passno = line.strip().split(" ")
if fstype != 'xfs':
continue
mounts[device] = {
'mount_point': mntpnt,
'options': options.split(","),
}
return mounts
def defragment(device):
'''
Defragment mounted XFS filesystem.
In order to mount a filesystem, device should be properly mounted and writable.
CLI Example:
.. code-block:: bash
salt '*' xfs.defragment /dev/sda1
'''
if device == '/':
raise CommandExecutionError("Root is not a device.")
if not _get_mounts().get(device):
raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))
out = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
_verify_run(out)
return {
'log': out['stdout']
}
|
saltstack/salt
|
salt/modules/xfs.py
|
_xfs_estimate_output
|
python
|
def _xfs_estimate_output(out):
'''
Parse xfs_estimate output.
'''
spc = re.compile(r"\s+")
data = {}
for line in [l for l in out.split("\n") if l.strip()][1:]:
directory, bsize, blocks, megabytes, logsize = spc.sub(" ", line).split(" ")
data[directory] = {
'block _size': bsize,
'blocks': blocks,
'megabytes': megabytes,
'logsize': logsize,
}
return data
|
Parse xfs_estimate output.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L360-L375
| null |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2014 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
'''
Module for managing XFS file systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import time
import logging
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.data
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on POSIX-like systems
'''
return not salt.utils.platform.is_windows() \
and __grains__.get('kernel') == 'Linux'
def _verify_run(out, cmd=None):
'''
Crash to the log if command execution was not successful.
'''
if out.get("retcode", 0) and out['stderr']:
if cmd:
log.debug('Command: "%s"', cmd)
log.debug('Return code: %s', out.get('retcode'))
log.debug('Error output:\n%s', out.get('stderr', "N/A"))
raise CommandExecutionError(out['stderr'])
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
def _parse_xfs_info(data):
'''
Parse output from "xfs_info" or "xfs_growfs -n".
'''
ret = {}
spr = re.compile(r'\s+')
entry = None
for line in [spr.sub(" ", l).strip().replace(", ", " ") for l in data.split("\n")]:
if not line:
continue
nfo = _xfs_info_get_kv(line)
if not line.startswith("="):
entry = nfo.pop(0)
ret[entry[0]] = {'section': entry[(entry[1] != '***' and 1 or 0)]}
ret[entry[0]].update(dict(nfo))
return ret
def info(device):
'''
Get filesystem geometry information.
CLI Example:
.. code-block:: bash
salt '*' xfs.info /dev/sda1
'''
out = __salt__['cmd.run_all']("xfs_info {0}".format(device))
if out.get('stderr'):
raise CommandExecutionError(out['stderr'].replace("xfs_info:", "").strip())
return _parse_xfs_info(out['stdout'])
def _xfsdump_output(data):
'''
Parse CLI output of the xfsdump utility.
'''
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out['Session ID'] = line.split(" ")[-1]
elif line.startswith("session label:"):
out['Session label'] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out['Media size'] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out['Summary'] = ' '.join(summary)
return out
def dump(device, destination, level=0, label=None, noerase=None):
'''
Dump filesystem device to the media (file, tape etc).
Required parameters:
* **device**: XFS device, content of which to be dumped.
* **destination**: Specifies a dump destination.
Valid options are:
* **label**: Label of the dump. Otherwise automatically generated label is used.
* **level**: Specifies a dump level of 0 to 9.
* **noerase**: Pre-erase media.
Other options are not used in order to let ``xfsdump`` use its default
values, as they are most optimal. See the ``xfsdump(8)`` manpage for
a more complete description of these options.
CLI Example:
.. code-block:: bash
salt '*' xfs.dump /dev/sda1 /detination/on/the/client
salt '*' xfs.dump /dev/sda1 /detination/on/the/client label='Company accountancy'
salt '*' xfs.dump /dev/sda1 /detination/on/the/client noerase=True
'''
if not salt.utils.path.which("xfsdump"):
raise CommandExecutionError("Utility \"xfsdump\" has to be installed or missing.")
label = label and label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device),
time.localtime()).replace("'", '"')
cmd = ["xfsdump"]
cmd.append("-F") # Force
if not noerase:
cmd.append("-E") # pre-erase
cmd.append("-L '{0}'".format(label)) # Label
cmd.append("-l {0}".format(level)) # Dump level
cmd.append("-f {0}".format(destination)) # Media destination
cmd.append(device) # Device
cmd = ' '.join(cmd)
out = __salt__['cmd.run_all'](cmd)
_verify_run(out, cmd=cmd)
return _xfsdump_output(out['stdout'])
def _xr_to_keyset(line):
'''
Parse xfsrestore output keyset elements.
'''
tkns = [elm for elm in line.strip().split(":", 1) if elm]
if len(tkns) == 1:
return "'{0}': ".format(tkns[0])
else:
key, val = tkns
return "'{0}': '{1}',".format(key.strip(), val.strip())
def _xfs_inventory_output(out):
'''
Transform xfsrestore inventory data output to a Python dict source and evaluate it.
'''
data = []
out = [line for line in out.split("\n") if line.strip()]
# No inventory yet
if len(out) == 1 and 'restore status' in out[0].lower():
return {'restore_status': out[0]}
ident = 0
data.append("{")
for line in out[:-1]:
if len([elm for elm in line.strip().split(":") if elm]) == 1:
n_ident = len(re.sub("[^\t]", "", line))
if ident > n_ident:
for step in range(ident):
data.append("},")
ident = n_ident
data.append(_xr_to_keyset(line))
data.append("{")
else:
data.append(_xr_to_keyset(line))
for step in range(ident + 1):
data.append("},")
data.append("},")
# We are evaling into a python dict, a json load
# would be safer
data = eval('\n'.join(data))[0] # pylint: disable=W0123
data['restore_status'] = out[-1]
return data
def inventory():
'''
Display XFS dump inventory without restoration.
CLI Example:
.. code-block:: bash
salt '*' xfs.inventory
'''
out = __salt__['cmd.run_all']("xfsrestore -I")
_verify_run(out)
return _xfs_inventory_output(out['stdout'])
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {}
def prune_dump(sessionid):
'''
Prunes the dump session identified by the given session id.
CLI Example:
.. code-block:: bash
salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
'''
out = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
_verify_run(out)
data = _xfs_prune_output(out['stdout'], sessionid)
if data:
return data
raise CommandExecutionError("Session UUID \"{0}\" was not found.".format(sessionid))
def _blkid_output(out):
'''
Parse blkid output.
'''
flt = lambda data: [el for el in data if el.strip()]
data = {}
for dev_meta in flt(out.split("\n\n")):
dev = {}
for items in flt(dev_meta.strip().split("\n")):
key, val = items.split("=", 1)
dev[key.lower()] = val
if dev.pop("type") == "xfs":
dev['label'] = dev.get('label')
data[dev.pop("devname")] = dev
mounts = _get_mounts()
for device in six.iterkeys(mounts):
if data.get(device):
data[device].update(mounts[device])
return data
def devices():
'''
Get known XFS formatted devices on the system.
CLI Example:
.. code-block:: bash
salt '*' xfs.devices
'''
out = __salt__['cmd.run_all']("blkid -o export")
_verify_run(out)
return _blkid_output(out['stdout'])
def estimate(path):
'''
Estimate the space that an XFS filesystem will take.
For each directory estimate the space that directory would take
if it were copied to an XFS filesystem.
Estimation does not cross mount points.
CLI Example:
.. code-block:: bash
salt '*' xfs.estimate /path/to/file
salt '*' xfs.estimate /path/to/dir/*
'''
if not os.path.exists(path):
raise CommandExecutionError("Path \"{0}\" was not found.".format(path))
out = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
_verify_run(out)
return _xfs_estimate_output(out["stdout"])
def mkfs(device, label=None, ssize=None, noforce=None,
bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
'''
Create a file system on the specified device. By default wipes out with force.
General options:
* **label**: Specify volume label.
* **ssize**: Specify the fundamental sector size of the filesystem.
* **noforce**: Do not force create filesystem, if disk is already formatted.
Filesystem geometry options:
* **bso**: Block size options.
* **gmo**: Global metadata options.
* **dso**: Data section options. These options specify the location, size,
and other parameters of the data section of the filesystem.
* **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
* **lso**: Log section options.
* **nmo**: Naming options.
* **rso**: Realtime section options.
See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.
CLI Example:
.. code-block:: bash
salt '*' xfs.mkfs /dev/sda1
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
'''
getopts = lambda args: dict(((args and ("=" in args)
and args or None)) and [kw.split("=") for kw in args.split(",")] or [])
cmd = ["mkfs.xfs"]
if label:
cmd.append("-L")
cmd.append("'{0}'".format(label))
if ssize:
cmd.append("-s")
cmd.append(ssize)
for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
("-d", dso), ("-l", lso), ("-r", rso)]:
try:
if getopts(opts):
cmd.append(switch)
cmd.append(opts)
except Exception:
raise CommandExecutionError("Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))
if not noforce:
cmd.append("-f")
cmd.append(device)
cmd = ' '.join(cmd)
out = __salt__['cmd.run_all'](cmd)
_verify_run(out, cmd=cmd)
return _parse_xfs_info(out['stdout'])
def modify(device, label=None, lazy_counting=None, uuid=None):
'''
Modify parameters of an XFS filesystem.
CLI Example:
.. code-block:: bash
salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
salt '*' xfs.modify /dev/sda1 uuid=False
salt '*' xfs.modify /dev/sda1 uuid=True
'''
if not label and lazy_counting is None and uuid is None:
raise CommandExecutionError("Nothing specified for modification for \"{0}\" device".format(device))
cmd = ['xfs_admin']
if label:
cmd.append("-L")
cmd.append("'{0}'".format(label))
if lazy_counting is False:
cmd.append("-c")
cmd.append("0")
elif lazy_counting:
cmd.append("-c")
cmd.append("1")
if uuid is False:
cmd.append("-U")
cmd.append("nil")
elif uuid:
cmd.append("-U")
cmd.append("generate")
cmd.append(device)
cmd = ' '.join(cmd)
_verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)
out = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
_verify_run(out)
return _blkid_output(out['stdout'])
def _get_mounts():
'''
List mounted filesystems.
'''
mounts = {}
with salt.utils.files.fopen("/proc/mounts") as fhr:
for line in salt.utils.data.decode(fhr.readlines()):
device, mntpnt, fstype, options, fs_freq, fs_passno = line.strip().split(" ")
if fstype != 'xfs':
continue
mounts[device] = {
'mount_point': mntpnt,
'options': options.split(","),
}
return mounts
def defragment(device):
'''
Defragment mounted XFS filesystem.
In order to mount a filesystem, device should be properly mounted and writable.
CLI Example:
.. code-block:: bash
salt '*' xfs.defragment /dev/sda1
'''
if device == '/':
raise CommandExecutionError("Root is not a device.")
if not _get_mounts().get(device):
raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))
out = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
_verify_run(out)
return {
'log': out['stdout']
}
|
saltstack/salt
|
salt/modules/xfs.py
|
estimate
|
python
|
def estimate(path):
'''
Estimate the space that an XFS filesystem will take.
For each directory estimate the space that directory would take
if it were copied to an XFS filesystem.
Estimation does not cross mount points.
CLI Example:
.. code-block:: bash
salt '*' xfs.estimate /path/to/file
salt '*' xfs.estimate /path/to/dir/*
'''
if not os.path.exists(path):
raise CommandExecutionError("Path \"{0}\" was not found.".format(path))
out = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
_verify_run(out)
return _xfs_estimate_output(out["stdout"])
|
Estimate the space that an XFS filesystem will take.
For each directory estimate the space that directory would take
if it were copied to an XFS filesystem.
Estimation does not cross mount points.
CLI Example:
.. code-block:: bash
salt '*' xfs.estimate /path/to/file
salt '*' xfs.estimate /path/to/dir/*
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L378-L398
|
[
"def _verify_run(out, cmd=None):\n '''\n Crash to the log if command execution was not successful.\n '''\n if out.get(\"retcode\", 0) and out['stderr']:\n if cmd:\n log.debug('Command: \"%s\"', cmd)\n\n log.debug('Return code: %s', out.get('retcode'))\n log.debug('Error output:\\n%s', out.get('stderr', \"N/A\"))\n\n raise CommandExecutionError(out['stderr'])\n",
"def _xfs_estimate_output(out):\n '''\n Parse xfs_estimate output.\n '''\n spc = re.compile(r\"\\s+\")\n data = {}\n for line in [l for l in out.split(\"\\n\") if l.strip()][1:]:\n directory, bsize, blocks, megabytes, logsize = spc.sub(\" \", line).split(\" \")\n data[directory] = {\n 'block _size': bsize,\n 'blocks': blocks,\n 'megabytes': megabytes,\n 'logsize': logsize,\n }\n\n return data\n"
] |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2014 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
'''
Module for managing XFS file systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import time
import logging
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.data
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Load only on Linux (non-Windows POSIX) systems.
    '''
    if salt.utils.platform.is_windows():
        return False
    return __grains__.get('kernel') == 'Linux'
def _verify_run(out, cmd=None):
'''
Crash to the log if command execution was not successful.
'''
if out.get("retcode", 0) and out['stderr']:
if cmd:
log.debug('Command: "%s"', cmd)
log.debug('Return code: %s', out.get('retcode'))
log.debug('Error output:\n%s', out.get('stderr', "N/A"))
raise CommandExecutionError(out['stderr'])
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
def _parse_xfs_info(data):
    '''
    Parse output from "xfs_info" or "xfs_growfs -n".

    Returns a dict keyed by section name (e.g. "meta-data", "naming");
    each value is a dict of that section's key/value attributes plus a
    'section' entry holding the section's own value (or its name when
    the line carried no value of its own).
    '''
    ret = {}
    spr = re.compile(r'\s+')
    entry = None
    # Collapse whitespace runs and drop ", " separators so each line
    # becomes a plain space-separated token list for _xfs_info_get_kv.
    for line in [spr.sub(" ", l).strip().replace(", ", " ") for l in data.split("\n")]:
        if not line:
            continue
        nfo = _xfs_info_get_kv(line)
        # A line not starting with "=" opens a new section; continuation
        # lines (starting with "=") extend the current section.
        if not line.startswith("="):
            entry = nfo.pop(0)
            # _xfs_info_get_kv marks a valueless section with '***';
            # fall back to the section name itself in that case.
            ret[entry[0]] = {'section': entry[(entry[1] != '***' and 1 or 0)]}
        ret[entry[0]].update(dict(nfo))
    return ret
def info(device):
    '''
    Get filesystem geometry information.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.info /dev/sda1
    '''
    result = __salt__['cmd.run_all']("xfs_info {0}".format(device))
    stderr = result.get('stderr')
    if stderr:
        # Strip the tool's own "xfs_info:" prefix from the error message.
        raise CommandExecutionError(stderr.replace("xfs_info:", "").strip())
    return _parse_xfs_info(result['stdout'])
def _xfsdump_output(data):
'''
Parse CLI output of the xfsdump utility.
'''
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out['Session ID'] = line.split(" ")[-1]
elif line.startswith("session label:"):
out['Session label'] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out['Media size'] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out['Summary'] = ' '.join(summary)
return out
def dump(device, destination, level=0, label=None, noerase=None):
    '''
    Dump filesystem device to the media (file, tape etc).

    Required parameters:

    * **device**: XFS device, content of which to be dumped.
    * **destination**: Specifies a dump destination.

    Valid options are:

    * **label**: Label of the dump. Otherwise automatically generated label is used.
    * **level**: Specifies a dump level of 0 to 9.
    * **noerase**: Pre-erase media.

    Other options are not used in order to let ``xfsdump`` use its default
    values, as they are most optimal. See the ``xfsdump(8)`` manpage for
    a more complete description of these options.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.dump /dev/sda1 /destination/on/the/client
        salt '*' xfs.dump /dev/sda1 /destination/on/the/client label='Company accountancy'
        salt '*' xfs.dump /dev/sda1 /destination/on/the/client noerase=True
    '''
    if not salt.utils.path.which("xfsdump"):
        raise CommandExecutionError("Utility \"xfsdump\" has to be installed or missing.")

    # Default label: timestamped description of the device; single quotes
    # are swapped for double quotes so the label survives the single-quoted
    # shell argument built below.
    label = label and label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device),
                                             time.localtime()).replace("'", '"')

    cmd = ["xfsdump"]
    cmd.append("-F")  # Force
    if not noerase:
        cmd.append("-E")  # pre-erase
    cmd.append("-L '{0}'".format(label))  # Label
    cmd.append("-l {0}".format(level))  # Dump level
    cmd.append("-f {0}".format(destination))  # Media destination
    cmd.append(device)  # Device

    # NOTE(review): the command is joined into one shell string; device and
    # destination values containing shell metacharacters are not escaped --
    # confirm callers pass trusted paths.
    cmd = ' '.join(cmd)
    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)

    return _xfsdump_output(out['stdout'])
def _xr_to_keyset(line):
'''
Parse xfsrestore output keyset elements.
'''
tkns = [elm for elm in line.strip().split(":", 1) if elm]
if len(tkns) == 1:
return "'{0}': ".format(tkns[0])
else:
key, val = tkns
return "'{0}': '{1}',".format(key.strip(), val.strip())
def _xfs_inventory_output(out):
    '''
    Transform xfsrestore inventory data output to a Python dict source
    and evaluate it.

    The tab-indented inventory listing is rewritten line by line into the
    text of a nested dict literal (tab depth = nesting depth), which is
    then parsed with ``ast.literal_eval``. The trailing line of the output
    is kept separately under 'restore_status'.
    '''
    import ast

    data = []
    out = [line for line in out.split("\n") if line.strip()]

    # No inventory yet
    if len(out) == 1 and 'restore status' in out[0].lower():
        return {'restore_status': out[0]}

    ident = 0
    data.append("{")
    for line in out[:-1]:
        # A bare "key:" line (single token) opens a nested block.
        if len([elm for elm in line.strip().split(":") if elm]) == 1:
            # Nesting depth is the number of leading tabs.
            n_ident = len(re.sub("[^\t]", "", line))
            if ident > n_ident:
                for step in range(ident):
                    data.append("},")
            ident = n_ident
            data.append(_xr_to_keyset(line))
            data.append("{")
        else:
            data.append(_xr_to_keyset(line))
    for step in range(ident + 1):
        data.append("},")
    data.append("},")

    # ast.literal_eval only accepts literals, so untrusted xfsrestore
    # output cannot execute arbitrary expressions (unlike eval()).
    data = ast.literal_eval('\n'.join(data))[0]
    data['restore_status'] = out[-1]
    return data
def inventory():
    '''
    Display XFS dump inventory without restoration.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.inventory
    '''
    ret = __salt__['cmd.run_all']("xfsrestore -I")
    _verify_run(ret)
    return _xfs_inventory_output(ret['stdout'])
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {}
def prune_dump(sessionid):
    '''
    Prunes the dump session identified by the given session id.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
    '''
    result = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
    _verify_run(result)

    session = _xfs_prune_output(result['stdout'], sessionid)
    if not session:
        raise CommandExecutionError("Session UUID \"{0}\" was not found.".format(sessionid))
    return session
def _blkid_output(out):
    '''
    Parse ``blkid -o export`` output into a dict keyed by device name,
    keeping only XFS devices and merging in current mount information.
    '''
    devices_map = {}
    # Records are separated by blank lines; each record is KEY=value lines.
    for block in out.split("\n\n"):
        if not block.strip():
            continue
        entry = {}
        for kv_line in block.strip().split("\n"):
            if not kv_line.strip():
                continue
            key, value = kv_line.split("=", 1)
            entry[key.lower()] = value
        if entry.pop("type") == "xfs":
            entry['label'] = entry.get('label')  # ensure the key exists
            devices_map[entry.pop("devname")] = entry

    # Attach mount point/options for devices that are currently mounted.
    mounts = _get_mounts()
    for device in six.iterkeys(mounts):
        if devices_map.get(device):
            devices_map[device].update(mounts[device])

    return devices_map
def devices():
    '''
    Get known XFS formatted devices on the system.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.devices
    '''
    result = __salt__['cmd.run_all']("blkid -o export")
    _verify_run(result)
    return _blkid_output(result['stdout'])
def _xfs_estimate_output(out):
'''
Parse xfs_estimate output.
'''
spc = re.compile(r"\s+")
data = {}
for line in [l for l in out.split("\n") if l.strip()][1:]:
directory, bsize, blocks, megabytes, logsize = spc.sub(" ", line).split(" ")
data[directory] = {
'block _size': bsize,
'blocks': blocks,
'megabytes': megabytes,
'logsize': logsize,
}
return data
def mkfs(device, label=None, ssize=None, noforce=None,
         bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
    '''
    Create a file system on the specified device. By default wipes out with force.

    General options:

    * **label**: Specify volume label.
    * **ssize**: Specify the fundamental sector size of the filesystem.
    * **noforce**: Do not force create filesystem, if disk is already formatted.

    Filesystem geometry options:

    * **bso**: Block size options.
    * **gmo**: Global metadata options.
    * **dso**: Data section options. These options specify the location, size,
               and other parameters of the data section of the filesystem.
    * **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
    * **lso**: Log section options.
    * **nmo**: Naming options.
    * **rso**: Realtime section options.

    See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.mkfs /dev/sda1
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
    '''
    # Parse "k1=v1,k2=v2" option strings into a dict; yields {} for empty
    # or "="-free input, and raises (caught below) on malformed pairs.
    getopts = lambda args: dict(((args and ("=" in args)
                                  and args or None)) and [kw.split("=") for kw in args.split(",")] or [])
    cmd = ["mkfs.xfs"]
    if label:
        cmd.append("-L")
        cmd.append("'{0}'".format(label))
    if ssize:
        cmd.append("-s")
        cmd.append(ssize)

    # Each geometry option group maps to its mkfs.xfs switch; the option
    # string is passed through verbatim once it parses as key=value pairs.
    for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
                         ("-d", dso), ("-l", lso), ("-r", rso)]:
        try:
            if getopts(opts):
                cmd.append(switch)
                cmd.append(opts)
        except Exception:
            # dict() raises when a pair does not split into exactly two parts
            raise CommandExecutionError("Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))
    if not noforce:
        cmd.append("-f")  # force: overwrite an existing filesystem
    cmd.append(device)

    cmd = ' '.join(cmd)
    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)

    return _parse_xfs_info(out['stdout'])
def modify(device, label=None, lazy_counting=None, uuid=None):
    '''
    Modify parameters of an XFS filesystem.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
        salt '*' xfs.modify /dev/sda1 uuid=False
        salt '*' xfs.modify /dev/sda1 uuid=True
    '''
    if not label and lazy_counting is None and uuid is None:
        raise CommandExecutionError("Nothing specified for modification for \"{0}\" device".format(device))

    cmd = ['xfs_admin']
    if label:
        cmd.extend(["-L", "'{0}'".format(label)])
    # -c 0 disables lazy counters, -c 1 enables them
    if lazy_counting is False:
        cmd.extend(["-c", "0"])
    elif lazy_counting:
        cmd.extend(["-c", "1"])
    # -U nil clears the UUID, -U generate assigns a fresh one
    if uuid is False:
        cmd.extend(["-U", "nil"])
    elif uuid:
        cmd.extend(["-U", "generate"])
    cmd.append(device)

    cmd = ' '.join(cmd)
    _verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)

    out = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
    _verify_run(out)

    return _blkid_output(out['stdout'])
def _get_mounts():
    '''
    Return currently mounted XFS filesystems as a dict:
    device -> {'mount_point': ..., 'options': [...]}.
    '''
    xfs_mounts = {}
    with salt.utils.files.fopen("/proc/mounts") as proc_mounts:
        for entry in salt.utils.data.decode(proc_mounts.readlines()):
            # /proc/mounts fields: device, mount point, fstype, options,
            # fs_freq, fs_passno
            device, mount_point, fstype, options, _freq, _passno = entry.strip().split(" ")
            if fstype != 'xfs':
                continue
            xfs_mounts[device] = {
                'mount_point': mount_point,
                'options': options.split(","),
            }
    return xfs_mounts
def defragment(device):
    '''
    Defragment mounted XFS filesystem.
    In order to mount a filesystem, device should be properly mounted and writable.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.defragment /dev/sda1
    '''
    if device == '/':
        raise CommandExecutionError("Root is not a device.")
    if not _get_mounts().get(device):
        raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))

    result = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
    _verify_run(result)
    return {
        'log': result['stdout']
    }
|
saltstack/salt
|
salt/modules/xfs.py
|
mkfs
|
python
|
def mkfs(device, label=None, ssize=None, noforce=None,
bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
'''
Create a file system on the specified device. By default wipes out with force.
General options:
* **label**: Specify volume label.
* **ssize**: Specify the fundamental sector size of the filesystem.
* **noforce**: Do not force create filesystem, if disk is already formatted.
Filesystem geometry options:
* **bso**: Block size options.
* **gmo**: Global metadata options.
* **dso**: Data section options. These options specify the location, size,
and other parameters of the data section of the filesystem.
* **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
* **lso**: Log section options.
* **nmo**: Naming options.
* **rso**: Realtime section options.
See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.
CLI Example:
.. code-block:: bash
salt '*' xfs.mkfs /dev/sda1
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
'''
getopts = lambda args: dict(((args and ("=" in args)
and args or None)) and [kw.split("=") for kw in args.split(",")] or [])
cmd = ["mkfs.xfs"]
if label:
cmd.append("-L")
cmd.append("'{0}'".format(label))
if ssize:
cmd.append("-s")
cmd.append(ssize)
for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
("-d", dso), ("-l", lso), ("-r", rso)]:
try:
if getopts(opts):
cmd.append(switch)
cmd.append(opts)
except Exception:
raise CommandExecutionError("Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))
if not noforce:
cmd.append("-f")
cmd.append(device)
cmd = ' '.join(cmd)
out = __salt__['cmd.run_all'](cmd)
_verify_run(out, cmd=cmd)
return _parse_xfs_info(out['stdout'])
|
Create a file system on the specified device. By default wipes out with force.
General options:
* **label**: Specify volume label.
* **ssize**: Specify the fundamental sector size of the filesystem.
* **noforce**: Do not force create filesystem, if disk is already formatted.
Filesystem geometry options:
* **bso**: Block size options.
* **gmo**: Global metadata options.
* **dso**: Data section options. These options specify the location, size,
and other parameters of the data section of the filesystem.
* **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
* **lso**: Log section options.
* **nmo**: Naming options.
* **rso**: Realtime section options.
See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.
CLI Example:
.. code-block:: bash
salt '*' xfs.mkfs /dev/sda1
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L401-L462
|
[
"def _verify_run(out, cmd=None):\n '''\n Crash to the log if command execution was not successful.\n '''\n if out.get(\"retcode\", 0) and out['stderr']:\n if cmd:\n log.debug('Command: \"%s\"', cmd)\n\n log.debug('Return code: %s', out.get('retcode'))\n log.debug('Error output:\\n%s', out.get('stderr', \"N/A\"))\n\n raise CommandExecutionError(out['stderr'])\n",
"def _parse_xfs_info(data):\n '''\n Parse output from \"xfs_info\" or \"xfs_growfs -n\".\n '''\n ret = {}\n spr = re.compile(r'\\s+')\n entry = None\n for line in [spr.sub(\" \", l).strip().replace(\", \", \" \") for l in data.split(\"\\n\")]:\n if not line:\n continue\n nfo = _xfs_info_get_kv(line)\n if not line.startswith(\"=\"):\n entry = nfo.pop(0)\n ret[entry[0]] = {'section': entry[(entry[1] != '***' and 1 or 0)]}\n ret[entry[0]].update(dict(nfo))\n\n return ret\n",
"getopts = lambda args: dict(((args and (\"=\" in args)\n and args or None)) and [kw.split(\"=\") for kw in args.split(\",\")] or [])\n"
] |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2014 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
'''
Module for managing XFS file systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import time
import logging
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.data
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on POSIX-like systems
'''
return not salt.utils.platform.is_windows() \
and __grains__.get('kernel') == 'Linux'
def _verify_run(out, cmd=None):
'''
Crash to the log if command execution was not successful.
'''
if out.get("retcode", 0) and out['stderr']:
if cmd:
log.debug('Command: "%s"', cmd)
log.debug('Return code: %s', out.get('retcode'))
log.debug('Error output:\n%s', out.get('stderr', "N/A"))
raise CommandExecutionError(out['stderr'])
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
def _parse_xfs_info(data):
'''
Parse output from "xfs_info" or "xfs_growfs -n".
'''
ret = {}
spr = re.compile(r'\s+')
entry = None
for line in [spr.sub(" ", l).strip().replace(", ", " ") for l in data.split("\n")]:
if not line:
continue
nfo = _xfs_info_get_kv(line)
if not line.startswith("="):
entry = nfo.pop(0)
ret[entry[0]] = {'section': entry[(entry[1] != '***' and 1 or 0)]}
ret[entry[0]].update(dict(nfo))
return ret
def info(device):
'''
Get filesystem geometry information.
CLI Example:
.. code-block:: bash
salt '*' xfs.info /dev/sda1
'''
out = __salt__['cmd.run_all']("xfs_info {0}".format(device))
if out.get('stderr'):
raise CommandExecutionError(out['stderr'].replace("xfs_info:", "").strip())
return _parse_xfs_info(out['stdout'])
def _xfsdump_output(data):
'''
Parse CLI output of the xfsdump utility.
'''
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out['Session ID'] = line.split(" ")[-1]
elif line.startswith("session label:"):
out['Session label'] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out['Media size'] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out['Summary'] = ' '.join(summary)
return out
def dump(device, destination, level=0, label=None, noerase=None):
'''
Dump filesystem device to the media (file, tape etc).
Required parameters:
* **device**: XFS device, content of which to be dumped.
* **destination**: Specifies a dump destination.
Valid options are:
* **label**: Label of the dump. Otherwise automatically generated label is used.
* **level**: Specifies a dump level of 0 to 9.
* **noerase**: Pre-erase media.
Other options are not used in order to let ``xfsdump`` use its default
values, as they are most optimal. See the ``xfsdump(8)`` manpage for
a more complete description of these options.
CLI Example:
.. code-block:: bash
salt '*' xfs.dump /dev/sda1 /detination/on/the/client
salt '*' xfs.dump /dev/sda1 /detination/on/the/client label='Company accountancy'
salt '*' xfs.dump /dev/sda1 /detination/on/the/client noerase=True
'''
if not salt.utils.path.which("xfsdump"):
raise CommandExecutionError("Utility \"xfsdump\" has to be installed or missing.")
label = label and label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device),
time.localtime()).replace("'", '"')
cmd = ["xfsdump"]
cmd.append("-F") # Force
if not noerase:
cmd.append("-E") # pre-erase
cmd.append("-L '{0}'".format(label)) # Label
cmd.append("-l {0}".format(level)) # Dump level
cmd.append("-f {0}".format(destination)) # Media destination
cmd.append(device) # Device
cmd = ' '.join(cmd)
out = __salt__['cmd.run_all'](cmd)
_verify_run(out, cmd=cmd)
return _xfsdump_output(out['stdout'])
def _xr_to_keyset(line):
'''
Parse xfsrestore output keyset elements.
'''
tkns = [elm for elm in line.strip().split(":", 1) if elm]
if len(tkns) == 1:
return "'{0}': ".format(tkns[0])
else:
key, val = tkns
return "'{0}': '{1}',".format(key.strip(), val.strip())
def _xfs_inventory_output(out):
    '''
    Transform xfsrestore inventory data output to a Python dict source and
    evaluate it.

    The textual tree printed by "xfsrestore -I" is rewritten into a nested
    dict literal (via _xr_to_keyset) and parsed with ast.literal_eval.
    '''
    import ast  # Local import keeps this fix self-contained.

    data = []
    out = [line for line in out.split("\n") if line.strip()]

    # No inventory yet
    if len(out) == 1 and 'restore status' in out[0].lower():
        return {'restore_status': out[0]}

    ident = 0
    data.append("{")
    for line in out[:-1]:
        if len([elm for elm in line.strip().split(":") if elm]) == 1:
            # Section header: its nesting depth is encoded by leading tabs.
            n_ident = len(re.sub("[^\t]", "", line))
            if ident > n_ident:
                # Dedent: close the dict literals opened so far.
                for step in range(ident):
                    data.append("},")
                ident = n_ident
            data.append(_xr_to_keyset(line))
            data.append("{")
        else:
            data.append(_xr_to_keyset(line))
    for step in range(ident + 1):
        data.append("},")
    data.append("},")

    # Use ast.literal_eval instead of eval(): it only accepts Python
    # literals, so malformed or malicious inventory text cannot execute
    # arbitrary code.  The generated source is purely dict/str literals.
    data = ast.literal_eval('\n'.join(data))[0]
    data['restore_status'] = out[-1]
    return data
def inventory():
    '''
    Display XFS dump inventory without restoration.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.inventory
    '''
    result = __salt__['cmd.run_all']("xfsrestore -I")
    _verify_run(result)
    return _xfs_inventory_output(result['stdout'])
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {}
def prune_dump(sessionid):
    '''
    Prunes the dump session identified by the given session id.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
    '''
    result = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
    _verify_run(result)

    parsed = _xfs_prune_output(result['stdout'], sessionid)
    if not parsed:
        raise CommandExecutionError("Session UUID \"{0}\" was not found.".format(sessionid))
    return parsed
def _blkid_output(out):
    '''
    Parse "blkid -o export" output into a dict keyed by device name.

    Only XFS devices are kept; mount information from /proc/mounts is
    merged in for devices that are currently mounted.
    '''
    non_empty = lambda data: [el for el in data if el.strip()]

    devices_info = {}
    for blk in non_empty(out.split("\n\n")):
        entry = {}
        for kv in non_empty(blk.strip().split("\n")):
            key, val = kv.split("=", 1)
            entry[key.lower()] = val
        if entry.pop("type") == "xfs":
            entry['label'] = entry.get('label')
            devices_info[entry.pop("devname")] = entry

    mounts = _get_mounts()
    for dev_name in six.iterkeys(mounts):
        if devices_info.get(dev_name):
            devices_info[dev_name].update(mounts[dev_name])

    return devices_info
def devices():
    '''
    Get known XFS formatted devices on the system.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.devices
    '''
    result = __salt__['cmd.run_all']("blkid -o export")
    _verify_run(result)
    return _blkid_output(result['stdout'])
def _xfs_estimate_output(out):
'''
Parse xfs_estimate output.
'''
spc = re.compile(r"\s+")
data = {}
for line in [l for l in out.split("\n") if l.strip()][1:]:
directory, bsize, blocks, megabytes, logsize = spc.sub(" ", line).split(" ")
data[directory] = {
'block _size': bsize,
'blocks': blocks,
'megabytes': megabytes,
'logsize': logsize,
}
return data
def estimate(path):
    '''
    Estimate the space that an XFS filesystem will take.
    For each directory estimate the space that directory would take
    if it were copied to an XFS filesystem.
    Estimation does not cross mount points.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.estimate /path/to/file
        salt '*' xfs.estimate /path/to/dir/*
    '''
    if not os.path.exists(path):
        raise CommandExecutionError("Path \"{0}\" was not found.".format(path))

    result = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
    _verify_run(result)
    return _xfs_estimate_output(result["stdout"])
def modify(device, label=None, lazy_counting=None, uuid=None):
    '''
    Modify parameters of an XFS filesystem.

    device
        Device node of the XFS filesystem to change.

    label
        New volume label (``xfs_admin -L``).

    lazy_counting
        ``True`` enables and ``False`` disables lazy superblock counters;
        ``None`` leaves the setting untouched.

    uuid
        ``True`` generates a new UUID, ``False`` clears it (``nil``);
        ``None`` leaves it untouched.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
        salt '*' xfs.modify /dev/sda1 uuid=False
        salt '*' xfs.modify /dev/sda1 uuid=True
    '''
    # At least one modification has to be requested.
    if not label and lazy_counting is None and uuid is None:
        raise CommandExecutionError("Nothing specified for modification for \"{0}\" device".format(device))
    cmd = ['xfs_admin']
    if label:
        cmd.append("-L")
        cmd.append("'{0}'".format(label))
    # Explicit "is False" checks so that None (leave unchanged) adds no flag.
    if lazy_counting is False:
        cmd.append("-c")
        cmd.append("0")
    elif lazy_counting:
        cmd.append("-c")
        cmd.append("1")
    # "-U nil" wipes the UUID, "-U generate" creates a fresh one.
    if uuid is False:
        cmd.append("-U")
        cmd.append("nil")
    elif uuid:
        cmd.append("-U")
        cmd.append("generate")
    cmd.append(device)
    cmd = ' '.join(cmd)
    _verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)
    # Re-read the device metadata so the caller gets the updated state.
    out = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
    _verify_run(out)
    return _blkid_output(out['stdout'])
def _get_mounts():
    '''
    Return currently mounted XFS filesystems as a dict keyed by device,
    with the mount point and mount options for each.
    '''
    xfs_mounts = {}
    with salt.utils.files.fopen("/proc/mounts") as handle:
        for entry in salt.utils.data.decode(handle.readlines()):
            device, mount_point, fstype, options, fs_freq, fs_passno = entry.strip().split(" ")
            if fstype == 'xfs':
                xfs_mounts[device] = {
                    'mount_point': mount_point,
                    'options': options.split(","),
                }
    return xfs_mounts
def defragment(device):
    '''
    Defragment mounted XFS filesystem.
    In order to mount a filesystem, device should be properly mounted and writable.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.defragment /dev/sda1
    '''
    if device == '/':
        raise CommandExecutionError("Root is not a device.")
    if not _get_mounts().get(device):
        raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))

    result = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
    _verify_run(result)
    return {'log': result['stdout']}
|
saltstack/salt
|
salt/modules/xfs.py
|
modify
|
python
|
def modify(device, label=None, lazy_counting=None, uuid=None):
'''
Modify parameters of an XFS filesystem.
CLI Example:
.. code-block:: bash
salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
salt '*' xfs.modify /dev/sda1 uuid=False
salt '*' xfs.modify /dev/sda1 uuid=True
'''
if not label and lazy_counting is None and uuid is None:
raise CommandExecutionError("Nothing specified for modification for \"{0}\" device".format(device))
cmd = ['xfs_admin']
if label:
cmd.append("-L")
cmd.append("'{0}'".format(label))
if lazy_counting is False:
cmd.append("-c")
cmd.append("0")
elif lazy_counting:
cmd.append("-c")
cmd.append("1")
if uuid is False:
cmd.append("-U")
cmd.append("nil")
elif uuid:
cmd.append("-U")
cmd.append("generate")
cmd.append(device)
cmd = ' '.join(cmd)
_verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)
out = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
_verify_run(out)
return _blkid_output(out['stdout'])
|
Modify parameters of an XFS filesystem.
CLI Example:
.. code-block:: bash
salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
salt '*' xfs.modify /dev/sda1 uuid=False
salt '*' xfs.modify /dev/sda1 uuid=True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L465-L506
|
[
"def _verify_run(out, cmd=None):\n '''\n Crash to the log if command execution was not successful.\n '''\n if out.get(\"retcode\", 0) and out['stderr']:\n if cmd:\n log.debug('Command: \"%s\"', cmd)\n\n log.debug('Return code: %s', out.get('retcode'))\n log.debug('Error output:\\n%s', out.get('stderr', \"N/A\"))\n\n raise CommandExecutionError(out['stderr'])\n",
"def _blkid_output(out):\n '''\n Parse blkid output.\n '''\n flt = lambda data: [el for el in data if el.strip()]\n data = {}\n for dev_meta in flt(out.split(\"\\n\\n\")):\n dev = {}\n for items in flt(dev_meta.strip().split(\"\\n\")):\n key, val = items.split(\"=\", 1)\n dev[key.lower()] = val\n if dev.pop(\"type\") == \"xfs\":\n dev['label'] = dev.get('label')\n data[dev.pop(\"devname\")] = dev\n\n mounts = _get_mounts()\n for device in six.iterkeys(mounts):\n if data.get(device):\n data[device].update(mounts[device])\n\n return data\n"
] |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2014 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
'''
Module for managing XFS file systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import time
import logging
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.data
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only work on POSIX-like systems
    '''
    if salt.utils.platform.is_windows():
        return False
    return __grains__.get('kernel') == 'Linux'
def _verify_run(out, cmd=None):
    '''
    Crash to the log if command execution was not successful.

    out
        Result dict from ``cmd.run_all`` (``retcode``, ``stderr``, ...).
    cmd
        Optional command string, logged for context on failure.
    '''
    # Failure means a non-zero retcode *with* error output; commands that
    # exit non-zero silently are tolerated.
    if out.get("retcode", 0) and out['stderr']:
        if cmd:
            log.debug('Command: "%s"', cmd)
        log.debug('Return code: %s', out.get('retcode'))
        log.debug('Error output:\n%s', out.get('stderr', "N/A"))
        raise CommandExecutionError(out['stderr'])
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
def _parse_xfs_info(data):
    '''
    Parse output from "xfs_info" or "xfs_growfs -n".

    Returns a dict mapping each section keyword (e.g. "meta-data", "data",
    "log") to its key/value attributes plus a 'section' entry.
    '''
    ret = {}
    spr = re.compile(r'\s+')
    entry = None
    # Collapse whitespace runs and drop comma separators so each line is
    # reduced to "key=value" tokens for _xfs_info_get_kv().
    for line in [spr.sub(" ", l).strip().replace(", ", " ") for l in data.split("\n")]:
        if not line:
            continue
        nfo = _xfs_info_get_kv(line)
        # A line not starting with "=" opens a new section; "=" lines are
        # continuations merged into the current section.
        if not line.startswith("="):
            entry = nfo.pop(0)
            # "***" is the placeholder meaning the section has no own value.
            ret[entry[0]] = {'section': entry[(entry[1] != '***' and 1 or 0)]}
        ret[entry[0]].update(dict(nfo))
    return ret
def info(device):
    '''
    Get filesystem geometry information.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.info /dev/sda1
    '''
    result = __salt__['cmd.run_all']("xfs_info {0}".format(device))
    if result.get('stderr'):
        raise CommandExecutionError(result['stderr'].replace("xfs_info:", "").strip())
    return _parse_xfs_info(result['stdout'])
def _xfsdump_output(data):
'''
Parse CLI output of the xfsdump utility.
'''
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out['Session ID'] = line.split(" ")[-1]
elif line.startswith("session label:"):
out['Session label'] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out['Media size'] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out['Summary'] = ' '.join(summary)
return out
def dump(device, destination, level=0, label=None, noerase=None):
    '''
    Dump filesystem device to the media (file, tape etc).

    Required parameters:

    * **device**: XFS device, content of which to be dumped.
    * **destination**: Specifies a dump destination.

    Valid options are:

    * **label**: Label of the dump. Otherwise automatically generated label is used.
    * **level**: Specifies a dump level of 0 to 9.
    * **noerase**: Pre-erase media.

    Other options are not used in order to let ``xfsdump`` use its default
    values, as they are most optimal. See the ``xfsdump(8)`` manpage for
    a more complete description of these options.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.dump /dev/sda1 /destination/on/the/client
        salt '*' xfs.dump /dev/sda1 /destination/on/the/client label='Company accountancy'
        salt '*' xfs.dump /dev/sda1 /destination/on/the/client noerase=True
    '''
    if not salt.utils.path.which("xfsdump"):
        # Message fix: the old text "has to be installed or missing" was
        # self-contradictory -- this branch means the utility is absent.
        raise CommandExecutionError("Utility \"xfsdump\" has to be installed.")

    # Generate a default session label when none was given.  Single quotes
    # are swapped for double quotes because the label is wrapped in single
    # quotes on the command line below.
    label = label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device),
                                   time.localtime()).replace("'", '"')

    # NOTE(review): the command is assembled as a plain shell string, so a
    # destination containing spaces or shell metacharacters is not quoted --
    # unchanged from the original behaviour; confirm callers sanitize it.
    cmd = ["xfsdump"]
    cmd.append("-F")                          # Force: do not prompt the operator
    if not noerase:
        cmd.append("-E")                      # Pre-erase media
    cmd.append("-L '{0}'".format(label))      # Session label
    cmd.append("-l {0}".format(level))        # Dump level (0..9)
    cmd.append("-f {0}".format(destination))  # Media destination
    cmd.append(device)                        # Device to dump
    cmd = ' '.join(cmd)

    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)

    return _xfsdump_output(out['stdout'])
def _xr_to_keyset(line):
'''
Parse xfsrestore output keyset elements.
'''
tkns = [elm for elm in line.strip().split(":", 1) if elm]
if len(tkns) == 1:
return "'{0}': ".format(tkns[0])
else:
key, val = tkns
return "'{0}': '{1}',".format(key.strip(), val.strip())
def _xfs_inventory_output(out):
    '''
    Transform xfsrestore inventory data output to a Python dict source and
    evaluate it.

    The textual tree printed by "xfsrestore -I" is rewritten into a nested
    dict literal (via _xr_to_keyset) and parsed with ast.literal_eval.
    '''
    import ast  # Local import keeps this fix self-contained.

    data = []
    out = [line for line in out.split("\n") if line.strip()]

    # No inventory yet
    if len(out) == 1 and 'restore status' in out[0].lower():
        return {'restore_status': out[0]}

    ident = 0
    data.append("{")
    for line in out[:-1]:
        if len([elm for elm in line.strip().split(":") if elm]) == 1:
            # Section header: its nesting depth is encoded by leading tabs.
            n_ident = len(re.sub("[^\t]", "", line))
            if ident > n_ident:
                # Dedent: close the dict literals opened so far.
                for step in range(ident):
                    data.append("},")
                ident = n_ident
            data.append(_xr_to_keyset(line))
            data.append("{")
        else:
            data.append(_xr_to_keyset(line))
    for step in range(ident + 1):
        data.append("},")
    data.append("},")

    # Use ast.literal_eval instead of eval(): it only accepts Python
    # literals, so malformed or malicious inventory text cannot execute
    # arbitrary code.  The generated source is purely dict/str literals.
    data = ast.literal_eval('\n'.join(data))[0]
    data['restore_status'] = out[-1]
    return data
def inventory():
    '''
    Display XFS dump inventory without restoration.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.inventory
    '''
    result = __salt__['cmd.run_all']("xfsrestore -I")
    _verify_run(result)
    return _xfs_inventory_output(result['stdout'])
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {}
def prune_dump(sessionid):
    '''
    Prunes the dump session identified by the given session id.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
    '''
    result = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
    _verify_run(result)

    parsed = _xfs_prune_output(result['stdout'], sessionid)
    if not parsed:
        raise CommandExecutionError("Session UUID \"{0}\" was not found.".format(sessionid))
    return parsed
def _blkid_output(out):
    '''
    Parse "blkid -o export" output into a dict keyed by device name.

    Only XFS devices are kept; mount information from /proc/mounts is
    merged in for devices that are currently mounted.
    '''
    non_empty = lambda data: [el for el in data if el.strip()]

    devices_info = {}
    for blk in non_empty(out.split("\n\n")):
        entry = {}
        for kv in non_empty(blk.strip().split("\n")):
            key, val = kv.split("=", 1)
            entry[key.lower()] = val
        if entry.pop("type") == "xfs":
            entry['label'] = entry.get('label')
            devices_info[entry.pop("devname")] = entry

    mounts = _get_mounts()
    for dev_name in six.iterkeys(mounts):
        if devices_info.get(dev_name):
            devices_info[dev_name].update(mounts[dev_name])

    return devices_info
def devices():
    '''
    Get known XFS formatted devices on the system.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.devices
    '''
    result = __salt__['cmd.run_all']("blkid -o export")
    _verify_run(result)
    return _blkid_output(result['stdout'])
def _xfs_estimate_output(out):
'''
Parse xfs_estimate output.
'''
spc = re.compile(r"\s+")
data = {}
for line in [l for l in out.split("\n") if l.strip()][1:]:
directory, bsize, blocks, megabytes, logsize = spc.sub(" ", line).split(" ")
data[directory] = {
'block _size': bsize,
'blocks': blocks,
'megabytes': megabytes,
'logsize': logsize,
}
return data
def estimate(path):
    '''
    Estimate the space that an XFS filesystem will take.
    For each directory estimate the space that directory would take
    if it were copied to an XFS filesystem.
    Estimation does not cross mount points.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.estimate /path/to/file
        salt '*' xfs.estimate /path/to/dir/*
    '''
    if not os.path.exists(path):
        raise CommandExecutionError("Path \"{0}\" was not found.".format(path))

    result = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
    _verify_run(result)
    return _xfs_estimate_output(result["stdout"])
def mkfs(device, label=None, ssize=None, noforce=None,
         bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
    '''
    Create a file system on the specified device. By default wipes out with force.

    General options:

    * **label**: Specify volume label.
    * **ssize**: Specify the fundamental sector size of the filesystem.
    * **noforce**: Do not force create filesystem, if disk is already formatted.

    Filesystem geometry options:

    * **bso**: Block size options.
    * **gmo**: Global metadata options.
    * **dso**: Data section options. These options specify the location, size,
      and other parameters of the data section of the filesystem.
    * **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
    * **lso**: Log section options.
    * **nmo**: Naming options.
    * **rso**: Realtime section options.

    See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.mkfs /dev/sda1
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
    '''
    # Parse "k1=v1,k2=v2" option strings into a dict; a None/empty or
    # "="-less argument yields an empty dict, a malformed one raises below.
    getopts = lambda args: dict(((args and ("=" in args)
                                  and args or None)) and [kw.split("=") for kw in args.split(",")] or [])
    cmd = ["mkfs.xfs"]
    if label:
        cmd.append("-L")
        cmd.append("'{0}'".format(label))
    if ssize:
        cmd.append("-s")
        cmd.append(ssize)
    # Each geometry option group maps onto its mkfs.xfs switch; getopts()
    # is used only to validate the "key=value,..." syntax.
    for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
                         ("-d", dso), ("-l", lso), ("-r", rso)]:
        try:
            if getopts(opts):
                cmd.append(switch)
                cmd.append(opts)
        except Exception:
            raise CommandExecutionError("Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))
    # Force is the default; disable it only on explicit request.
    if not noforce:
        cmd.append("-f")
    cmd.append(device)
    cmd = ' '.join(cmd)
    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)
    return _parse_xfs_info(out['stdout'])
def _get_mounts():
    '''
    Return currently mounted XFS filesystems as a dict keyed by device,
    with the mount point and mount options for each.
    '''
    xfs_mounts = {}
    with salt.utils.files.fopen("/proc/mounts") as handle:
        for entry in salt.utils.data.decode(handle.readlines()):
            device, mount_point, fstype, options, fs_freq, fs_passno = entry.strip().split(" ")
            if fstype == 'xfs':
                xfs_mounts[device] = {
                    'mount_point': mount_point,
                    'options': options.split(","),
                }
    return xfs_mounts
def defragment(device):
    '''
    Defragment mounted XFS filesystem.
    In order to mount a filesystem, device should be properly mounted and writable.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.defragment /dev/sda1
    '''
    if device == '/':
        raise CommandExecutionError("Root is not a device.")
    if not _get_mounts().get(device):
        raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))

    result = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
    _verify_run(result)
    return {'log': result['stdout']}
|
saltstack/salt
|
salt/modules/xfs.py
|
_get_mounts
|
python
|
def _get_mounts():
'''
List mounted filesystems.
'''
mounts = {}
with salt.utils.files.fopen("/proc/mounts") as fhr:
for line in salt.utils.data.decode(fhr.readlines()):
device, mntpnt, fstype, options, fs_freq, fs_passno = line.strip().split(" ")
if fstype != 'xfs':
continue
mounts[device] = {
'mount_point': mntpnt,
'options': options.split(","),
}
return mounts
|
List mounted filesystems.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L509-L524
|
[
"def decode(data, encoding=None, errors='strict', keep=False,\n normalize=False, preserve_dict_class=False, preserve_tuples=False,\n to_str=False):\n '''\n Generic function which will decode whichever type is passed, if necessary.\n Optionally use to_str=True to ensure strings are str types and not unicode\n on Python 2.\n\n If `strict` is True, and `keep` is False, and we fail to decode, a\n UnicodeDecodeError will be raised. Passing `keep` as True allows for the\n original value to silently be returned in cases where decoding fails. This\n can be useful for cases where the data passed to this function is likely to\n contain binary blobs, such as in the case of cp.recv.\n\n If `normalize` is True, then unicodedata.normalize() will be used to\n normalize unicode strings down to a single code point per glyph. It is\n recommended not to normalize unless you know what you're doing. For\n instance, if `data` contains a dictionary, it is possible that normalizing\n will lead to data loss because the following two strings will normalize to\n the same value:\n\n - u'\\\\u044f\\\\u0438\\\\u0306\\\\u0446\\\\u0430.txt'\n - u'\\\\u044f\\\\u0439\\\\u0446\\\\u0430.txt'\n\n One good use case for normalization is in the test suite. For example, on\n some platforms such as Mac OS, os.listdir() will produce the first of the\n two strings above, in which \"й\" is represented as two code points (i.e. one\n for the base character, and one for the breve mark). 
Normalizing allows for\n a more reliable test case.\n '''\n _decode_func = salt.utils.stringutils.to_unicode \\\n if not to_str \\\n else salt.utils.stringutils.to_str\n if isinstance(data, Mapping):\n return decode_dict(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, list):\n return decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, tuple):\n return decode_tuple(data, encoding, errors, keep, normalize,\n preserve_dict_class, to_str) \\\n if preserve_tuples \\\n else decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n else:\n try:\n data = _decode_func(data, encoding, errors, normalize)\n except TypeError:\n # to_unicode raises a TypeError when input is not a\n # string/bytestring/bytearray. This is expected and simply means we\n # are going to leave the value as-is.\n pass\n except UnicodeDecodeError:\n if not keep:\n raise\n return data\n",
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n"
] |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2014 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
'''
Module for managing XFS file systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import time
import logging
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.data
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only work on POSIX-like systems
    '''
    if salt.utils.platform.is_windows():
        return False
    return __grains__.get('kernel') == 'Linux'
def _verify_run(out, cmd=None):
    '''
    Crash to the log if command execution was not successful.

    out
        Result dict from ``cmd.run_all`` (``retcode``, ``stderr``, ...).
    cmd
        Optional command string, logged for context on failure.
    '''
    # Failure means a non-zero retcode *with* error output; commands that
    # exit non-zero silently are tolerated.
    if out.get("retcode", 0) and out['stderr']:
        if cmd:
            log.debug('Command: "%s"', cmd)
        log.debug('Return code: %s', out.get('retcode'))
        log.debug('Error output:\n%s', out.get('stderr', "N/A"))
        raise CommandExecutionError(out['stderr'])
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
def _parse_xfs_info(data):
    '''
    Parse output from "xfs_info" or "xfs_growfs -n".

    Returns a dict mapping each section keyword (e.g. "meta-data", "data",
    "log") to its key/value attributes plus a 'section' entry.
    '''
    ret = {}
    spr = re.compile(r'\s+')
    entry = None
    # Collapse whitespace runs and drop comma separators so each line is
    # reduced to "key=value" tokens for _xfs_info_get_kv().
    for line in [spr.sub(" ", l).strip().replace(", ", " ") for l in data.split("\n")]:
        if not line:
            continue
        nfo = _xfs_info_get_kv(line)
        # A line not starting with "=" opens a new section; "=" lines are
        # continuations merged into the current section.
        if not line.startswith("="):
            entry = nfo.pop(0)
            # "***" is the placeholder meaning the section has no own value.
            ret[entry[0]] = {'section': entry[(entry[1] != '***' and 1 or 0)]}
        ret[entry[0]].update(dict(nfo))
    return ret
def info(device):
    '''
    Get filesystem geometry information.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.info /dev/sda1
    '''
    result = __salt__['cmd.run_all']("xfs_info {0}".format(device))
    if result.get('stderr'):
        raise CommandExecutionError(result['stderr'].replace("xfs_info:", "").strip())
    return _parse_xfs_info(result['stdout'])
def _xfsdump_output(data):
'''
Parse CLI output of the xfsdump utility.
'''
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out['Session ID'] = line.split(" ")[-1]
elif line.startswith("session label:"):
out['Session label'] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out['Media size'] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out['Summary'] = ' '.join(summary)
return out
def dump(device, destination, level=0, label=None, noerase=None):
    '''
    Dump filesystem device to the media (file, tape etc).

    Required parameters:

    * **device**: XFS device, content of which to be dumped.
    * **destination**: Specifies a dump destination.

    Valid options are:

    * **label**: Label of the dump. Otherwise automatically generated label is used.
    * **level**: Specifies a dump level of 0 to 9.
    * **noerase**: Pre-erase media.

    Other options are not used in order to let ``xfsdump`` use its default
    values, as they are most optimal. See the ``xfsdump(8)`` manpage for
    a more complete description of these options.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.dump /dev/sda1 /destination/on/the/client
        salt '*' xfs.dump /dev/sda1 /destination/on/the/client label='Company accountancy'
        salt '*' xfs.dump /dev/sda1 /destination/on/the/client noerase=True
    '''
    if not salt.utils.path.which("xfsdump"):
        # Message fix: the old text "has to be installed or missing" was
        # self-contradictory -- this branch means the utility is absent.
        raise CommandExecutionError("Utility \"xfsdump\" has to be installed.")

    # Generate a default session label when none was given.  Single quotes
    # are swapped for double quotes because the label is wrapped in single
    # quotes on the command line below.
    label = label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device),
                                   time.localtime()).replace("'", '"')

    # NOTE(review): the command is assembled as a plain shell string, so a
    # destination containing spaces or shell metacharacters is not quoted --
    # unchanged from the original behaviour; confirm callers sanitize it.
    cmd = ["xfsdump"]
    cmd.append("-F")                          # Force: do not prompt the operator
    if not noerase:
        cmd.append("-E")                      # Pre-erase media
    cmd.append("-L '{0}'".format(label))      # Session label
    cmd.append("-l {0}".format(level))        # Dump level (0..9)
    cmd.append("-f {0}".format(destination))  # Media destination
    cmd.append(device)                        # Device to dump
    cmd = ' '.join(cmd)

    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)

    return _xfsdump_output(out['stdout'])
def _xr_to_keyset(line):
'''
Parse xfsrestore output keyset elements.
'''
tkns = [elm for elm in line.strip().split(":", 1) if elm]
if len(tkns) == 1:
return "'{0}': ".format(tkns[0])
else:
key, val = tkns
return "'{0}': '{1}',".format(key.strip(), val.strip())
def _xfs_inventory_output(out):
    '''
    Transform xfsrestore inventory data output to a Python dict source and evaluate it.

    The tab-indented listing from ``xfsrestore -I`` is rewritten line by
    line into the text of a nested Python dict literal, which is then
    evaluated.  The final output line is stored under ``restore_status``.
    '''
    data = []
    out = [line for line in out.split("\n") if line.strip()]

    # No inventory yet
    if len(out) == 1 and 'restore status' in out[0].lower():
        return {'restore_status': out[0]}

    ident = 0
    data.append("{")
    for line in out[:-1]:
        # A line that reduces to a single "key:" token opens a nested
        # section; its depth is the count of leading tab characters.
        if len([elm for elm in line.strip().split(":") if elm]) == 1:
            n_ident = len(re.sub("[^\t]", "", line))
            if ident > n_ident:
                # Stepping back out: close every section opened so far.
                for step in range(ident):
                    data.append("},")
            ident = n_ident
            data.append(_xr_to_keyset(line))
            data.append("{")
        else:
            # Plain "key: value" pair inside the current section.
            data.append(_xr_to_keyset(line))
    # Close the remaining open sections plus the outermost literal.
    for step in range(ident + 1):
        data.append("},")
    data.append("},")

    # We are evaling into a python dict, a json load
    # would be safer
    data = eval('\n'.join(data))[0]  # pylint: disable=W0123
    data['restore_status'] = out[-1]

    return data
def inventory():
    '''
    Display XFS dump inventory without restoration.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.inventory
    '''
    # ``xfsrestore -I`` prints the dump inventory without restoring data.
    out = __salt__['cmd.run_all']("xfsrestore -I")
    _verify_run(out)

    # Convert the indented inventory listing into a nested dict.
    return _xfs_inventory_output(out['stdout'])
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {}
def prune_dump(sessionid):
    '''
    Prunes the dump session identified by the given session id.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
    '''
    # ``xfsinvutil -s <uuid> -F`` removes the session from the dump
    # inventory without prompting for confirmation.
    out = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
    _verify_run(out)

    # _xfs_prune_output returns {} unless the parsed UUID matches, so an
    # empty result means the session was not found.
    data = _xfs_prune_output(out['stdout'], sessionid)
    if data:
        return data

    raise CommandExecutionError("Session UUID \"{0}\" was not found.".format(sessionid))
def _blkid_output(out):
    '''
    Parse ``blkid -o export`` output into a dict of XFS devices.

    Returns ``{devname: attributes}`` for every device whose TYPE is
    "xfs"; mount point and options from /proc/mounts are merged in for
    devices that are currently mounted.
    '''
    flt = lambda data: [el for el in data if el.strip()]

    data = {}
    # Each device is a blank-line separated block of KEY=VALUE lines.
    for dev_meta in flt(out.split("\n\n")):
        dev = {}
        for items in flt(dev_meta.strip().split("\n")):
            key, val = items.split("=", 1)
            dev[key.lower()] = val
        # Some blkid entries carry no TYPE key at all; pop with a default
        # so those are skipped instead of raising KeyError.
        if dev.pop("type", None) == "xfs":
            dev['label'] = dev.get('label')  # normalize: key always present, may be None
            data[dev.pop("devname")] = dev

    mounts = _get_mounts()
    for device in six.iterkeys(mounts):
        if data.get(device):
            data[device].update(mounts[device])

    return data
def devices():
    '''
    Get known XFS formatted devices on the system.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.devices
    '''
    # ``blkid -o export`` emits one KEY=VALUE block per device, separated
    # by blank lines; _blkid_output keeps only entries with TYPE=xfs.
    out = __salt__['cmd.run_all']("blkid -o export")
    _verify_run(out)

    return _blkid_output(out['stdout'])
def _xfs_estimate_output(out):
'''
Parse xfs_estimate output.
'''
spc = re.compile(r"\s+")
data = {}
for line in [l for l in out.split("\n") if l.strip()][1:]:
directory, bsize, blocks, megabytes, logsize = spc.sub(" ", line).split(" ")
data[directory] = {
'block _size': bsize,
'blocks': blocks,
'megabytes': megabytes,
'logsize': logsize,
}
return data
def estimate(path):
    '''
    Estimate the space that an XFS filesystem will take.
    For each directory estimate the space that directory would take
    if it were copied to an XFS filesystem.
    Estimation does not cross mount points.

    path
        File or directory to estimate.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.estimate /path/to/file
        salt '*' xfs.estimate /path/to/dir/*
    '''
    if not os.path.exists(path):
        raise CommandExecutionError("Path \"{0}\" was not found.".format(path))

    # -v: verbose output, one line per directory, parsed below.
    out = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
    _verify_run(out)

    return _xfs_estimate_output(out["stdout"])
def mkfs(device, label=None, ssize=None, noforce=None,
         bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
    '''
    Create a file system on the specified device. By default wipes out with force.

    General options:

    * **label**: Specify volume label.
    * **ssize**: Specify the fundamental sector size of the filesystem.
    * **noforce**: Do not force create filesystem, if disk is already formatted.

    Filesystem geometry options:

    * **bso**: Block size options.
    * **gmo**: Global metadata options.
    * **dso**: Data section options. These options specify the location, size,
      and other parameters of the data section of the filesystem.
    * **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
    * **lso**: Log section options.
    * **nmo**: Naming options.
    * **rso**: Realtime section options.

    See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.mkfs /dev/sda1
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
    '''
    # Parse a "k1=v1,k2=v2" option string into a dict.  Evaluates to {}
    # for empty/None input; a malformed string raises inside the try
    # below, which converts it into a CommandExecutionError.
    getopts = lambda args: dict(((args and ("=" in args)
                                  and args or None)) and [kw.split("=") for kw in args.split(",")] or [])

    cmd = ["mkfs.xfs"]
    if label:
        cmd.append("-L")
        cmd.append("'{0}'".format(label))

    if ssize:
        cmd.append("-s")
        cmd.append(ssize)

    # Map each option group to its mkfs.xfs switch; the raw option string
    # is passed straight through once it validates.
    for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
                         ("-d", dso), ("-l", lso), ("-r", rso)]:
        try:
            if getopts(opts):
                cmd.append(switch)
                cmd.append(opts)
        except Exception:
            raise CommandExecutionError("Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))

    if not noforce:
        cmd.append("-f")  # overwrite an existing filesystem by default

    cmd.append(device)

    cmd = ' '.join(cmd)
    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)

    return _parse_xfs_info(out['stdout'])
def modify(device, label=None, lazy_counting=None, uuid=None):
    '''
    Modify parameters of an XFS filesystem.

    device
        Device to modify.

    label
        New volume label.

    lazy_counting
        Enable (True) or disable (False) lazy counters.

    uuid
        True generates a new UUID; False clears it ("nil").

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
        salt '*' xfs.modify /dev/sda1 uuid=False
        salt '*' xfs.modify /dev/sda1 uuid=True
    '''
    if not label and lazy_counting is None and uuid is None:
        raise CommandExecutionError("Nothing specified for modification for \"{0}\" device".format(device))

    cmd = ['xfs_admin']
    if label:
        cmd.append("-L")
        cmd.append("'{0}'".format(label))

    # -c toggles lazy counters: 0 = off, 1 = on.
    if lazy_counting is False:
        cmd.append("-c")
        cmd.append("0")
    elif lazy_counting:
        cmd.append("-c")
        cmd.append("1")

    # -U sets the filesystem UUID: "nil" clears it, "generate" makes a new one.
    if uuid is False:
        cmd.append("-U")
        cmd.append("nil")
    elif uuid:
        cmd.append("-U")
        cmd.append("generate")
    cmd.append(device)

    cmd = ' '.join(cmd)
    _verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)

    # Re-read device attributes so the caller sees the updated state.
    out = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
    _verify_run(out)

    return _blkid_output(out['stdout'])
def defragment(device):
    '''
    Defragment mounted XFS filesystem.
    In order to mount a filesystem, device should be properly mounted and writable.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.defragment /dev/sda1
    '''
    # Guard clauses: refuse the root path and any device that is not
    # currently mounted as XFS.
    if device == '/':
        raise CommandExecutionError("Root is not a device.")
    mounted = _get_mounts().get(device)
    if not mounted:
        raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))

    out = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
    _verify_run(out)

    return {'log': out['stdout']}
|
saltstack/salt
|
salt/modules/xfs.py
|
defragment
|
python
|
def defragment(device):
'''
Defragment mounted XFS filesystem.
In order to mount a filesystem, device should be properly mounted and writable.
CLI Example:
.. code-block:: bash
salt '*' xfs.defragment /dev/sda1
'''
if device == '/':
raise CommandExecutionError("Root is not a device.")
if not _get_mounts().get(device):
raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))
out = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
_verify_run(out)
return {
'log': out['stdout']
}
|
Defragment mounted XFS filesystem.
In order to mount a filesystem, device should be properly mounted and writable.
CLI Example:
.. code-block:: bash
salt '*' xfs.defragment /dev/sda1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L527-L549
|
[
"def _verify_run(out, cmd=None):\n '''\n Crash to the log if command execution was not successful.\n '''\n if out.get(\"retcode\", 0) and out['stderr']:\n if cmd:\n log.debug('Command: \"%s\"', cmd)\n\n log.debug('Return code: %s', out.get('retcode'))\n log.debug('Error output:\\n%s', out.get('stderr', \"N/A\"))\n\n raise CommandExecutionError(out['stderr'])\n",
"def _get_mounts():\n '''\n List mounted filesystems.\n '''\n mounts = {}\n with salt.utils.files.fopen(\"/proc/mounts\") as fhr:\n for line in salt.utils.data.decode(fhr.readlines()):\n device, mntpnt, fstype, options, fs_freq, fs_passno = line.strip().split(\" \")\n if fstype != 'xfs':\n continue\n mounts[device] = {\n 'mount_point': mntpnt,\n 'options': options.split(\",\"),\n }\n\n return mounts\n"
] |
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2014 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
'''
Module for managing XFS file systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import time
import logging
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.data
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only work on POSIX-like systems
    '''
    # Restrict the module to Linux minions: the XFS tooling invoked by
    # this module is Linux-only.
    return not salt.utils.platform.is_windows() \
        and __grains__.get('kernel') == 'Linux'
def _verify_run(out, cmd=None):
    '''
    Raise ``CommandExecutionError`` when a command run failed, logging
    the command and its error output beforehand.
    '''
    failed = out.get("retcode", 0) and out['stderr']
    if not failed:
        return
    if cmd:
        log.debug('Command: "%s"', cmd)
    log.debug('Return code: %s', out.get('retcode'))
    log.debug('Error output:\n%s', out.get('stderr', "N/A"))
    raise CommandExecutionError(out['stderr'])
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
def _parse_xfs_info(data):
    '''
    Parse output from "xfs_info" or "xfs_growfs -n".
    '''
    ret = {}
    spr = re.compile(r'\s+')
    entry = None
    # Collapse runs of whitespace and drop comma separators before tokenizing.
    for line in [spr.sub(" ", l).strip().replace(", ", " ") for l in data.split("\n")]:
        if not line:
            continue
        nfo = _xfs_info_get_kv(line)
        # A line not starting with "=" opens a new section; "***" is the
        # placeholder _xfs_info_get_kv emits for a key without a value.
        if not line.startswith("="):
            entry = nfo.pop(0)
            ret[entry[0]] = {'section': entry[(entry[1] != '***' and 1 or 0)]}
        # Continuation lines (starting with "=") merge into the section
        # opened by the most recent non-"=" line.
        ret[entry[0]].update(dict(nfo))

    return ret
def info(device):
    '''
    Get filesystem geometry information.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.info /dev/sda1
    '''
    out = __salt__['cmd.run_all']("xfs_info {0}".format(device))
    # xfs_info prefixes its own error messages with "xfs_info:"; strip
    # the prefix before raising.
    if out.get('stderr'):
        raise CommandExecutionError(out['stderr'].replace("xfs_info:", "").strip())
    return _parse_xfs_info(out['stdout'])
def _xfsdump_output(data):
'''
Parse CLI output of the xfsdump utility.
'''
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out['Session ID'] = line.split(" ")[-1]
elif line.startswith("session label:"):
out['Session label'] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out['Media size'] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out['Summary'] = ' '.join(summary)
return out
def dump(device, destination, level=0, label=None, noerase=None):
'''
Dump filesystem device to the media (file, tape etc).
Required parameters:
* **device**: XFS device, content of which to be dumped.
* **destination**: Specifies a dump destination.
Valid options are:
* **label**: Label of the dump. Otherwise automatically generated label is used.
* **level**: Specifies a dump level of 0 to 9.
* **noerase**: Pre-erase media.
Other options are not used in order to let ``xfsdump`` use its default
values, as they are most optimal. See the ``xfsdump(8)`` manpage for
a more complete description of these options.
CLI Example:
.. code-block:: bash
salt '*' xfs.dump /dev/sda1 /detination/on/the/client
salt '*' xfs.dump /dev/sda1 /detination/on/the/client label='Company accountancy'
salt '*' xfs.dump /dev/sda1 /detination/on/the/client noerase=True
'''
if not salt.utils.path.which("xfsdump"):
raise CommandExecutionError("Utility \"xfsdump\" has to be installed or missing.")
label = label and label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device),
time.localtime()).replace("'", '"')
cmd = ["xfsdump"]
cmd.append("-F") # Force
if not noerase:
cmd.append("-E") # pre-erase
cmd.append("-L '{0}'".format(label)) # Label
cmd.append("-l {0}".format(level)) # Dump level
cmd.append("-f {0}".format(destination)) # Media destination
cmd.append(device) # Device
cmd = ' '.join(cmd)
out = __salt__['cmd.run_all'](cmd)
_verify_run(out, cmd=cmd)
return _xfsdump_output(out['stdout'])
def _xr_to_keyset(line):
'''
Parse xfsrestore output keyset elements.
'''
tkns = [elm for elm in line.strip().split(":", 1) if elm]
if len(tkns) == 1:
return "'{0}': ".format(tkns[0])
else:
key, val = tkns
return "'{0}': '{1}',".format(key.strip(), val.strip())
def _xfs_inventory_output(out):
'''
Transform xfsrestore inventory data output to a Python dict source and evaluate it.
'''
data = []
out = [line for line in out.split("\n") if line.strip()]
# No inventory yet
if len(out) == 1 and 'restore status' in out[0].lower():
return {'restore_status': out[0]}
ident = 0
data.append("{")
for line in out[:-1]:
if len([elm for elm in line.strip().split(":") if elm]) == 1:
n_ident = len(re.sub("[^\t]", "", line))
if ident > n_ident:
for step in range(ident):
data.append("},")
ident = n_ident
data.append(_xr_to_keyset(line))
data.append("{")
else:
data.append(_xr_to_keyset(line))
for step in range(ident + 1):
data.append("},")
data.append("},")
# We are evaling into a python dict, a json load
# would be safer
data = eval('\n'.join(data))[0] # pylint: disable=W0123
data['restore_status'] = out[-1]
return data
def inventory():
'''
Display XFS dump inventory without restoration.
CLI Example:
.. code-block:: bash
salt '*' xfs.inventory
'''
out = __salt__['cmd.run_all']("xfsrestore -I")
_verify_run(out)
return _xfs_inventory_output(out['stdout'])
def _xfs_prune_output(out, uuid):
'''
Parse prune output.
'''
data = {}
cnt = []
cutpoint = False
for line in [l.strip() for l in out.split("\n") if l]:
if line.startswith("-"):
if cutpoint:
break
else:
cutpoint = True
continue
if cutpoint:
cnt.append(line)
for kset in [e for e in cnt[1:] if ':' in e]:
key, val = [t.strip() for t in kset.split(":", 1)]
data[key.lower().replace(" ", "_")] = val
return data.get('uuid') == uuid and data or {}
def prune_dump(sessionid):
'''
Prunes the dump session identified by the given session id.
CLI Example:
.. code-block:: bash
salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
'''
out = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
_verify_run(out)
data = _xfs_prune_output(out['stdout'], sessionid)
if data:
return data
raise CommandExecutionError("Session UUID \"{0}\" was not found.".format(sessionid))
def _blkid_output(out):
'''
Parse blkid output.
'''
flt = lambda data: [el for el in data if el.strip()]
data = {}
for dev_meta in flt(out.split("\n\n")):
dev = {}
for items in flt(dev_meta.strip().split("\n")):
key, val = items.split("=", 1)
dev[key.lower()] = val
if dev.pop("type") == "xfs":
dev['label'] = dev.get('label')
data[dev.pop("devname")] = dev
mounts = _get_mounts()
for device in six.iterkeys(mounts):
if data.get(device):
data[device].update(mounts[device])
return data
def devices():
'''
Get known XFS formatted devices on the system.
CLI Example:
.. code-block:: bash
salt '*' xfs.devices
'''
out = __salt__['cmd.run_all']("blkid -o export")
_verify_run(out)
return _blkid_output(out['stdout'])
def _xfs_estimate_output(out):
'''
Parse xfs_estimate output.
'''
spc = re.compile(r"\s+")
data = {}
for line in [l for l in out.split("\n") if l.strip()][1:]:
directory, bsize, blocks, megabytes, logsize = spc.sub(" ", line).split(" ")
data[directory] = {
'block _size': bsize,
'blocks': blocks,
'megabytes': megabytes,
'logsize': logsize,
}
return data
def estimate(path):
'''
Estimate the space that an XFS filesystem will take.
For each directory estimate the space that directory would take
if it were copied to an XFS filesystem.
Estimation does not cross mount points.
CLI Example:
.. code-block:: bash
salt '*' xfs.estimate /path/to/file
salt '*' xfs.estimate /path/to/dir/*
'''
if not os.path.exists(path):
raise CommandExecutionError("Path \"{0}\" was not found.".format(path))
out = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
_verify_run(out)
return _xfs_estimate_output(out["stdout"])
def mkfs(device, label=None, ssize=None, noforce=None,
bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
'''
Create a file system on the specified device. By default wipes out with force.
General options:
* **label**: Specify volume label.
* **ssize**: Specify the fundamental sector size of the filesystem.
* **noforce**: Do not force create filesystem, if disk is already formatted.
Filesystem geometry options:
* **bso**: Block size options.
* **gmo**: Global metadata options.
* **dso**: Data section options. These options specify the location, size,
and other parameters of the data section of the filesystem.
* **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
* **lso**: Log section options.
* **nmo**: Naming options.
* **rso**: Realtime section options.
See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.
CLI Example:
.. code-block:: bash
salt '*' xfs.mkfs /dev/sda1
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
'''
getopts = lambda args: dict(((args and ("=" in args)
and args or None)) and [kw.split("=") for kw in args.split(",")] or [])
cmd = ["mkfs.xfs"]
if label:
cmd.append("-L")
cmd.append("'{0}'".format(label))
if ssize:
cmd.append("-s")
cmd.append(ssize)
for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
("-d", dso), ("-l", lso), ("-r", rso)]:
try:
if getopts(opts):
cmd.append(switch)
cmd.append(opts)
except Exception:
raise CommandExecutionError("Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))
if not noforce:
cmd.append("-f")
cmd.append(device)
cmd = ' '.join(cmd)
out = __salt__['cmd.run_all'](cmd)
_verify_run(out, cmd=cmd)
return _parse_xfs_info(out['stdout'])
def modify(device, label=None, lazy_counting=None, uuid=None):
'''
Modify parameters of an XFS filesystem.
CLI Example:
.. code-block:: bash
salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
salt '*' xfs.modify /dev/sda1 uuid=False
salt '*' xfs.modify /dev/sda1 uuid=True
'''
if not label and lazy_counting is None and uuid is None:
raise CommandExecutionError("Nothing specified for modification for \"{0}\" device".format(device))
cmd = ['xfs_admin']
if label:
cmd.append("-L")
cmd.append("'{0}'".format(label))
if lazy_counting is False:
cmd.append("-c")
cmd.append("0")
elif lazy_counting:
cmd.append("-c")
cmd.append("1")
if uuid is False:
cmd.append("-U")
cmd.append("nil")
elif uuid:
cmd.append("-U")
cmd.append("generate")
cmd.append(device)
cmd = ' '.join(cmd)
_verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)
out = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
_verify_run(out)
return _blkid_output(out['stdout'])
def _get_mounts():
    '''
    List mounted XFS filesystems, keyed by device.
    '''
    mounts = {}
    with salt.utils.files.fopen("/proc/mounts") as fhr:
        for line in salt.utils.data.decode(fhr.readlines()):
            # /proc/mounts fields: device mountpoint fstype options freq passno
            device, mntpnt, fstype, options, fs_freq, fs_passno = line.strip().split(" ")
            if fstype != 'xfs':
                continue
            mounts[device] = {
                'mount_point': mntpnt,
                'options': options.split(","),
            }
    return mounts
|
saltstack/salt
|
salt/modules/hashutil.py
|
digest
|
python
|
def digest(instr, checksum='md5'):
'''
Return a checksum digest for a string
instr
A string
checksum : ``md5``
The hashing algorithm to use to generate checksums. Valid options: md5,
sha256, sha512.
CLI Example:
.. code-block:: bash
salt '*' hashutil.digest 'get salted'
'''
hashing_funcs = {
'md5': __salt__['hashutil.md5_digest'],
'sha256': __salt__['hashutil.sha256_digest'],
'sha512': __salt__['hashutil.sha512_digest'],
}
hash_func = hashing_funcs.get(checksum)
if hash_func is None:
raise salt.exceptions.CommandExecutionError(
"Hash func '{0}' is not supported.".format(checksum))
return hash_func(instr)
|
Return a checksum digest for a string
instr
A string
checksum : ``md5``
The hashing algorithm to use to generate checksums. Valid options: md5,
sha256, sha512.
CLI Example:
.. code-block:: bash
salt '*' hashutil.digest 'get salted'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/hashutil.py#L26-L53
| null |
# encoding: utf-8
'''
A collection of hashing and encoding functions
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import base64
import hashlib
import hmac
# Import Salt libs
import salt.exceptions
from salt.ext import six
import salt.utils.files
import salt.utils.hashutils
import salt.utils.stringutils
if six.PY2:
from StringIO import StringIO
BytesIO = StringIO
elif six.PY3:
from io import BytesIO, StringIO
def digest_file(infile, checksum='md5'):
    '''
    Return a checksum digest for a file

    infile
        A file path

    checksum : ``md5``
        The hashing algorithm to use to generate checksums. Wraps the
        :py:func:`hashutil.digest <salt.modules.hashutil.digest>` execution
        function.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.digest_file /path/to/file
    '''
    if not __salt__['file.file_exists'](infile):
        raise salt.exceptions.CommandExecutionError(
            "File path '{0}' not found.".format(infile))

    # Read in binary mode so the digest is byte-exact on every platform.
    with salt.utils.files.fopen(infile, 'rb') as f:
        file_hash = __salt__['hashutil.digest'](f.read(), checksum)

    return file_hash
def base64_b64encode(instr):
    '''
    Encode a string as base64 using the "modern" Python interface.

    Among other possible differences, the "modern" encoder does not include
    newline ('\\n') characters in the encoded output.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_b64encode 'get salted'
    '''
    # Thin wrapper; the actual work happens in salt.utils.hashutils.
    return salt.utils.hashutils.base64_b64encode(instr)
def base64_b64decode(instr):
    '''
    Decode a base64-encoded string using the "modern" Python interface

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_b64decode 'Z2V0IHNhbHRlZA=='
    '''
    # Thin wrapper; the actual work happens in salt.utils.hashutils.
    return salt.utils.hashutils.base64_b64decode(instr)
def base64_encodestring(instr):
    '''
    Encode a string as base64 using the "legacy" Python interface.

    Among other possible differences, the "legacy" encoder includes
    a newline ('\\n') character after every 76 characters and always
    at the end of the encoded string.

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_encodestring 'get salted'
    '''
    # Thin wrapper; the actual work happens in salt.utils.hashutils.
    return salt.utils.hashutils.base64_encodestring(instr)
def base64_encodefile(fname):
    '''
    Read a file from the file system and return as a base64 encoded string

    .. versionadded:: 2016.3.0

    Pillar example:

    .. code-block:: yaml

        path:
          to:
            data: |
              {{ salt.hashutil.base64_encodefile('/path/to/binary_file') | indent(6) }}

    The :py:func:`file.decode <salt.states.file.decode>` state function can be
    used to decode this data and write it to disk.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_encodefile /path/to/binary_file
    '''
    encoded_f = BytesIO()

    with salt.utils.files.fopen(fname, 'rb') as f:
        # NOTE(review): the legacy ``base64.encode`` stream API was removed
        # in Python 3.9 (``base64.encodebytes`` is the replacement) --
        # confirm against the supported Python versions.
        base64.encode(f, encoded_f)

    encoded_f.seek(0)
    return salt.utils.stringutils.to_str(encoded_f.read())
def base64_decodestring(instr):
    '''
    Decode a base64-encoded string using the "legacy" Python interface

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_decodestring instr='Z2V0IHNhbHRlZAo='
    '''
    # Thin wrapper; the actual work happens in salt.utils.hashutils.
    return salt.utils.hashutils.base64_decodestring(instr)
def base64_decodefile(instr, outfile):
    r'''
    Decode a base64-encoded string and write the result to a file

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_decodefile instr='Z2V0IHNhbHRlZAo=' outfile='/path/to/binary_file'
    '''
    # NOTE(review): instr is wrapped in a text StringIO while outfile is
    # opened in binary mode; on Python 3 the legacy ``base64.decode``
    # (removed in 3.9) expects binary file objects -- confirm this path
    # is exercised under Py3.
    encoded_f = StringIO(instr)

    with salt.utils.files.fopen(outfile, 'wb') as f:
        base64.decode(encoded_f, f)

    return True
def md5_digest(instr):
    '''
    Generate an md5 hash of a given string

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.md5_digest 'get salted'
    '''
    # Thin wrapper; the actual work happens in salt.utils.hashutils.
    return salt.utils.hashutils.md5_digest(instr)
def sha256_digest(instr):
    '''
    Generate an sha256 hash of a given string

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.sha256_digest 'get salted'
    '''
    # Thin wrapper; the actual work happens in salt.utils.hashutils.
    return salt.utils.hashutils.sha256_digest(instr)
def sha512_digest(instr):
    '''
    Generate an sha512 hash of a given string

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.sha512_digest 'get salted'
    '''
    # Thin wrapper; the actual work happens in salt.utils.hashutils.
    return salt.utils.hashutils.sha512_digest(instr)
def hmac_signature(string, shared_secret, challenge_hmac):
    '''
    Verify a challenging hmac signature against a string / shared-secret

    .. versionadded:: 2014.7.0

    Returns a boolean if the verification succeeded or failed.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.hmac_signature 'get salted' 'shared secret' 'eBWf9bstXg+NiP5AOwppB5HMvZiYMPzEM9W5YMm/AmQ='
    '''
    # Thin wrapper; the actual verification happens in salt.utils.hashutils.
    return salt.utils.hashutils.hmac_signature(string, shared_secret, challenge_hmac)
def github_signature(string, shared_secret, challenge_hmac):
    '''
    Verify a challenging hmac signature against a string / shared-secret for
    github webhooks.

    .. versionadded:: 2017.7.0

    Returns a boolean if the verification succeeded or failed.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.github_signature '{"ref":....} ' 'shared secret' 'sha1=bc6550fc290acf5b42283fa8deaf55cea0f8c206'
    '''
    # challenge_hmac is of the form "<hashtype>=<hexdigest>".
    hashtype, challenge = challenge_hmac.split('=')
    # hmac.new requires bytes.  (The previous guard ``if six.text_type:``
    # was always true, so the conversion was effectively unconditional.)
    msg = salt.utils.stringutils.to_bytes(string)
    key = salt.utils.stringutils.to_bytes(shared_secret)
    hmac_hash = hmac.new(key, msg, getattr(hashlib, hashtype))
    # compare_digest defends the untrusted webhook input against timing
    # attacks on the signature comparison.
    return hmac.compare_digest(hmac_hash.hexdigest(), challenge)
|
saltstack/salt
|
salt/modules/hashutil.py
|
digest_file
|
python
|
def digest_file(infile, checksum='md5'):
'''
Return a checksum digest for a file
infile
A file path
checksum : ``md5``
The hashing algorithm to use to generate checksums. Wraps the
:py:func:`hashutil.digest <salt.modules.hashutil.digest>` execution
function.
CLI Example:
.. code-block:: bash
salt '*' hashutil.digest_file /path/to/file
'''
if not __salt__['file.file_exists'](infile):
raise salt.exceptions.CommandExecutionError(
"File path '{0}' not found.".format(infile))
with salt.utils.files.fopen(infile, 'rb') as f:
file_hash = __salt__['hashutil.digest'](f.read(), checksum)
return file_hash
|
Return a checksum digest for a file
infile
A file path
checksum : ``md5``
The hashing algorithm to use to generate checksums. Wraps the
:py:func:`hashutil.digest <salt.modules.hashutil.digest>` execution
function.
CLI Example:
.. code-block:: bash
salt '*' hashutil.digest_file /path/to/file
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/hashutil.py#L56-L80
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n"
] |
# encoding: utf-8
'''
A collection of hashing and encoding functions
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import base64
import hashlib
import hmac
# Import Salt libs
import salt.exceptions
from salt.ext import six
import salt.utils.files
import salt.utils.hashutils
import salt.utils.stringutils
if six.PY2:
from StringIO import StringIO
BytesIO = StringIO
elif six.PY3:
from io import BytesIO, StringIO
def digest(instr, checksum='md5'):
    '''
    Return a checksum digest for a string

    instr
        A string

    checksum : ``md5``
        The hashing algorithm to use to generate checksums. Valid options: md5,
        sha256, sha512.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.digest 'get salted'
    '''
    # Dispatch table mapping the supported algorithm names to the
    # loader-provided digest functions.
    hashing_funcs = {
        'md5': __salt__['hashutil.md5_digest'],
        'sha256': __salt__['hashutil.sha256_digest'],
        'sha512': __salt__['hashutil.sha512_digest'],
    }
    hash_func = hashing_funcs.get(checksum)
    if hash_func is None:
        raise salt.exceptions.CommandExecutionError(
            "Hash func '{0}' is not supported.".format(checksum))
    return hash_func(instr)
def base64_b64encode(instr):
'''
Encode a string as base64 using the "modern" Python interface.
Among other possible differences, the "modern" encoder does not include
newline ('\\n') characters in the encoded output.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_b64encode 'get salted'
'''
return salt.utils.hashutils.base64_b64encode(instr)
def base64_b64decode(instr):
'''
Decode a base64-encoded string using the "modern" Python interface
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_b64decode 'Z2V0IHNhbHRlZA=='
'''
return salt.utils.hashutils.base64_b64decode(instr)
def base64_encodestring(instr):
'''
Encode a string as base64 using the "legacy" Python interface.
Among other possible differences, the "legacy" encoder includes
a newline ('\\n') character after every 76 characters and always
at the end of the encoded string.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_encodestring 'get salted'
'''
return salt.utils.hashutils.base64_encodestring(instr)
def base64_encodefile(fname):
'''
Read a file from the file system and return as a base64 encoded string
.. versionadded:: 2016.3.0
Pillar example:
.. code-block:: yaml
path:
to:
data: |
{{ salt.hashutil.base64_encodefile('/path/to/binary_file') | indent(6) }}
The :py:func:`file.decode <salt.states.file.decode>` state function can be
used to decode this data and write it to disk.
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_encodefile /path/to/binary_file
'''
encoded_f = BytesIO()
with salt.utils.files.fopen(fname, 'rb') as f:
base64.encode(f, encoded_f)
encoded_f.seek(0)
return salt.utils.stringutils.to_str(encoded_f.read())
def base64_decodestring(instr):
'''
Decode a base64-encoded string using the "legacy" Python interface
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_decodestring instr='Z2V0IHNhbHRlZAo='
'''
return salt.utils.hashutils.base64_decodestring(instr)
def base64_decodefile(instr, outfile):
r'''
Decode a base64-encoded string and write the result to a file
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_decodefile instr='Z2V0IHNhbHRlZAo=' outfile='/path/to/binary_file'
'''
encoded_f = StringIO(instr)
with salt.utils.files.fopen(outfile, 'wb') as f:
base64.decode(encoded_f, f)
return True
def md5_digest(instr):
'''
Generate an md5 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.md5_digest 'get salted'
'''
return salt.utils.hashutils.md5_digest(instr)
def sha256_digest(instr):
'''
Generate an sha256 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.sha256_digest 'get salted'
'''
return salt.utils.hashutils.sha256_digest(instr)
def sha512_digest(instr):
'''
Generate an sha512 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.sha512_digest 'get salted'
'''
return salt.utils.hashutils.sha512_digest(instr)
def hmac_signature(string, shared_secret, challenge_hmac):
'''
Verify a challenging hmac signature against a string / shared-secret
.. versionadded:: 2014.7.0
Returns a boolean if the verification succeeded or failed.
CLI Example:
.. code-block:: bash
salt '*' hashutil.hmac_signature 'get salted' 'shared secret' 'eBWf9bstXg+NiP5AOwppB5HMvZiYMPzEM9W5YMm/AmQ='
'''
return salt.utils.hashutils.hmac_signature(string, shared_secret, challenge_hmac)
def github_signature(string, shared_secret, challenge_hmac):
'''
Verify a challenging hmac signature against a string / shared-secret for
github webhooks.
.. versionadded:: 2017.7.0
Returns a boolean if the verification succeeded or failed.
CLI Example:
.. code-block:: bash
salt '*' hashutil.github_signature '{"ref":....} ' 'shared secret' 'sha1=bc6550fc290acf5b42283fa8deaf55cea0f8c206'
'''
msg = string
key = shared_secret
hashtype, challenge = challenge_hmac.split('=')
if six.text_type:
msg = salt.utils.stringutils.to_bytes(msg)
key = salt.utils.stringutils.to_bytes(key)
hmac_hash = hmac.new(key, msg, getattr(hashlib, hashtype))
return hmac_hash.hexdigest() == challenge
|
saltstack/salt
|
salt/modules/hashutil.py
|
base64_encodefile
|
python
|
def base64_encodefile(fname):
'''
Read a file from the file system and return as a base64 encoded string
.. versionadded:: 2016.3.0
Pillar example:
.. code-block:: yaml
path:
to:
data: |
{{ salt.hashutil.base64_encodefile('/path/to/binary_file') | indent(6) }}
The :py:func:`file.decode <salt.states.file.decode>` state function can be
used to decode this data and write it to disk.
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_encodefile /path/to/binary_file
'''
encoded_f = BytesIO()
with salt.utils.files.fopen(fname, 'rb') as f:
base64.encode(f, encoded_f)
encoded_f.seek(0)
return salt.utils.stringutils.to_str(encoded_f.read())
|
Read a file from the file system and return as a base64 encoded string
.. versionadded:: 2016.3.0
Pillar example:
.. code-block:: yaml
path:
to:
data: |
{{ salt.hashutil.base64_encodefile('/path/to/binary_file') | indent(6) }}
The :py:func:`file.decode <salt.states.file.decode>` state function can be
used to decode this data and write it to disk.
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_encodefile /path/to/binary_file
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/hashutil.py#L135-L165
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def to_str(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str, bytes, bytearray, or unicode (py2), return str\n '''\n def _normalize(s):\n try:\n return unicodedata.normalize('NFC', s) if normalize else s\n except TypeError:\n return s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n # This shouldn't be six.string_types because if we're on PY2 and we already\n # have a string, we should just return it.\n if isinstance(s, str):\n return _normalize(s)\n\n exc = None\n if six.PY3:\n if isinstance(s, (bytes, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytes, or bytearray not {}'.format(type(s)))\n else:\n if isinstance(s, bytearray):\n return str(s) # future lint: disable=blacklisted-function\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n for enc in encoding:\n try:\n return _normalize(s).encode(enc, errors)\n except UnicodeEncodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytearray, or unicode')\n"
] |
# encoding: utf-8
'''
A collection of hashing and encoding functions
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import base64
import hashlib
import hmac
# Import Salt libs
import salt.exceptions
from salt.ext import six
import salt.utils.files
import salt.utils.hashutils
import salt.utils.stringutils
if six.PY2:
from StringIO import StringIO
BytesIO = StringIO
elif six.PY3:
from io import BytesIO, StringIO
def digest(instr, checksum='md5'):
'''
Return a checksum digest for a string
instr
A string
checksum : ``md5``
The hashing algorithm to use to generate checksums. Valid options: md5,
sha256, sha512.
CLI Example:
.. code-block:: bash
salt '*' hashutil.digest 'get salted'
'''
hashing_funcs = {
'md5': __salt__['hashutil.md5_digest'],
'sha256': __salt__['hashutil.sha256_digest'],
'sha512': __salt__['hashutil.sha512_digest'],
}
hash_func = hashing_funcs.get(checksum)
if hash_func is None:
raise salt.exceptions.CommandExecutionError(
"Hash func '{0}' is not supported.".format(checksum))
return hash_func(instr)
def digest_file(infile, checksum='md5'):
'''
Return a checksum digest for a file
infile
A file path
checksum : ``md5``
The hashing algorithm to use to generate checksums. Wraps the
:py:func:`hashutil.digest <salt.modules.hashutil.digest>` execution
function.
CLI Example:
.. code-block:: bash
salt '*' hashutil.digest_file /path/to/file
'''
if not __salt__['file.file_exists'](infile):
raise salt.exceptions.CommandExecutionError(
"File path '{0}' not found.".format(infile))
with salt.utils.files.fopen(infile, 'rb') as f:
file_hash = __salt__['hashutil.digest'](f.read(), checksum)
return file_hash
def base64_b64encode(instr):
'''
Encode a string as base64 using the "modern" Python interface.
Among other possible differences, the "modern" encoder does not include
newline ('\\n') characters in the encoded output.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_b64encode 'get salted'
'''
return salt.utils.hashutils.base64_b64encode(instr)
def base64_b64decode(instr):
'''
Decode a base64-encoded string using the "modern" Python interface
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_b64decode 'Z2V0IHNhbHRlZA=='
'''
return salt.utils.hashutils.base64_b64decode(instr)
def base64_encodestring(instr):
'''
Encode a string as base64 using the "legacy" Python interface.
Among other possible differences, the "legacy" encoder includes
a newline ('\\n') character after every 76 characters and always
at the end of the encoded string.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_encodestring 'get salted'
'''
return salt.utils.hashutils.base64_encodestring(instr)
def base64_decodestring(instr):
'''
Decode a base64-encoded string using the "legacy" Python interface
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_decodestring instr='Z2V0IHNhbHRlZAo='
'''
return salt.utils.hashutils.base64_decodestring(instr)
def base64_decodefile(instr, outfile):
r'''
Decode a base64-encoded string and write the result to a file
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_decodefile instr='Z2V0IHNhbHRlZAo=' outfile='/path/to/binary_file'
'''
encoded_f = StringIO(instr)
with salt.utils.files.fopen(outfile, 'wb') as f:
base64.decode(encoded_f, f)
return True
def md5_digest(instr):
'''
Generate an md5 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.md5_digest 'get salted'
'''
return salt.utils.hashutils.md5_digest(instr)
def sha256_digest(instr):
'''
Generate an sha256 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.sha256_digest 'get salted'
'''
return salt.utils.hashutils.sha256_digest(instr)
def sha512_digest(instr):
'''
Generate an sha512 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.sha512_digest 'get salted'
'''
return salt.utils.hashutils.sha512_digest(instr)
def hmac_signature(string, shared_secret, challenge_hmac):
'''
Verify a challenging hmac signature against a string / shared-secret
.. versionadded:: 2014.7.0
Returns a boolean if the verification succeeded or failed.
CLI Example:
.. code-block:: bash
salt '*' hashutil.hmac_signature 'get salted' 'shared secret' 'eBWf9bstXg+NiP5AOwppB5HMvZiYMPzEM9W5YMm/AmQ='
'''
return salt.utils.hashutils.hmac_signature(string, shared_secret, challenge_hmac)
def github_signature(string, shared_secret, challenge_hmac):
'''
Verify a challenging hmac signature against a string / shared-secret for
github webhooks.
.. versionadded:: 2017.7.0
Returns a boolean if the verification succeeded or failed.
CLI Example:
.. code-block:: bash
salt '*' hashutil.github_signature '{"ref":....} ' 'shared secret' 'sha1=bc6550fc290acf5b42283fa8deaf55cea0f8c206'
'''
msg = string
key = shared_secret
hashtype, challenge = challenge_hmac.split('=')
if six.text_type:
msg = salt.utils.stringutils.to_bytes(msg)
key = salt.utils.stringutils.to_bytes(key)
hmac_hash = hmac.new(key, msg, getattr(hashlib, hashtype))
return hmac_hash.hexdigest() == challenge
|
saltstack/salt
|
salt/modules/hashutil.py
|
base64_decodefile
|
python
|
def base64_decodefile(instr, outfile):
r'''
Decode a base64-encoded string and write the result to a file
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_decodefile instr='Z2V0IHNhbHRlZAo=' outfile='/path/to/binary_file'
'''
encoded_f = StringIO(instr)
with salt.utils.files.fopen(outfile, 'wb') as f:
base64.decode(encoded_f, f)
return True
|
r'''
Decode a base64-encoded string and write the result to a file
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_decodefile instr='Z2V0IHNhbHRlZAo=' outfile='/path/to/binary_file'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/hashutil.py#L184-L201
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n"
] |
# encoding: utf-8
'''
A collection of hashing and encoding functions
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import base64
import hashlib
import hmac
# Import Salt libs
import salt.exceptions
from salt.ext import six
import salt.utils.files
import salt.utils.hashutils
import salt.utils.stringutils
if six.PY2:
from StringIO import StringIO
BytesIO = StringIO
elif six.PY3:
from io import BytesIO, StringIO
def digest(instr, checksum='md5'):
'''
Return a checksum digest for a string
instr
A string
checksum : ``md5``
The hashing algorithm to use to generate checksums. Valid options: md5,
sha256, sha512.
CLI Example:
.. code-block:: bash
salt '*' hashutil.digest 'get salted'
'''
hashing_funcs = {
'md5': __salt__['hashutil.md5_digest'],
'sha256': __salt__['hashutil.sha256_digest'],
'sha512': __salt__['hashutil.sha512_digest'],
}
hash_func = hashing_funcs.get(checksum)
if hash_func is None:
raise salt.exceptions.CommandExecutionError(
"Hash func '{0}' is not supported.".format(checksum))
return hash_func(instr)
def digest_file(infile, checksum='md5'):
'''
Return a checksum digest for a file
infile
A file path
checksum : ``md5``
The hashing algorithm to use to generate checksums. Wraps the
:py:func:`hashutil.digest <salt.modules.hashutil.digest>` execution
function.
CLI Example:
.. code-block:: bash
salt '*' hashutil.digest_file /path/to/file
'''
if not __salt__['file.file_exists'](infile):
raise salt.exceptions.CommandExecutionError(
"File path '{0}' not found.".format(infile))
with salt.utils.files.fopen(infile, 'rb') as f:
file_hash = __salt__['hashutil.digest'](f.read(), checksum)
return file_hash
def base64_b64encode(instr):
'''
Encode a string as base64 using the "modern" Python interface.
Among other possible differences, the "modern" encoder does not include
newline ('\\n') characters in the encoded output.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_b64encode 'get salted'
'''
return salt.utils.hashutils.base64_b64encode(instr)
def base64_b64decode(instr):
'''
Decode a base64-encoded string using the "modern" Python interface
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_b64decode 'Z2V0IHNhbHRlZA=='
'''
return salt.utils.hashutils.base64_b64decode(instr)
def base64_encodestring(instr):
'''
Encode a string as base64 using the "legacy" Python interface.
Among other possible differences, the "legacy" encoder includes
a newline ('\\n') character after every 76 characters and always
at the end of the encoded string.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_encodestring 'get salted'
'''
return salt.utils.hashutils.base64_encodestring(instr)
def base64_encodefile(fname):
'''
Read a file from the file system and return as a base64 encoded string
.. versionadded:: 2016.3.0
Pillar example:
.. code-block:: yaml
path:
to:
data: |
{{ salt.hashutil.base64_encodefile('/path/to/binary_file') | indent(6) }}
The :py:func:`file.decode <salt.states.file.decode>` state function can be
used to decode this data and write it to disk.
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_encodefile /path/to/binary_file
'''
encoded_f = BytesIO()
with salt.utils.files.fopen(fname, 'rb') as f:
base64.encode(f, encoded_f)
encoded_f.seek(0)
return salt.utils.stringutils.to_str(encoded_f.read())
def base64_decodestring(instr):
'''
Decode a base64-encoded string using the "legacy" Python interface
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_decodestring instr='Z2V0IHNhbHRlZAo='
'''
return salt.utils.hashutils.base64_decodestring(instr)
def md5_digest(instr):
'''
Generate an md5 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.md5_digest 'get salted'
'''
return salt.utils.hashutils.md5_digest(instr)
def sha256_digest(instr):
'''
Generate an sha256 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.sha256_digest 'get salted'
'''
return salt.utils.hashutils.sha256_digest(instr)
def sha512_digest(instr):
'''
Generate an sha512 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.sha512_digest 'get salted'
'''
return salt.utils.hashutils.sha512_digest(instr)
def hmac_signature(string, shared_secret, challenge_hmac):
'''
Verify a challenging hmac signature against a string / shared-secret
.. versionadded:: 2014.7.0
Returns a boolean if the verification succeeded or failed.
CLI Example:
.. code-block:: bash
salt '*' hashutil.hmac_signature 'get salted' 'shared secret' 'eBWf9bstXg+NiP5AOwppB5HMvZiYMPzEM9W5YMm/AmQ='
'''
return salt.utils.hashutils.hmac_signature(string, shared_secret, challenge_hmac)
def github_signature(string, shared_secret, challenge_hmac):
'''
Verify a challenging hmac signature against a string / shared-secret for
github webhooks.
.. versionadded:: 2017.7.0
Returns a boolean if the verification succeeded or failed.
CLI Example:
.. code-block:: bash
salt '*' hashutil.github_signature '{"ref":....} ' 'shared secret' 'sha1=bc6550fc290acf5b42283fa8deaf55cea0f8c206'
'''
msg = string
key = shared_secret
hashtype, challenge = challenge_hmac.split('=')
if six.text_type:
msg = salt.utils.stringutils.to_bytes(msg)
key = salt.utils.stringutils.to_bytes(key)
hmac_hash = hmac.new(key, msg, getattr(hashlib, hashtype))
return hmac_hash.hexdigest() == challenge
|
saltstack/salt
|
salt/modules/hashutil.py
|
hmac_signature
|
python
|
def hmac_signature(string, shared_secret, challenge_hmac):
'''
Verify a challenging hmac signature against a string / shared-secret
.. versionadded:: 2014.7.0
Returns a boolean if the verification succeeded or failed.
CLI Example:
.. code-block:: bash
salt '*' hashutil.hmac_signature 'get salted' 'shared secret' 'eBWf9bstXg+NiP5AOwppB5HMvZiYMPzEM9W5YMm/AmQ='
'''
return salt.utils.hashutils.hmac_signature(string, shared_secret, challenge_hmac)
|
Verify a challenging hmac signature against a string / shared-secret
.. versionadded:: 2014.7.0
Returns a boolean if the verification succeeded or failed.
CLI Example:
.. code-block:: bash
salt '*' hashutil.hmac_signature 'get salted' 'shared secret' 'eBWf9bstXg+NiP5AOwppB5HMvZiYMPzEM9W5YMm/AmQ='
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/hashutil.py#L249-L263
| null |
# encoding: utf-8
'''
A collection of hashing and encoding functions
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import base64
import hashlib
import hmac
# Import Salt libs
import salt.exceptions
from salt.ext import six
import salt.utils.files
import salt.utils.hashutils
import salt.utils.stringutils
if six.PY2:
from StringIO import StringIO
BytesIO = StringIO
elif six.PY3:
from io import BytesIO, StringIO
def digest(instr, checksum='md5'):
'''
Return a checksum digest for a string
instr
A string
checksum : ``md5``
The hashing algorithm to use to generate checksums. Valid options: md5,
sha256, sha512.
CLI Example:
.. code-block:: bash
salt '*' hashutil.digest 'get salted'
'''
hashing_funcs = {
'md5': __salt__['hashutil.md5_digest'],
'sha256': __salt__['hashutil.sha256_digest'],
'sha512': __salt__['hashutil.sha512_digest'],
}
hash_func = hashing_funcs.get(checksum)
if hash_func is None:
raise salt.exceptions.CommandExecutionError(
"Hash func '{0}' is not supported.".format(checksum))
return hash_func(instr)
def digest_file(infile, checksum='md5'):
'''
Return a checksum digest for a file
infile
A file path
checksum : ``md5``
The hashing algorithm to use to generate checksums. Wraps the
:py:func:`hashutil.digest <salt.modules.hashutil.digest>` execution
function.
CLI Example:
.. code-block:: bash
salt '*' hashutil.digest_file /path/to/file
'''
if not __salt__['file.file_exists'](infile):
raise salt.exceptions.CommandExecutionError(
"File path '{0}' not found.".format(infile))
with salt.utils.files.fopen(infile, 'rb') as f:
file_hash = __salt__['hashutil.digest'](f.read(), checksum)
return file_hash
def base64_b64encode(instr):
'''
Encode a string as base64 using the "modern" Python interface.
Among other possible differences, the "modern" encoder does not include
newline ('\\n') characters in the encoded output.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_b64encode 'get salted'
'''
return salt.utils.hashutils.base64_b64encode(instr)
def base64_b64decode(instr):
'''
Decode a base64-encoded string using the "modern" Python interface
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_b64decode 'Z2V0IHNhbHRlZA=='
'''
return salt.utils.hashutils.base64_b64decode(instr)
def base64_encodestring(instr):
'''
Encode a string as base64 using the "legacy" Python interface.
Among other possible differences, the "legacy" encoder includes
a newline ('\\n') character after every 76 characters and always
at the end of the encoded string.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_encodestring 'get salted'
'''
return salt.utils.hashutils.base64_encodestring(instr)
def base64_encodefile(fname):
'''
Read a file from the file system and return as a base64 encoded string
.. versionadded:: 2016.3.0
Pillar example:
.. code-block:: yaml
path:
to:
data: |
{{ salt.hashutil.base64_encodefile('/path/to/binary_file') | indent(6) }}
The :py:func:`file.decode <salt.states.file.decode>` state function can be
used to decode this data and write it to disk.
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_encodefile /path/to/binary_file
'''
encoded_f = BytesIO()
with salt.utils.files.fopen(fname, 'rb') as f:
base64.encode(f, encoded_f)
encoded_f.seek(0)
return salt.utils.stringutils.to_str(encoded_f.read())
def base64_decodestring(instr):
'''
Decode a base64-encoded string using the "legacy" Python interface
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_decodestring instr='Z2V0IHNhbHRlZAo='
'''
return salt.utils.hashutils.base64_decodestring(instr)
def base64_decodefile(instr, outfile):
r'''
Decode a base64-encoded string and write the result to a file
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_decodefile instr='Z2V0IHNhbHRlZAo=' outfile='/path/to/binary_file'
'''
encoded_f = StringIO(instr)
with salt.utils.files.fopen(outfile, 'wb') as f:
base64.decode(encoded_f, f)
return True
def md5_digest(instr):
'''
Generate an md5 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.md5_digest 'get salted'
'''
return salt.utils.hashutils.md5_digest(instr)
def sha256_digest(instr):
'''
Generate an sha256 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.sha256_digest 'get salted'
'''
return salt.utils.hashutils.sha256_digest(instr)
def sha512_digest(instr):
'''
Generate an sha512 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.sha512_digest 'get salted'
'''
return salt.utils.hashutils.sha512_digest(instr)
def github_signature(string, shared_secret, challenge_hmac):
'''
Verify a challenging hmac signature against a string / shared-secret for
github webhooks.
.. versionadded:: 2017.7.0
Returns a boolean if the verification succeeded or failed.
CLI Example:
.. code-block:: bash
salt '*' hashutil.github_signature '{"ref":....} ' 'shared secret' 'sha1=bc6550fc290acf5b42283fa8deaf55cea0f8c206'
'''
msg = string
key = shared_secret
hashtype, challenge = challenge_hmac.split('=')
if six.text_type:
msg = salt.utils.stringutils.to_bytes(msg)
key = salt.utils.stringutils.to_bytes(key)
hmac_hash = hmac.new(key, msg, getattr(hashlib, hashtype))
return hmac_hash.hexdigest() == challenge
|
saltstack/salt
|
salt/modules/hashutil.py
|
github_signature
|
python
|
def github_signature(string, shared_secret, challenge_hmac):
'''
Verify a challenging hmac signature against a string / shared-secret for
github webhooks.
.. versionadded:: 2017.7.0
Returns a boolean if the verification succeeded or failed.
CLI Example:
.. code-block:: bash
salt '*' hashutil.github_signature '{"ref":....} ' 'shared secret' 'sha1=bc6550fc290acf5b42283fa8deaf55cea0f8c206'
'''
msg = string
key = shared_secret
hashtype, challenge = challenge_hmac.split('=')
if six.text_type:
msg = salt.utils.stringutils.to_bytes(msg)
key = salt.utils.stringutils.to_bytes(key)
hmac_hash = hmac.new(key, msg, getattr(hashlib, hashtype))
return hmac_hash.hexdigest() == challenge
|
Verify a challenging hmac signature against a string / shared-secret for
github webhooks.
.. versionadded:: 2017.7.0
Returns a boolean if the verification succeeded or failed.
CLI Example:
.. code-block:: bash
salt '*' hashutil.github_signature '{"ref":....} ' 'shared secret' 'sha1=bc6550fc290acf5b42283fa8deaf55cea0f8c206'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/hashutil.py#L266-L288
| null |
# encoding: utf-8
'''
A collection of hashing and encoding functions
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import base64
import hashlib
import hmac
# Import Salt libs
import salt.exceptions
from salt.ext import six
import salt.utils.files
import salt.utils.hashutils
import salt.utils.stringutils
if six.PY2:
from StringIO import StringIO
BytesIO = StringIO
elif six.PY3:
from io import BytesIO, StringIO
def digest(instr, checksum='md5'):
'''
Return a checksum digest for a string
instr
A string
checksum : ``md5``
The hashing algorithm to use to generate checksums. Valid options: md5,
sha256, sha512.
CLI Example:
.. code-block:: bash
salt '*' hashutil.digest 'get salted'
'''
hashing_funcs = {
'md5': __salt__['hashutil.md5_digest'],
'sha256': __salt__['hashutil.sha256_digest'],
'sha512': __salt__['hashutil.sha512_digest'],
}
hash_func = hashing_funcs.get(checksum)
if hash_func is None:
raise salt.exceptions.CommandExecutionError(
"Hash func '{0}' is not supported.".format(checksum))
return hash_func(instr)
def digest_file(infile, checksum='md5'):
    '''
    Return a checksum digest for a file

    infile
        A file path

    checksum : ``md5``
        The hashing algorithm to use to generate checksums. Wraps the
        :py:func:`hashutil.digest <salt.modules.hashutil.digest>` execution
        function.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.digest_file /path/to/file
    '''
    if __salt__['file.file_exists'](infile):
        with salt.utils.files.fopen(infile, 'rb') as fh_:
            contents = fh_.read()
        return __salt__['hashutil.digest'](contents, checksum)
    raise salt.exceptions.CommandExecutionError(
        "File path '{0}' not found.".format(infile))
def base64_b64encode(instr):
'''
Encode a string as base64 using the "modern" Python interface.
Among other possible differences, the "modern" encoder does not include
newline ('\\n') characters in the encoded output.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_b64encode 'get salted'
'''
return salt.utils.hashutils.base64_b64encode(instr)
def base64_b64decode(instr):
'''
Decode a base64-encoded string using the "modern" Python interface
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_b64decode 'Z2V0IHNhbHRlZA=='
'''
return salt.utils.hashutils.base64_b64decode(instr)
def base64_encodestring(instr):
'''
Encode a string as base64 using the "legacy" Python interface.
Among other possible differences, the "legacy" encoder includes
a newline ('\\n') character after every 76 characters and always
at the end of the encoded string.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_encodestring 'get salted'
'''
return salt.utils.hashutils.base64_encodestring(instr)
def base64_encodefile(fname):
    '''
    Read a file from the file system and return as a base64 encoded string

    .. versionadded:: 2016.3.0

    Pillar example:

    .. code-block:: yaml

        path:
          to:
            data: |
              {{ salt.hashutil.base64_encodefile('/path/to/binary_file') | indent(6) }}

    The :py:func:`file.decode <salt.states.file.decode>` state function can be
    used to decode this data and write it to disk.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_encodefile /path/to/binary_file
    '''
    buf = BytesIO()
    with salt.utils.files.fopen(fname, 'rb') as src:
        # Legacy streaming encoder: reads from src, writes base64 into buf.
        base64.encode(src, buf)
    return salt.utils.stringutils.to_str(buf.getvalue())
def base64_decodestring(instr):
'''
Decode a base64-encoded string using the "legacy" Python interface
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_decodestring instr='Z2V0IHNhbHRlZAo='
'''
return salt.utils.hashutils.base64_decodestring(instr)
def base64_decodefile(instr, outfile):
    r'''
    Decode a base64-encoded string and write the result to a file

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_decodefile instr='Z2V0IHNhbHRlZAo=' outfile='/path/to/binary_file'
    '''
    with salt.utils.files.fopen(outfile, 'wb') as dst:
        # Legacy streaming decoder: reads base64 text, writes raw bytes to dst.
        base64.decode(StringIO(instr), dst)
    return True
def md5_digest(instr):
'''
Generate an md5 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.md5_digest 'get salted'
'''
return salt.utils.hashutils.md5_digest(instr)
def sha256_digest(instr):
'''
Generate an sha256 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.sha256_digest 'get salted'
'''
return salt.utils.hashutils.sha256_digest(instr)
def sha512_digest(instr):
'''
Generate an sha512 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.sha512_digest 'get salted'
'''
return salt.utils.hashutils.sha512_digest(instr)
def hmac_signature(string, shared_secret, challenge_hmac):
    '''
    Verify a challenging hmac signature against a string / shared-secret

    .. versionadded:: 2014.7.0

    string
        The message whose signature is being verified.

    shared_secret
        The secret key shared with the signer.

    challenge_hmac
        The signature value to compare against.

    Returns a boolean if the verification succeeded or failed.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.hmac_signature 'get salted' 'shared secret' 'eBWf9bstXg+NiP5AOwppB5HMvZiYMPzEM9W5YMm/AmQ='
    '''
    # Thin wrapper; the actual HMAC computation and comparison live in
    # salt.utils.hashutils.hmac_signature.
    return salt.utils.hashutils.hmac_signature(string, shared_secret, challenge_hmac)
|
saltstack/salt
|
salt/thorium/wheel.py
|
cmd
|
python
|
def cmd(
        name,
        fun=None,
        arg=(),
        **kwargs):
    '''
    Fire a wheel function asynchronously via the master's WheelClient.

    If ``fun`` is not given, the state ``name`` is used as the wheel
    function to call.

    USAGE:

    .. code-block:: yaml

        run_cloud:
          wheel.cmd:
            - fun: key.delete
            - match: minion_id
    '''
    if fun is None:
        fun = name
    # Fire-and-forget: cmd_async queues the job; we do not wait for a result.
    salt.wheel.WheelClient(__opts__).cmd_async({
        'fun': fun,
        'arg': arg,
        'kwargs': kwargs,
    })
    return {'name': name,
            'changes': {},
            'comment': '',
            'result': True}
|
Execute a runner asynchronous:
USAGE:
.. code-block:: yaml
run_cloud:
wheel.cmd:
- fun: key.delete
- match: minion_id
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/thorium/wheel.py#L11-L39
|
[
"def cmd_async(self, low):\n '''\n Execute a function asynchronously; eauth is respected\n\n This function requires that :conf_master:`external_auth` is configured\n and the user is authorized\n\n .. code-block:: python\n\n >>> wheel.cmd_async({\n 'fun': 'key.finger',\n 'match': 'jerry',\n 'eauth': 'auto',\n 'username': 'saltdev',\n 'password': 'saltdev',\n })\n {'jid': '20131219224744416681', 'tag': 'salt/wheel/20131219224744416681'}\n '''\n fun = low.pop('fun')\n return self.asynchronous(fun, low)\n"
] |
# -*- coding: utf-8 -*-
'''
React by calling asynchronous runners
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# import salt libs
import salt.wheel
|
saltstack/salt
|
salt/modules/inspector.py
|
_
|
python
|
def _(module):
    '''
    Load an inspectlib submodule for the lazy loader and inject the
    standard Salt dunders into it.

    :param module: submodule name under ``salt.modules.inspectlib``
    :return: the loaded module object
    '''
    # pylint: disable=E0598
    try:
        # Preferred path: importlib is available on Python 2.7+ and 3+.
        import importlib
        mod = importlib.import_module("salt.modules.inspectlib.{0}".format(module))
    except ImportError:
        # Fallback for interpreters without importlib (Python 2.6).
        pkg = __import__("salt.modules.inspectlib", globals(), locals(),
                         fromlist=[six.text_type(module)])
        mod = getattr(pkg, module)
    # pylint: enable=E0598

    # Hand the loader-injected dunders through to the submodule.
    mod.__grains__ = __grains__
    mod.__pillar__ = __pillar__
    mod.__salt__ = __salt__
    return mod
|
Get inspectlib module for the lazy loader.
:param module:
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspector.py#L45-L68
| null |
# -*- coding: utf-8 -*-
#
# Copyright 2015 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Module for full system inspection.
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import getpass
from salt.modules.inspectlib.exceptions import (InspectorQueryException,
InspectorSnapshotException,
InspectorKiwiProcessorException)
# Import Salt libs
from salt.ext import six
import salt.utils.fsutils
import salt.utils.platform
from salt.exceptions import CommandExecutionError
from salt.exceptions import get_error_message as _get_error_message
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on POSIX-like systems
'''
return not salt.utils.platform.is_windows() and 'inspector'
def inspect(mode='all', priority=19, **kwargs):
'''
Start node inspection and save the data to the database for further query.
Parameters:
* **mode**: Clarify inspection mode: configuration, payload, all (default)
payload
* **filter**: Comma-separated directories to track payload.
* **priority**: (advanced) Set priority of the inspection. Default is low priority.
CLI Example:
.. code-block:: bash
salt '*' inspector.inspect
salt '*' inspector.inspect configuration
salt '*' inspector.inspect payload filter=/opt,/ext/oracle
'''
collector = _("collector")
try:
return collector.Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']))\
.request_snapshot(mode, priority=priority, **kwargs)
except InspectorSnapshotException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def query(*args, **kwargs):
'''
Query the node for specific information.
Parameters:
* **scope**: Specify scope of the query.
* **System**: Return system data.
* **Software**: Return software information.
* **Services**: Return known services.
* **Identity**: Return user accounts information for this system.
accounts
Can be either 'local', 'remote' or 'all' (equal to "local,remote").
Remote accounts cannot be resolved on all systems, but only
those, which supports 'passwd -S -a'.
disabled
True (or False, default) to return only disabled accounts.
* **payload**: Payload scope parameters:
filter
Include only results which path starts from the filter string.
time
Display time in Unix ticks or format according to the configured TZ (default)
Values: ticks, tz (default)
size
Format size. Values: B, KB, MB, GB
type
Include payload type.
Values (comma-separated): directory (or dir), link, file (default)
Example (returns everything): type=directory,link,file
owners
Resolve UID/GID to an actual names or leave them numeric (default).
Values: name (default), id
brief
Return just a list of payload elements, if True. Default: False.
* **all**: Return all information (default).
CLI Example:
.. code-block:: bash
salt '*' inspector.query scope=system
salt '*' inspector.query scope=payload type=file,link filter=/etc size=Kb brief=False
'''
query = _("query")
try:
return query.Query(kwargs.get('scope'), cachedir=__opts__['cachedir'])(*args, **kwargs)
except InspectorQueryException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def build(format='qcow2', path='/tmp/'):
'''
Build an image from a current system description.
The image is a system image can be output in bootable ISO or QCOW2 formats.
Node uses the image building library Kiwi to perform the actual build.
Parameters:
* **format**: Specifies output format: "qcow2" or "iso. Default: `qcow2`.
* **path**: Specifies output path where to store built image. Default: `/tmp`.
CLI Example:
.. code-block:: bash
salt myminion inspector.build
salt myminion inspector.build format=iso path=/opt/builds/
'''
try:
_("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']),
pidfilename='').reuse_snapshot().build(format=format, path=path)
except InspectorKiwiProcessorException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def export(local=False, path="/tmp", format='qcow2'):
'''
Export an image description for Kiwi.
Parameters:
* **local**: Specifies True or False if the export has to be in the local file. Default: False.
* **path**: If `local=True`, then specifies the path where file with the Kiwi description is written.
Default: `/tmp`.
CLI Example:
.. code-block:: bash
salt myminion inspector.export
salt myminion inspector.export format=iso path=/opt/builds/
'''
if getpass.getuser() != 'root':
raise CommandExecutionError('In order to export system, the minion should run as "root".')
try:
description = _("query").Query('all', cachedir=__opts__['cachedir'])()
return _("collector").Inspector().reuse_snapshot().export(description, local=local, path=path, format=format)
except InspectorKiwiProcessorException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def snapshots():
'''
List current description snapshots.
CLI Example:
.. code-block:: bash
salt myminion inspector.snapshots
'''
try:
return _("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile'])).db.list()
except InspectorSnapshotException as err:
raise CommandExecutionError(err)
except Exception as err:
log.error(_get_error_message(err))
raise Exception(err)
def delete(all=False, *databases):
'''
Remove description snapshots from the system.
::parameter: all. Default: False. Remove all snapshots, if set to True.
CLI example:
.. code-block:: bash
salt myminion inspector.delete <ID> <ID1> <ID2>..
salt myminion inspector.delete all=True
'''
if not all and not databases:
raise CommandExecutionError('At least one database ID required.')
try:
ret = dict()
inspector = _("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']))
for dbid in all and inspector.db.list() or databases:
ret[dbid] = inspector.db._db.purge(six.text_type(dbid))
return ret
except InspectorSnapshotException as err:
raise CommandExecutionError(err)
except Exception as err:
log.error(_get_error_message(err))
raise Exception(err)
|
saltstack/salt
|
salt/modules/inspector.py
|
inspect
|
python
|
def inspect(mode='all', priority=19, **kwargs):
    '''
    Start node inspection and save the data to the database for further query.

    Parameters:

    * **mode**: Clarify inspection mode: configuration, payload, all (default)

      payload
        * **filter**: Comma-separated directories to track payload.

    * **priority**: (advanced) Set priority of the inspection. Default is low priority.

    CLI Example:

    .. code-block:: bash

        salt '*' inspector.inspect
        salt '*' inspector.inspect configuration
        salt '*' inspector.inspect payload filter=/opt,/ext/oracle
    '''
    # Loader failures propagate as-is; only snapshot errors are translated below.
    collector = _("collector")
    inspector_kwargs = {
        'cachedir': __opts__['cachedir'],
        'piddir': os.path.dirname(__opts__['pidfile']),
    }
    try:
        return collector.Inspector(**inspector_kwargs).request_snapshot(
            mode, priority=priority, **kwargs)
    except InspectorSnapshotException as exc:
        raise CommandExecutionError(exc)
    except Exception as exc:
        log.error(_get_error_message(exc))
        raise Exception(exc)
|
Start node inspection and save the data to the database for further query.
Parameters:
* **mode**: Clarify inspection mode: configuration, payload, all (default)
payload
* **filter**: Comma-separated directories to track payload.
* **priority**: (advanced) Set priority of the inspection. Default is low priority.
CLI Example:
.. code-block:: bash
salt '*' inspector.inspect
salt '*' inspector.inspect configuration
salt '*' inspector.inspect payload filter=/opt,/ext/oracle
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspector.py#L71-L103
|
[
"def _(module):\n '''\n Get inspectlib module for the lazy loader.\n\n :param module:\n :return:\n '''\n\n mod = None\n # pylint: disable=E0598\n try:\n # importlib is in Python 2.7+ and 3+\n import importlib\n mod = importlib.import_module(\"salt.modules.inspectlib.{0}\".format(module))\n except ImportError:\n # No importlib around (2.6)\n mod = getattr(__import__(\"salt.modules.inspectlib\", globals(), locals(), fromlist=[six.text_type(module)]), module)\n # pylint: enable=E0598\n\n mod.__grains__ = __grains__\n mod.__pillar__ = __pillar__\n mod.__salt__ = __salt__\n\n return mod\n",
"def get_error_message(error):\n '''\n Get human readable message from Python Exception\n '''\n return error.args[0] if error.args else ''\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright 2015 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Module for full system inspection.
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import getpass
from salt.modules.inspectlib.exceptions import (InspectorQueryException,
InspectorSnapshotException,
InspectorKiwiProcessorException)
# Import Salt libs
from salt.ext import six
import salt.utils.fsutils
import salt.utils.platform
from salt.exceptions import CommandExecutionError
from salt.exceptions import get_error_message as _get_error_message
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on POSIX-like systems
'''
return not salt.utils.platform.is_windows() and 'inspector'
def _(module):
'''
Get inspectlib module for the lazy loader.
:param module:
:return:
'''
mod = None
# pylint: disable=E0598
try:
# importlib is in Python 2.7+ and 3+
import importlib
mod = importlib.import_module("salt.modules.inspectlib.{0}".format(module))
except ImportError:
# No importlib around (2.6)
mod = getattr(__import__("salt.modules.inspectlib", globals(), locals(), fromlist=[six.text_type(module)]), module)
# pylint: enable=E0598
mod.__grains__ = __grains__
mod.__pillar__ = __pillar__
mod.__salt__ = __salt__
return mod
def query(*args, **kwargs):
'''
Query the node for specific information.
Parameters:
* **scope**: Specify scope of the query.
* **System**: Return system data.
* **Software**: Return software information.
* **Services**: Return known services.
* **Identity**: Return user accounts information for this system.
accounts
Can be either 'local', 'remote' or 'all' (equal to "local,remote").
Remote accounts cannot be resolved on all systems, but only
those, which supports 'passwd -S -a'.
disabled
True (or False, default) to return only disabled accounts.
* **payload**: Payload scope parameters:
filter
Include only results which path starts from the filter string.
time
Display time in Unix ticks or format according to the configured TZ (default)
Values: ticks, tz (default)
size
Format size. Values: B, KB, MB, GB
type
Include payload type.
Values (comma-separated): directory (or dir), link, file (default)
Example (returns everything): type=directory,link,file
owners
Resolve UID/GID to an actual names or leave them numeric (default).
Values: name (default), id
brief
Return just a list of payload elements, if True. Default: False.
* **all**: Return all information (default).
CLI Example:
.. code-block:: bash
salt '*' inspector.query scope=system
salt '*' inspector.query scope=payload type=file,link filter=/etc size=Kb brief=False
'''
query = _("query")
try:
return query.Query(kwargs.get('scope'), cachedir=__opts__['cachedir'])(*args, **kwargs)
except InspectorQueryException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def build(format='qcow2', path='/tmp/'):
'''
Build an image from a current system description.
The image is a system image can be output in bootable ISO or QCOW2 formats.
Node uses the image building library Kiwi to perform the actual build.
Parameters:
* **format**: Specifies output format: "qcow2" or "iso. Default: `qcow2`.
* **path**: Specifies output path where to store built image. Default: `/tmp`.
CLI Example:
.. code-block:: bash
salt myminion inspector.build
salt myminion inspector.build format=iso path=/opt/builds/
'''
try:
_("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']),
pidfilename='').reuse_snapshot().build(format=format, path=path)
except InspectorKiwiProcessorException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def export(local=False, path="/tmp", format='qcow2'):
'''
Export an image description for Kiwi.
Parameters:
* **local**: Specifies True or False if the export has to be in the local file. Default: False.
* **path**: If `local=True`, then specifies the path where file with the Kiwi description is written.
Default: `/tmp`.
CLI Example:
.. code-block:: bash
salt myminion inspector.export
salt myminion inspector.export format=iso path=/opt/builds/
'''
if getpass.getuser() != 'root':
raise CommandExecutionError('In order to export system, the minion should run as "root".')
try:
description = _("query").Query('all', cachedir=__opts__['cachedir'])()
return _("collector").Inspector().reuse_snapshot().export(description, local=local, path=path, format=format)
except InspectorKiwiProcessorException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def snapshots():
'''
List current description snapshots.
CLI Example:
.. code-block:: bash
salt myminion inspector.snapshots
'''
try:
return _("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile'])).db.list()
except InspectorSnapshotException as err:
raise CommandExecutionError(err)
except Exception as err:
log.error(_get_error_message(err))
raise Exception(err)
def delete(all=False, *databases):
'''
Remove description snapshots from the system.
::parameter: all. Default: False. Remove all snapshots, if set to True.
CLI example:
.. code-block:: bash
salt myminion inspector.delete <ID> <ID1> <ID2>..
salt myminion inspector.delete all=True
'''
if not all and not databases:
raise CommandExecutionError('At least one database ID required.')
try:
ret = dict()
inspector = _("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']))
for dbid in all and inspector.db.list() or databases:
ret[dbid] = inspector.db._db.purge(six.text_type(dbid))
return ret
except InspectorSnapshotException as err:
raise CommandExecutionError(err)
except Exception as err:
log.error(_get_error_message(err))
raise Exception(err)
|
saltstack/salt
|
salt/modules/inspector.py
|
query
|
python
|
def query(*args, **kwargs):
    '''
    Query the node for specific information.

    Parameters:

    * **scope**: Specify scope of the query.

       * **System**: Return system data.

       * **Software**: Return software information.

       * **Services**: Return known services.

       * **Identity**: Return user accounts information for this system.

          accounts
            Can be either 'local', 'remote' or 'all' (equal to "local,remote").
            Remote accounts cannot be resolved on all systems, but only
            those, which supports 'passwd -S -a'.

          disabled
            True (or False, default) to return only disabled accounts.

       * **payload**: Payload scope parameters:

          filter
            Include only results which path starts from the filter string.

          time
            Display time in Unix ticks or format according to the configured TZ (default)
            Values: ticks, tz (default)

          size
            Format size. Values: B, KB, MB, GB

          type
            Include payload type.
            Values (comma-separated): directory (or dir), link, file (default)
            Example (returns everything): type=directory,link,file

          owners
            Resolve UID/GID to an actual names or leave them numeric (default).
            Values: name (default), id

          brief
            Return just a list of payload elements, if True. Default: False.

       * **all**: Return all information (default).

    CLI Example:

    .. code-block:: bash

        salt '*' inspector.query scope=system
        salt '*' inspector.query scope=payload type=file,link filter=/etc size=Kb brief=False
    '''
    # Renamed from 'query' so the local module does not shadow this
    # function's own name.
    query_mod = _("query")
    try:
        return query_mod.Query(kwargs.get('scope'),
                               cachedir=__opts__['cachedir'])(*args, **kwargs)
    except InspectorQueryException as ex:
        raise CommandExecutionError(ex)
    except Exception as ex:
        log.error(_get_error_message(ex))
        raise Exception(ex)
|
Query the node for specific information.
Parameters:
* **scope**: Specify scope of the query.
* **System**: Return system data.
* **Software**: Return software information.
* **Services**: Return known services.
* **Identity**: Return user accounts information for this system.
accounts
Can be either 'local', 'remote' or 'all' (equal to "local,remote").
Remote accounts cannot be resolved on all systems, but only
those, which supports 'passwd -S -a'.
disabled
True (or False, default) to return only disabled accounts.
* **payload**: Payload scope parameters:
filter
Include only results which path starts from the filter string.
time
Display time in Unix ticks or format according to the configured TZ (default)
Values: ticks, tz (default)
size
Format size. Values: B, KB, MB, GB
type
Include payload type.
Values (comma-separated): directory (or dir), link, file (default)
Example (returns everything): type=directory,link,file
owners
Resolve UID/GID to an actual names or leave them numeric (default).
Values: name (default), id
brief
Return just a list of payload elements, if True. Default: False.
* **all**: Return all information (default).
CLI Example:
.. code-block:: bash
salt '*' inspector.query scope=system
salt '*' inspector.query scope=payload type=file,link filter=/etc size=Kb brief=False
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspector.py#L106-L168
|
[
"def _(module):\n '''\n Get inspectlib module for the lazy loader.\n\n :param module:\n :return:\n '''\n\n mod = None\n # pylint: disable=E0598\n try:\n # importlib is in Python 2.7+ and 3+\n import importlib\n mod = importlib.import_module(\"salt.modules.inspectlib.{0}\".format(module))\n except ImportError:\n # No importlib around (2.6)\n mod = getattr(__import__(\"salt.modules.inspectlib\", globals(), locals(), fromlist=[six.text_type(module)]), module)\n # pylint: enable=E0598\n\n mod.__grains__ = __grains__\n mod.__pillar__ = __pillar__\n mod.__salt__ = __salt__\n\n return mod\n",
"def get_error_message(error):\n '''\n Get human readable message from Python Exception\n '''\n return error.args[0] if error.args else ''\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright 2015 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Module for full system inspection.
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import getpass
from salt.modules.inspectlib.exceptions import (InspectorQueryException,
InspectorSnapshotException,
InspectorKiwiProcessorException)
# Import Salt libs
from salt.ext import six
import salt.utils.fsutils
import salt.utils.platform
from salt.exceptions import CommandExecutionError
from salt.exceptions import get_error_message as _get_error_message
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on POSIX-like systems
'''
return not salt.utils.platform.is_windows() and 'inspector'
def _(module):
'''
Get inspectlib module for the lazy loader.
:param module:
:return:
'''
mod = None
# pylint: disable=E0598
try:
# importlib is in Python 2.7+ and 3+
import importlib
mod = importlib.import_module("salt.modules.inspectlib.{0}".format(module))
except ImportError:
# No importlib around (2.6)
mod = getattr(__import__("salt.modules.inspectlib", globals(), locals(), fromlist=[six.text_type(module)]), module)
# pylint: enable=E0598
mod.__grains__ = __grains__
mod.__pillar__ = __pillar__
mod.__salt__ = __salt__
return mod
def inspect(mode='all', priority=19, **kwargs):
'''
Start node inspection and save the data to the database for further query.
Parameters:
* **mode**: Clarify inspection mode: configuration, payload, all (default)
payload
* **filter**: Comma-separated directories to track payload.
* **priority**: (advanced) Set priority of the inspection. Default is low priority.
CLI Example:
.. code-block:: bash
salt '*' inspector.inspect
salt '*' inspector.inspect configuration
salt '*' inspector.inspect payload filter=/opt,/ext/oracle
'''
collector = _("collector")
try:
return collector.Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']))\
.request_snapshot(mode, priority=priority, **kwargs)
except InspectorSnapshotException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def build(format='qcow2', path='/tmp/'):
'''
Build an image from a current system description.
The image is a system image can be output in bootable ISO or QCOW2 formats.
Node uses the image building library Kiwi to perform the actual build.
Parameters:
* **format**: Specifies output format: "qcow2" or "iso. Default: `qcow2`.
* **path**: Specifies output path where to store built image. Default: `/tmp`.
CLI Example:
.. code-block:: bash
salt myminion inspector.build
salt myminion inspector.build format=iso path=/opt/builds/
'''
try:
_("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']),
pidfilename='').reuse_snapshot().build(format=format, path=path)
except InspectorKiwiProcessorException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def export(local=False, path="/tmp", format='qcow2'):
'''
Export an image description for Kiwi.
Parameters:
* **local**: Specifies True or False if the export has to be in the local file. Default: False.
* **path**: If `local=True`, then specifies the path where file with the Kiwi description is written.
Default: `/tmp`.
CLI Example:
.. code-block:: bash
salt myminion inspector.export
salt myminion inspector.export format=iso path=/opt/builds/
'''
if getpass.getuser() != 'root':
raise CommandExecutionError('In order to export system, the minion should run as "root".')
try:
description = _("query").Query('all', cachedir=__opts__['cachedir'])()
return _("collector").Inspector().reuse_snapshot().export(description, local=local, path=path, format=format)
except InspectorKiwiProcessorException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def snapshots():
'''
List current description snapshots.
CLI Example:
.. code-block:: bash
salt myminion inspector.snapshots
'''
try:
return _("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile'])).db.list()
except InspectorSnapshotException as err:
raise CommandExecutionError(err)
except Exception as err:
log.error(_get_error_message(err))
raise Exception(err)
def delete(all=False, *databases):
'''
Remove description snapshots from the system.
::parameter: all. Default: False. Remove all snapshots, if set to True.
CLI example:
.. code-block:: bash
salt myminion inspector.delete <ID> <ID1> <ID2>..
salt myminion inspector.delete all=True
'''
if not all and not databases:
raise CommandExecutionError('At least one database ID required.')
try:
ret = dict()
inspector = _("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']))
for dbid in all and inspector.db.list() or databases:
ret[dbid] = inspector.db._db.purge(six.text_type(dbid))
return ret
except InspectorSnapshotException as err:
raise CommandExecutionError(err)
except Exception as err:
log.error(_get_error_message(err))
raise Exception(err)
|
saltstack/salt
|
salt/modules/inspector.py
|
build
|
python
|
def build(format='qcow2', path='/tmp/'):
'''
Build an image from a current system description.
The image is a system image can be output in bootable ISO or QCOW2 formats.
Node uses the image building library Kiwi to perform the actual build.
Parameters:
* **format**: Specifies output format: "qcow2" or "iso. Default: `qcow2`.
* **path**: Specifies output path where to store built image. Default: `/tmp`.
CLI Example:
.. code-block:: bash
salt myminion inspector.build
salt myminion inspector.build format=iso path=/opt/builds/
'''
try:
_("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']),
pidfilename='').reuse_snapshot().build(format=format, path=path)
except InspectorKiwiProcessorException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
|
Build an image from a current system description.
The image is a system image can be output in bootable ISO or QCOW2 formats.
Node uses the image building library Kiwi to perform the actual build.
Parameters:
* **format**: Specifies output format: "qcow2" or "iso. Default: `qcow2`.
* **path**: Specifies output path where to store built image. Default: `/tmp`.
CLI Example:
.. code-block:: bash
salt myminion inspector.build
salt myminion inspector.build format=iso path=/opt/builds/
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspector.py#L171-L198
|
[
"def _(module):\n '''\n Get inspectlib module for the lazy loader.\n\n :param module:\n :return:\n '''\n\n mod = None\n # pylint: disable=E0598\n try:\n # importlib is in Python 2.7+ and 3+\n import importlib\n mod = importlib.import_module(\"salt.modules.inspectlib.{0}\".format(module))\n except ImportError:\n # No importlib around (2.6)\n mod = getattr(__import__(\"salt.modules.inspectlib\", globals(), locals(), fromlist=[six.text_type(module)]), module)\n # pylint: enable=E0598\n\n mod.__grains__ = __grains__\n mod.__pillar__ = __pillar__\n mod.__salt__ = __salt__\n\n return mod\n",
"def get_error_message(error):\n '''\n Get human readable message from Python Exception\n '''\n return error.args[0] if error.args else ''\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright 2015 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Module for full system inspection.
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import getpass
from salt.modules.inspectlib.exceptions import (InspectorQueryException,
InspectorSnapshotException,
InspectorKiwiProcessorException)
# Import Salt libs
from salt.ext import six
import salt.utils.fsutils
import salt.utils.platform
from salt.exceptions import CommandExecutionError
from salt.exceptions import get_error_message as _get_error_message
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on POSIX-like systems
'''
return not salt.utils.platform.is_windows() and 'inspector'
def _(module):
'''
Get inspectlib module for the lazy loader.
:param module:
:return:
'''
mod = None
# pylint: disable=E0598
try:
# importlib is in Python 2.7+ and 3+
import importlib
mod = importlib.import_module("salt.modules.inspectlib.{0}".format(module))
except ImportError:
# No importlib around (2.6)
mod = getattr(__import__("salt.modules.inspectlib", globals(), locals(), fromlist=[six.text_type(module)]), module)
# pylint: enable=E0598
mod.__grains__ = __grains__
mod.__pillar__ = __pillar__
mod.__salt__ = __salt__
return mod
def inspect(mode='all', priority=19, **kwargs):
'''
Start node inspection and save the data to the database for further query.
Parameters:
* **mode**: Clarify inspection mode: configuration, payload, all (default)
payload
* **filter**: Comma-separated directories to track payload.
* **priority**: (advanced) Set priority of the inspection. Default is low priority.
CLI Example:
.. code-block:: bash
salt '*' inspector.inspect
salt '*' inspector.inspect configuration
salt '*' inspector.inspect payload filter=/opt,/ext/oracle
'''
collector = _("collector")
try:
return collector.Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']))\
.request_snapshot(mode, priority=priority, **kwargs)
except InspectorSnapshotException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def query(*args, **kwargs):
'''
Query the node for specific information.
Parameters:
* **scope**: Specify scope of the query.
* **System**: Return system data.
* **Software**: Return software information.
* **Services**: Return known services.
* **Identity**: Return user accounts information for this system.
accounts
Can be either 'local', 'remote' or 'all' (equal to "local,remote").
Remote accounts cannot be resolved on all systems, but only
those, which supports 'passwd -S -a'.
disabled
True (or False, default) to return only disabled accounts.
* **payload**: Payload scope parameters:
filter
Include only results which path starts from the filter string.
time
Display time in Unix ticks or format according to the configured TZ (default)
Values: ticks, tz (default)
size
Format size. Values: B, KB, MB, GB
type
Include payload type.
Values (comma-separated): directory (or dir), link, file (default)
Example (returns everything): type=directory,link,file
owners
Resolve UID/GID to an actual names or leave them numeric (default).
Values: name (default), id
brief
Return just a list of payload elements, if True. Default: False.
* **all**: Return all information (default).
CLI Example:
.. code-block:: bash
salt '*' inspector.query scope=system
salt '*' inspector.query scope=payload type=file,link filter=/etc size=Kb brief=False
'''
query = _("query")
try:
return query.Query(kwargs.get('scope'), cachedir=__opts__['cachedir'])(*args, **kwargs)
except InspectorQueryException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def export(local=False, path="/tmp", format='qcow2'):
'''
Export an image description for Kiwi.
Parameters:
* **local**: Specifies True or False if the export has to be in the local file. Default: False.
* **path**: If `local=True`, then specifies the path where file with the Kiwi description is written.
Default: `/tmp`.
CLI Example:
.. code-block:: bash
salt myminion inspector.export
salt myminion inspector.export format=iso path=/opt/builds/
'''
if getpass.getuser() != 'root':
raise CommandExecutionError('In order to export system, the minion should run as "root".')
try:
description = _("query").Query('all', cachedir=__opts__['cachedir'])()
return _("collector").Inspector().reuse_snapshot().export(description, local=local, path=path, format=format)
except InspectorKiwiProcessorException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def snapshots():
'''
List current description snapshots.
CLI Example:
.. code-block:: bash
salt myminion inspector.snapshots
'''
try:
return _("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile'])).db.list()
except InspectorSnapshotException as err:
raise CommandExecutionError(err)
except Exception as err:
log.error(_get_error_message(err))
raise Exception(err)
def delete(all=False, *databases):
'''
Remove description snapshots from the system.
::parameter: all. Default: False. Remove all snapshots, if set to True.
CLI example:
.. code-block:: bash
salt myminion inspector.delete <ID> <ID1> <ID2>..
salt myminion inspector.delete all=True
'''
if not all and not databases:
raise CommandExecutionError('At least one database ID required.')
try:
ret = dict()
inspector = _("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']))
for dbid in all and inspector.db.list() or databases:
ret[dbid] = inspector.db._db.purge(six.text_type(dbid))
return ret
except InspectorSnapshotException as err:
raise CommandExecutionError(err)
except Exception as err:
log.error(_get_error_message(err))
raise Exception(err)
|
saltstack/salt
|
salt/modules/inspector.py
|
export
|
python
|
def export(local=False, path="/tmp", format='qcow2'):
'''
Export an image description for Kiwi.
Parameters:
* **local**: Specifies True or False if the export has to be in the local file. Default: False.
* **path**: If `local=True`, then specifies the path where file with the Kiwi description is written.
Default: `/tmp`.
CLI Example:
.. code-block:: bash
salt myminion inspector.export
salt myminion inspector.export format=iso path=/opt/builds/
'''
if getpass.getuser() != 'root':
raise CommandExecutionError('In order to export system, the minion should run as "root".')
try:
description = _("query").Query('all', cachedir=__opts__['cachedir'])()
return _("collector").Inspector().reuse_snapshot().export(description, local=local, path=path, format=format)
except InspectorKiwiProcessorException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
|
Export an image description for Kiwi.
Parameters:
* **local**: Specifies True or False if the export has to be in the local file. Default: False.
* **path**: If `local=True`, then specifies the path where file with the Kiwi description is written.
Default: `/tmp`.
CLI Example:
.. code-block:: bash
salt myminion inspector.export
salt myminion inspector.export format=iso path=/opt/builds/
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspector.py#L201-L227
|
[
"def _(module):\n '''\n Get inspectlib module for the lazy loader.\n\n :param module:\n :return:\n '''\n\n mod = None\n # pylint: disable=E0598\n try:\n # importlib is in Python 2.7+ and 3+\n import importlib\n mod = importlib.import_module(\"salt.modules.inspectlib.{0}\".format(module))\n except ImportError:\n # No importlib around (2.6)\n mod = getattr(__import__(\"salt.modules.inspectlib\", globals(), locals(), fromlist=[six.text_type(module)]), module)\n # pylint: enable=E0598\n\n mod.__grains__ = __grains__\n mod.__pillar__ = __pillar__\n mod.__salt__ = __salt__\n\n return mod\n",
"def get_error_message(error):\n '''\n Get human readable message from Python Exception\n '''\n return error.args[0] if error.args else ''\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright 2015 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Module for full system inspection.
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import getpass
from salt.modules.inspectlib.exceptions import (InspectorQueryException,
InspectorSnapshotException,
InspectorKiwiProcessorException)
# Import Salt libs
from salt.ext import six
import salt.utils.fsutils
import salt.utils.platform
from salt.exceptions import CommandExecutionError
from salt.exceptions import get_error_message as _get_error_message
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on POSIX-like systems
'''
return not salt.utils.platform.is_windows() and 'inspector'
def _(module):
'''
Get inspectlib module for the lazy loader.
:param module:
:return:
'''
mod = None
# pylint: disable=E0598
try:
# importlib is in Python 2.7+ and 3+
import importlib
mod = importlib.import_module("salt.modules.inspectlib.{0}".format(module))
except ImportError:
# No importlib around (2.6)
mod = getattr(__import__("salt.modules.inspectlib", globals(), locals(), fromlist=[six.text_type(module)]), module)
# pylint: enable=E0598
mod.__grains__ = __grains__
mod.__pillar__ = __pillar__
mod.__salt__ = __salt__
return mod
def inspect(mode='all', priority=19, **kwargs):
'''
Start node inspection and save the data to the database for further query.
Parameters:
* **mode**: Clarify inspection mode: configuration, payload, all (default)
payload
* **filter**: Comma-separated directories to track payload.
* **priority**: (advanced) Set priority of the inspection. Default is low priority.
CLI Example:
.. code-block:: bash
salt '*' inspector.inspect
salt '*' inspector.inspect configuration
salt '*' inspector.inspect payload filter=/opt,/ext/oracle
'''
collector = _("collector")
try:
return collector.Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']))\
.request_snapshot(mode, priority=priority, **kwargs)
except InspectorSnapshotException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def query(*args, **kwargs):
'''
Query the node for specific information.
Parameters:
* **scope**: Specify scope of the query.
* **System**: Return system data.
* **Software**: Return software information.
* **Services**: Return known services.
* **Identity**: Return user accounts information for this system.
accounts
Can be either 'local', 'remote' or 'all' (equal to "local,remote").
Remote accounts cannot be resolved on all systems, but only
those, which supports 'passwd -S -a'.
disabled
True (or False, default) to return only disabled accounts.
* **payload**: Payload scope parameters:
filter
Include only results which path starts from the filter string.
time
Display time in Unix ticks or format according to the configured TZ (default)
Values: ticks, tz (default)
size
Format size. Values: B, KB, MB, GB
type
Include payload type.
Values (comma-separated): directory (or dir), link, file (default)
Example (returns everything): type=directory,link,file
owners
Resolve UID/GID to an actual names or leave them numeric (default).
Values: name (default), id
brief
Return just a list of payload elements, if True. Default: False.
* **all**: Return all information (default).
CLI Example:
.. code-block:: bash
salt '*' inspector.query scope=system
salt '*' inspector.query scope=payload type=file,link filter=/etc size=Kb brief=False
'''
query = _("query")
try:
return query.Query(kwargs.get('scope'), cachedir=__opts__['cachedir'])(*args, **kwargs)
except InspectorQueryException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def build(format='qcow2', path='/tmp/'):
'''
Build an image from a current system description.
The image is a system image can be output in bootable ISO or QCOW2 formats.
Node uses the image building library Kiwi to perform the actual build.
Parameters:
* **format**: Specifies output format: "qcow2" or "iso. Default: `qcow2`.
* **path**: Specifies output path where to store built image. Default: `/tmp`.
CLI Example:
.. code-block:: bash
salt myminion inspector.build
salt myminion inspector.build format=iso path=/opt/builds/
'''
try:
_("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']),
pidfilename='').reuse_snapshot().build(format=format, path=path)
except InspectorKiwiProcessorException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def snapshots():
'''
List current description snapshots.
CLI Example:
.. code-block:: bash
salt myminion inspector.snapshots
'''
try:
return _("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile'])).db.list()
except InspectorSnapshotException as err:
raise CommandExecutionError(err)
except Exception as err:
log.error(_get_error_message(err))
raise Exception(err)
def delete(all=False, *databases):
'''
Remove description snapshots from the system.
::parameter: all. Default: False. Remove all snapshots, if set to True.
CLI example:
.. code-block:: bash
salt myminion inspector.delete <ID> <ID1> <ID2>..
salt myminion inspector.delete all=True
'''
if not all and not databases:
raise CommandExecutionError('At least one database ID required.')
try:
ret = dict()
inspector = _("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']))
for dbid in all and inspector.db.list() or databases:
ret[dbid] = inspector.db._db.purge(six.text_type(dbid))
return ret
except InspectorSnapshotException as err:
raise CommandExecutionError(err)
except Exception as err:
log.error(_get_error_message(err))
raise Exception(err)
|
saltstack/salt
|
salt/modules/inspector.py
|
snapshots
|
python
|
def snapshots():
'''
List current description snapshots.
CLI Example:
.. code-block:: bash
salt myminion inspector.snapshots
'''
try:
return _("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile'])).db.list()
except InspectorSnapshotException as err:
raise CommandExecutionError(err)
except Exception as err:
log.error(_get_error_message(err))
raise Exception(err)
|
List current description snapshots.
CLI Example:
.. code-block:: bash
salt myminion inspector.snapshots
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspector.py#L230-L247
|
[
"def _(module):\n '''\n Get inspectlib module for the lazy loader.\n\n :param module:\n :return:\n '''\n\n mod = None\n # pylint: disable=E0598\n try:\n # importlib is in Python 2.7+ and 3+\n import importlib\n mod = importlib.import_module(\"salt.modules.inspectlib.{0}\".format(module))\n except ImportError:\n # No importlib around (2.6)\n mod = getattr(__import__(\"salt.modules.inspectlib\", globals(), locals(), fromlist=[six.text_type(module)]), module)\n # pylint: enable=E0598\n\n mod.__grains__ = __grains__\n mod.__pillar__ = __pillar__\n mod.__salt__ = __salt__\n\n return mod\n",
"def get_error_message(error):\n '''\n Get human readable message from Python Exception\n '''\n return error.args[0] if error.args else ''\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright 2015 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Module for full system inspection.
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import getpass
from salt.modules.inspectlib.exceptions import (InspectorQueryException,
InspectorSnapshotException,
InspectorKiwiProcessorException)
# Import Salt libs
from salt.ext import six
import salt.utils.fsutils
import salt.utils.platform
from salt.exceptions import CommandExecutionError
from salt.exceptions import get_error_message as _get_error_message
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on POSIX-like systems
'''
return not salt.utils.platform.is_windows() and 'inspector'
def _(module):
'''
Get inspectlib module for the lazy loader.
:param module:
:return:
'''
mod = None
# pylint: disable=E0598
try:
# importlib is in Python 2.7+ and 3+
import importlib
mod = importlib.import_module("salt.modules.inspectlib.{0}".format(module))
except ImportError:
# No importlib around (2.6)
mod = getattr(__import__("salt.modules.inspectlib", globals(), locals(), fromlist=[six.text_type(module)]), module)
# pylint: enable=E0598
mod.__grains__ = __grains__
mod.__pillar__ = __pillar__
mod.__salt__ = __salt__
return mod
def inspect(mode='all', priority=19, **kwargs):
'''
Start node inspection and save the data to the database for further query.
Parameters:
* **mode**: Clarify inspection mode: configuration, payload, all (default)
payload
* **filter**: Comma-separated directories to track payload.
* **priority**: (advanced) Set priority of the inspection. Default is low priority.
CLI Example:
.. code-block:: bash
salt '*' inspector.inspect
salt '*' inspector.inspect configuration
salt '*' inspector.inspect payload filter=/opt,/ext/oracle
'''
collector = _("collector")
try:
return collector.Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']))\
.request_snapshot(mode, priority=priority, **kwargs)
except InspectorSnapshotException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def query(*args, **kwargs):
'''
Query the node for specific information.
Parameters:
* **scope**: Specify scope of the query.
* **System**: Return system data.
* **Software**: Return software information.
* **Services**: Return known services.
* **Identity**: Return user accounts information for this system.
accounts
Can be either 'local', 'remote' or 'all' (equal to "local,remote").
Remote accounts cannot be resolved on all systems, but only
those, which supports 'passwd -S -a'.
disabled
True (or False, default) to return only disabled accounts.
* **payload**: Payload scope parameters:
filter
Include only results which path starts from the filter string.
time
Display time in Unix ticks or format according to the configured TZ (default)
Values: ticks, tz (default)
size
Format size. Values: B, KB, MB, GB
type
Include payload type.
Values (comma-separated): directory (or dir), link, file (default)
Example (returns everything): type=directory,link,file
owners
Resolve UID/GID to an actual names or leave them numeric (default).
Values: name (default), id
brief
Return just a list of payload elements, if True. Default: False.
* **all**: Return all information (default).
CLI Example:
.. code-block:: bash
salt '*' inspector.query scope=system
salt '*' inspector.query scope=payload type=file,link filter=/etc size=Kb brief=False
'''
query = _("query")
try:
return query.Query(kwargs.get('scope'), cachedir=__opts__['cachedir'])(*args, **kwargs)
except InspectorQueryException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def build(format='qcow2', path='/tmp/'):
'''
Build an image from a current system description.
The image is a system image can be output in bootable ISO or QCOW2 formats.
Node uses the image building library Kiwi to perform the actual build.
Parameters:
* **format**: Specifies output format: "qcow2" or "iso. Default: `qcow2`.
* **path**: Specifies output path where to store built image. Default: `/tmp`.
CLI Example:
.. code-block:: bash
salt myminion inspector.build
salt myminion inspector.build format=iso path=/opt/builds/
'''
try:
_("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']),
pidfilename='').reuse_snapshot().build(format=format, path=path)
except InspectorKiwiProcessorException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def export(local=False, path="/tmp", format='qcow2'):
'''
Export an image description for Kiwi.
Parameters:
* **local**: Specifies True or False if the export has to be in the local file. Default: False.
* **path**: If `local=True`, then specifies the path where file with the Kiwi description is written.
Default: `/tmp`.
CLI Example:
.. code-block:: bash
salt myminion inspector.export
salt myminion inspector.export format=iso path=/opt/builds/
'''
if getpass.getuser() != 'root':
raise CommandExecutionError('In order to export system, the minion should run as "root".')
try:
description = _("query").Query('all', cachedir=__opts__['cachedir'])()
return _("collector").Inspector().reuse_snapshot().export(description, local=local, path=path, format=format)
except InspectorKiwiProcessorException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def delete(all=False, *databases):
'''
Remove description snapshots from the system.
::parameter: all. Default: False. Remove all snapshots, if set to True.
CLI example:
.. code-block:: bash
salt myminion inspector.delete <ID> <ID1> <ID2>..
salt myminion inspector.delete all=True
'''
if not all and not databases:
raise CommandExecutionError('At least one database ID required.')
try:
ret = dict()
inspector = _("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']))
for dbid in all and inspector.db.list() or databases:
ret[dbid] = inspector.db._db.purge(six.text_type(dbid))
return ret
except InspectorSnapshotException as err:
raise CommandExecutionError(err)
except Exception as err:
log.error(_get_error_message(err))
raise Exception(err)
|
saltstack/salt
|
salt/modules/inspector.py
|
delete
|
python
|
def delete(all=False, *databases):
'''
Remove description snapshots from the system.
::parameter: all. Default: False. Remove all snapshots, if set to True.
CLI example:
.. code-block:: bash
salt myminion inspector.delete <ID> <ID1> <ID2>..
salt myminion inspector.delete all=True
'''
if not all and not databases:
raise CommandExecutionError('At least one database ID required.')
try:
ret = dict()
inspector = _("collector").Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']))
for dbid in all and inspector.db.list() or databases:
ret[dbid] = inspector.db._db.purge(six.text_type(dbid))
return ret
except InspectorSnapshotException as err:
raise CommandExecutionError(err)
except Exception as err:
log.error(_get_error_message(err))
raise Exception(err)
|
Remove description snapshots from the system.
::parameter: all. Default: False. Remove all snapshots, if set to True.
CLI example:
.. code-block:: bash
salt myminion inspector.delete <ID> <ID1> <ID2>..
salt myminion inspector.delete all=True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspector.py#L250-L277
|
[
"def _(module):\n '''\n Get inspectlib module for the lazy loader.\n\n :param module:\n :return:\n '''\n\n mod = None\n # pylint: disable=E0598\n try:\n # importlib is in Python 2.7+ and 3+\n import importlib\n mod = importlib.import_module(\"salt.modules.inspectlib.{0}\".format(module))\n except ImportError:\n # No importlib around (2.6)\n mod = getattr(__import__(\"salt.modules.inspectlib\", globals(), locals(), fromlist=[six.text_type(module)]), module)\n # pylint: enable=E0598\n\n mod.__grains__ = __grains__\n mod.__pillar__ = __pillar__\n mod.__salt__ = __salt__\n\n return mod\n",
"def get_error_message(error):\n '''\n Get human readable message from Python Exception\n '''\n return error.args[0] if error.args else ''\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright 2015 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Module for full system inspection.
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import getpass
from salt.modules.inspectlib.exceptions import (InspectorQueryException,
InspectorSnapshotException,
InspectorKiwiProcessorException)
# Import Salt libs
from salt.ext import six
import salt.utils.fsutils
import salt.utils.platform
from salt.exceptions import CommandExecutionError
from salt.exceptions import get_error_message as _get_error_message
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on POSIX-like systems
'''
return not salt.utils.platform.is_windows() and 'inspector'
def _(module):
'''
Get inspectlib module for the lazy loader.
:param module:
:return:
'''
mod = None
# pylint: disable=E0598
try:
# importlib is in Python 2.7+ and 3+
import importlib
mod = importlib.import_module("salt.modules.inspectlib.{0}".format(module))
except ImportError:
# No importlib around (2.6)
mod = getattr(__import__("salt.modules.inspectlib", globals(), locals(), fromlist=[six.text_type(module)]), module)
# pylint: enable=E0598
mod.__grains__ = __grains__
mod.__pillar__ = __pillar__
mod.__salt__ = __salt__
return mod
def inspect(mode='all', priority=19, **kwargs):
'''
Start node inspection and save the data to the database for further query.
Parameters:
* **mode**: Clarify inspection mode: configuration, payload, all (default)
payload
* **filter**: Comma-separated directories to track payload.
* **priority**: (advanced) Set priority of the inspection. Default is low priority.
CLI Example:
.. code-block:: bash
salt '*' inspector.inspect
salt '*' inspector.inspect configuration
salt '*' inspector.inspect payload filter=/opt,/ext/oracle
'''
collector = _("collector")
try:
return collector.Inspector(cachedir=__opts__['cachedir'],
piddir=os.path.dirname(__opts__['pidfile']))\
.request_snapshot(mode, priority=priority, **kwargs)
except InspectorSnapshotException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def query(*args, **kwargs):
'''
Query the node for specific information.
Parameters:
* **scope**: Specify scope of the query.
* **System**: Return system data.
* **Software**: Return software information.
* **Services**: Return known services.
* **Identity**: Return user accounts information for this system.
accounts
Can be either 'local', 'remote' or 'all' (equal to "local,remote").
Remote accounts cannot be resolved on all systems, but only
those, which supports 'passwd -S -a'.
disabled
True (or False, default) to return only disabled accounts.
* **payload**: Payload scope parameters:
filter
Include only results which path starts from the filter string.
time
Display time in Unix ticks or format according to the configured TZ (default)
Values: ticks, tz (default)
size
Format size. Values: B, KB, MB, GB
type
Include payload type.
Values (comma-separated): directory (or dir), link, file (default)
Example (returns everything): type=directory,link,file
owners
Resolve UID/GID to an actual names or leave them numeric (default).
Values: name (default), id
brief
Return just a list of payload elements, if True. Default: False.
* **all**: Return all information (default).
CLI Example:
.. code-block:: bash
salt '*' inspector.query scope=system
salt '*' inspector.query scope=payload type=file,link filter=/etc size=Kb brief=False
'''
query = _("query")
try:
return query.Query(kwargs.get('scope'), cachedir=__opts__['cachedir'])(*args, **kwargs)
except InspectorQueryException as ex:
raise CommandExecutionError(ex)
except Exception as ex:
log.error(_get_error_message(ex))
raise Exception(ex)
def build(format='qcow2', path='/tmp/'):
    '''
    Build a bootable system image from the current system description.

    The actual build is performed by the Kiwi image-building library.

    format
        Output format: ``qcow2`` (default) or ``iso``.

    path
        Output directory where the built image is stored. Default: ``/tmp``.

    CLI Example:

    .. code-block:: bash

        salt myminion inspector.build
        salt myminion inspector.build format=iso path=/opt/builds/
    '''
    try:
        inspector = _("collector").Inspector(cachedir=__opts__['cachedir'],
                                             piddir=os.path.dirname(__opts__['pidfile']),
                                             pidfilename='')
        # Reuse the last description snapshot rather than taking a new one.
        inspector.reuse_snapshot().build(format=format, path=path)
    except InspectorKiwiProcessorException as ex:
        raise CommandExecutionError(ex)
    except Exception as ex:
        log.error(_get_error_message(ex))
        raise Exception(ex)
def export(local=False, path="/tmp", format='qcow2'):
    '''
    Export the current system description for Kiwi.

    local
        When True, write the Kiwi description to a local file. Default: False.

    path
        Directory where the description is written when ``local=True``.
        Default: ``/tmp``.

    CLI Example:

    .. code-block:: bash

        salt myminion inspector.export
        salt myminion inspector.export format=iso path=/opt/builds/
    '''
    # Exporting requires full filesystem access, hence the root check.
    if getpass.getuser() != 'root':
        raise CommandExecutionError('In order to export system, the minion should run as "root".')
    try:
        description = _("query").Query('all', cachedir=__opts__['cachedir'])()
        inspector = _("collector").Inspector()
        return inspector.reuse_snapshot().export(description, local=local, path=path, format=format)
    except InspectorKiwiProcessorException as ex:
        raise CommandExecutionError(ex)
    except Exception as ex:
        log.error(_get_error_message(ex))
        raise Exception(ex)
def snapshots():
    '''
    List the description snapshots currently stored.

    CLI Example:

    .. code-block:: bash

        salt myminion inspector.snapshots
    '''
    try:
        inspector = _("collector").Inspector(cachedir=__opts__['cachedir'],
                                             piddir=os.path.dirname(__opts__['pidfile']))
        return inspector.db.list()
    except InspectorSnapshotException as err:
        raise CommandExecutionError(err)
    except Exception as err:
        log.error(_get_error_message(err))
        raise Exception(err)
|
saltstack/salt
|
salt/engines/napalm_syslog.py
|
start
|
python
|
def start(transport='zmq',
          address='0.0.0.0',
          port=49017,
          auth_address='0.0.0.0',
          auth_port=49018,
          disable_security=False,
          certificate=None,
          os_whitelist=None,
          os_blacklist=None,
          error_whitelist=None,
          error_blacklist=None,
          host_whitelist=None,
          host_blacklist=None):
    '''
    Listen to napalm-logs and publish events into the Salt event bus.

    This function never returns under normal operation: it blocks on the
    transport, filters each received object against the configured
    whitelists/blacklists, and fires a Salt event tagged
    ``napalm/syslog/<os>/<error>/<host>`` for every accepted message.

    transport: ``zmq``
        Choose the desired transport.

        .. note::
            Currently ``zmq`` is the only valid option.

    address: ``0.0.0.0``
        The address of the publisher, as configured on napalm-logs.

    port: ``49017``
        The port of the publisher, as configured on napalm-logs.

    auth_address: ``0.0.0.0``
        The address used for authentication when security is not disabled.

    auth_port: ``49018``
        Port used for authentication.

    disable_security: ``False``
        Trust unencrypted messages. Strongly discouraged in production.

    certificate: ``None``
        Absolute path to the SSL certificate. Required unless
        ``disable_security`` is True.

    os_whitelist: ``None``
        List of operating systems allowed. By default everything is allowed.

    os_blacklist: ``None``
        List of operating systems to be ignored. Nothing ignored by default.

    error_whitelist: ``None``
        List of errors allowed.

    error_blacklist: ``None``
        List of errors ignored.

    host_whitelist: ``None``
        List of hosts or IPs to be allowed.

    host_blacklist: ``None``
        List of hosts or IPs to be ignored.
    '''
    if not disable_security:
        # Encrypted mode requires a certificate to authenticate against the
        # napalm-logs auth channel; refuse to start without one.
        if not certificate:
            log.critical('Please use a certificate, or disable the security.')
            return
        auth = napalm_logs.utils.ClientAuth(certificate,
                                            address=auth_address,
                                            port=auth_port)
    # Resolve the blocking receive callable for the configured transport.
    transport_recv_fun = _get_transport_recv(name=transport,
                                             address=address,
                                             port=port)
    if not transport_recv_fun:
        log.critical('Unable to start the engine', exc_info=True)
        return
    master = False
    if __opts__['__role'] == 'master':
        master = True
    # Main receive loop: block on the transport, filter, fire an event.
    while True:
        log.debug('Waiting for napalm-logs to send anything...')
        raw_object = transport_recv_fun()
        log.debug('Received from napalm-logs:')
        log.debug(raw_object)
        if not disable_security:
            # Messages are encrypted/signed; decrypt via the auth client.
            dict_object = auth.decrypt(raw_object)
        else:
            dict_object = napalm_logs.utils.unserialize(raw_object)
        try:
            event_os = dict_object['os']
            if os_blacklist or os_whitelist:
                valid_os = salt.utils.stringutils.check_whitelist_blacklist(
                    event_os,
                    whitelist=os_whitelist,
                    blacklist=os_blacklist)
                if not valid_os:
                    log.info('Ignoring NOS %s as per whitelist/blacklist', event_os)
                    continue
            event_error = dict_object['error']
            if error_blacklist or error_whitelist:
                valid_error = salt.utils.stringutils.check_whitelist_blacklist(
                    event_error,
                    whitelist=error_whitelist,
                    blacklist=error_blacklist)
                if not valid_error:
                    log.info('Ignoring error %s as per whitelist/blacklist', event_error)
                    continue
            event_host = dict_object.get('host') or dict_object.get('ip')
            if host_blacklist or host_whitelist:
                valid_host = salt.utils.stringutils.check_whitelist_blacklist(
                    event_host,
                    whitelist=host_whitelist,
                    blacklist=host_blacklist)
                if not valid_host:
                    log.info('Ignoring messages from %s as per whitelist/blacklist', event_host)
                    continue
            # The tag encodes NOS, error type and host so reactors can match
            # on any combination, e.g. 'napalm/syslog/*/BGP_.../*'.
            tag = 'napalm/syslog/{os}/{error}/{host}'.format(
                os=event_os,
                error=event_error,
                host=event_host
            )
        except KeyError as kerr:
            # Malformed object (missing 'os'/'error'): log and skip it.
            log.warning('Missing keys from the napalm-logs object:', exc_info=True)
            log.warning(dict_object)
            continue  # jump to the next object in the queue
        log.debug('Sending event %s', tag)
        log.debug(raw_object)
        if master:
            # On the master, fire directly on the master event bus.
            event.get_master_event(__opts__,
                                   __opts__['sock_dir']
                                   ).fire_event(dict_object, tag)
        else:
            # On a minion, route through the event.send execution module.
            __salt__['event.send'](tag, dict_object)
|
Listen to napalm-logs and publish events into the Salt event bus.
transport: ``zmq``
Choose the desired transport.
.. note::
Currently ``zmq`` is the only valid option.
address: ``0.0.0.0``
The address of the publisher, as configured on napalm-logs.
port: ``49017``
The port of the publisher, as configured on napalm-logs.
auth_address: ``0.0.0.0``
The address used for authentication
when security is not disabled.
auth_port: ``49018``
Port used for authentication.
disable_security: ``False``
Trust unencrypted messages.
Strongly discouraged in production.
certificate: ``None``
Absolute path to the SSL certificate.
os_whitelist: ``None``
List of operating systems allowed. By default everything is allowed.
os_blacklist: ``None``
List of operating system to be ignored. Nothing ignored by default.
error_whitelist: ``None``
List of errors allowed.
error_blacklist: ``None``
List of errors ignored.
host_whitelist: ``None``
List of hosts or IPs to be allowed.
host_blacklist: ``None``
List of hosts of IPs to be ignored.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/napalm_syslog.py#L248-L378
|
[
"def get_master_event(opts, sock_dir, listen=True, io_loop=None, raise_errors=False, keep_loop=False):\n '''\n Return an event object suitable for the named transport\n '''\n # TODO: AIO core is separate from transport\n if opts['transport'] in ('zeromq', 'tcp', 'detect'):\n return MasterEvent(sock_dir, opts, listen=listen, io_loop=io_loop, raise_errors=raise_errors, keep_loop=keep_loop)\n",
"def _get_transport_recv(name='zmq',\n address='0.0.0.0',\n port=49017,\n **kwargs):\n if name not in TRANSPORT_FUN_MAP:\n log.error('Invalid transport: %s. Falling back to ZeroMQ.', name)\n name = 'zmq'\n return TRANSPORT_FUN_MAP[name](address, port, **kwargs)\n"
] |
# -*- coding: utf-8 -*-
'''
NAPALM syslog engine
====================
.. versionadded:: 2017.7.0
An engine that takes syslog messages structured in
OpenConfig_ or IETF format
and fires Salt events.
.. _OpenConfig: http://www.openconfig.net/
As there can be many messages pushed into the event bus,
the user is able to filter based on the object structure.
Requirements
------------
- `napalm-logs`_
.. _`napalm-logs`: https://github.com/napalm-automation/napalm-logs
This engine transfers objects from the napalm-logs library
into the event bus. The top dictionary has the following keys:
- ``ip``
- ``host``
- ``timestamp``
- ``os``: the network OS identified
- ``model_name``: the OpenConfig or IETF model name
- ``error``: the error name (consult the documentation)
- ``message_details``: details extracted from the syslog message
- ``open_config``: the OpenConfig model
The napalm-logs transfers the messages via widely used transport
mechanisms such as: ZeroMQ (default), Kafka, etc.
The user can select the right transport using the ``transport``
option in the configuration.
:configuration: Example configuration
.. code-block:: yaml
engines:
- napalm_syslog:
transport: zmq
address: 1.2.3.4
port: 49018
:configuration: Configuration example, excluding messages from IOS-XR devices:
.. code-block:: yaml
engines:
- napalm_syslog:
transport: kafka
address: 1.2.3.4
port: 49018
os_blacklist:
- iosxr
Event example:
.. code-block:: json
{
"_stamp": "2017-05-26T10:03:18.653045",
"error": "BGP_PREFIX_THRESH_EXCEEDED",
"host": "vmx01",
"ip": "192.168.140.252",
"message_details": {
"date": "May 25",
"host": "vmx01",
"message": "192.168.140.254 (External AS 65001): Configured maximum prefix-limit threshold(22) exceeded for inet-unicast nlri: 28 (instance master)",
"pri": "28",
"processId": "2957",
"processName": "rpd",
"tag": "BGP_PREFIX_THRESH_EXCEEDED",
"time": "20:50:41"
},
"model_name": "openconfig_bgp",
"open_config": {
"bgp": {
"neighbors": {
"neighbor": {
"192.168.140.254": {
"afi_safis": {
"afi_safi": {
"inet": {
"afi_safi_name": "inet",
"ipv4_unicast": {
"prefix_limit": {
"state": {
"max_prefixes": 22
}
}
},
"state": {
"prefixes": {
"received": 28
}
}
}
}
},
"neighbor_address": "192.168.140.254",
"state": {
"peer_as": 65001
}
}
}
}
}
},
"os": "junos",
"timestamp": "1495741841"
}
To consume the events and eventually react and deploy a configuration changes
on the device(s) firing the event, one is able to identify the minion ID, using
one of the following alternatives, but not limited to:
- :mod:`Host grains <salt.grains.napalm.host>` to match the event tag
- :mod:`Host DNS grain <salt.grains.napalm.host_dns>` to match the IP address in the event data
- :mod:`Hostname grains <salt.grains.napalm.hostname>` to match the event tag
- :ref:`Define static grains <static-custom-grains>`
- :ref:`Write a grains module <writing-grains>`
- :ref:`Targeting minions using pillar data <targeting-pillar>` - The user can
configure certain information in the Pillar data and then use it to identify
minions
Master configuration example, to match the event and react:
.. code-block:: yaml
reactor:
- 'napalm/syslog/*/BGP_PREFIX_THRESH_EXCEEDED/*':
- salt://increase_prefix_limit_on_thresh_exceeded.sls
Which matches the events having the error code ``BGP_PREFIX_THRESH_EXCEEDED``
from any network operating system, from any host and reacts, executing the
``increase_prefix_limit_on_thresh_exceeded.sls`` reactor, found under
one of the :conf_master:`file_roots` paths.
Reactor example:
.. code-block:: yaml
increase_prefix_limit_on_thresh_exceeded:
local.net.load_template:
- tgt: "hostname:{{ data['host'] }}"
- tgt_type: grain
- kwarg:
template_name: salt://increase_prefix_limit.jinja
openconfig_structure: {{ data['open_config'] }}
The reactor in the example increases the BGP prefix limit
when triggered by an event as above. The minion is matched using the ``host``
field from the ``data`` (which is the body of the event), compared to the
:mod:`hostname grain <salt.grains.napalm.hostname>` field. When the event
occurs, the reactor will execute the
:mod:`net.load_template <salt.modules.napalm_network.load_template>` function,
sending as arguments the template ``salt://increase_prefix_limit.jinja`` defined
by the user in their environment and the complete OpenConfig object under
the variable name ``openconfig_structure``. Inside the Jinja template, the user
can process the object from ``openconfig_structure`` and define the bussiness
logic as required.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python stdlib
import logging
# Import third party libraries
from salt.utils.zeromq import zmq
try:
# pylint: disable=W0611
import napalm_logs
import napalm_logs.utils
# pylint: enable=W0611
HAS_NAPALM_LOGS = True
except ImportError:
HAS_NAPALM_LOGS = False
# Import salt libs
import salt.utils.event as event
import salt.utils.network
import salt.utils.stringutils
# ----------------------------------------------------------------------------------------------------------------------
# module properties
# ----------------------------------------------------------------------------------------------------------------------
log = logging.getLogger(__name__)
__virtualname__ = 'napalm_syslog'
# ----------------------------------------------------------------------------------------------------------------------
# helpers
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
    '''
    Only load this engine when both napalm-logs and ZeroMQ are available.
    '''
    if HAS_NAPALM_LOGS and zmq:
        return True
    return (False, 'napalm_syslog could not be loaded. \
Please install napalm-logs library amd ZeroMQ.')
def _zmq(address, port, **kwargs):
    '''
    Connect a SUB socket to the napalm-logs ZeroMQ publisher and return
    its ``recv`` method as the receive callable.
    '''
    ctx = zmq.Context()
    sub = ctx.socket(zmq.SUB)
    # Enable IPv6 on the socket when the publisher address requires it.
    if salt.utils.network.is_ipv6(address):
        sub.ipv6 = True
    sub.connect('tcp://{addr}:{port}'.format(addr=address, port=port))
    # Empty subscription prefix: receive every published message.
    sub.setsockopt(zmq.SUBSCRIBE, b'')
    return sub.recv
def _get_transport_recv(name='zmq',
                        address='0.0.0.0',
                        port=49017,
                        **kwargs):
    '''
    Look up the transport by name and return its receive callable.

    Unknown transport names are logged and fall back to ZeroMQ.
    '''
    if name not in TRANSPORT_FUN_MAP:
        log.error('Invalid transport: %s. Falling back to ZeroMQ.', name)
        name = 'zmq'
    transport_factory = TRANSPORT_FUN_MAP[name]
    return transport_factory(address, port, **kwargs)
TRANSPORT_FUN_MAP = {
'zmq': _zmq,
'zeromq': _zmq
}
# ----------------------------------------------------------------------------------------------------------------------
# main
# ----------------------------------------------------------------------------------------------------------------------
|
saltstack/salt
|
salt/states/salt_proxy.py
|
configure_proxy
|
python
|
def configure_proxy(name, proxyname='p8000', start=True):
    '''
    Create the salt proxy file and start the proxy process if required.

    name
        The name of this state.

    proxyname
        Name to be used for this proxy (should match entries in pillar).

    start
        Boolean indicating if the process should be started.

    Example:

    ..code-block:: yaml

        salt-proxy-configure:
            salt_proxy.configure_proxy:
                - proxyname: p8000
                - start: True
    '''
    # Delegate the actual work to the execution module, then decorate the
    # returned dict with the state name and a summary comment.
    result = __salt__['salt_proxy.configure_proxy'](proxyname,
                                                    start=start)
    result['name'] = name
    result['comment'] = '{0} config messages'.format(name)
    return result
|
Create the salt proxy file and start the proxy process
if required
Parameters:
name:
The name of this state
proxyname:
Name to be used for this proxy (should match entries in pillar)
start:
Boolean indicating if the process should be started
Example:
..code-block:: yaml
salt-proxy-configure:
salt_proxy.configure_proxy:
- proxyname: p8000
- start: True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/salt_proxy.py#L33-L62
| null |
# -*- coding: utf-8 -*-
'''
Salt proxy state
.. versionadded:: 2015.8.2
State to deploy and run salt-proxy processes
on a minion.
Set up pillar data for your proxies per the documentation.
Run the state as below
..code-block:: yaml
salt-proxy-configure:
salt_proxy.configure_proxy:
- proxyname: p8000
- start: True
This state will configure the salt proxy settings
within /etc/salt/proxy (if /etc/salt/proxy doesn't exists)
and start the salt-proxy process (default true),
if it isn't already running.
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
log = logging.getLogger(__name__)
|
saltstack/salt
|
salt/modules/hadoop.py
|
_hadoop_cmd
|
python
|
def _hadoop_cmd(module, command, *args):
    '''
    Hadoop/hdfs command wrapper.

    As the ``hadoop`` command has been deprecated, this prefers the
    ``hdfs`` binary when present and falls back to ``hadoop`` otherwise.
    To prevent arbitrary execution the module name is validated against
    ``__authorized_modules__``.

    Follows the hadoop command template ``<tool> <module> -<command> <args>``,
    e.g. ``hadoop dfs -ls /``.
    '''
    # Guard clauses mirror the original error strings exactly.
    if not (module and command):
        return 'Error: Module and command not defined'
    if module not in __authorized_modules__:
        return 'Error: Unknown module'
    tool = 'hdfs' if salt.utils.path.which('hdfs') else 'hadoop'
    cmd = '{0} {1} -{2} {3}'.format(tool, module, command, ' '.join(args))
    return __salt__['cmd.run'](cmd, python_shell=False)
|
Hadoop/hdfs command wrapper
As Hadoop command has been deprecated this module will default
to use hdfs command and fall back to hadoop if it is not found
In order to prevent random execution the module name is checked
Follows hadoop command template:
hadoop module -command args
E.g.: hadoop dfs -ls /
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/hadoop.py#L30-L57
| null |
# -*- coding: utf-8 -*-
'''
Support for hadoop
:maintainer: Yann Jouanin <yann.jouanin@intelunix.fr>
:maturity: new
:depends:
:platform: linux
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import salt libs
import salt.utils.path
__authorized_modules__ = ['version', 'namenode', 'dfsadmin', 'dfs', 'fs']
def __virtual__():
'''
Check if hadoop is present, then load the module
'''
if salt.utils.path.which('hadoop') or salt.utils.path.which('hdfs'):
return 'hadoop'
return (False, 'The hadoop execution module cannot be loaded: hadoop or hdfs binary not in path.')
def version():
'''
Return version from hadoop version
CLI Example:
.. code-block:: bash
salt '*' hadoop.version
'''
module = 'version'
out = _hadoop_cmd(module, True).split()
return out[1]
def dfs(command=None, *args):
'''
Execute a command on DFS
CLI Example:
.. code-block:: bash
salt '*' hadoop.dfs ls /
'''
if command:
return _hadoop_cmd('dfs', command, *args)
else:
return 'Error: command must be provided'
def dfsadmin_report(arg=None):
'''
.. versionadded:: 2019.2.0
Reports basic filesystem information and statistics. Optional flags may be used to filter the list of displayed DataNodes.
arg
[live] [dead] [decommissioning]
CLI Example:
.. code-block:: bash
salt '*' hadoop.dfsadmin -report
'''
if arg is not None:
if arg in ['live', 'dead', 'decommissioning']:
return _hadoop_cmd('dfsadmin', 'report', arg)
else:
return "Error: the arg is wrong, it must be in ['live', 'dead', 'decommissioning']"
else:
return _hadoop_cmd('dfsadmin', 'report')
def dfs_present(path):
'''
Check if a file or directory is present on the distributed FS.
CLI Example:
.. code-block:: bash
salt '*' hadoop.dfs_present /some_random_file
Returns True if the file is present
'''
cmd_return = _hadoop_cmd('dfs', 'stat', path)
match = 'No such file or directory'
return False if match in cmd_return else True
def dfs_absent(path):
'''
Check if a file or directory is absent on the distributed FS.
CLI Example:
.. code-block:: bash
salt '*' hadoop.dfs_absent /some_random_file
Returns True if the file is absent
'''
cmd_return = _hadoop_cmd('dfs', 'stat', path)
match = 'No such file or directory'
return True if match in cmd_return else False
def namenode_format(force=None):
'''
Format a name node
.. code-block:: bash
salt '*' hadoop.namenode_format force=True
'''
force_param = ''
if force:
force_param = '-force'
return _hadoop_cmd('namenode', 'format', '-nonInteractive', force_param)
|
saltstack/salt
|
salt/modules/hadoop.py
|
dfs_present
|
python
|
def dfs_present(path):
    '''
    Check if a file or directory is present on the distributed FS.

    CLI Example:

    .. code-block:: bash

        salt '*' hadoop.dfs_present /some_random_file

    Returns True if the file is present
    '''
    # 'hdfs dfs -stat' reports this message when the path does not exist.
    output = _hadoop_cmd('dfs', 'stat', path)
    return 'No such file or directory' not in output
|
Check if a file or directory is present on the distributed FS.
CLI Example:
.. code-block:: bash
salt '*' hadoop.dfs_present /some_random_file
Returns True if the file is present
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/hadoop.py#L115-L129
|
[
"def _hadoop_cmd(module, command, *args):\n '''\n Hadoop/hdfs command wrapper\n\n As Hadoop command has been deprecated this module will default\n to use hdfs command and fall back to hadoop if it is not found\n\n In order to prevent random execution the module name is checked\n\n Follows hadoop command template:\n hadoop module -command args\n E.g.: hadoop dfs -ls /\n '''\n tool = 'hadoop'\n if salt.utils.path.which('hdfs'):\n tool = 'hdfs'\n\n out = None\n if module and command:\n if module in __authorized_modules__:\n mappings = {'tool': tool, 'module': module, 'command': command, 'args': ' '.join(args)}\n cmd = '{tool} {module} -{command} {args}'.format(**mappings)\n out = __salt__['cmd.run'](cmd, python_shell=False)\n else:\n return 'Error: Unknown module'\n else:\n return 'Error: Module and command not defined'\n return out\n"
] |
# -*- coding: utf-8 -*-
'''
Support for hadoop
:maintainer: Yann Jouanin <yann.jouanin@intelunix.fr>
:maturity: new
:depends:
:platform: linux
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import salt libs
import salt.utils.path
__authorized_modules__ = ['version', 'namenode', 'dfsadmin', 'dfs', 'fs']
def __virtual__():
'''
Check if hadoop is present, then load the module
'''
if salt.utils.path.which('hadoop') or salt.utils.path.which('hdfs'):
return 'hadoop'
return (False, 'The hadoop execution module cannot be loaded: hadoop or hdfs binary not in path.')
def _hadoop_cmd(module, command, *args):
'''
Hadoop/hdfs command wrapper
As Hadoop command has been deprecated this module will default
to use hdfs command and fall back to hadoop if it is not found
In order to prevent random execution the module name is checked
Follows hadoop command template:
hadoop module -command args
E.g.: hadoop dfs -ls /
'''
tool = 'hadoop'
if salt.utils.path.which('hdfs'):
tool = 'hdfs'
out = None
if module and command:
if module in __authorized_modules__:
mappings = {'tool': tool, 'module': module, 'command': command, 'args': ' '.join(args)}
cmd = '{tool} {module} -{command} {args}'.format(**mappings)
out = __salt__['cmd.run'](cmd, python_shell=False)
else:
return 'Error: Unknown module'
else:
return 'Error: Module and command not defined'
return out
def version():
'''
Return version from hadoop version
CLI Example:
.. code-block:: bash
salt '*' hadoop.version
'''
module = 'version'
out = _hadoop_cmd(module, True).split()
return out[1]
def dfs(command=None, *args):
'''
Execute a command on DFS
CLI Example:
.. code-block:: bash
salt '*' hadoop.dfs ls /
'''
if command:
return _hadoop_cmd('dfs', command, *args)
else:
return 'Error: command must be provided'
def dfsadmin_report(arg=None):
'''
.. versionadded:: 2019.2.0
Reports basic filesystem information and statistics. Optional flags may be used to filter the list of displayed DataNodes.
arg
[live] [dead] [decommissioning]
CLI Example:
.. code-block:: bash
salt '*' hadoop.dfsadmin -report
'''
if arg is not None:
if arg in ['live', 'dead', 'decommissioning']:
return _hadoop_cmd('dfsadmin', 'report', arg)
else:
return "Error: the arg is wrong, it must be in ['live', 'dead', 'decommissioning']"
else:
return _hadoop_cmd('dfsadmin', 'report')
def dfs_absent(path):
'''
Check if a file or directory is absent on the distributed FS.
CLI Example:
.. code-block:: bash
salt '*' hadoop.dfs_absent /some_random_file
Returns True if the file is absent
'''
cmd_return = _hadoop_cmd('dfs', 'stat', path)
match = 'No such file or directory'
return True if match in cmd_return else False
def namenode_format(force=None):
'''
Format a name node
.. code-block:: bash
salt '*' hadoop.namenode_format force=True
'''
force_param = ''
if force:
force_param = '-force'
return _hadoop_cmd('namenode', 'format', '-nonInteractive', force_param)
|
saltstack/salt
|
salt/modules/hadoop.py
|
dfs_absent
|
python
|
def dfs_absent(path):
    '''
    Check if a file or directory is absent on the distributed FS.

    CLI Example:

    .. code-block:: bash

        salt '*' hadoop.dfs_absent /some_random_file

    Returns True if the file is absent
    '''
    # 'hdfs dfs -stat' reports this message when the path does not exist.
    output = _hadoop_cmd('dfs', 'stat', path)
    return 'No such file or directory' in output
|
Check if a file or directory is absent on the distributed FS.
CLI Example:
.. code-block:: bash
salt '*' hadoop.dfs_absent /some_random_file
Returns True if the file is absent
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/hadoop.py#L132-L146
|
[
"def _hadoop_cmd(module, command, *args):\n '''\n Hadoop/hdfs command wrapper\n\n As Hadoop command has been deprecated this module will default\n to use hdfs command and fall back to hadoop if it is not found\n\n In order to prevent random execution the module name is checked\n\n Follows hadoop command template:\n hadoop module -command args\n E.g.: hadoop dfs -ls /\n '''\n tool = 'hadoop'\n if salt.utils.path.which('hdfs'):\n tool = 'hdfs'\n\n out = None\n if module and command:\n if module in __authorized_modules__:\n mappings = {'tool': tool, 'module': module, 'command': command, 'args': ' '.join(args)}\n cmd = '{tool} {module} -{command} {args}'.format(**mappings)\n out = __salt__['cmd.run'](cmd, python_shell=False)\n else:\n return 'Error: Unknown module'\n else:\n return 'Error: Module and command not defined'\n return out\n"
] |
# -*- coding: utf-8 -*-
'''
Support for hadoop
:maintainer: Yann Jouanin <yann.jouanin@intelunix.fr>
:maturity: new
:depends:
:platform: linux
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import salt libs
import salt.utils.path
__authorized_modules__ = ['version', 'namenode', 'dfsadmin', 'dfs', 'fs']
def __virtual__():
'''
Check if hadoop is present, then load the module
'''
if salt.utils.path.which('hadoop') or salt.utils.path.which('hdfs'):
return 'hadoop'
return (False, 'The hadoop execution module cannot be loaded: hadoop or hdfs binary not in path.')
def _hadoop_cmd(module, command, *args):
'''
Hadoop/hdfs command wrapper
As Hadoop command has been deprecated this module will default
to use hdfs command and fall back to hadoop if it is not found
In order to prevent random execution the module name is checked
Follows hadoop command template:
hadoop module -command args
E.g.: hadoop dfs -ls /
'''
tool = 'hadoop'
if salt.utils.path.which('hdfs'):
tool = 'hdfs'
out = None
if module and command:
if module in __authorized_modules__:
mappings = {'tool': tool, 'module': module, 'command': command, 'args': ' '.join(args)}
cmd = '{tool} {module} -{command} {args}'.format(**mappings)
out = __salt__['cmd.run'](cmd, python_shell=False)
else:
return 'Error: Unknown module'
else:
return 'Error: Module and command not defined'
return out
def version():
'''
Return version from hadoop version
CLI Example:
.. code-block:: bash
salt '*' hadoop.version
'''
module = 'version'
out = _hadoop_cmd(module, True).split()
return out[1]
def dfs(command=None, *args):
'''
Execute a command on DFS
CLI Example:
.. code-block:: bash
salt '*' hadoop.dfs ls /
'''
if command:
return _hadoop_cmd('dfs', command, *args)
else:
return 'Error: command must be provided'
def dfsadmin_report(arg=None):
'''
.. versionadded:: 2019.2.0
Reports basic filesystem information and statistics. Optional flags may be used to filter the list of displayed DataNodes.
arg
[live] [dead] [decommissioning]
CLI Example:
.. code-block:: bash
salt '*' hadoop.dfsadmin -report
'''
if arg is not None:
if arg in ['live', 'dead', 'decommissioning']:
return _hadoop_cmd('dfsadmin', 'report', arg)
else:
return "Error: the arg is wrong, it must be in ['live', 'dead', 'decommissioning']"
else:
return _hadoop_cmd('dfsadmin', 'report')
def dfs_present(path):
'''
Check if a file or directory is present on the distributed FS.
CLI Example:
.. code-block:: bash
salt '*' hadoop.dfs_present /some_random_file
Returns True if the file is present
'''
cmd_return = _hadoop_cmd('dfs', 'stat', path)
match = 'No such file or directory'
return False if match in cmd_return else True
def namenode_format(force=None):
'''
Format a name node
.. code-block:: bash
salt '*' hadoop.namenode_format force=True
'''
force_param = ''
if force:
force_param = '-force'
return _hadoop_cmd('namenode', 'format', '-nonInteractive', force_param)
|
saltstack/salt
|
salt/utils/timed_subprocess.py
|
TimedProc.run
|
python
|
def run(self):
    '''
    Wait for the subprocess to terminate and return its exit code.

    If ``self.timeout`` is set and the process does not finish in time,
    the process is killed (then terminated as a fallback 10 seconds later)
    and a ``TimedProcTimeoutError`` is raised.
    '''
    def receive():
        # Collect output via communicate() when requested; otherwise just
        # wait for process exit (if waiting at all).
        if self.with_communicate:
            self.stdout, self.stderr = self.process.communicate(input=self.stdin)
        elif self.wait:
            self.process.wait()

    if not self.timeout:
        receive()
    else:
        rt = threading.Thread(target=receive)
        rt.start()
        rt.join(self.timeout)
        # BUGFIX: Thread.isAlive() was removed in Python 3.9; is_alive()
        # is the supported spelling on all Python 3 versions.
        if rt.is_alive():
            # Subprocess cleanup (best effort)
            self.process.kill()

            def terminate():
                if rt.is_alive():
                    self.process.terminate()
            threading.Timer(10, terminate).start()
            raise salt.exceptions.TimedProcTimeoutError(
                '{0} : Timed out after {1} seconds'.format(
                    self.command,
                    six.text_type(self.timeout),
                )
            )
    return self.process.returncode
|
wait for subprocess to terminate and return subprocess' return code.
If timeout is reached, throw TimedProcTimeoutError
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/timed_subprocess.py#L82-L113
|
[
"def receive():\n if self.with_communicate:\n self.stdout, self.stderr = self.process.communicate(input=self.stdin)\n elif self.wait:\n self.process.wait()\n"
] |
class TimedProc(object):
    '''
    Create a TimedProc object, calls subprocess.Popen with passed args and **kwargs

    Recognized extra keyword arguments (popped before Popen is invoked):

    bg
        Run in the background; do not wait for process exit. Default: False.
    stdin
        Data fed to the process via communicate().
    with_communicate
        Collect stdout/stderr via communicate(). Defaults to the value of
        ``wait``, so foreground runs communicate and background runs do not.
    timeout
        Number of seconds to wait for the process (enforced elsewhere, in
        the run method). Must be numeric if set.
    stdin_raw_newlines
        When False (default), literal backslash-n sequences in stdin are
        translated into real newline characters.
    '''
    def __init__(self, args, **kwargs):
        # wait == foreground execution; 'bg' inverts it.
        self.wait = not kwargs.pop('bg', False)
        self.stdin = kwargs.pop('stdin', None)
        self.with_communicate = kwargs.pop('with_communicate', self.wait)
        self.timeout = kwargs.pop('timeout', None)
        self.stdin_raw_newlines = kwargs.pop('stdin_raw_newlines', False)

        # If you're not willing to wait for the process
        # you can't define any stdin, stdout or stderr
        if not self.wait:
            self.stdin = kwargs['stdin'] = None
            self.with_communicate = False
        elif self.stdin is not None:
            if not self.stdin_raw_newlines:
                # Translate a newline submitted as '\n' on the CLI to an actual
                # newline character.
                self.stdin = salt.utils.stringutils.to_bytes(self.stdin.replace('\\n', '\n'))
            kwargs['stdin'] = subprocess.PIPE
        if not self.with_communicate:
            self.stdout = kwargs['stdout'] = None
            self.stderr = kwargs['stderr'] = None
        # A non-numeric timeout is rejected up front rather than failing later.
        if self.timeout and not isinstance(self.timeout, (int, float)):
            raise salt.exceptions.TimedProcTimeoutError('Error: timeout {0} must be a number'.format(self.timeout))
        if kwargs.get('shell', False):
            args = salt.utils.data.decode(args, to_str=True)
        try:
            self.process = subprocess.Popen(args, **kwargs)
        except (AttributeError, TypeError):
            # Popen rejected the arguments; normalize args/env and retry once.
            if not kwargs.get('shell', False):
                # Non-shell mode wants a list of string arguments.
                if not isinstance(args, (list, tuple)):
                    try:
                        args = shlex.split(args)
                    except AttributeError:
                        args = shlex.split(six.text_type(args))
                str_args = []
                for arg in args:
                    if not isinstance(arg, six.string_types):
                        str_args.append(six.text_type(arg))
                    else:
                        str_args.append(arg)
                args = str_args
            else:
                if not isinstance(args, (list, tuple, six.string_types)):
                    # Handle corner case where someone does a 'cmd.run 3'
                    args = six.text_type(args)
            # Ensure that environment variables are strings
            for key, val in six.iteritems(kwargs.get('env', {})):
                if not isinstance(val, six.string_types):
                    kwargs['env'][key] = six.text_type(val)
                if not isinstance(key, six.string_types):
                    kwargs['env'][six.text_type(key)] = kwargs['env'].pop(key)
            if six.PY2 and 'env' in kwargs:
                # Ensure no unicode in custom env dict, as it can cause
                # problems with subprocess.
                kwargs['env'] = salt.utils.data.encode_dict(kwargs['env'])
            args = salt.utils.data.decode(args)
            self.process = subprocess.Popen(args, **kwargs)
        self.command = args
|
saltstack/salt
|
salt/modules/boto_kinesis.py
|
_get_full_stream
|
python
|
def _get_full_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get complete stream info from AWS, via describe_stream, including all shards.

    Returns a dict with the full stream description under 'result', or the
    error dict from the failed AWS call under 'error'.

    CLI example::

        salt myminion boto_kinesis._get_full_stream my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    stream_ret = _get_basic_stream(stream_name, conn)
    # Propagate lookup failures; the previous code indexed ['result'] blindly
    # and raised TypeError/KeyError when the describe call errored out.
    if 'error' in stream_ret:
        return stream_ret
    stream = stream_ret['result']
    full_stream = stream
    # iterate through if there are > 100 shards (max that AWS will return from describe_stream)
    while stream["StreamDescription"]["HasMoreShards"]:
        stream = _execute_with_retries(
            conn,
            "describe_stream",
            StreamName=stream_name,
            ExclusiveStartShardId=stream["StreamDescription"]["Shards"][-1]["ShardId"])
        if 'error' in stream:
            return stream
        stream = stream['result']
        full_stream["StreamDescription"]["Shards"] += stream["StreamDescription"]["Shards"]
    r['result'] = full_stream
    return r
|
Get complete stream info from AWS, via describe_stream, including all shards.
CLI example::
salt myminion boto_kinesis._get_full_stream my_stream region=us-east-1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kinesis.py#L98-L121
|
[
"def _get_basic_stream(stream_name, conn):\n '''\n Stream info from AWS, via describe_stream\n Only returns the first \"page\" of shards (up to 100); use _get_full_stream() for all shards.\n\n CLI example::\n\n salt myminion boto_kinesis._get_basic_stream my_stream existing_conn\n '''\n return _execute_with_retries(conn, \"describe_stream\", StreamName=stream_name)\n",
"def _execute_with_retries(conn, function, **kwargs):\n '''\n Retry if we're rate limited by AWS or blocked by another call.\n Give up and return error message if resource not found or argument is invalid.\n\n conn\n The connection established by the calling method via _get_conn()\n\n function\n The function to call on conn. i.e. create_stream\n\n **kwargs\n Any kwargs required by the above function, with their keywords\n i.e. StreamName=stream_name\n\n Returns:\n The result dict with the HTTP response and JSON data if applicable\n as 'result', or an error as 'error'\n\n CLI example::\n\n salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs\n\n '''\n r = {}\n max_attempts = 18\n max_retry_delay = 10\n for attempt in range(max_attempts):\n log.info(\"attempt: %s function: %s\", attempt, function)\n try:\n fn = getattr(conn, function)\n r['result'] = fn(**kwargs)\n return r\n except botocore.exceptions.ClientError as e:\n error_code = e.response['Error']['Code']\n if \"LimitExceededException\" in error_code or \"ResourceInUseException\" in error_code:\n # could be rate limited by AWS or another command is blocking,\n # retry with exponential backoff\n log.debug(\"Retrying due to AWS exception\", exc_info=True)\n time.sleep(_jittered_backoff(attempt, max_retry_delay))\n else:\n # ResourceNotFoundException or InvalidArgumentException\n r['error'] = e.response['Error']\n log.error(r['error'])\n r['result'] = None\n return r\n\n r['error'] = \"Tried to execute function {0} {1} times, but was unable\".format(function, max_attempts)\n log.error(r['error'])\n return r\n"
] |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon Kinesis
.. versionadded:: 2017.7.0
:configuration: This module accepts explicit Kinesis credentials but can also
utilize IAM roles assigned to the instance trough Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
kinesis.keyid: GKTADJGHEIQSXMKKRBJ08H
kinesis.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
kinesis.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import random
import sys
# Import Salt libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
import salt.utils.versions
# Import third party libs
# pylint: disable=unused-import
try:
import boto3
import botocore
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=unused-import
log = logging.getLogger(__name__)
__virtualname__ = 'boto_kinesis'
def __virtual__():
    '''
    Only load if boto3 libraries exist.
    '''
    boto_check = salt.utils.versions.check_boto_reqs()
    if boto_check is not True:
        # boto requirements not met; return the error/False value as-is
        return boto_check
    __utils__['boto3.assign_funcs'](__name__, 'kinesis')
    return __virtualname__
def _get_basic_stream(stream_name, conn):
    '''
    Stream info from AWS, via describe_stream.

    Only returns the first "page" of shards (up to 100); use
    _get_full_stream() for all shards.

    CLI example::

        salt myminion boto_kinesis._get_basic_stream my_stream existing_conn
    '''
    response = _execute_with_retries(conn, "describe_stream", StreamName=stream_name)
    return response
def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get complete stream info from AWS, returning only when the stream is in
    the ACTIVE state. Continues to retry while the stream is updating or
    creating; if the stream is deleted during retries, the AWS error is
    returned and the loop ends.

    CLI example::

        salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    # Poll with the basic (first-page) describe only, so we don't repeatedly
    # pull the full shard list of a very large stream while waiting.
    max_retry_delay = 10
    attempt = 0
    status = None
    while status != "ACTIVE":
        time.sleep(_jittered_backoff(attempt, max_retry_delay))
        attempt += 1
        stream_response = _get_basic_stream(stream_name, conn)
        if 'error' in stream_response:
            return stream_response
        status = stream_response['result']["StreamDescription"]["StreamStatus"]
    # Stream is active now; fetch the remaining shard pages if any exist.
    if stream_response['result']["StreamDescription"]["HasMoreShards"]:
        stream_response = _get_full_stream(stream_name, region, key, keyid, profile)
    return stream_response
def exists(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Check if the stream exists. Returns False and the error if it does not.

    CLI example::

        salt myminion boto_kinesis.exists my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    stream = _get_basic_stream(stream_name, conn)
    ret = {'result': 'error' not in stream}
    if not ret['result']:
        ret['error'] = stream['error']
    return ret
def create_stream(stream_name, num_shards, region=None, key=None, keyid=None, profile=None):
    '''
    Create a stream named *stream_name* with *num_shards* initial shards.

    CLI example::

        salt myminion boto_kinesis.create_stream my_stream N region=us-east-1
    '''
    connection = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    response = _execute_with_retries(
        connection,
        "create_stream",
        ShardCount=num_shards,
        StreamName=stream_name)
    if 'error' in response:
        return response
    response['result'] = True
    return response
def delete_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Delete the stream with name stream_name. This cannot be undone! All data
    will be lost!!

    CLI example::

        salt myminion boto_kinesis.delete_stream my_stream region=us-east-1
    '''
    connection = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    response = _execute_with_retries(
        connection,
        "delete_stream",
        StreamName=stream_name)
    if 'error' in response:
        return response
    response['result'] = True
    return response
def increase_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Increase stream retention period to retention_hours.

    CLI example::

        salt myminion boto_kinesis.increase_stream_retention_period my_stream N region=us-east-1
    '''
    connection = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    response = _execute_with_retries(
        connection,
        "increase_stream_retention_period",
        StreamName=stream_name,
        RetentionPeriodHours=retention_hours)
    if 'error' in response:
        return response
    response['result'] = True
    return response
def decrease_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Decrease stream retention period to retention_hours.

    CLI example::

        salt myminion boto_kinesis.decrease_stream_retention_period my_stream N region=us-east-1
    '''
    connection = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    response = _execute_with_retries(
        connection,
        "decrease_stream_retention_period",
        StreamName=stream_name,
        RetentionPeriodHours=retention_hours)
    if 'error' in response:
        return response
    response['result'] = True
    return response
def enable_enhanced_monitoring(stream_name, metrics,
                               region=None, key=None, keyid=None, profile=None):
    '''
    Enable enhanced monitoring for the specified shard-level metrics on
    stream stream_name.

    CLI example::

        salt myminion boto_kinesis.enable_enhanced_monitoring my_stream ["metrics", "to", "enable"] region=us-east-1
    '''
    connection = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    response = _execute_with_retries(
        connection,
        "enable_enhanced_monitoring",
        StreamName=stream_name,
        ShardLevelMetrics=metrics)
    if 'error' in response:
        return response
    response['result'] = True
    return response
def disable_enhanced_monitoring(stream_name, metrics,
                                region=None, key=None, keyid=None, profile=None):
    '''
    Disable enhanced monitoring for the specified shard-level metrics on
    stream stream_name.

    CLI example::

        salt myminion boto_kinesis.disable_enhanced_monitoring my_stream ["metrics", "to", "disable"] region=us-east-1
    '''
    connection = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    response = _execute_with_retries(
        connection,
        "disable_enhanced_monitoring",
        StreamName=stream_name,
        ShardLevelMetrics=metrics)
    if 'error' in response:
        return response
    response['result'] = True
    return response
def get_info_for_reshard(stream_details):
    """
    Collect some data: number of open shards, key range, etc.

    Modifies stream_details in place, adding an "OpenShards" list sorted by
    starting hash key, with each open shard's hash-key range converted to
    int/long for arithmetic.

    Returns (min_hash_key, max_hash_key, stream_details).

    CLI example::

        salt myminion boto_kinesis.get_info_for_reshard existing_stream_details
    """
    stream_details["OpenShards"] = []
    min_hash_key = 0
    max_hash_key = 0
    for shard in stream_details["Shards"]:
        shard_id = shard["ShardId"]
        # EndingSequenceNumber is null for open shards, so its presence means
        # this shard is closed and excluded from resharding math.
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            log.debug("skipping closed shard %s", shard_id)
            continue
        stream_details["OpenShards"].append(shard)
        key_range = shard["HashKeyRange"]
        key_range["StartingHashKey"] = long_int(key_range["StartingHashKey"])
        key_range["EndingHashKey"] = long_int(key_range["EndingHashKey"])
        min_hash_key = min(min_hash_key, key_range["StartingHashKey"])
        max_hash_key = max(max_hash_key, key_range["EndingHashKey"])
    stream_details["OpenShards"].sort(
        key=lambda item: long_int(item["HashKeyRange"]["StartingHashKey"]))
    return min_hash_key, max_hash_key, stream_details
def long_int(hash_key):
    """
    The hash key is a 128-bit int, sent as a string; convert it to a native
    integer for comparison operations. This helper papers over the
    Python 2/3 int vs. long split.

    CLI example::

        salt myminion boto_kinesis.long_int some_MD5_hash_as_string

    :return: long object if python 2.X, int object if python 3.X
    """
    if sys.version_info[0] >= 3:
        return int(hash_key)
    return long(hash_key)  # pylint: disable=incompatible-py3-code
def reshard(stream_name, desired_size, force=False,
            region=None, key=None, keyid=None, profile=None):
    """
    Reshard a kinesis stream. Each call to this function will wait until the
    stream is ACTIVE, then make a single split or merge operation. This
    function decides where to split or merge with the assumption that the
    ultimate goal is a balanced partition space.

    For safety, user must pass in force=True; otherwise, the function will
    dry run.

    CLI example::

        salt myminion boto_kinesis.reshard my_stream N True region=us-east-1

    :return: True if a split or merge was found/performed, False if nothing is needed
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    stream_response = get_stream_when_active(stream_name, region, key, keyid, profile)
    if 'error' in stream_response:
        return stream_response
    stream_details = stream_response['result']["StreamDescription"]
    min_hash_key, max_hash_key, stream_details = get_info_for_reshard(stream_details)
    log.debug("found %s open shards, min_hash_key %s max_hash_key %s",
              len(stream_details["OpenShards"]), min_hash_key, max_hash_key)
    # find the first open shard that doesn't match the desired pattern. When we find it,
    # either split or merge (depending on if it's too big or too small), and then return.
    for shard_num, shard in enumerate(stream_details["OpenShards"]):
        shard_id = shard["ShardId"]
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            # something went wrong, there's a closed shard in our open shard list
            log.debug("this should never happen! closed shard %s", shard_id)
            continue
        starting_hash_key = shard["HashKeyRange"]["StartingHashKey"]
        ending_hash_key = shard["HashKeyRange"]["EndingHashKey"]
        # this weird math matches what AWS does when you create a kinesis stream
        # with an initial number of shards.
        # NOTE: hash keys are exact 128-bit integers, so use floor division
        # (//). Under Python 3, '/' is true division and coerces to float,
        # silently losing precision and breaking the equality checks below.
        expected_starting_hash_key = (
            max_hash_key - min_hash_key) // desired_size * shard_num + shard_num
        expected_ending_hash_key = (
            max_hash_key - min_hash_key) // desired_size * (shard_num + 1) + shard_num
        # fix an off-by-one at the end
        if expected_ending_hash_key > max_hash_key:
            expected_ending_hash_key = max_hash_key
        log.debug(
            "Shard %s (%s) should start at %s: %s",
            shard_num, shard_id, expected_starting_hash_key,
            starting_hash_key == expected_starting_hash_key
        )
        log.debug(
            "Shard %s (%s) should end at %s: %s",
            shard_num, shard_id, expected_ending_hash_key,
            ending_hash_key == expected_ending_hash_key
        )
        if starting_hash_key != expected_starting_hash_key:
            r['error'] = "starting hash keys mismatch, don't know what to do!"
            return r
        if ending_hash_key == expected_ending_hash_key:
            # this shard is already the right size; check the next one
            continue
        if ending_hash_key > expected_ending_hash_key + 1:
            # shard is too big: split at expected_ending_hash_key
            if force:
                log.debug("%s should end at %s, actual %s, splitting",
                          shard_id, expected_ending_hash_key, ending_hash_key)
                r = _execute_with_retries(conn,
                                          "split_shard",
                                          StreamName=stream_name,
                                          ShardToSplit=shard_id,
                                          NewStartingHashKey=str(expected_ending_hash_key + 1))  # future lint: disable=blacklisted-function
            else:
                log.debug("%s should end at %s, actual %s would split",
                          shard_id, expected_ending_hash_key, ending_hash_key)
            if 'error' not in r:
                r['result'] = True
            return r
        else:
            # shard is too small: merge with the following open shard
            next_shard_id = _get_next_open_shard(stream_details, shard_id)
            if not next_shard_id:
                r['error'] = "failed to find next shard after {0}".format(shard_id)
                return r
            if force:
                log.debug("%s should continue past %s, merging with %s",
                          shard_id, ending_hash_key, next_shard_id)
                r = _execute_with_retries(conn,
                                          "merge_shards",
                                          StreamName=stream_name,
                                          ShardToMerge=shard_id,
                                          AdjacentShardToMerge=next_shard_id)
            else:
                log.debug("%s should continue past %s, would merge with %s",
                          shard_id, ending_hash_key, next_shard_id)
            if 'error' not in r:
                r['result'] = True
            return r
    log.debug("No split or merge action necessary")
    r['result'] = False
    return r
def list_streams(region=None, key=None, keyid=None, profile=None):
    '''
    Return a list of all streams visible to the current account.

    CLI example:

    .. code-block:: bash

        salt myminion boto_kinesis.list_streams
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    streams = []
    start_name = ''
    # AWS pages the stream list; keep asking, starting after the last name
    # seen, until HasMoreStreams is no longer set.
    while start_name is not None:
        call_kwargs = {}
        if start_name:
            call_kwargs['ExclusiveStartStreamName'] = start_name
        ret = _execute_with_retries(conn, 'list_streams', **call_kwargs)
        if 'error' in ret:
            return ret
        page = ret.get('result') or {}
        streams.extend(page.get('StreamNames', []))
        if page.get('HasMoreStreams', False) in (True, 'true'):
            start_name = streams[-1]
        else:
            start_name = None
    return {'result': streams}
def _get_next_open_shard(stream_details, shard_id):
'''
Return the next open shard after shard_id
CLI example::
salt myminion boto_kinesis._get_next_open_shard existing_stream_details shard_id
'''
found = False
for shard in stream_details["OpenShards"]:
current_shard_id = shard["ShardId"]
if current_shard_id == shard_id:
found = True
continue
if found:
return current_shard_id
def _execute_with_retries(conn, function, **kwargs):
    '''
    Retry if we're rate limited by AWS or blocked by another call.
    Give up and return error message if resource not found or argument is
    invalid.

    conn
        The connection established by the calling method via _get_conn()

    function
        The function to call on conn. i.e. create_stream

    **kwargs
        Any kwargs required by the above function, with their keywords
        i.e. StreamName=stream_name

    Returns:
        The result dict with the HTTP response and JSON data if applicable
        as 'result', or an error as 'error'

    CLI example::

        salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
    '''
    max_attempts = 18
    max_retry_delay = 10
    ret = {}
    for attempt in range(max_attempts):
        log.info("attempt: %s function: %s", attempt, function)
        try:
            ret['result'] = getattr(conn, function)(**kwargs)
            return ret
        except botocore.exceptions.ClientError as err:
            error_code = err.response['Error']['Code']
            retryable = ("LimitExceededException" in error_code
                         or "ResourceInUseException" in error_code)
            if not retryable:
                # ResourceNotFoundException or InvalidArgumentException:
                # not recoverable, report and bail out.
                ret['error'] = err.response['Error']
                log.error(ret['error'])
                ret['result'] = None
                return ret
            # could be rate limited by AWS or another command is blocking,
            # retry with exponential backoff
            log.debug("Retrying due to AWS exception", exc_info=True)
            time.sleep(_jittered_backoff(attempt, max_retry_delay))
    ret['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts)
    log.error(ret['error'])
    return ret
def _jittered_backoff(attempt, max_retry_delay):
'''
Basic exponential backoff
CLI example::
salt myminion boto_kinesis._jittered_backoff current_attempt_number max_delay_in_seconds
'''
return min(random.random() * (2 ** attempt), max_retry_delay)
|
saltstack/salt
|
salt/modules/boto_kinesis.py
|
get_stream_when_active
|
python
|
def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Get complete stream info from AWS, returning only when the stream is in the ACTIVE state.
Continues to retry when stream is updating or creating.
If the stream is deleted during retries, the loop will catch the error and break.
CLI example::
salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
stream_status = None
# only get basic stream until it's active,
# so we don't pull the full list of shards repeatedly (in case of very large stream)
attempt = 0
max_retry_delay = 10
while stream_status != "ACTIVE":
time.sleep(_jittered_backoff(attempt, max_retry_delay))
attempt += 1
stream_response = _get_basic_stream(stream_name, conn)
if 'error' in stream_response:
return stream_response
stream_status = stream_response['result']["StreamDescription"]["StreamStatus"]
# now it's active, get the full stream if necessary
if stream_response['result']["StreamDescription"]["HasMoreShards"]:
stream_response = _get_full_stream(stream_name, region, key, keyid, profile)
return stream_response
|
Get complete stream info from AWS, returning only when the stream is in the ACTIVE state.
Continues to retry when stream is updating or creating.
If the stream is deleted during retries, the loop will catch the error and break.
CLI example::
salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kinesis.py#L124-L153
|
[
"def _get_basic_stream(stream_name, conn):\n '''\n Stream info from AWS, via describe_stream\n Only returns the first \"page\" of shards (up to 100); use _get_full_stream() for all shards.\n\n CLI example::\n\n salt myminion boto_kinesis._get_basic_stream my_stream existing_conn\n '''\n return _execute_with_retries(conn, \"describe_stream\", StreamName=stream_name)\n",
"def _get_full_stream(stream_name, region=None, key=None, keyid=None, profile=None):\n '''\n Get complete stream info from AWS, via describe_stream, including all shards.\n\n CLI example::\n\n salt myminion boto_kinesis._get_full_stream my_stream region=us-east-1\n '''\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n r = {}\n stream = _get_basic_stream(stream_name, conn)['result']\n full_stream = stream\n\n # iterate through if there are > 100 shards (max that AWS will return from describe_stream)\n while stream[\"StreamDescription\"][\"HasMoreShards\"]:\n stream = _execute_with_retries(conn,\n \"describe_stream\",\n StreamName=stream_name,\n ExclusiveStartShardId=stream[\"StreamDescription\"][\"Shards\"][-1][\"ShardId\"])\n stream = stream['result']\n full_stream[\"StreamDescription\"][\"Shards\"] += stream[\"StreamDescription\"][\"Shards\"]\n\n r['result'] = full_stream\n return r\n",
"def _jittered_backoff(attempt, max_retry_delay):\n '''\n Basic exponential backoff\n\n CLI example::\n\n salt myminion boto_kinesis._jittered_backoff current_attempt_number max_delay_in_seconds\n '''\n return min(random.random() * (2 ** attempt), max_retry_delay)\n"
] |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon Kinesis
.. versionadded:: 2017.7.0
:configuration: This module accepts explicit Kinesis credentials but can also
utilize IAM roles assigned to the instance trough Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
kinesis.keyid: GKTADJGHEIQSXMKKRBJ08H
kinesis.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
kinesis.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import random
import sys
# Import Salt libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
import salt.utils.versions
# Import third party libs
# pylint: disable=unused-import
try:
import boto3
import botocore
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=unused-import
log = logging.getLogger(__name__)
__virtualname__ = 'boto_kinesis'
def __virtual__():
    '''
    Only load if boto3 libraries exist.
    '''
    reqs_ok = salt.utils.versions.check_boto_reqs()
    if reqs_ok is True:
        __utils__['boto3.assign_funcs'](__name__, 'kinesis')
        return __virtualname__
    # check_boto_reqs() returned an error reason; pass it through
    return reqs_ok
def _get_basic_stream(stream_name, conn):
    '''
    Stream info from AWS, via describe_stream. Only the first "page" of
    shards (up to 100) is returned; use _get_full_stream() for all shards.

    CLI example::

        salt myminion boto_kinesis._get_basic_stream my_stream existing_conn
    '''
    describe_ret = _execute_with_retries(conn, "describe_stream", StreamName=stream_name)
    return describe_ret
def _get_full_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get complete stream info from AWS, via describe_stream, including all shards.

    Returns a dict with the full stream description under 'result', or the
    error dict from the failed AWS call under 'error'.

    CLI example::

        salt myminion boto_kinesis._get_full_stream my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    stream_ret = _get_basic_stream(stream_name, conn)
    # Propagate lookup failures; the previous code indexed ['result'] blindly
    # and raised TypeError/KeyError when the describe call errored out.
    if 'error' in stream_ret:
        return stream_ret
    stream = stream_ret['result']
    full_stream = stream
    # iterate through if there are > 100 shards (max that AWS will return from describe_stream)
    while stream["StreamDescription"]["HasMoreShards"]:
        stream = _execute_with_retries(
            conn,
            "describe_stream",
            StreamName=stream_name,
            ExclusiveStartShardId=stream["StreamDescription"]["Shards"][-1]["ShardId"])
        if 'error' in stream:
            return stream
        stream = stream['result']
        full_stream["StreamDescription"]["Shards"] += stream["StreamDescription"]["Shards"]
    r['result'] = full_stream
    return r
def exists(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Check if the stream exists. Returns False and the error if it does not.

    CLI example::

        salt myminion boto_kinesis.exists my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    describe_ret = _get_basic_stream(stream_name, conn)
    found = 'error' not in describe_ret
    ret = {'result': found}
    if not found:
        ret['error'] = describe_ret['error']
    return ret
def create_stream(stream_name, num_shards, region=None, key=None, keyid=None, profile=None):
    '''
    Create a stream with name stream_name and initial number of shards
    num_shards.

    CLI example::

        salt myminion boto_kinesis.create_stream my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        conn, "create_stream",
        ShardCount=num_shards, StreamName=stream_name)
    if 'error' in ret:
        return ret
    ret['result'] = True
    return ret
def delete_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Delete the stream with name stream_name. This cannot be undone! All data
    will be lost!!

    CLI example::

        salt myminion boto_kinesis.delete_stream my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        conn, "delete_stream",
        StreamName=stream_name)
    if 'error' in ret:
        return ret
    ret['result'] = True
    return ret
def increase_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Increase stream retention period to retention_hours.

    CLI example::

        salt myminion boto_kinesis.increase_stream_retention_period my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        conn, "increase_stream_retention_period",
        StreamName=stream_name, RetentionPeriodHours=retention_hours)
    if 'error' in ret:
        return ret
    ret['result'] = True
    return ret
def decrease_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Decrease stream retention period to retention_hours.

    CLI example::

        salt myminion boto_kinesis.decrease_stream_retention_period my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        conn, "decrease_stream_retention_period",
        StreamName=stream_name, RetentionPeriodHours=retention_hours)
    if 'error' in ret:
        return ret
    ret['result'] = True
    return ret
def enable_enhanced_monitoring(stream_name, metrics,
                               region=None, key=None, keyid=None, profile=None):
    '''
    Enable enhanced monitoring for the specified shard-level metrics on
    stream stream_name.

    CLI example::

        salt myminion boto_kinesis.enable_enhanced_monitoring my_stream ["metrics", "to", "enable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        conn, "enable_enhanced_monitoring",
        StreamName=stream_name, ShardLevelMetrics=metrics)
    if 'error' in ret:
        return ret
    ret['result'] = True
    return ret
def disable_enhanced_monitoring(stream_name, metrics,
                                region=None, key=None, keyid=None, profile=None):
    '''
    Disable enhanced monitoring for the specified shard-level metrics on
    stream stream_name.

    CLI example::

        salt myminion boto_kinesis.disable_enhanced_monitoring my_stream ["metrics", "to", "disable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        conn, "disable_enhanced_monitoring",
        StreamName=stream_name, ShardLevelMetrics=metrics)
    if 'error' in ret:
        return ret
    ret['result'] = True
    return ret
def get_info_for_reshard(stream_details):
    """
    Collect some data: number of open shards, key range, etc.

    Modifies stream_details in place, adding an "OpenShards" list sorted by
    starting hash key, with hash-key range endpoints converted to int/long.

    Returns (min_hash_key, max_hash_key, stream_details).

    CLI example::

        salt myminion boto_kinesis.get_info_for_reshard existing_stream_details
    """
    stream_details["OpenShards"] = []
    min_hash_key = 0
    max_hash_key = 0
    for shard in stream_details["Shards"]:
        shard_id = shard["ShardId"]
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            # EndingSequenceNumber is null for open shards, so this shard is closed
            log.debug("skipping closed shard %s", shard_id)
            continue
        stream_details["OpenShards"].append(shard)
        hash_range = shard["HashKeyRange"]
        hash_range["StartingHashKey"] = long_int(hash_range["StartingHashKey"])
        hash_range["EndingHashKey"] = long_int(hash_range["EndingHashKey"])
        if hash_range["StartingHashKey"] < min_hash_key:
            min_hash_key = hash_range["StartingHashKey"]
        if hash_range["EndingHashKey"] > max_hash_key:
            max_hash_key = hash_range["EndingHashKey"]
    stream_details["OpenShards"].sort(
        key=lambda entry: long_int(entry["HashKeyRange"]["StartingHashKey"]))
    return min_hash_key, max_hash_key, stream_details
def long_int(hash_key):
    """
    Convert a 128-bit hash key (sent by AWS as a string) to a native integer
    so it can be compared arithmetically. Handles the Python 2/3 int/long
    incompatibility.

    CLI example::

        salt myminion boto_kinesis.long_int some_MD5_hash_as_string

    :return: long object if python 2.X, int object if python 3.X
    """
    py2 = sys.version_info < (3,)
    if py2:
        return long(hash_key)  # pylint: disable=incompatible-py3-code
    return int(hash_key)
def reshard(stream_name, desired_size, force=False,
            region=None, key=None, keyid=None, profile=None):
    """
    Reshard a kinesis stream. Each call to this function will wait until the stream is ACTIVE,
    then make a single split or merge operation. This function decides where to split or merge
    with the assumption that the ultimate goal is a balanced partition space.

    For safety, user must pass in force=True; otherwise, the function will dry run.

    CLI example::

        salt myminion boto_kinesis.reshard my_stream N True region=us-east-1

    :return: True if a split or merge was found/performed, False if nothing is needed
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    stream_response = get_stream_when_active(stream_name, region, key, keyid, profile)
    if 'error' in stream_response:
        return stream_response
    stream_details = stream_response['result']["StreamDescription"]
    min_hash_key, max_hash_key, stream_details = get_info_for_reshard(stream_details)
    log.debug("found %s open shards, min_hash_key %s max_hash_key %s",
              len(stream_details["OpenShards"]), min_hash_key, max_hash_key)

    # find the first open shard that doesn't match the desired pattern. When we find it,
    # either split or merge (depending on if it's too big or too small), and then return.
    for shard_num, shard in enumerate(stream_details["OpenShards"]):
        shard_id = shard["ShardId"]
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            # something went wrong, there's a closed shard in our open shard list
            log.debug("this should never happen! closed shard %s", shard_id)
            continue
        starting_hash_key = shard["HashKeyRange"]["StartingHashKey"]
        ending_hash_key = shard["HashKeyRange"]["EndingHashKey"]
        # this weird math matches what AWS does when you create a kinesis stream
        # with an initial number of shards.
        # NOTE: floor division (//) is required here. Hash keys are 128-bit
        # integers, and Python 3 true division (/) would yield a float that
        # cannot represent them exactly, breaking the equality comparisons
        # below and producing a float repr in NewStartingHashKey. Python 2
        # integer division already floored, so // preserves the original
        # semantics on both versions.
        expected_starting_hash_key = (
            max_hash_key - min_hash_key) // desired_size * shard_num + shard_num
        expected_ending_hash_key = (
            max_hash_key - min_hash_key) // desired_size * (shard_num + 1) + shard_num
        # fix an off-by-one at the end
        if expected_ending_hash_key > max_hash_key:
            expected_ending_hash_key = max_hash_key
        log.debug(
            "Shard %s (%s) should start at %s: %s",
            shard_num, shard_id, expected_starting_hash_key,
            starting_hash_key == expected_starting_hash_key
        )
        log.debug(
            "Shard %s (%s) should end at %s: %s",
            shard_num, shard_id, expected_ending_hash_key,
            ending_hash_key == expected_ending_hash_key
        )
        if starting_hash_key != expected_starting_hash_key:
            r['error'] = "starting hash keys mismatch, don't know what to do!"
            return r
        if ending_hash_key == expected_ending_hash_key:
            continue
        if ending_hash_key > expected_ending_hash_key + 1:
            # shard is too big: split at expected_ending_hash_key
            if force:
                log.debug("%s should end at %s, actual %s, splitting",
                          shard_id, expected_ending_hash_key, ending_hash_key)
                r = _execute_with_retries(conn,
                                          "split_shard",
                                          StreamName=stream_name,
                                          ShardToSplit=shard_id,
                                          NewStartingHashKey=str(expected_ending_hash_key + 1))  # future lint: disable=blacklisted-function
            else:
                log.debug("%s should end at %s, actual %s would split",
                          shard_id, expected_ending_hash_key, ending_hash_key)
            if 'error' not in r:
                r['result'] = True
            return r
        else:
            # shard is too small: merge with the next open shard
            next_shard_id = _get_next_open_shard(stream_details, shard_id)
            if not next_shard_id:
                r['error'] = "failed to find next shard after {0}".format(shard_id)
                return r
            if force:
                log.debug("%s should continue past %s, merging with %s",
                          shard_id, ending_hash_key, next_shard_id)
                r = _execute_with_retries(conn,
                                          "merge_shards",
                                          StreamName=stream_name,
                                          ShardToMerge=shard_id,
                                          AdjacentShardToMerge=next_shard_id)
            else:
                log.debug("%s should continue past %s, would merge with %s",
                          shard_id, ending_hash_key, next_shard_id)
            if 'error' not in r:
                r['result'] = True
            return r

    log.debug("No split or merge action necessary")
    r['result'] = False
    return r
def list_streams(region=None, key=None, keyid=None, profile=None):
    '''
    Return a list of all streams visible to the current account

    CLI example:

    .. code-block:: bash

        salt myminion boto_kinesis.list_streams
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    streams = []
    start_name = ''
    # an empty string means "first page"; None means "no more pages"
    while start_name is not None:
        kwargs = {}
        if start_name:
            kwargs['ExclusiveStartStreamName'] = start_name
        page = _execute_with_retries(conn, 'list_streams', **kwargs)
        if 'error' in page:
            return page
        data = page.get('result') or {}
        streams += data.get('StreamNames', [])
        if data.get('HasMoreStreams', False) in (True, 'true'):
            start_name = streams[-1]
        else:
            start_name = None
    return {'result': streams}
def _get_next_open_shard(stream_details, shard_id):
    '''
    Return the next open shard after shard_id

    CLI example::

        salt myminion boto_kinesis._get_next_open_shard existing_stream_details shard_id
    '''
    # Walk adjacent pairs; the shard after the first match is the answer.
    # Implicitly returns None when shard_id is last or not present.
    shard_ids = [entry["ShardId"] for entry in stream_details["OpenShards"]]
    for previous_id, current_id in zip(shard_ids, shard_ids[1:]):
        if previous_id == shard_id:
            return current_id
def _execute_with_retries(conn, function, **kwargs):
    '''
    Retry if we're rate limited by AWS or blocked by another call.
    Give up and return error message if resource not found or argument is invalid.

    conn
        The connection established by the calling method via _get_conn()

    function
        The function to call on conn. i.e. create_stream

    **kwargs
        Any kwargs required by the above function, with their keywords
        i.e. StreamName=stream_name

    Returns:
        The result dict with the HTTP response and JSON data if applicable
        as 'result', or an error as 'error'

    CLI example::

        salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
    '''
    r = {}
    max_attempts = 18
    max_retry_delay = 10
    for attempt in range(max_attempts):
        log.info("attempt: %s function: %s", attempt, function)
        try:
            r['result'] = getattr(conn, function)(**kwargs)
            return r
        except botocore.exceptions.ClientError as e:
            error_code = e.response['Error']['Code']
            retryable = ("LimitExceededException" in error_code
                         or "ResourceInUseException" in error_code)
            if not retryable:
                # ResourceNotFoundException or InvalidArgumentException:
                # retrying won't help, surface the error immediately
                r['error'] = e.response['Error']
                log.error(r['error'])
                r['result'] = None
                return r
            # rate limited by AWS or blocked by another command;
            # back off and try again
            log.debug("Retrying due to AWS exception", exc_info=True)
            time.sleep(_jittered_backoff(attempt, max_retry_delay))

    r['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts)
    log.error(r['error'])
    return r
def _jittered_backoff(attempt, max_retry_delay):
    '''
    Basic exponential backoff

    CLI example::

        salt myminion boto_kinesis._jittered_backoff current_attempt_number max_delay_in_seconds
    '''
    # delay grows as jitter * 2**attempt, clamped to max_retry_delay
    delay = random.random() * (2 ** attempt)
    return delay if delay < max_retry_delay else max_retry_delay
|
saltstack/salt
|
salt/modules/boto_kinesis.py
|
exists
|
python
|
def exists(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Check if the stream exists. Returns False and the error if it does not.
CLI example::
salt myminion boto_kinesis.exists my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
stream = _get_basic_stream(stream_name, conn)
if 'error' in stream:
r['result'] = False
r['error'] = stream['error']
else:
r['result'] = True
return r
|
Check if the stream exists. Returns False and the error if it does not.
CLI example::
salt myminion boto_kinesis.exists my_stream region=us-east-1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kinesis.py#L156-L174
|
[
"def _get_basic_stream(stream_name, conn):\n '''\n Stream info from AWS, via describe_stream\n Only returns the first \"page\" of shards (up to 100); use _get_full_stream() for all shards.\n\n CLI example::\n\n salt myminion boto_kinesis._get_basic_stream my_stream existing_conn\n '''\n return _execute_with_retries(conn, \"describe_stream\", StreamName=stream_name)\n"
] |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon Kinesis
.. versionadded:: 2017.7.0
:configuration: This module accepts explicit Kinesis credentials but can also
utilize IAM roles assigned to the instance trough Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
kinesis.keyid: GKTADJGHEIQSXMKKRBJ08H
kinesis.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
kinesis.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import random
import sys
# Import Salt libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
import salt.utils.versions
# Import third party libs
# pylint: disable=unused-import
try:
import boto3
import botocore
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=unused-import
log = logging.getLogger(__name__)
__virtualname__ = 'boto_kinesis'
def __virtual__():
    '''
    Only load if boto3 libraries exist.
    '''
    # check_boto_reqs() returns True when the boto requirements are satisfied;
    # any other value is the reason string/tuple, which we propagate unchanged.
    boto_check = salt.utils.versions.check_boto_reqs()
    if boto_check is not True:
        return boto_check
    __utils__['boto3.assign_funcs'](__name__, 'kinesis')
    return __virtualname__
def _get_basic_stream(stream_name, conn):
    '''
    Stream info from AWS, via describe_stream
    Only returns the first "page" of shards (up to 100); use _get_full_stream() for all shards.

    CLI example::

        salt myminion boto_kinesis._get_basic_stream my_stream existing_conn
    '''
    # single describe_stream call; pagination is _get_full_stream()'s job
    response = _execute_with_retries(conn, "describe_stream", StreamName=stream_name)
    return response
def _get_full_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get complete stream info from AWS, via describe_stream, including all shards.

    Returns a dict with the full stream description as 'result', or the AWS
    error dict (as produced by _execute_with_retries) if any call fails.

    CLI example::

        salt myminion boto_kinesis._get_full_stream my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    stream_response = _get_basic_stream(stream_name, conn)
    if 'error' in stream_response:
        # propagate the AWS error instead of raising KeyError on 'result'
        return stream_response
    stream = stream_response['result']
    full_stream = stream
    # iterate through if there are > 100 shards (max that AWS will return from describe_stream)
    while stream["StreamDescription"]["HasMoreShards"]:
        page = _execute_with_retries(conn,
                                     "describe_stream",
                                     StreamName=stream_name,
                                     ExclusiveStartShardId=stream["StreamDescription"]["Shards"][-1]["ShardId"])
        if 'error' in page:
            # a page fetch failed mid-pagination; surface the error
            return page
        stream = page['result']
        full_stream["StreamDescription"]["Shards"] += stream["StreamDescription"]["Shards"]
    r['result'] = full_stream
    return r
def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get complete stream info from AWS, returning only when the stream is in the ACTIVE state.
    Continues to retry when stream is updating or creating.
    If the stream is deleted during retries, the loop will catch the error and break.

    CLI example::

        salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    # poll the cheap single-page describe until ACTIVE, so we don't
    # repeatedly pull the full shard list of a very large stream
    attempt = 0
    max_retry_delay = 10
    while True:
        time.sleep(_jittered_backoff(attempt, max_retry_delay))
        attempt += 1
        stream_response = _get_basic_stream(stream_name, conn)
        if 'error' in stream_response:
            return stream_response
        if stream_response['result']["StreamDescription"]["StreamStatus"] == "ACTIVE":
            break

    # now it's active, fetch every shard if the description is paginated
    if stream_response['result']["StreamDescription"]["HasMoreShards"]:
        stream_response = _get_full_stream(stream_name, region, key, keyid, profile)
    return stream_response
def create_stream(stream_name, num_shards, region=None, key=None, keyid=None, profile=None):
    '''
    Create a stream with name stream_name and initial number of shards num_shards.

    CLI example::

        salt myminion boto_kinesis.create_stream my_stream N region=us-east-1
    '''
    client = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        client, "create_stream", ShardCount=num_shards, StreamName=stream_name)
    # _execute_with_retries only populates 'result' with the raw response;
    # normalize success to a boolean for callers
    if 'error' not in ret:
        ret['result'] = True
    return ret
def delete_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Delete the stream with name stream_name. This cannot be undone! All data will be lost!!

    CLI example::

        salt myminion boto_kinesis.delete_stream my_stream region=us-east-1
    '''
    client = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(client, "delete_stream", StreamName=stream_name)
    # normalize success to a boolean for callers
    if 'error' not in ret:
        ret['result'] = True
    return ret
def increase_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Increase stream retention period to retention_hours

    CLI example::

        salt myminion boto_kinesis.increase_stream_retention_period my_stream N region=us-east-1
    '''
    client = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        client,
        "increase_stream_retention_period",
        StreamName=stream_name,
        RetentionPeriodHours=retention_hours)
    # normalize success to a boolean for callers
    if 'error' not in ret:
        ret['result'] = True
    return ret
def decrease_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Decrease stream retention period to retention_hours

    CLI example::

        salt myminion boto_kinesis.decrease_stream_retention_period my_stream N region=us-east-1
    '''
    client = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        client,
        "decrease_stream_retention_period",
        StreamName=stream_name,
        RetentionPeriodHours=retention_hours)
    # normalize success to a boolean for callers
    if 'error' not in ret:
        ret['result'] = True
    return ret
def enable_enhanced_monitoring(stream_name, metrics,
                               region=None, key=None, keyid=None, profile=None):
    '''
    Enable enhanced monitoring for the specified shard-level metrics on stream stream_name

    CLI example::

        salt myminion boto_kinesis.enable_enhanced_monitoring my_stream ["metrics", "to", "enable"] region=us-east-1
    '''
    client = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        client,
        "enable_enhanced_monitoring",
        StreamName=stream_name,
        ShardLevelMetrics=metrics)
    # normalize success to a boolean for callers
    if 'error' not in ret:
        ret['result'] = True
    return ret
def disable_enhanced_monitoring(stream_name, metrics,
                                region=None, key=None, keyid=None, profile=None):
    '''
    Disable enhanced monitoring for the specified shard-level metrics on stream stream_name

    CLI example::

        salt myminion boto_kinesis.disable_enhanced_monitoring my_stream ["metrics", "to", "disable"] region=us-east-1
    '''
    client = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        client,
        "disable_enhanced_monitoring",
        StreamName=stream_name,
        ShardLevelMetrics=metrics)
    # normalize success to a boolean for callers
    if 'error' not in ret:
        ret['result'] = True
    return ret
def get_info_for_reshard(stream_details):
"""
Collect some data: number of open shards, key range, etc.
Modifies stream_details to add a sorted list of OpenShards.
Returns (min_hash_key, max_hash_key, stream_details)
CLI example::
salt myminion boto_kinesis.get_info_for_reshard existing_stream_details
"""
min_hash_key = 0
max_hash_key = 0
stream_details["OpenShards"] = []
for shard in stream_details["Shards"]:
shard_id = shard["ShardId"]
if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
# EndingSequenceNumber is null for open shards, so this shard must be closed
log.debug("skipping closed shard %s", shard_id)
continue
stream_details["OpenShards"].append(shard)
shard["HashKeyRange"]["StartingHashKey"] = long_int(
shard["HashKeyRange"]["StartingHashKey"])
shard["HashKeyRange"]["EndingHashKey"] = long_int(
shard["HashKeyRange"]["EndingHashKey"])
if shard["HashKeyRange"]["StartingHashKey"] < min_hash_key:
min_hash_key = shard["HashKeyRange"]["StartingHashKey"]
if shard["HashKeyRange"]["EndingHashKey"] > max_hash_key:
max_hash_key = shard["HashKeyRange"]["EndingHashKey"]
stream_details["OpenShards"].sort(key=lambda shard: long_int(
shard["HashKeyRange"]["StartingHashKey"]))
return min_hash_key, max_hash_key, stream_details
def long_int(hash_key):
"""
The hash key is a 128-bit int, sent as a string.
It's necessary to convert to int/long for comparison operations.
This helper method handles python 2/3 incompatibility
CLI example::
salt myminion boto_kinesis.long_int some_MD5_hash_as_string
:return: long object if python 2.X, int object if python 3.X
"""
if sys.version_info < (3,):
return long(hash_key) # pylint: disable=incompatible-py3-code
else:
return int(hash_key)
def reshard(stream_name, desired_size, force=False,
            region=None, key=None, keyid=None, profile=None):
    """
    Reshard a kinesis stream. Each call to this function will wait until the stream is ACTIVE,
    then make a single split or merge operation. This function decides where to split or merge
    with the assumption that the ultimate goal is a balanced partition space.

    For safety, user must pass in force=True; otherwise, the function will dry run.

    CLI example::

        salt myminion boto_kinesis.reshard my_stream N True region=us-east-1

    :return: True if a split or merge was found/performed, False if nothing is needed
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    stream_response = get_stream_when_active(stream_name, region, key, keyid, profile)
    if 'error' in stream_response:
        return stream_response
    stream_details = stream_response['result']["StreamDescription"]
    min_hash_key, max_hash_key, stream_details = get_info_for_reshard(stream_details)
    log.debug("found %s open shards, min_hash_key %s max_hash_key %s",
              len(stream_details["OpenShards"]), min_hash_key, max_hash_key)

    # find the first open shard that doesn't match the desired pattern. When we find it,
    # either split or merge (depending on if it's too big or too small), and then return.
    for shard_num, shard in enumerate(stream_details["OpenShards"]):
        shard_id = shard["ShardId"]
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            # something went wrong, there's a closed shard in our open shard list
            log.debug("this should never happen! closed shard %s", shard_id)
            continue
        starting_hash_key = shard["HashKeyRange"]["StartingHashKey"]
        ending_hash_key = shard["HashKeyRange"]["EndingHashKey"]
        # this weird math matches what AWS does when you create a kinesis stream
        # with an initial number of shards.
        # NOTE: floor division (//) is required here. Hash keys are 128-bit
        # integers, and Python 3 true division (/) would yield a float that
        # cannot represent them exactly, breaking the equality comparisons
        # below and producing a float repr in NewStartingHashKey. Python 2
        # integer division already floored, so // preserves the original
        # semantics on both versions.
        expected_starting_hash_key = (
            max_hash_key - min_hash_key) // desired_size * shard_num + shard_num
        expected_ending_hash_key = (
            max_hash_key - min_hash_key) // desired_size * (shard_num + 1) + shard_num
        # fix an off-by-one at the end
        if expected_ending_hash_key > max_hash_key:
            expected_ending_hash_key = max_hash_key
        log.debug(
            "Shard %s (%s) should start at %s: %s",
            shard_num, shard_id, expected_starting_hash_key,
            starting_hash_key == expected_starting_hash_key
        )
        log.debug(
            "Shard %s (%s) should end at %s: %s",
            shard_num, shard_id, expected_ending_hash_key,
            ending_hash_key == expected_ending_hash_key
        )
        if starting_hash_key != expected_starting_hash_key:
            r['error'] = "starting hash keys mismatch, don't know what to do!"
            return r
        if ending_hash_key == expected_ending_hash_key:
            continue
        if ending_hash_key > expected_ending_hash_key + 1:
            # shard is too big: split at expected_ending_hash_key
            if force:
                log.debug("%s should end at %s, actual %s, splitting",
                          shard_id, expected_ending_hash_key, ending_hash_key)
                r = _execute_with_retries(conn,
                                          "split_shard",
                                          StreamName=stream_name,
                                          ShardToSplit=shard_id,
                                          NewStartingHashKey=str(expected_ending_hash_key + 1))  # future lint: disable=blacklisted-function
            else:
                log.debug("%s should end at %s, actual %s would split",
                          shard_id, expected_ending_hash_key, ending_hash_key)
            if 'error' not in r:
                r['result'] = True
            return r
        else:
            # shard is too small: merge with the next open shard
            next_shard_id = _get_next_open_shard(stream_details, shard_id)
            if not next_shard_id:
                r['error'] = "failed to find next shard after {0}".format(shard_id)
                return r
            if force:
                log.debug("%s should continue past %s, merging with %s",
                          shard_id, ending_hash_key, next_shard_id)
                r = _execute_with_retries(conn,
                                          "merge_shards",
                                          StreamName=stream_name,
                                          ShardToMerge=shard_id,
                                          AdjacentShardToMerge=next_shard_id)
            else:
                log.debug("%s should continue past %s, would merge with %s",
                          shard_id, ending_hash_key, next_shard_id)
            if 'error' not in r:
                r['result'] = True
            return r

    log.debug("No split or merge action necessary")
    r['result'] = False
    return r
def list_streams(region=None, key=None, keyid=None, profile=None):
'''
Return a list of all streams visible to the current account
CLI example:
.. code-block:: bash
salt myminion boto_kinesis.list_streams
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
streams = []
exclusive_start_stream_name = ''
while exclusive_start_stream_name is not None:
args = {'ExclusiveStartStreamName': exclusive_start_stream_name} if exclusive_start_stream_name else {}
ret = _execute_with_retries(conn, 'list_streams', **args)
if 'error' in ret:
return ret
ret = ret['result'] if ret and ret.get('result') else {}
streams += ret.get('StreamNames', [])
exclusive_start_stream_name = streams[-1] if ret.get('HasMoreStreams', False) in (True, 'true') else None
return {'result': streams}
def _get_next_open_shard(stream_details, shard_id):
'''
Return the next open shard after shard_id
CLI example::
salt myminion boto_kinesis._get_next_open_shard existing_stream_details shard_id
'''
found = False
for shard in stream_details["OpenShards"]:
current_shard_id = shard["ShardId"]
if current_shard_id == shard_id:
found = True
continue
if found:
return current_shard_id
def _execute_with_retries(conn, function, **kwargs):
'''
Retry if we're rate limited by AWS or blocked by another call.
Give up and return error message if resource not found or argument is invalid.
conn
The connection established by the calling method via _get_conn()
function
The function to call on conn. i.e. create_stream
**kwargs
Any kwargs required by the above function, with their keywords
i.e. StreamName=stream_name
Returns:
The result dict with the HTTP response and JSON data if applicable
as 'result', or an error as 'error'
CLI example::
salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
'''
r = {}
max_attempts = 18
max_retry_delay = 10
for attempt in range(max_attempts):
log.info("attempt: %s function: %s", attempt, function)
try:
fn = getattr(conn, function)
r['result'] = fn(**kwargs)
return r
except botocore.exceptions.ClientError as e:
error_code = e.response['Error']['Code']
if "LimitExceededException" in error_code or "ResourceInUseException" in error_code:
# could be rate limited by AWS or another command is blocking,
# retry with exponential backoff
log.debug("Retrying due to AWS exception", exc_info=True)
time.sleep(_jittered_backoff(attempt, max_retry_delay))
else:
# ResourceNotFoundException or InvalidArgumentException
r['error'] = e.response['Error']
log.error(r['error'])
r['result'] = None
return r
r['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts)
log.error(r['error'])
return r
def _jittered_backoff(attempt, max_retry_delay):
'''
Basic exponential backoff
CLI example::
salt myminion boto_kinesis._jittered_backoff current_attempt_number max_delay_in_seconds
'''
return min(random.random() * (2 ** attempt), max_retry_delay)
|
saltstack/salt
|
salt/modules/boto_kinesis.py
|
create_stream
|
python
|
def create_stream(stream_name, num_shards, region=None, key=None, keyid=None, profile=None):
'''
Create a stream with name stream_name and initial number of shards num_shards.
CLI example::
salt myminion boto_kinesis.create_stream my_stream N region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = _execute_with_retries(conn,
"create_stream",
ShardCount=num_shards,
StreamName=stream_name)
if 'error' not in r:
r['result'] = True
return r
|
Create a stream with name stream_name and initial number of shards num_shards.
CLI example::
salt myminion boto_kinesis.create_stream my_stream N region=us-east-1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kinesis.py#L177-L192
|
[
"def _execute_with_retries(conn, function, **kwargs):\n '''\n Retry if we're rate limited by AWS or blocked by another call.\n Give up and return error message if resource not found or argument is invalid.\n\n conn\n The connection established by the calling method via _get_conn()\n\n function\n The function to call on conn. i.e. create_stream\n\n **kwargs\n Any kwargs required by the above function, with their keywords\n i.e. StreamName=stream_name\n\n Returns:\n The result dict with the HTTP response and JSON data if applicable\n as 'result', or an error as 'error'\n\n CLI example::\n\n salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs\n\n '''\n r = {}\n max_attempts = 18\n max_retry_delay = 10\n for attempt in range(max_attempts):\n log.info(\"attempt: %s function: %s\", attempt, function)\n try:\n fn = getattr(conn, function)\n r['result'] = fn(**kwargs)\n return r\n except botocore.exceptions.ClientError as e:\n error_code = e.response['Error']['Code']\n if \"LimitExceededException\" in error_code or \"ResourceInUseException\" in error_code:\n # could be rate limited by AWS or another command is blocking,\n # retry with exponential backoff\n log.debug(\"Retrying due to AWS exception\", exc_info=True)\n time.sleep(_jittered_backoff(attempt, max_retry_delay))\n else:\n # ResourceNotFoundException or InvalidArgumentException\n r['error'] = e.response['Error']\n log.error(r['error'])\n r['result'] = None\n return r\n\n r['error'] = \"Tried to execute function {0} {1} times, but was unable\".format(function, max_attempts)\n log.error(r['error'])\n return r\n"
] |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon Kinesis
.. versionadded:: 2017.7.0
:configuration: This module accepts explicit Kinesis credentials but can also
utilize IAM roles assigned to the instance trough Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
kinesis.keyid: GKTADJGHEIQSXMKKRBJ08H
kinesis.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
kinesis.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import random
import sys
# Import Salt libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
import salt.utils.versions
# Import third party libs
# pylint: disable=unused-import
try:
import boto3
import botocore
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=unused-import
log = logging.getLogger(__name__)
__virtualname__ = 'boto_kinesis'
def __virtual__():
'''
Only load if boto3 libraries exist.
'''
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
__utils__['boto3.assign_funcs'](__name__, 'kinesis')
return __virtualname__
return has_boto_reqs
def _get_basic_stream(stream_name, conn):
'''
Stream info from AWS, via describe_stream
Only returns the first "page" of shards (up to 100); use _get_full_stream() for all shards.
CLI example::
salt myminion boto_kinesis._get_basic_stream my_stream existing_conn
'''
return _execute_with_retries(conn, "describe_stream", StreamName=stream_name)
def _get_full_stream(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Get complete stream info from AWS, via describe_stream, including all shards.
CLI example::
salt myminion boto_kinesis._get_full_stream my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
stream = _get_basic_stream(stream_name, conn)['result']
full_stream = stream
# iterate through if there are > 100 shards (max that AWS will return from describe_stream)
while stream["StreamDescription"]["HasMoreShards"]:
stream = _execute_with_retries(conn,
"describe_stream",
StreamName=stream_name,
ExclusiveStartShardId=stream["StreamDescription"]["Shards"][-1]["ShardId"])
stream = stream['result']
full_stream["StreamDescription"]["Shards"] += stream["StreamDescription"]["Shards"]
r['result'] = full_stream
return r
def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Get complete stream info from AWS, returning only when the stream is in the ACTIVE state.
Continues to retry when stream is updating or creating.
If the stream is deleted during retries, the loop will catch the error and break.
CLI example::
salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
stream_status = None
# only get basic stream until it's active,
# so we don't pull the full list of shards repeatedly (in case of very large stream)
attempt = 0
max_retry_delay = 10
while stream_status != "ACTIVE":
time.sleep(_jittered_backoff(attempt, max_retry_delay))
attempt += 1
stream_response = _get_basic_stream(stream_name, conn)
if 'error' in stream_response:
return stream_response
stream_status = stream_response['result']["StreamDescription"]["StreamStatus"]
# now it's active, get the full stream if necessary
if stream_response['result']["StreamDescription"]["HasMoreShards"]:
stream_response = _get_full_stream(stream_name, region, key, keyid, profile)
return stream_response
def exists(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Check if the stream exists. Returns False and the error if it does not.

    CLI example::

        salt myminion boto_kinesis.exists my_stream region=us-east-1
    '''
    client = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    stream = _get_basic_stream(stream_name, client)
    # describe_stream failing (e.g. ResourceNotFoundException) means the
    # stream does not exist; pass the AWS error along for diagnostics
    ret = {'result': 'error' not in stream}
    if not ret['result']:
        ret['error'] = stream['error']
    return ret
def delete_stream(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Delete the stream with name stream_name. This cannot be undone! All data will be lost!!
CLI example::
salt myminion boto_kinesis.delete_stream my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = _execute_with_retries(conn,
"delete_stream",
StreamName=stream_name)
if 'error' not in r:
r['result'] = True
return r
def increase_stream_retention_period(stream_name, retention_hours,
region=None, key=None, keyid=None, profile=None):
'''
Increase stream retention period to retention_hours
CLI example::
salt myminion boto_kinesis.increase_stream_retention_period my_stream N region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = _execute_with_retries(conn,
"increase_stream_retention_period",
StreamName=stream_name,
RetentionPeriodHours=retention_hours)
if 'error' not in r:
r['result'] = True
return r
def decrease_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Decrease stream retention period to retention_hours
    CLI example::
        salt myminion boto_kinesis.decrease_stream_retention_period my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(conn,
                                'decrease_stream_retention_period',
                                StreamName=stream_name,
                                RetentionPeriodHours=retention_hours)
    # On failure the error dict is returned untouched; on success mark True.
    if 'error' in ret:
        return ret
    ret['result'] = True
    return ret
def enable_enhanced_monitoring(stream_name, metrics,
                               region=None, key=None, keyid=None, profile=None):
    '''
    Enable enhanced monitoring for the specified shard-level metrics on stream stream_name
    CLI example::
        salt myminion boto_kinesis.enable_enhanced_monitoring my_stream ["metrics", "to", "enable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(conn,
                                'enable_enhanced_monitoring',
                                StreamName=stream_name,
                                ShardLevelMetrics=metrics)
    # On failure the error dict is returned untouched; on success mark True.
    if 'error' in ret:
        return ret
    ret['result'] = True
    return ret
def disable_enhanced_monitoring(stream_name, metrics,
                                region=None, key=None, keyid=None, profile=None):
    '''
    Disable enhanced monitoring for the specified shard-level metrics on stream stream_name
    CLI example::
        salt myminion boto_kinesis.disable_enhanced_monitoring my_stream ["metrics", "to", "disable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(conn,
                                'disable_enhanced_monitoring',
                                StreamName=stream_name,
                                ShardLevelMetrics=metrics)
    # On failure the error dict is returned untouched; on success mark True.
    if 'error' in ret:
        return ret
    ret['result'] = True
    return ret
def get_info_for_reshard(stream_details):
    """
    Collect some data: number of open shards, key range, etc.
    Modifies stream_details to add a sorted list of OpenShards.
    Returns (min_hash_key, max_hash_key, stream_details)
    CLI example::
        salt myminion boto_kinesis.get_info_for_reshard existing_stream_details
    """
    min_hash_key = 0
    max_hash_key = 0
    open_shards = []
    for shard in stream_details["Shards"]:
        # Closed shards carry an EndingSequenceNumber; only open shards matter here.
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            log.debug("skipping closed shard %s", shard["ShardId"])
            continue
        key_range = shard["HashKeyRange"]
        # Convert the decimal-string hash keys to ints (in place, on the shard dict)
        # so later comparisons are numeric rather than lexicographic.
        key_range["StartingHashKey"] = long_int(key_range["StartingHashKey"])
        key_range["EndingHashKey"] = long_int(key_range["EndingHashKey"])
        if key_range["StartingHashKey"] < min_hash_key:
            min_hash_key = key_range["StartingHashKey"]
        if key_range["EndingHashKey"] > max_hash_key:
            max_hash_key = key_range["EndingHashKey"]
        open_shards.append(shard)
    open_shards.sort(key=lambda s: long_int(s["HashKeyRange"]["StartingHashKey"]))
    stream_details["OpenShards"] = open_shards
    return min_hash_key, max_hash_key, stream_details
def long_int(hash_key):
    """
    The hash key is a 128-bit int, sent as a string.
    It's necessary to convert to int/long for comparison operations.
    This helper method handles python 2/3 incompatibility
    CLI example::
        salt myminion boto_kinesis.long_int some_MD5_hash_as_string
    :return: long object if python 2.X, int object if python 3.X
    """
    # The conditional expression only evaluates the chosen branch, so the
    # ``long`` name is never touched on Python 3.
    is_py2 = sys.version_info < (3,)
    return long(hash_key) if is_py2 else int(hash_key)  # pylint: disable=incompatible-py3-code
def reshard(stream_name, desired_size, force=False,
            region=None, key=None, keyid=None, profile=None):
    """
    Reshard a kinesis stream. Each call to this function will wait until the stream is ACTIVE,
    then make a single split or merge operation. This function decides where to split or merge
    with the assumption that the ultimate goal is a balanced partition space.
    For safety, user must pass in force=True; otherwise, the function will dry run.
    CLI example::
        salt myminion boto_kinesis.reshard my_stream N True region=us-east-1
    :return: True if a split or merge was found/performed, False if nothing is needed
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    stream_response = get_stream_when_active(stream_name, region, key, keyid, profile)
    if 'error' in stream_response:
        return stream_response
    stream_details = stream_response['result']["StreamDescription"]
    min_hash_key, max_hash_key, stream_details = get_info_for_reshard(stream_details)
    log.debug("found %s open shards, min_hash_key %s max_hash_key %s",
              len(stream_details["OpenShards"]), min_hash_key, max_hash_key)
    # find the first open shard that doesn't match the desired pattern. When we find it,
    # either split or merge (depending on if it's too big or too small), and then return.
    for shard_num, shard in enumerate(stream_details["OpenShards"]):
        shard_id = shard["ShardId"]
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            # something went wrong, there's a closed shard in our open shard list
            log.debug("this should never happen! closed shard %s", shard_id)
            continue
        starting_hash_key = shard["HashKeyRange"]["StartingHashKey"]
        ending_hash_key = shard["HashKeyRange"]["EndingHashKey"]
        # this weird math matches what AWS does when you create a kinesis stream
        # with an initial number of shards.
        # NOTE: floor division (//) is required here. Hash keys are 128-bit
        # integers; Python 3 true division (/) would yield floats that cannot
        # represent them exactly, so the equality checks below would misfire
        # and str() would emit an invalid hash key for split_shard.
        expected_starting_hash_key = (
            max_hash_key - min_hash_key) // desired_size * shard_num + shard_num
        expected_ending_hash_key = (
            max_hash_key - min_hash_key) // desired_size * (shard_num + 1) + shard_num
        # fix an off-by-one at the end
        if expected_ending_hash_key > max_hash_key:
            expected_ending_hash_key = max_hash_key
        log.debug(
            "Shard %s (%s) should start at %s: %s",
            shard_num, shard_id, expected_starting_hash_key,
            starting_hash_key == expected_starting_hash_key
        )
        log.debug(
            "Shard %s (%s) should end at %s: %s",
            shard_num, shard_id, expected_ending_hash_key,
            ending_hash_key == expected_ending_hash_key
        )
        if starting_hash_key != expected_starting_hash_key:
            r['error'] = "starting hash keys mismatch, don't know what to do!"
            return r
        if ending_hash_key == expected_ending_hash_key:
            continue
        if ending_hash_key > expected_ending_hash_key + 1:
            # shard is too wide: split at expected_ending_hash_key
            if force:
                log.debug("%s should end at %s, actual %s, splitting",
                          shard_id, expected_ending_hash_key, ending_hash_key)
                r = _execute_with_retries(conn,
                                          "split_shard",
                                          StreamName=stream_name,
                                          ShardToSplit=shard_id,
                                          NewStartingHashKey=str(expected_ending_hash_key + 1))  # future lint: disable=blacklisted-function
            else:
                log.debug("%s should end at %s, actual %s would split",
                          shard_id, expected_ending_hash_key, ending_hash_key)
            if 'error' not in r:
                r['result'] = True
            return r
        else:
            # shard is too narrow: merge with the next open shard
            next_shard_id = _get_next_open_shard(stream_details, shard_id)
            if not next_shard_id:
                r['error'] = "failed to find next shard after {0}".format(shard_id)
                return r
            if force:
                log.debug("%s should continue past %s, merging with %s",
                          shard_id, ending_hash_key, next_shard_id)
                r = _execute_with_retries(conn,
                                          "merge_shards",
                                          StreamName=stream_name,
                                          ShardToMerge=shard_id,
                                          AdjacentShardToMerge=next_shard_id)
            else:
                log.debug("%s should continue past %s, would merge with %s",
                          shard_id, ending_hash_key, next_shard_id)
            if 'error' not in r:
                r['result'] = True
            return r
    log.debug("No split or merge action necessary")
    r['result'] = False
    return r
def list_streams(region=None, key=None, keyid=None, profile=None):
    '''
    Return a list of all streams visible to the current account
    CLI example:
    .. code-block:: bash
        salt myminion boto_kinesis.list_streams
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    streams = []
    exclusive_start_stream_name = ''
    while exclusive_start_stream_name is not None:
        # Only pass the pagination marker after the first request.
        args = {'ExclusiveStartStreamName': exclusive_start_stream_name} if exclusive_start_stream_name else {}
        ret = _execute_with_retries(conn, 'list_streams', **args)
        if 'error' in ret:
            return ret
        ret = ret['result'] if ret and ret.get('result') else {}
        streams += ret.get('StreamNames', [])
        # Guard against an empty page: streams[-1] on an empty list would raise
        # IndexError even if AWS (pathologically) reported more streams.
        has_more = ret.get('HasMoreStreams', False) in (True, 'true')
        exclusive_start_stream_name = streams[-1] if has_more and streams else None
    return {'result': streams}
def _get_next_open_shard(stream_details, shard_id):
'''
Return the next open shard after shard_id
CLI example::
salt myminion boto_kinesis._get_next_open_shard existing_stream_details shard_id
'''
found = False
for shard in stream_details["OpenShards"]:
current_shard_id = shard["ShardId"]
if current_shard_id == shard_id:
found = True
continue
if found:
return current_shard_id
def _execute_with_retries(conn, function, **kwargs):
    '''
    Retry if we're rate limited by AWS or blocked by another call.
    Give up and return error message if resource not found or argument is invalid.
    conn
        The connection established by the calling method via _get_conn()
    function
        The function to call on conn. i.e. create_stream
    **kwargs
        Any kwargs required by the above function, with their keywords
        i.e. StreamName=stream_name
    Returns:
        The result dict with the HTTP response and JSON data if applicable
        as 'result', or an error as 'error'
    CLI example::
        salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
    '''
    max_attempts = 18
    max_retry_delay = 10
    ret = {}
    for attempt in range(max_attempts):
        log.info("attempt: %s function: %s", attempt, function)
        try:
            ret['result'] = getattr(conn, function)(**kwargs)
            return ret
        except botocore.exceptions.ClientError as e:
            aws_error = e.response['Error']
            retryable = ("LimitExceededException" in aws_error['Code']
                         or "ResourceInUseException" in aws_error['Code'])
            if not retryable:
                # ResourceNotFoundException or InvalidArgumentException: give up now.
                ret['error'] = aws_error
                log.error(ret['error'])
                ret['result'] = None
                return ret
            # Rate limited by AWS or blocked by a concurrent call; back off and retry.
            log.debug("Retrying due to AWS exception", exc_info=True)
            time.sleep(_jittered_backoff(attempt, max_retry_delay))
    ret['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts)
    log.error(ret['error'])
    return ret
def _jittered_backoff(attempt, max_retry_delay):
'''
Basic exponential backoff
CLI example::
salt myminion boto_kinesis._jittered_backoff current_attempt_number max_delay_in_seconds
'''
return min(random.random() * (2 ** attempt), max_retry_delay)
|
saltstack/salt
|
salt/modules/boto_kinesis.py
|
delete_stream
|
python
|
def delete_stream(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Delete the stream with name stream_name. This cannot be undone! All data will be lost!!
CLI example::
salt myminion boto_kinesis.delete_stream my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = _execute_with_retries(conn,
"delete_stream",
StreamName=stream_name)
if 'error' not in r:
r['result'] = True
return r
|
Delete the stream with name stream_name. This cannot be undone! All data will be lost!!
CLI example::
salt myminion boto_kinesis.delete_stream my_stream region=us-east-1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kinesis.py#L195-L209
|
[
"def _execute_with_retries(conn, function, **kwargs):\n '''\n Retry if we're rate limited by AWS or blocked by another call.\n Give up and return error message if resource not found or argument is invalid.\n\n conn\n The connection established by the calling method via _get_conn()\n\n function\n The function to call on conn. i.e. create_stream\n\n **kwargs\n Any kwargs required by the above function, with their keywords\n i.e. StreamName=stream_name\n\n Returns:\n The result dict with the HTTP response and JSON data if applicable\n as 'result', or an error as 'error'\n\n CLI example::\n\n salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs\n\n '''\n r = {}\n max_attempts = 18\n max_retry_delay = 10\n for attempt in range(max_attempts):\n log.info(\"attempt: %s function: %s\", attempt, function)\n try:\n fn = getattr(conn, function)\n r['result'] = fn(**kwargs)\n return r\n except botocore.exceptions.ClientError as e:\n error_code = e.response['Error']['Code']\n if \"LimitExceededException\" in error_code or \"ResourceInUseException\" in error_code:\n # could be rate limited by AWS or another command is blocking,\n # retry with exponential backoff\n log.debug(\"Retrying due to AWS exception\", exc_info=True)\n time.sleep(_jittered_backoff(attempt, max_retry_delay))\n else:\n # ResourceNotFoundException or InvalidArgumentException\n r['error'] = e.response['Error']\n log.error(r['error'])\n r['result'] = None\n return r\n\n r['error'] = \"Tried to execute function {0} {1} times, but was unable\".format(function, max_attempts)\n log.error(r['error'])\n return r\n"
] |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon Kinesis
.. versionadded:: 2017.7.0
:configuration: This module accepts explicit Kinesis credentials but can also
utilize IAM roles assigned to the instance trough Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
kinesis.keyid: GKTADJGHEIQSXMKKRBJ08H
kinesis.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
kinesis.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import random
import sys
# Import Salt libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
import salt.utils.versions
# Import third party libs
# pylint: disable=unused-import
try:
import boto3
import botocore
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=unused-import
log = logging.getLogger(__name__)
__virtualname__ = 'boto_kinesis'
def __virtual__():
    '''
    Only load if boto3 libraries exist.
    '''
    boto_reqs = salt.utils.versions.check_boto_reqs()
    if boto_reqs is not True:
        # A False/string value explains why the module cannot load.
        return boto_reqs
    __utils__['boto3.assign_funcs'](__name__, 'kinesis')
    return __virtualname__
def _get_basic_stream(stream_name, conn):
    '''
    Stream info from AWS, via describe_stream
    Only returns the first "page" of shards (up to 100); use _get_full_stream() for all shards.
    CLI example::
        salt myminion boto_kinesis._get_basic_stream my_stream existing_conn
    '''
    # Delegate to the retrying executor; returns {'result': ...} on success
    # or {'error': ...} when describe_stream ultimately fails.
    return _execute_with_retries(conn, "describe_stream", StreamName=stream_name)
def _get_full_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get complete stream info from AWS, via describe_stream, including all shards.
    Returns {'result': ...} on success, or the {'error': ...} dict from the
    underlying describe_stream call on failure.
    CLI example::
        salt myminion boto_kinesis._get_full_stream my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    stream_response = _get_basic_stream(stream_name, conn)
    # Propagate lookup failures instead of crashing on a missing/None 'result' key.
    if 'error' in stream_response:
        return stream_response
    stream = stream_response['result']
    full_stream = stream
    # iterate through if there are > 100 shards (max that AWS will return from describe_stream)
    while stream["StreamDescription"]["HasMoreShards"]:
        stream_response = _execute_with_retries(
            conn,
            "describe_stream",
            StreamName=stream_name,
            ExclusiveStartShardId=stream["StreamDescription"]["Shards"][-1]["ShardId"])
        if 'error' in stream_response:
            return stream_response
        stream = stream_response['result']
        full_stream["StreamDescription"]["Shards"] += stream["StreamDescription"]["Shards"]
    r['result'] = full_stream
    return r
def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get complete stream info from AWS, returning only when the stream is in the ACTIVE state.
    Continues to retry when stream is updating or creating.
    If the stream is deleted during retries, the loop will catch the error and break.
    CLI example::
        salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    max_retry_delay = 10
    attempt = 0
    stream_status = None
    # Poll with only the basic (first-page) description until ACTIVE, so we
    # don't repeatedly pull the full shard list for very large streams.
    while stream_status != "ACTIVE":
        time.sleep(_jittered_backoff(attempt, max_retry_delay))
        attempt += 1
        stream_response = _get_basic_stream(stream_name, conn)
        if 'error' in stream_response:
            return stream_response
        stream_status = stream_response['result']["StreamDescription"]["StreamStatus"]
    # Stream is ACTIVE; fetch the remaining shard pages only when there are more.
    if stream_response['result']["StreamDescription"]["HasMoreShards"]:
        stream_response = _get_full_stream(stream_name, region, key, keyid, profile)
    return stream_response
def exists(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Check if the stream exists. Returns False and the error if it does not.
    CLI example::
        salt myminion boto_kinesis.exists my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    # A failed describe_stream carries an 'error' key; treat that as "missing".
    describe_result = _get_basic_stream(stream_name, conn)
    if 'error' in describe_result:
        return {'result': False, 'error': describe_result['error']}
    return {'result': True}
def create_stream(stream_name, num_shards, region=None, key=None, keyid=None, profile=None):
    '''
    Create a stream with name stream_name and initial number of shards num_shards.
    CLI example::
        salt myminion boto_kinesis.create_stream my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(conn,
                                'create_stream',
                                ShardCount=num_shards,
                                StreamName=stream_name)
    # On failure the error dict is returned untouched; on success mark True.
    if 'error' in ret:
        return ret
    ret['result'] = True
    return ret
def increase_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Increase stream retention period to retention_hours
    CLI example::
        salt myminion boto_kinesis.increase_stream_retention_period my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(conn,
                                'increase_stream_retention_period',
                                StreamName=stream_name,
                                RetentionPeriodHours=retention_hours)
    # On failure the error dict is returned untouched; on success mark True.
    if 'error' in ret:
        return ret
    ret['result'] = True
    return ret
def decrease_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Decrease stream retention period to retention_hours
    CLI example::
        salt myminion boto_kinesis.decrease_stream_retention_period my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(conn,
                                'decrease_stream_retention_period',
                                StreamName=stream_name,
                                RetentionPeriodHours=retention_hours)
    # On failure the error dict is returned untouched; on success mark True.
    if 'error' in ret:
        return ret
    ret['result'] = True
    return ret
def enable_enhanced_monitoring(stream_name, metrics,
                               region=None, key=None, keyid=None, profile=None):
    '''
    Enable enhanced monitoring for the specified shard-level metrics on stream stream_name
    CLI example::
        salt myminion boto_kinesis.enable_enhanced_monitoring my_stream ["metrics", "to", "enable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(conn,
                                'enable_enhanced_monitoring',
                                StreamName=stream_name,
                                ShardLevelMetrics=metrics)
    # On failure the error dict is returned untouched; on success mark True.
    if 'error' in ret:
        return ret
    ret['result'] = True
    return ret
def disable_enhanced_monitoring(stream_name, metrics,
                                region=None, key=None, keyid=None, profile=None):
    '''
    Disable enhanced monitoring for the specified shard-level metrics on stream stream_name
    CLI example::
        salt myminion boto_kinesis.disable_enhanced_monitoring my_stream ["metrics", "to", "disable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(conn,
                                'disable_enhanced_monitoring',
                                StreamName=stream_name,
                                ShardLevelMetrics=metrics)
    # On failure the error dict is returned untouched; on success mark True.
    if 'error' in ret:
        return ret
    ret['result'] = True
    return ret
def get_info_for_reshard(stream_details):
    """
    Collect some data: number of open shards, key range, etc.
    Modifies stream_details to add a sorted list of OpenShards.
    Returns (min_hash_key, max_hash_key, stream_details)
    CLI example::
        salt myminion boto_kinesis.get_info_for_reshard existing_stream_details
    """
    min_hash_key = 0
    max_hash_key = 0
    open_shards = []
    for shard in stream_details["Shards"]:
        # Closed shards carry an EndingSequenceNumber; only open shards matter here.
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            log.debug("skipping closed shard %s", shard["ShardId"])
            continue
        key_range = shard["HashKeyRange"]
        # Convert the decimal-string hash keys to ints (in place, on the shard dict)
        # so later comparisons are numeric rather than lexicographic.
        key_range["StartingHashKey"] = long_int(key_range["StartingHashKey"])
        key_range["EndingHashKey"] = long_int(key_range["EndingHashKey"])
        if key_range["StartingHashKey"] < min_hash_key:
            min_hash_key = key_range["StartingHashKey"]
        if key_range["EndingHashKey"] > max_hash_key:
            max_hash_key = key_range["EndingHashKey"]
        open_shards.append(shard)
    open_shards.sort(key=lambda s: long_int(s["HashKeyRange"]["StartingHashKey"]))
    stream_details["OpenShards"] = open_shards
    return min_hash_key, max_hash_key, stream_details
def long_int(hash_key):
    """
    The hash key is a 128-bit int, sent as a string.
    It's necessary to convert to int/long for comparison operations.
    This helper method handles python 2/3 incompatibility
    CLI example::
        salt myminion boto_kinesis.long_int some_MD5_hash_as_string
    :return: long object if python 2.X, int object if python 3.X
    """
    # The conditional expression only evaluates the chosen branch, so the
    # ``long`` name is never touched on Python 3.
    is_py2 = sys.version_info < (3,)
    return long(hash_key) if is_py2 else int(hash_key)  # pylint: disable=incompatible-py3-code
def reshard(stream_name, desired_size, force=False,
            region=None, key=None, keyid=None, profile=None):
    """
    Reshard a kinesis stream. Each call to this function will wait until the stream is ACTIVE,
    then make a single split or merge operation. This function decides where to split or merge
    with the assumption that the ultimate goal is a balanced partition space.
    For safety, user must pass in force=True; otherwise, the function will dry run.
    CLI example::
        salt myminion boto_kinesis.reshard my_stream N True region=us-east-1
    :return: True if a split or merge was found/performed, False if nothing is needed
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    stream_response = get_stream_when_active(stream_name, region, key, keyid, profile)
    if 'error' in stream_response:
        return stream_response
    stream_details = stream_response['result']["StreamDescription"]
    min_hash_key, max_hash_key, stream_details = get_info_for_reshard(stream_details)
    log.debug("found %s open shards, min_hash_key %s max_hash_key %s",
              len(stream_details["OpenShards"]), min_hash_key, max_hash_key)
    # find the first open shard that doesn't match the desired pattern. When we find it,
    # either split or merge (depending on if it's too big or too small), and then return.
    for shard_num, shard in enumerate(stream_details["OpenShards"]):
        shard_id = shard["ShardId"]
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            # something went wrong, there's a closed shard in our open shard list
            log.debug("this should never happen! closed shard %s", shard_id)
            continue
        starting_hash_key = shard["HashKeyRange"]["StartingHashKey"]
        ending_hash_key = shard["HashKeyRange"]["EndingHashKey"]
        # this weird math matches what AWS does when you create a kinesis stream
        # with an initial number of shards.
        # NOTE: floor division (//) is required here. Hash keys are 128-bit
        # integers; Python 3 true division (/) would yield floats that cannot
        # represent them exactly, so the equality checks below would misfire
        # and str() would emit an invalid hash key for split_shard.
        expected_starting_hash_key = (
            max_hash_key - min_hash_key) // desired_size * shard_num + shard_num
        expected_ending_hash_key = (
            max_hash_key - min_hash_key) // desired_size * (shard_num + 1) + shard_num
        # fix an off-by-one at the end
        if expected_ending_hash_key > max_hash_key:
            expected_ending_hash_key = max_hash_key
        log.debug(
            "Shard %s (%s) should start at %s: %s",
            shard_num, shard_id, expected_starting_hash_key,
            starting_hash_key == expected_starting_hash_key
        )
        log.debug(
            "Shard %s (%s) should end at %s: %s",
            shard_num, shard_id, expected_ending_hash_key,
            ending_hash_key == expected_ending_hash_key
        )
        if starting_hash_key != expected_starting_hash_key:
            r['error'] = "starting hash keys mismatch, don't know what to do!"
            return r
        if ending_hash_key == expected_ending_hash_key:
            continue
        if ending_hash_key > expected_ending_hash_key + 1:
            # shard is too wide: split at expected_ending_hash_key
            if force:
                log.debug("%s should end at %s, actual %s, splitting",
                          shard_id, expected_ending_hash_key, ending_hash_key)
                r = _execute_with_retries(conn,
                                          "split_shard",
                                          StreamName=stream_name,
                                          ShardToSplit=shard_id,
                                          NewStartingHashKey=str(expected_ending_hash_key + 1))  # future lint: disable=blacklisted-function
            else:
                log.debug("%s should end at %s, actual %s would split",
                          shard_id, expected_ending_hash_key, ending_hash_key)
            if 'error' not in r:
                r['result'] = True
            return r
        else:
            # shard is too narrow: merge with the next open shard
            next_shard_id = _get_next_open_shard(stream_details, shard_id)
            if not next_shard_id:
                r['error'] = "failed to find next shard after {0}".format(shard_id)
                return r
            if force:
                log.debug("%s should continue past %s, merging with %s",
                          shard_id, ending_hash_key, next_shard_id)
                r = _execute_with_retries(conn,
                                          "merge_shards",
                                          StreamName=stream_name,
                                          ShardToMerge=shard_id,
                                          AdjacentShardToMerge=next_shard_id)
            else:
                log.debug("%s should continue past %s, would merge with %s",
                          shard_id, ending_hash_key, next_shard_id)
            if 'error' not in r:
                r['result'] = True
            return r
    log.debug("No split or merge action necessary")
    r['result'] = False
    return r
def list_streams(region=None, key=None, keyid=None, profile=None):
    '''
    Return a list of all streams visible to the current account
    CLI example:
    .. code-block:: bash
        salt myminion boto_kinesis.list_streams
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    streams = []
    exclusive_start_stream_name = ''
    while exclusive_start_stream_name is not None:
        # Only pass the pagination marker after the first request.
        args = {'ExclusiveStartStreamName': exclusive_start_stream_name} if exclusive_start_stream_name else {}
        ret = _execute_with_retries(conn, 'list_streams', **args)
        if 'error' in ret:
            return ret
        ret = ret['result'] if ret and ret.get('result') else {}
        streams += ret.get('StreamNames', [])
        # Guard against an empty page: streams[-1] on an empty list would raise
        # IndexError even if AWS (pathologically) reported more streams.
        has_more = ret.get('HasMoreStreams', False) in (True, 'true')
        exclusive_start_stream_name = streams[-1] if has_more and streams else None
    return {'result': streams}
def _get_next_open_shard(stream_details, shard_id):
'''
Return the next open shard after shard_id
CLI example::
salt myminion boto_kinesis._get_next_open_shard existing_stream_details shard_id
'''
found = False
for shard in stream_details["OpenShards"]:
current_shard_id = shard["ShardId"]
if current_shard_id == shard_id:
found = True
continue
if found:
return current_shard_id
def _execute_with_retries(conn, function, **kwargs):
    '''
    Retry if we're rate limited by AWS or blocked by another call.
    Give up and return error message if resource not found or argument is invalid.
    conn
        The connection established by the calling method via _get_conn()
    function
        The function to call on conn. i.e. create_stream
    **kwargs
        Any kwargs required by the above function, with their keywords
        i.e. StreamName=stream_name
    Returns:
        The result dict with the HTTP response and JSON data if applicable
        as 'result', or an error as 'error'
    CLI example::
        salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
    '''
    max_attempts = 18
    max_retry_delay = 10
    ret = {}
    for attempt in range(max_attempts):
        log.info("attempt: %s function: %s", attempt, function)
        try:
            ret['result'] = getattr(conn, function)(**kwargs)
            return ret
        except botocore.exceptions.ClientError as e:
            aws_error = e.response['Error']
            retryable = ("LimitExceededException" in aws_error['Code']
                         or "ResourceInUseException" in aws_error['Code'])
            if not retryable:
                # ResourceNotFoundException or InvalidArgumentException: give up now.
                ret['error'] = aws_error
                log.error(ret['error'])
                ret['result'] = None
                return ret
            # Rate limited by AWS or blocked by a concurrent call; back off and retry.
            log.debug("Retrying due to AWS exception", exc_info=True)
            time.sleep(_jittered_backoff(attempt, max_retry_delay))
    ret['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts)
    log.error(ret['error'])
    return ret
def _jittered_backoff(attempt, max_retry_delay):
'''
Basic exponential backoff
CLI example::
salt myminion boto_kinesis._jittered_backoff current_attempt_number max_delay_in_seconds
'''
return min(random.random() * (2 ** attempt), max_retry_delay)
|
saltstack/salt
|
salt/modules/boto_kinesis.py
|
increase_stream_retention_period
|
python
|
def increase_stream_retention_period(stream_name, retention_hours,
region=None, key=None, keyid=None, profile=None):
'''
Increase stream retention period to retention_hours
CLI example::
salt myminion boto_kinesis.increase_stream_retention_period my_stream N region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = _execute_with_retries(conn,
"increase_stream_retention_period",
StreamName=stream_name,
RetentionPeriodHours=retention_hours)
if 'error' not in r:
r['result'] = True
return r
|
Increase stream retention period to retention_hours
CLI example::
salt myminion boto_kinesis.increase_stream_retention_period my_stream N region=us-east-1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kinesis.py#L212-L228
|
[
"def _execute_with_retries(conn, function, **kwargs):\n '''\n Retry if we're rate limited by AWS or blocked by another call.\n Give up and return error message if resource not found or argument is invalid.\n\n conn\n The connection established by the calling method via _get_conn()\n\n function\n The function to call on conn. i.e. create_stream\n\n **kwargs\n Any kwargs required by the above function, with their keywords\n i.e. StreamName=stream_name\n\n Returns:\n The result dict with the HTTP response and JSON data if applicable\n as 'result', or an error as 'error'\n\n CLI example::\n\n salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs\n\n '''\n r = {}\n max_attempts = 18\n max_retry_delay = 10\n for attempt in range(max_attempts):\n log.info(\"attempt: %s function: %s\", attempt, function)\n try:\n fn = getattr(conn, function)\n r['result'] = fn(**kwargs)\n return r\n except botocore.exceptions.ClientError as e:\n error_code = e.response['Error']['Code']\n if \"LimitExceededException\" in error_code or \"ResourceInUseException\" in error_code:\n # could be rate limited by AWS or another command is blocking,\n # retry with exponential backoff\n log.debug(\"Retrying due to AWS exception\", exc_info=True)\n time.sleep(_jittered_backoff(attempt, max_retry_delay))\n else:\n # ResourceNotFoundException or InvalidArgumentException\n r['error'] = e.response['Error']\n log.error(r['error'])\n r['result'] = None\n return r\n\n r['error'] = \"Tried to execute function {0} {1} times, but was unable\".format(function, max_attempts)\n log.error(r['error'])\n return r\n"
] |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon Kinesis
.. versionadded:: 2017.7.0
:configuration: This module accepts explicit Kinesis credentials but can also
utilize IAM roles assigned to the instance trough Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
kinesis.keyid: GKTADJGHEIQSXMKKRBJ08H
kinesis.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
kinesis.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import random
import sys
# Import Salt libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
import salt.utils.versions
# Import third party libs
# pylint: disable=unused-import
try:
import boto3
import botocore
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=unused-import
log = logging.getLogger(__name__)
__virtualname__ = 'boto_kinesis'
def __virtual__():
    '''
    Only load this module if the boto3/botocore requirements are met.

    Returns the module's virtual name on success, otherwise the reason
    string/False tuple produced by the requirement check.
    '''
    has_boto_reqs = salt.utils.versions.check_boto_reqs()
    if has_boto_reqs is True:
        # Wire the shared boto3 helper functions (_get_conn, _cache_id, ...)
        # into this module's namespace for the 'kinesis' service.
        __utils__['boto3.assign_funcs'](__name__, 'kinesis')
        return __virtualname__
    return has_boto_reqs
def _get_basic_stream(stream_name, conn):
    '''
    Return stream info from AWS via describe_stream.

    Only returns the first "page" of shards (up to 100); use
    _get_full_stream() when all shards are needed.

    stream_name
        Name of the stream to describe.

    conn
        An existing boto3 Kinesis client.

    CLI example::

        salt myminion boto_kinesis._get_basic_stream my_stream existing_conn
    '''
    return _execute_with_retries(conn, "describe_stream", StreamName=stream_name)
def _get_full_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get complete stream info from AWS, via describe_stream, including all shards.

    CLI example::

        salt myminion boto_kinesis._get_full_stream my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    # First page of the description; later pages are merged into it.
    page = _get_basic_stream(stream_name, conn)['result']
    full_stream = page
    description = full_stream["StreamDescription"]
    # describe_stream returns at most 100 shards per call, so keep paging
    # until AWS reports there are no more.
    while page["StreamDescription"]["HasMoreShards"]:
        last_shard_id = page["StreamDescription"]["Shards"][-1]["ShardId"]
        page = _execute_with_retries(conn,
                                     "describe_stream",
                                     StreamName=stream_name,
                                     ExclusiveStartShardId=last_shard_id)['result']
        description["Shards"] += page["StreamDescription"]["Shards"]
    return {'result': full_stream}
def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get complete stream info from AWS, returning only once the stream reaches
    the ACTIVE state.  Keeps retrying (with jittered backoff) while the stream
    is creating or updating; if the stream is deleted during the wait, the AWS
    error is returned instead.

    CLI example::

        salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    max_retry_delay = 10
    attempt = 0
    # Poll the cheap single-page description until the stream is ACTIVE, so a
    # very large stream is not fully enumerated on every retry.
    stream_response = None
    while True:
        time.sleep(_jittered_backoff(attempt, max_retry_delay))
        attempt += 1
        stream_response = _get_basic_stream(stream_name, conn)
        if 'error' in stream_response:
            return stream_response
        if stream_response['result']["StreamDescription"]["StreamStatus"] == "ACTIVE":
            break
    # Now that it is active, fetch the remaining shard pages if any exist.
    if stream_response['result']["StreamDescription"]["HasMoreShards"]:
        stream_response = _get_full_stream(stream_name, region, key, keyid, profile)
    return stream_response
def exists(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Check if the stream exists.  Returns ``{'result': False}`` together with
    the AWS error when it does not.

    CLI example::

        salt myminion boto_kinesis.exists my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    stream = _get_basic_stream(stream_name, conn)
    if 'error' not in stream:
        return {'result': True}
    # describe_stream failed -- surface the AWS error alongside the result.
    return {'result': False, 'error': stream['error']}
def create_stream(stream_name, num_shards, region=None, key=None, keyid=None, profile=None):
    '''
    Create a stream named ``stream_name`` with an initial shard count of
    ``num_shards``.

    CLI example::

        salt myminion boto_kinesis.create_stream my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    res = _execute_with_retries(conn,
                                "create_stream",
                                ShardCount=num_shards,
                                StreamName=stream_name)
    if 'error' not in res:
        # Success: callers only care that the request went through.
        res['result'] = True
    return res
def delete_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Delete the stream with name stream_name. This cannot be undone! All data
    will be lost!!

    CLI example::

        salt myminion boto_kinesis.delete_stream my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    res = _execute_with_retries(conn,
                                "delete_stream",
                                StreamName=stream_name)
    if 'error' not in res:
        res['result'] = True
    return res
def decrease_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Decrease the stream's retention period to ``retention_hours``.

    CLI example::

        salt myminion boto_kinesis.decrease_stream_retention_period my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    res = _execute_with_retries(conn,
                                "decrease_stream_retention_period",
                                StreamName=stream_name,
                                RetentionPeriodHours=retention_hours)
    if 'error' not in res:
        res['result'] = True
    return res
def enable_enhanced_monitoring(stream_name, metrics,
                               region=None, key=None, keyid=None, profile=None):
    '''
    Enable enhanced monitoring for the specified shard-level metrics on
    stream ``stream_name``.

    CLI example::

        salt myminion boto_kinesis.enable_enhanced_monitoring my_stream ["metrics", "to", "enable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    res = _execute_with_retries(conn,
                                "enable_enhanced_monitoring",
                                StreamName=stream_name,
                                ShardLevelMetrics=metrics)
    if 'error' not in res:
        res['result'] = True
    return res
def disable_enhanced_monitoring(stream_name, metrics,
                                region=None, key=None, keyid=None, profile=None):
    '''
    Disable enhanced monitoring for the specified shard-level metrics on
    stream ``stream_name``.

    CLI example::

        salt myminion boto_kinesis.disable_enhanced_monitoring my_stream ["metrics", "to", "disable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    res = _execute_with_retries(conn,
                                "disable_enhanced_monitoring",
                                StreamName=stream_name,
                                ShardLevelMetrics=metrics)
    if 'error' not in res:
        res['result'] = True
    return res
def get_info_for_reshard(stream_details):
"""
Collect some data: number of open shards, key range, etc.
Modifies stream_details to add a sorted list of OpenShards.
Returns (min_hash_key, max_hash_key, stream_details)
CLI example::
salt myminion boto_kinesis.get_info_for_reshard existing_stream_details
"""
min_hash_key = 0
max_hash_key = 0
stream_details["OpenShards"] = []
for shard in stream_details["Shards"]:
shard_id = shard["ShardId"]
if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
# EndingSequenceNumber is null for open shards, so this shard must be closed
log.debug("skipping closed shard %s", shard_id)
continue
stream_details["OpenShards"].append(shard)
shard["HashKeyRange"]["StartingHashKey"] = long_int(
shard["HashKeyRange"]["StartingHashKey"])
shard["HashKeyRange"]["EndingHashKey"] = long_int(
shard["HashKeyRange"]["EndingHashKey"])
if shard["HashKeyRange"]["StartingHashKey"] < min_hash_key:
min_hash_key = shard["HashKeyRange"]["StartingHashKey"]
if shard["HashKeyRange"]["EndingHashKey"] > max_hash_key:
max_hash_key = shard["HashKeyRange"]["EndingHashKey"]
stream_details["OpenShards"].sort(key=lambda shard: long_int(
shard["HashKeyRange"]["StartingHashKey"]))
return min_hash_key, max_hash_key, stream_details
def long_int(hash_key):
    """
    Convert a hash key (a 128-bit integer that AWS sends as a string) to a
    native integer so it can be used in comparisons.  Bridges the
    Python 2/3 int/long split.

    CLI example::

        salt myminion boto_kinesis.long_int some_MD5_hash_as_string

    :return: long object if python 2.X, int object if python 3.X
    """
    if sys.version_info >= (3,):
        return int(hash_key)
    return long(hash_key)  # pylint: disable=incompatible-py3-code
def reshard(stream_name, desired_size, force=False,
            region=None, key=None, keyid=None, profile=None):
    """
    Reshard a kinesis stream. Each call to this function will wait until the
    stream is ACTIVE, then make a single split or merge operation. This
    function decides where to split or merge with the assumption that the
    ultimate goal is a balanced partition space across ``desired_size``
    open shards.

    For safety, user must pass in force=True; otherwise, the function will
    dry run.

    CLI example::

        salt myminion boto_kinesis.reshard my_stream N True region=us-east-1

    :return: True if a split or merge was found/performed, False if nothing is needed
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    stream_response = get_stream_when_active(stream_name, region, key, keyid, profile)
    if 'error' in stream_response:
        return stream_response
    stream_details = stream_response['result']["StreamDescription"]
    min_hash_key, max_hash_key, stream_details = get_info_for_reshard(stream_details)
    log.debug("found %s open shards, min_hash_key %s max_hash_key %s",
              len(stream_details["OpenShards"]), min_hash_key, max_hash_key)
    # find the first open shard that doesn't match the desired pattern. When we find it,
    # either split or merge (depending on if it's too big or too small), and then return.
    for shard_num, shard in enumerate(stream_details["OpenShards"]):
        shard_id = shard["ShardId"]
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            # something went wrong, there's a closed shard in our open shard list
            log.debug("this should never happen! closed shard %s", shard_id)
            continue
        starting_hash_key = shard["HashKeyRange"]["StartingHashKey"]
        ending_hash_key = shard["HashKeyRange"]["EndingHashKey"]
        # this weird math matches what AWS does when you create a kinesis stream
        # with an initial number of shards.
        # NOTE: floor division (//) is required here.  On Python 3, true
        # division (/) yields floats, which cannot represent 128-bit hash
        # keys exactly, so the int equality checks below would always fail.
        # On Python 2 ints, // behaves identically to the old /.
        expected_starting_hash_key = (
            max_hash_key - min_hash_key) // desired_size * shard_num + shard_num
        expected_ending_hash_key = (
            max_hash_key - min_hash_key) // desired_size * (shard_num + 1) + shard_num
        # fix an off-by-one at the end
        if expected_ending_hash_key > max_hash_key:
            expected_ending_hash_key = max_hash_key
        log.debug(
            "Shard %s (%s) should start at %s: %s",
            shard_num, shard_id, expected_starting_hash_key,
            starting_hash_key == expected_starting_hash_key
        )
        log.debug(
            "Shard %s (%s) should end at %s: %s",
            shard_num, shard_id, expected_ending_hash_key,
            ending_hash_key == expected_ending_hash_key
        )
        if starting_hash_key != expected_starting_hash_key:
            r['error'] = "starting hash keys mismatch, don't know what to do!"
            return r
        if ending_hash_key == expected_ending_hash_key:
            continue
        if ending_hash_key > expected_ending_hash_key + 1:
            # this shard is too wide: split at expected_ending_hash_key
            if force:
                log.debug("%s should end at %s, actual %s, splitting",
                          shard_id, expected_ending_hash_key, ending_hash_key)
                r = _execute_with_retries(conn,
                                          "split_shard",
                                          StreamName=stream_name,
                                          ShardToSplit=shard_id,
                                          NewStartingHashKey=str(expected_ending_hash_key + 1))  # future lint: disable=blacklisted-function
            else:
                log.debug("%s should end at %s, actual %s would split",
                          shard_id, expected_ending_hash_key, ending_hash_key)
            if 'error' not in r:
                r['result'] = True
            return r
        else:
            # this shard is too narrow: merge with the next open shard
            next_shard_id = _get_next_open_shard(stream_details, shard_id)
            if not next_shard_id:
                r['error'] = "failed to find next shard after {0}".format(shard_id)
                return r
            if force:
                log.debug("%s should continue past %s, merging with %s",
                          shard_id, ending_hash_key, next_shard_id)
                r = _execute_with_retries(conn,
                                          "merge_shards",
                                          StreamName=stream_name,
                                          ShardToMerge=shard_id,
                                          AdjacentShardToMerge=next_shard_id)
            else:
                log.debug("%s should continue past %s, would merge with %s",
                          shard_id, ending_hash_key, next_shard_id)
            if 'error' not in r:
                r['result'] = True
            return r
    log.debug("No split or merge action necessary")
    r['result'] = False
    return r
def list_streams(region=None, key=None, keyid=None, profile=None):
    '''
    Return a list of all streams visible to the current account.

    CLI example:

    .. code-block:: bash

        salt myminion boto_kinesis.list_streams
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    streams = []
    marker = ''
    # AWS pages the stream list; the last name seen becomes the exclusive
    # start marker for the next request.  A marker of None ends the loop.
    while marker is not None:
        kwargs = {}
        if marker:
            kwargs['ExclusiveStartStreamName'] = marker
        ret = _execute_with_retries(conn, 'list_streams', **kwargs)
        if 'error' in ret:
            return ret
        page = ret.get('result') or {}
        streams.extend(page.get('StreamNames', []))
        if page.get('HasMoreStreams', False) in (True, 'true'):
            marker = streams[-1]
        else:
            marker = None
    return {'result': streams}
def _get_next_open_shard(stream_details, shard_id):
'''
Return the next open shard after shard_id
CLI example::
salt myminion boto_kinesis._get_next_open_shard existing_stream_details shard_id
'''
found = False
for shard in stream_details["OpenShards"]:
current_shard_id = shard["ShardId"]
if current_shard_id == shard_id:
found = True
continue
if found:
return current_shard_id
def _execute_with_retries(conn, function, **kwargs):
    '''
    Call ``function`` on ``conn``, retrying with jittered exponential
    backoff when AWS rate-limits the call or the resource is busy.

    Gives up and returns an error message if the resource is not found or
    an argument is invalid.

    conn
        The connection established by the calling method via _get_conn()

    function
        The function to call on conn. i.e. create_stream

    **kwargs
        Any kwargs required by the above function, with their keywords
        i.e. StreamName=stream_name

    Returns:
        The result dict with the HTTP response and JSON data if applicable
        as 'result', or an error as 'error'

    CLI example::

        salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
    '''
    r = {}
    max_attempts = 18
    max_retry_delay = 10
    for attempt in range(max_attempts):
        log.info("attempt: %s function: %s", attempt, function)
        try:
            fn = getattr(conn, function)
            r['result'] = fn(**kwargs)
            return r
        except botocore.exceptions.ClientError as e:
            error_code = e.response['Error']['Code']
            if "LimitExceededException" in error_code or "ResourceInUseException" in error_code:
                # Transient: rate limited by AWS, or another operation is
                # blocking this one -- retry with jittered exponential backoff.
                log.debug("Retrying due to AWS exception", exc_info=True)
                time.sleep(_jittered_backoff(attempt, max_retry_delay))
            else:
                # Permanent failure (e.g. ResourceNotFoundException or
                # InvalidArgumentException): report the AWS error verbatim.
                r['error'] = e.response['Error']
                log.error(r['error'])
                r['result'] = None
                return r
    # All attempts exhausted without a definitive answer.
    r['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts)
    log.error(r['error'])
    return r
def _jittered_backoff(attempt, max_retry_delay):
'''
Basic exponential backoff
CLI example::
salt myminion boto_kinesis._jittered_backoff current_attempt_number max_delay_in_seconds
'''
return min(random.random() * (2 ** attempt), max_retry_delay)
|
saltstack/salt
|
salt/modules/boto_kinesis.py
|
enable_enhanced_monitoring
|
python
|
def enable_enhanced_monitoring(stream_name, metrics,
region=None, key=None, keyid=None, profile=None):
'''
Enable enhanced monitoring for the specified shard-level metrics on stream stream_name
CLI example::
salt myminion boto_kinesis.enable_enhanced_monitoring my_stream ["metrics", "to", "enable"] region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = _execute_with_retries(conn,
"enable_enhanced_monitoring",
StreamName=stream_name,
ShardLevelMetrics=metrics)
if 'error' not in r:
r['result'] = True
return r
|
Enable enhanced monitoring for the specified shard-level metrics on stream stream_name
CLI example::
salt myminion boto_kinesis.enable_enhanced_monitoring my_stream ["metrics", "to", "enable"] region=us-east-1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kinesis.py#L250-L267
|
[
"def _execute_with_retries(conn, function, **kwargs):\n '''\n Retry if we're rate limited by AWS or blocked by another call.\n Give up and return error message if resource not found or argument is invalid.\n\n conn\n The connection established by the calling method via _get_conn()\n\n function\n The function to call on conn. i.e. create_stream\n\n **kwargs\n Any kwargs required by the above function, with their keywords\n i.e. StreamName=stream_name\n\n Returns:\n The result dict with the HTTP response and JSON data if applicable\n as 'result', or an error as 'error'\n\n CLI example::\n\n salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs\n\n '''\n r = {}\n max_attempts = 18\n max_retry_delay = 10\n for attempt in range(max_attempts):\n log.info(\"attempt: %s function: %s\", attempt, function)\n try:\n fn = getattr(conn, function)\n r['result'] = fn(**kwargs)\n return r\n except botocore.exceptions.ClientError as e:\n error_code = e.response['Error']['Code']\n if \"LimitExceededException\" in error_code or \"ResourceInUseException\" in error_code:\n # could be rate limited by AWS or another command is blocking,\n # retry with exponential backoff\n log.debug(\"Retrying due to AWS exception\", exc_info=True)\n time.sleep(_jittered_backoff(attempt, max_retry_delay))\n else:\n # ResourceNotFoundException or InvalidArgumentException\n r['error'] = e.response['Error']\n log.error(r['error'])\n r['result'] = None\n return r\n\n r['error'] = \"Tried to execute function {0} {1} times, but was unable\".format(function, max_attempts)\n log.error(r['error'])\n return r\n"
] |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon Kinesis
.. versionadded:: 2017.7.0
:configuration: This module accepts explicit Kinesis credentials but can also
utilize IAM roles assigned to the instance trough Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
kinesis.keyid: GKTADJGHEIQSXMKKRBJ08H
kinesis.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
kinesis.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import random
import sys
# Import Salt libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
import salt.utils.versions
# Import third party libs
# pylint: disable=unused-import
try:
import boto3
import botocore
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=unused-import
log = logging.getLogger(__name__)
__virtualname__ = 'boto_kinesis'
def __virtual__():
    '''
    Only load this module if the boto3/botocore requirements are met.

    Returns the module's virtual name on success, otherwise the reason
    string/False tuple produced by the requirement check.
    '''
    has_boto_reqs = salt.utils.versions.check_boto_reqs()
    if has_boto_reqs is True:
        # Wire the shared boto3 helper functions (_get_conn, _cache_id, ...)
        # into this module's namespace for the 'kinesis' service.
        __utils__['boto3.assign_funcs'](__name__, 'kinesis')
        return __virtualname__
    return has_boto_reqs
def _get_basic_stream(stream_name, conn):
    '''
    Return stream info from AWS via describe_stream.

    Only returns the first "page" of shards (up to 100); use
    _get_full_stream() when all shards are needed.

    stream_name
        Name of the stream to describe.

    conn
        An existing boto3 Kinesis client.

    CLI example::

        salt myminion boto_kinesis._get_basic_stream my_stream existing_conn
    '''
    return _execute_with_retries(conn, "describe_stream", StreamName=stream_name)
def _get_full_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get complete stream info from AWS, via describe_stream, including all shards.

    CLI example::

        salt myminion boto_kinesis._get_full_stream my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    # First page of the description; later pages are merged into it.
    page = _get_basic_stream(stream_name, conn)['result']
    full_stream = page
    description = full_stream["StreamDescription"]
    # describe_stream returns at most 100 shards per call, so keep paging
    # until AWS reports there are no more.
    while page["StreamDescription"]["HasMoreShards"]:
        last_shard_id = page["StreamDescription"]["Shards"][-1]["ShardId"]
        page = _execute_with_retries(conn,
                                     "describe_stream",
                                     StreamName=stream_name,
                                     ExclusiveStartShardId=last_shard_id)['result']
        description["Shards"] += page["StreamDescription"]["Shards"]
    return {'result': full_stream}
def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get complete stream info from AWS, returning only once the stream reaches
    the ACTIVE state.  Keeps retrying (with jittered backoff) while the stream
    is creating or updating; if the stream is deleted during the wait, the AWS
    error is returned instead.

    CLI example::

        salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    max_retry_delay = 10
    attempt = 0
    # Poll the cheap single-page description until the stream is ACTIVE, so a
    # very large stream is not fully enumerated on every retry.
    stream_response = None
    while True:
        time.sleep(_jittered_backoff(attempt, max_retry_delay))
        attempt += 1
        stream_response = _get_basic_stream(stream_name, conn)
        if 'error' in stream_response:
            return stream_response
        if stream_response['result']["StreamDescription"]["StreamStatus"] == "ACTIVE":
            break
    # Now that it is active, fetch the remaining shard pages if any exist.
    if stream_response['result']["StreamDescription"]["HasMoreShards"]:
        stream_response = _get_full_stream(stream_name, region, key, keyid, profile)
    return stream_response
def exists(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Check if the stream exists.  Returns ``{'result': False}`` together with
    the AWS error when it does not.

    CLI example::

        salt myminion boto_kinesis.exists my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    stream = _get_basic_stream(stream_name, conn)
    if 'error' not in stream:
        return {'result': True}
    # describe_stream failed -- surface the AWS error alongside the result.
    return {'result': False, 'error': stream['error']}
def create_stream(stream_name, num_shards, region=None, key=None, keyid=None, profile=None):
    '''
    Create a stream named ``stream_name`` with an initial shard count of
    ``num_shards``.

    CLI example::

        salt myminion boto_kinesis.create_stream my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    res = _execute_with_retries(conn,
                                "create_stream",
                                ShardCount=num_shards,
                                StreamName=stream_name)
    if 'error' not in res:
        # Success: callers only care that the request went through.
        res['result'] = True
    return res
def delete_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Delete the stream with name stream_name. This cannot be undone! All data
    will be lost!!

    CLI example::

        salt myminion boto_kinesis.delete_stream my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    res = _execute_with_retries(conn,
                                "delete_stream",
                                StreamName=stream_name)
    if 'error' not in res:
        res['result'] = True
    return res
def increase_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Increase the stream's retention period to ``retention_hours``.

    CLI example::

        salt myminion boto_kinesis.increase_stream_retention_period my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    res = _execute_with_retries(conn,
                                "increase_stream_retention_period",
                                StreamName=stream_name,
                                RetentionPeriodHours=retention_hours)
    if 'error' not in res:
        res['result'] = True
    return res
def decrease_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Decrease the stream's retention period to ``retention_hours``.

    CLI example::

        salt myminion boto_kinesis.decrease_stream_retention_period my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    res = _execute_with_retries(conn,
                                "decrease_stream_retention_period",
                                StreamName=stream_name,
                                RetentionPeriodHours=retention_hours)
    if 'error' not in res:
        res['result'] = True
    return res
def disable_enhanced_monitoring(stream_name, metrics,
                                region=None, key=None, keyid=None, profile=None):
    '''
    Disable enhanced monitoring for the specified shard-level metrics on
    stream ``stream_name``.

    CLI example::

        salt myminion boto_kinesis.disable_enhanced_monitoring my_stream ["metrics", "to", "disable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    res = _execute_with_retries(conn,
                                "disable_enhanced_monitoring",
                                StreamName=stream_name,
                                ShardLevelMetrics=metrics)
    if 'error' not in res:
        res['result'] = True
    return res
def get_info_for_reshard(stream_details):
"""
Collect some data: number of open shards, key range, etc.
Modifies stream_details to add a sorted list of OpenShards.
Returns (min_hash_key, max_hash_key, stream_details)
CLI example::
salt myminion boto_kinesis.get_info_for_reshard existing_stream_details
"""
min_hash_key = 0
max_hash_key = 0
stream_details["OpenShards"] = []
for shard in stream_details["Shards"]:
shard_id = shard["ShardId"]
if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
# EndingSequenceNumber is null for open shards, so this shard must be closed
log.debug("skipping closed shard %s", shard_id)
continue
stream_details["OpenShards"].append(shard)
shard["HashKeyRange"]["StartingHashKey"] = long_int(
shard["HashKeyRange"]["StartingHashKey"])
shard["HashKeyRange"]["EndingHashKey"] = long_int(
shard["HashKeyRange"]["EndingHashKey"])
if shard["HashKeyRange"]["StartingHashKey"] < min_hash_key:
min_hash_key = shard["HashKeyRange"]["StartingHashKey"]
if shard["HashKeyRange"]["EndingHashKey"] > max_hash_key:
max_hash_key = shard["HashKeyRange"]["EndingHashKey"]
stream_details["OpenShards"].sort(key=lambda shard: long_int(
shard["HashKeyRange"]["StartingHashKey"]))
return min_hash_key, max_hash_key, stream_details
def long_int(hash_key):
    """
    Convert a hash key (a 128-bit integer that AWS sends as a string) to a
    native integer so it can be used in comparisons.  Bridges the
    Python 2/3 int/long split.

    CLI example::

        salt myminion boto_kinesis.long_int some_MD5_hash_as_string

    :return: long object if python 2.X, int object if python 3.X
    """
    if sys.version_info >= (3,):
        return int(hash_key)
    return long(hash_key)  # pylint: disable=incompatible-py3-code
def reshard(stream_name, desired_size, force=False,
            region=None, key=None, keyid=None, profile=None):
    """
    Reshard a kinesis stream. Each call to this function will wait until the
    stream is ACTIVE, then make a single split or merge operation. This
    function decides where to split or merge with the assumption that the
    ultimate goal is a balanced partition space across ``desired_size``
    open shards.

    For safety, user must pass in force=True; otherwise, the function will
    dry run.

    CLI example::

        salt myminion boto_kinesis.reshard my_stream N True region=us-east-1

    :return: True if a split or merge was found/performed, False if nothing is needed
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    stream_response = get_stream_when_active(stream_name, region, key, keyid, profile)
    if 'error' in stream_response:
        return stream_response
    stream_details = stream_response['result']["StreamDescription"]
    min_hash_key, max_hash_key, stream_details = get_info_for_reshard(stream_details)
    log.debug("found %s open shards, min_hash_key %s max_hash_key %s",
              len(stream_details["OpenShards"]), min_hash_key, max_hash_key)
    # find the first open shard that doesn't match the desired pattern. When we find it,
    # either split or merge (depending on if it's too big or too small), and then return.
    for shard_num, shard in enumerate(stream_details["OpenShards"]):
        shard_id = shard["ShardId"]
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            # something went wrong, there's a closed shard in our open shard list
            log.debug("this should never happen! closed shard %s", shard_id)
            continue
        starting_hash_key = shard["HashKeyRange"]["StartingHashKey"]
        ending_hash_key = shard["HashKeyRange"]["EndingHashKey"]
        # this weird math matches what AWS does when you create a kinesis stream
        # with an initial number of shards.
        # NOTE: floor division (//) is required here.  On Python 3, true
        # division (/) yields floats, which cannot represent 128-bit hash
        # keys exactly, so the int equality checks below would always fail.
        # On Python 2 ints, // behaves identically to the old /.
        expected_starting_hash_key = (
            max_hash_key - min_hash_key) // desired_size * shard_num + shard_num
        expected_ending_hash_key = (
            max_hash_key - min_hash_key) // desired_size * (shard_num + 1) + shard_num
        # fix an off-by-one at the end
        if expected_ending_hash_key > max_hash_key:
            expected_ending_hash_key = max_hash_key
        log.debug(
            "Shard %s (%s) should start at %s: %s",
            shard_num, shard_id, expected_starting_hash_key,
            starting_hash_key == expected_starting_hash_key
        )
        log.debug(
            "Shard %s (%s) should end at %s: %s",
            shard_num, shard_id, expected_ending_hash_key,
            ending_hash_key == expected_ending_hash_key
        )
        if starting_hash_key != expected_starting_hash_key:
            r['error'] = "starting hash keys mismatch, don't know what to do!"
            return r
        if ending_hash_key == expected_ending_hash_key:
            continue
        if ending_hash_key > expected_ending_hash_key + 1:
            # this shard is too wide: split at expected_ending_hash_key
            if force:
                log.debug("%s should end at %s, actual %s, splitting",
                          shard_id, expected_ending_hash_key, ending_hash_key)
                r = _execute_with_retries(conn,
                                          "split_shard",
                                          StreamName=stream_name,
                                          ShardToSplit=shard_id,
                                          NewStartingHashKey=str(expected_ending_hash_key + 1))  # future lint: disable=blacklisted-function
            else:
                log.debug("%s should end at %s, actual %s would split",
                          shard_id, expected_ending_hash_key, ending_hash_key)
            if 'error' not in r:
                r['result'] = True
            return r
        else:
            # this shard is too narrow: merge with the next open shard
            next_shard_id = _get_next_open_shard(stream_details, shard_id)
            if not next_shard_id:
                r['error'] = "failed to find next shard after {0}".format(shard_id)
                return r
            if force:
                log.debug("%s should continue past %s, merging with %s",
                          shard_id, ending_hash_key, next_shard_id)
                r = _execute_with_retries(conn,
                                          "merge_shards",
                                          StreamName=stream_name,
                                          ShardToMerge=shard_id,
                                          AdjacentShardToMerge=next_shard_id)
            else:
                log.debug("%s should continue past %s, would merge with %s",
                          shard_id, ending_hash_key, next_shard_id)
            if 'error' not in r:
                r['result'] = True
            return r
    log.debug("No split or merge action necessary")
    r['result'] = False
    return r
def list_streams(region=None, key=None, keyid=None, profile=None):
    '''
    Return a list of all streams visible to the current account.

    CLI example:

    .. code-block:: bash

        salt myminion boto_kinesis.list_streams
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    streams = []
    marker = ''
    # AWS pages the stream list; the last name seen becomes the exclusive
    # start marker for the next request.  A marker of None ends the loop.
    while marker is not None:
        kwargs = {}
        if marker:
            kwargs['ExclusiveStartStreamName'] = marker
        ret = _execute_with_retries(conn, 'list_streams', **kwargs)
        if 'error' in ret:
            return ret
        page = ret.get('result') or {}
        streams.extend(page.get('StreamNames', []))
        if page.get('HasMoreStreams', False) in (True, 'true'):
            marker = streams[-1]
        else:
            marker = None
    return {'result': streams}
def _get_next_open_shard(stream_details, shard_id):
'''
Return the next open shard after shard_id
CLI example::
salt myminion boto_kinesis._get_next_open_shard existing_stream_details shard_id
'''
found = False
for shard in stream_details["OpenShards"]:
current_shard_id = shard["ShardId"]
if current_shard_id == shard_id:
found = True
continue
if found:
return current_shard_id
def _execute_with_retries(conn, function, **kwargs):
    '''
    Call ``function`` on ``conn``, retrying with jittered exponential
    backoff when AWS rate-limits the call or the resource is busy.

    Gives up and returns an error message if the resource is not found or
    an argument is invalid.

    conn
        The connection established by the calling method via _get_conn()

    function
        The function to call on conn. i.e. create_stream

    **kwargs
        Any kwargs required by the above function, with their keywords
        i.e. StreamName=stream_name

    Returns:
        The result dict with the HTTP response and JSON data if applicable
        as 'result', or an error as 'error'

    CLI example::

        salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
    '''
    r = {}
    max_attempts = 18
    max_retry_delay = 10
    for attempt in range(max_attempts):
        log.info("attempt: %s function: %s", attempt, function)
        try:
            fn = getattr(conn, function)
            r['result'] = fn(**kwargs)
            return r
        except botocore.exceptions.ClientError as e:
            error_code = e.response['Error']['Code']
            if "LimitExceededException" in error_code or "ResourceInUseException" in error_code:
                # Transient: rate limited by AWS, or another operation is
                # blocking this one -- retry with jittered exponential backoff.
                log.debug("Retrying due to AWS exception", exc_info=True)
                time.sleep(_jittered_backoff(attempt, max_retry_delay))
            else:
                # Permanent failure (e.g. ResourceNotFoundException or
                # InvalidArgumentException): report the AWS error verbatim.
                r['error'] = e.response['Error']
                log.error(r['error'])
                r['result'] = None
                return r
    # All attempts exhausted without a definitive answer.
    r['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts)
    log.error(r['error'])
    return r
def _jittered_backoff(attempt, max_retry_delay):
'''
Basic exponential backoff
CLI example::
salt myminion boto_kinesis._jittered_backoff current_attempt_number max_delay_in_seconds
'''
return min(random.random() * (2 ** attempt), max_retry_delay)
|
saltstack/salt
|
salt/modules/boto_kinesis.py
|
get_info_for_reshard
|
python
|
def get_info_for_reshard(stream_details):
    """
    Collect some data: number of open shards, key range, etc.
    Modifies stream_details to add a sorted list of OpenShards.
    Returns (min_hash_key, max_hash_key, stream_details)

    CLI example::

        salt myminion boto_kinesis.get_info_for_reshard existing_stream_details
    """
    lowest_key = 0
    highest_key = 0
    open_shards = stream_details["OpenShards"] = []
    for shard in stream_details["Shards"]:
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            # EndingSequenceNumber is null for open shards, so this shard must be closed
            log.debug("skipping closed shard %s", shard["ShardId"])
            continue
        open_shards.append(shard)
        key_range = shard["HashKeyRange"]
        key_range["StartingHashKey"] = long_int(key_range["StartingHashKey"])
        key_range["EndingHashKey"] = long_int(key_range["EndingHashKey"])
        if key_range["StartingHashKey"] < lowest_key:
            lowest_key = key_range["StartingHashKey"]
        if key_range["EndingHashKey"] > highest_key:
            highest_key = key_range["EndingHashKey"]
    open_shards.sort(key=lambda entry: long_int(entry["HashKeyRange"]["StartingHashKey"]))
    return lowest_key, highest_key, stream_details
|
Collect some data: number of open shards, key range, etc.
Modifies stream_details to add a sorted list of OpenShards.
Returns (min_hash_key, max_hash_key, stream_details)
CLI example::
salt myminion boto_kinesis.get_info_for_reshard existing_stream_details
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kinesis.py#L290-L320
|
[
"def long_int(hash_key):\n \"\"\"\n The hash key is a 128-bit int, sent as a string.\n It's necessary to convert to int/long for comparison operations.\n This helper method handles python 2/3 incompatibility\n\n CLI example::\n\n salt myminion boto_kinesis.long_int some_MD5_hash_as_string\n\n :return: long object if python 2.X, int object if python 3.X\n \"\"\"\n if sys.version_info < (3,):\n return long(hash_key) # pylint: disable=incompatible-py3-code\n else:\n return int(hash_key)\n"
] |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon Kinesis
.. versionadded:: 2017.7.0
:configuration: This module accepts explicit Kinesis credentials but can also
utilize IAM roles assigned to the instance trough Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
kinesis.keyid: GKTADJGHEIQSXMKKRBJ08H
kinesis.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
kinesis.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import random
import sys
# Import Salt libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
import salt.utils.versions
# Import third party libs
# pylint: disable=unused-import
try:
import boto3
import botocore
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=unused-import
log = logging.getLogger(__name__)
__virtualname__ = 'boto_kinesis'
def __virtual__():
    '''
    Only load if boto3 libraries exist.

    Returns the virtual module name on success; otherwise returns the
    failure value produced by salt.utils.versions.check_boto_reqs()
    (which tells the loader why the module is unavailable).
    '''
    has_boto_reqs = salt.utils.versions.check_boto_reqs()
    if has_boto_reqs is True:
        # Attach the shared boto3 connection helpers (e.g. _get_conn)
        # to this module under the 'kinesis' service name.
        __utils__['boto3.assign_funcs'](__name__, 'kinesis')
        return __virtualname__
    return has_boto_reqs
def _get_basic_stream(stream_name, conn):
    '''
    Fetch stream info from AWS via a single describe_stream call.

    Only the first "page" of shards (up to 100) is included; use
    _get_full_stream() when every shard is needed.

    CLI example::

        salt myminion boto_kinesis._get_basic_stream my_stream existing_conn
    '''
    describe_kwargs = {'StreamName': stream_name}
    return _execute_with_retries(conn, "describe_stream", **describe_kwargs)
def _get_full_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get complete stream info from AWS, via describe_stream, including all shards.

    Pages through describe_stream (which returns at most 100 shards per call)
    and concatenates all shard pages into a single stream description.

    Returns a dict with the full description under 'result', or a dict with
    an 'error' key if any describe_stream call ultimately failed.

    CLI example::

        salt myminion boto_kinesis._get_full_stream my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    stream_response = _get_basic_stream(stream_name, conn)
    if 'error' in stream_response:
        # propagate the AWS error instead of raising KeyError on 'result'
        return stream_response
    stream = stream_response['result']
    full_stream = stream
    # iterate through if there are > 100 shards (max that AWS will return from describe_stream)
    while stream["StreamDescription"]["HasMoreShards"]:
        stream_response = _execute_with_retries(
            conn,
            "describe_stream",
            StreamName=stream_name,
            ExclusiveStartShardId=stream["StreamDescription"]["Shards"][-1]["ShardId"])
        if 'error' in stream_response:
            return stream_response
        stream = stream_response['result']
        full_stream["StreamDescription"]["Shards"] += stream["StreamDescription"]["Shards"]
    r['result'] = full_stream
    return r
def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get complete stream info from AWS, returning only when the stream is in the ACTIVE state.
    Continues to retry when stream is updating or creating.
    If the stream is deleted during retries, the loop will catch the error and break.

    Returns a dict with the stream description under 'result', or an
    'error' key if describe_stream failed (e.g. the stream was deleted).

    CLI example::

        salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    stream_status = None
    # only get basic stream until it's active,
    # so we don't pull the full list of shards repeatedly (in case of very large stream)
    attempt = 0
    max_retry_delay = 10
    while stream_status != "ACTIVE":
        # NOTE: sleeps before the first describe as well (attempt 0 => up to 1s of jitter)
        time.sleep(_jittered_backoff(attempt, max_retry_delay))
        attempt += 1
        stream_response = _get_basic_stream(stream_name, conn)
        if 'error' in stream_response:
            return stream_response
        stream_status = stream_response['result']["StreamDescription"]["StreamStatus"]

    # now it's active, get the full stream if necessary
    if stream_response['result']["StreamDescription"]["HasMoreShards"]:
        stream_response = _get_full_stream(stream_name, region, key, keyid, profile)

    return stream_response
def exists(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Check if the stream exists. Returns False and the error if it does not.

    CLI example::

        salt myminion boto_kinesis.exists my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = {}
    describe_response = _get_basic_stream(stream_name, conn)
    if 'error' not in describe_response:
        ret['result'] = True
    else:
        ret['result'] = False
        ret['error'] = describe_response['error']
    return ret
def create_stream(stream_name, num_shards, region=None, key=None, keyid=None, profile=None):
    '''
    Create a stream named ``stream_name`` with an initial shard count of ``num_shards``.

    CLI example::

        salt myminion boto_kinesis.create_stream my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    result = _execute_with_retries(conn,
                                   "create_stream",
                                   StreamName=stream_name,
                                   ShardCount=num_shards)
    if 'error' not in result:
        result['result'] = True
    return result
def delete_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Delete the stream with name stream_name. This cannot be undone! All data will be lost!!

    CLI example::

        salt myminion boto_kinesis.delete_stream my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    result = _execute_with_retries(conn,
                                   "delete_stream",
                                   StreamName=stream_name)
    if 'error' not in result:
        result['result'] = True
    return result
def increase_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Increase the stream's data retention period to ``retention_hours``.

    CLI example::

        salt myminion boto_kinesis.increase_stream_retention_period my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    result = _execute_with_retries(conn,
                                   "increase_stream_retention_period",
                                   StreamName=stream_name,
                                   RetentionPeriodHours=retention_hours)
    if 'error' not in result:
        result['result'] = True
    return result
def decrease_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Decrease the stream's data retention period to ``retention_hours``.

    CLI example::

        salt myminion boto_kinesis.decrease_stream_retention_period my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    result = _execute_with_retries(conn,
                                   "decrease_stream_retention_period",
                                   StreamName=stream_name,
                                   RetentionPeriodHours=retention_hours)
    if 'error' not in result:
        result['result'] = True
    return result
def enable_enhanced_monitoring(stream_name, metrics,
                               region=None, key=None, keyid=None, profile=None):
    '''
    Enable enhanced monitoring for the specified shard-level metrics on stream stream_name

    CLI example::

        salt myminion boto_kinesis.enable_enhanced_monitoring my_stream ["metrics", "to", "enable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    result = _execute_with_retries(conn,
                                   "enable_enhanced_monitoring",
                                   StreamName=stream_name,
                                   ShardLevelMetrics=metrics)
    if 'error' not in result:
        result['result'] = True
    return result
def disable_enhanced_monitoring(stream_name, metrics,
                                region=None, key=None, keyid=None, profile=None):
    '''
    Disable enhanced monitoring for the specified shard-level metrics on stream stream_name

    CLI example::

        salt myminion boto_kinesis.disable_enhanced_monitoring my_stream ["metrics", "to", "disable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    result = _execute_with_retries(conn,
                                   "disable_enhanced_monitoring",
                                   StreamName=stream_name,
                                   ShardLevelMetrics=metrics)
    if 'error' not in result:
        result['result'] = True
    return result
def long_int(hash_key):
    """
    Convert a hash key (a 128-bit integer transmitted as a string) to a
    native integer so it can be used in comparison operations.
    Handles the Python 2/3 int/long incompatibility.

    CLI example::

        salt myminion boto_kinesis.long_int some_MD5_hash_as_string

    :return: long object if python 2.X, int object if python 3.X
    """
    if sys.version_info >= (3,):
        return int(hash_key)
    return long(hash_key)  # pylint: disable=incompatible-py3-code
def reshard(stream_name, desired_size, force=False,
            region=None, key=None, keyid=None, profile=None):
    """
    Reshard a kinesis stream. Each call to this function will wait until the stream is ACTIVE,
    then make a single split or merge operation. This function decides where to split or merge
    with the assumption that the ultimate goal is a balanced partition space.

    For safety, user must pass in force=True; otherwise, the function will dry run.

    CLI example::

        salt myminion boto_kinesis.reshard my_stream N True region=us-east-1

    :return: True if a split or merge was found/performed, False if nothing is needed
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}

    stream_response = get_stream_when_active(stream_name, region, key, keyid, profile)
    if 'error' in stream_response:
        return stream_response

    stream_details = stream_response['result']["StreamDescription"]
    min_hash_key, max_hash_key, stream_details = get_info_for_reshard(stream_details)
    log.debug("found %s open shards, min_hash_key %s max_hash_key %s",
              len(stream_details["OpenShards"]), min_hash_key, max_hash_key)

    # find the first open shard that doesn't match the desired pattern. When we find it,
    # either split or merge (depending on if it's too big or too small), and then return.
    for shard_num, shard in enumerate(stream_details["OpenShards"]):
        shard_id = shard["ShardId"]
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            # something went wrong, there's a closed shard in our open shard list
            log.debug("this should never happen! closed shard %s", shard_id)
            continue

        starting_hash_key = shard["HashKeyRange"]["StartingHashKey"]
        ending_hash_key = shard["HashKeyRange"]["EndingHashKey"]
        # this weird math matches what AWS does when you create a kinesis stream
        # with an initial number of shards.
        # NOTE: floor division (//) is required here. The hash keys are 128-bit
        # integers; this module does not import __future__.division, so on
        # Python 3 true division (/) would yield a float, losing precision and
        # making the integer equality checks below always fail.
        expected_starting_hash_key = (
            max_hash_key - min_hash_key) // desired_size * shard_num + shard_num
        expected_ending_hash_key = (
            max_hash_key - min_hash_key) // desired_size * (shard_num + 1) + shard_num
        # fix an off-by-one at the end
        if expected_ending_hash_key > max_hash_key:
            expected_ending_hash_key = max_hash_key

        log.debug(
            "Shard %s (%s) should start at %s: %s",
            shard_num, shard_id, expected_starting_hash_key,
            starting_hash_key == expected_starting_hash_key
        )
        log.debug(
            "Shard %s (%s) should end at %s: %s",
            shard_num, shard_id, expected_ending_hash_key,
            ending_hash_key == expected_ending_hash_key
        )

        if starting_hash_key != expected_starting_hash_key:
            r['error'] = "starting hash keys mismatch, don't know what to do!"
            return r

        if ending_hash_key == expected_ending_hash_key:
            continue

        if ending_hash_key > expected_ending_hash_key + 1:
            # shard is too wide: split at expected_ending_hash_key
            if force:
                log.debug("%s should end at %s, actual %s, splitting",
                          shard_id, expected_ending_hash_key, ending_hash_key)
                r = _execute_with_retries(conn,
                                          "split_shard",
                                          StreamName=stream_name,
                                          ShardToSplit=shard_id,
                                          NewStartingHashKey=str(expected_ending_hash_key + 1))  # future lint: disable=blacklisted-function
            else:
                log.debug("%s should end at %s, actual %s would split",
                          shard_id, expected_ending_hash_key, ending_hash_key)

            if 'error' not in r:
                r['result'] = True
            return r
        else:
            # shard is too narrow: merge with the next open shard
            next_shard_id = _get_next_open_shard(stream_details, shard_id)
            if not next_shard_id:
                r['error'] = "failed to find next shard after {0}".format(shard_id)
                return r
            if force:
                log.debug("%s should continue past %s, merging with %s",
                          shard_id, ending_hash_key, next_shard_id)
                r = _execute_with_retries(conn,
                                          "merge_shards",
                                          StreamName=stream_name,
                                          ShardToMerge=shard_id,
                                          AdjacentShardToMerge=next_shard_id)
            else:
                log.debug("%s should continue past %s, would merge with %s",
                          shard_id, ending_hash_key, next_shard_id)

            if 'error' not in r:
                r['result'] = True
            return r

    log.debug("No split or merge action necessary")
    r['result'] = False
    return r
def list_streams(region=None, key=None, keyid=None, profile=None):
    '''
    Return a list of all streams visible to the current account

    CLI example:

    .. code-block:: bash

        salt myminion boto_kinesis.list_streams
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    names = []
    cursor = ''
    while cursor is not None:
        call_kwargs = {}
        if cursor:
            # continue paging from the last stream name we saw
            call_kwargs['ExclusiveStartStreamName'] = cursor
        resp = _execute_with_retries(conn, 'list_streams', **call_kwargs)
        if 'error' in resp:
            return resp
        payload = resp.get('result') or {}
        names.extend(payload.get('StreamNames', []))
        if payload.get('HasMoreStreams', False) in (True, 'true'):
            cursor = names[-1]
        else:
            cursor = None
    return {'result': names}
def _get_next_open_shard(stream_details, shard_id):
'''
Return the next open shard after shard_id
CLI example::
salt myminion boto_kinesis._get_next_open_shard existing_stream_details shard_id
'''
found = False
for shard in stream_details["OpenShards"]:
current_shard_id = shard["ShardId"]
if current_shard_id == shard_id:
found = True
continue
if found:
return current_shard_id
def _execute_with_retries(conn, function, **kwargs):
    '''
    Retry if we're rate limited by AWS or blocked by another call.
    Give up and return error message if resource not found or argument is invalid.

    conn
        The connection established by the calling method via _get_conn()

    function
        The function to call on conn. i.e. create_stream

    **kwargs
        Any kwargs required by the above function, with their keywords
        i.e. StreamName=stream_name

    Returns:
        The result dict with the HTTP response and JSON data if applicable
        as 'result', or an error as 'error'

    CLI example::

        salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
    '''
    r = {}
    # up to 18 attempts with jittered exponential backoff capped at 10s each
    max_attempts = 18
    max_retry_delay = 10
    for attempt in range(max_attempts):
        log.info("attempt: %s function: %s", attempt, function)
        try:
            fn = getattr(conn, function)
            r['result'] = fn(**kwargs)
            return r
        except botocore.exceptions.ClientError as e:
            error_code = e.response['Error']['Code']
            if "LimitExceededException" in error_code or "ResourceInUseException" in error_code:
                # could be rate limited by AWS or another command is blocking,
                # retry with exponential backoff
                log.debug("Retrying due to AWS exception", exc_info=True)
                time.sleep(_jittered_backoff(attempt, max_retry_delay))
            else:
                # ResourceNotFoundException or InvalidArgumentException:
                # not retryable, surface the AWS error to the caller
                r['error'] = e.response['Error']
                log.error(r['error'])
                r['result'] = None
                return r

    # exhausted all attempts without a successful call
    r['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts)
    log.error(r['error'])
    return r
def _jittered_backoff(attempt, max_retry_delay):
'''
Basic exponential backoff
CLI example::
salt myminion boto_kinesis._jittered_backoff current_attempt_number max_delay_in_seconds
'''
return min(random.random() * (2 ** attempt), max_retry_delay)
|
saltstack/salt
|
salt/modules/boto_kinesis.py
|
reshard
|
python
|
def reshard(stream_name, desired_size, force=False,
region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
stream_response = get_stream_when_active(stream_name, region, key, keyid, profile)
if 'error' in stream_response:
return stream_response
stream_details = stream_response['result']["StreamDescription"]
min_hash_key, max_hash_key, stream_details = get_info_for_reshard(stream_details)
log.debug("found %s open shards, min_hash_key %s max_hash_key %s",
len(stream_details["OpenShards"]), min_hash_key, max_hash_key)
# find the first open shard that doesn't match the desired pattern. When we find it,
# either split or merge (depending on if it's too big or too small), and then return.
for shard_num, shard in enumerate(stream_details["OpenShards"]):
shard_id = shard["ShardId"]
if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
# something went wrong, there's a closed shard in our open shard list
log.debug("this should never happen! closed shard %s", shard_id)
continue
starting_hash_key = shard["HashKeyRange"]["StartingHashKey"]
ending_hash_key = shard["HashKeyRange"]["EndingHashKey"]
# this weird math matches what AWS does when you create a kinesis stream
# with an initial number of shards.
expected_starting_hash_key = (
max_hash_key - min_hash_key) / desired_size * shard_num + shard_num
expected_ending_hash_key = (
max_hash_key - min_hash_key) / desired_size * (shard_num + 1) + shard_num
# fix an off-by-one at the end
if expected_ending_hash_key > max_hash_key:
expected_ending_hash_key = max_hash_key
log.debug(
"Shard %s (%s) should start at %s: %s",
shard_num, shard_id, expected_starting_hash_key,
starting_hash_key == expected_starting_hash_key
)
log.debug(
"Shard %s (%s) should end at %s: %s",
shard_num, shard_id, expected_ending_hash_key,
ending_hash_key == expected_ending_hash_key
)
if starting_hash_key != expected_starting_hash_key:
r['error'] = "starting hash keys mismatch, don't know what to do!"
return r
if ending_hash_key == expected_ending_hash_key:
continue
if ending_hash_key > expected_ending_hash_key + 1:
# split at expected_ending_hash_key
if force:
log.debug("%s should end at %s, actual %s, splitting",
shard_id, expected_ending_hash_key, ending_hash_key)
r = _execute_with_retries(conn,
"split_shard",
StreamName=stream_name,
ShardToSplit=shard_id,
NewStartingHashKey=str(expected_ending_hash_key + 1)) # future lint: disable=blacklisted-function
else:
log.debug("%s should end at %s, actual %s would split",
shard_id, expected_ending_hash_key, ending_hash_key)
if 'error' not in r:
r['result'] = True
return r
else:
# merge
next_shard_id = _get_next_open_shard(stream_details, shard_id)
if not next_shard_id:
r['error'] = "failed to find next shard after {0}".format(shard_id)
return r
if force:
log.debug("%s should continue past %s, merging with %s",
shard_id, ending_hash_key, next_shard_id)
r = _execute_with_retries(conn,
"merge_shards",
StreamName=stream_name,
ShardToMerge=shard_id,
AdjacentShardToMerge=next_shard_id)
else:
log.debug("%s should continue past %s, would merge with %s",
shard_id, ending_hash_key, next_shard_id)
if 'error' not in r:
r['result'] = True
return r
log.debug("No split or merge action necessary")
r['result'] = False
return r
|
Reshard a kinesis stream. Each call to this function will wait until the stream is ACTIVE,
then make a single split or merge operation. This function decides where to split or merge
with the assumption that the ultimate goal is a balanced partition space.
For safety, user must past in force=True; otherwise, the function will dry run.
CLI example::
salt myminion boto_kinesis.reshard my_stream N True region=us-east-1
:return: True if a split or merge was found/performed, False if nothing is needed
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kinesis.py#L341-L449
|
[
"def _execute_with_retries(conn, function, **kwargs):\n '''\n Retry if we're rate limited by AWS or blocked by another call.\n Give up and return error message if resource not found or argument is invalid.\n\n conn\n The connection established by the calling method via _get_conn()\n\n function\n The function to call on conn. i.e. create_stream\n\n **kwargs\n Any kwargs required by the above function, with their keywords\n i.e. StreamName=stream_name\n\n Returns:\n The result dict with the HTTP response and JSON data if applicable\n as 'result', or an error as 'error'\n\n CLI example::\n\n salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs\n\n '''\n r = {}\n max_attempts = 18\n max_retry_delay = 10\n for attempt in range(max_attempts):\n log.info(\"attempt: %s function: %s\", attempt, function)\n try:\n fn = getattr(conn, function)\n r['result'] = fn(**kwargs)\n return r\n except botocore.exceptions.ClientError as e:\n error_code = e.response['Error']['Code']\n if \"LimitExceededException\" in error_code or \"ResourceInUseException\" in error_code:\n # could be rate limited by AWS or another command is blocking,\n # retry with exponential backoff\n log.debug(\"Retrying due to AWS exception\", exc_info=True)\n time.sleep(_jittered_backoff(attempt, max_retry_delay))\n else:\n # ResourceNotFoundException or InvalidArgumentException\n r['error'] = e.response['Error']\n log.error(r['error'])\n r['result'] = None\n return r\n\n r['error'] = \"Tried to execute function {0} {1} times, but was unable\".format(function, max_attempts)\n log.error(r['error'])\n return r\n",
"def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profile=None):\n '''\n Get complete stream info from AWS, returning only when the stream is in the ACTIVE state.\n Continues to retry when stream is updating or creating.\n If the stream is deleted during retries, the loop will catch the error and break.\n\n CLI example::\n\n salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1\n '''\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n stream_status = None\n # only get basic stream until it's active,\n # so we don't pull the full list of shards repeatedly (in case of very large stream)\n attempt = 0\n max_retry_delay = 10\n while stream_status != \"ACTIVE\":\n time.sleep(_jittered_backoff(attempt, max_retry_delay))\n attempt += 1\n stream_response = _get_basic_stream(stream_name, conn)\n if 'error' in stream_response:\n return stream_response\n stream_status = stream_response['result'][\"StreamDescription\"][\"StreamStatus\"]\n\n # now it's active, get the full stream if necessary\n if stream_response['result'][\"StreamDescription\"][\"HasMoreShards\"]:\n stream_response = _get_full_stream(stream_name, region, key, keyid, profile)\n\n return stream_response\n",
"def get_info_for_reshard(stream_details):\n \"\"\"\n Collect some data: number of open shards, key range, etc.\n Modifies stream_details to add a sorted list of OpenShards.\n Returns (min_hash_key, max_hash_key, stream_details)\n\n CLI example::\n\n salt myminion boto_kinesis.get_info_for_reshard existing_stream_details\n \"\"\"\n min_hash_key = 0\n max_hash_key = 0\n stream_details[\"OpenShards\"] = []\n for shard in stream_details[\"Shards\"]:\n shard_id = shard[\"ShardId\"]\n if \"EndingSequenceNumber\" in shard[\"SequenceNumberRange\"]:\n # EndingSequenceNumber is null for open shards, so this shard must be closed\n log.debug(\"skipping closed shard %s\", shard_id)\n continue\n stream_details[\"OpenShards\"].append(shard)\n shard[\"HashKeyRange\"][\"StartingHashKey\"] = long_int(\n shard[\"HashKeyRange\"][\"StartingHashKey\"])\n shard[\"HashKeyRange\"][\"EndingHashKey\"] = long_int(\n shard[\"HashKeyRange\"][\"EndingHashKey\"])\n if shard[\"HashKeyRange\"][\"StartingHashKey\"] < min_hash_key:\n min_hash_key = shard[\"HashKeyRange\"][\"StartingHashKey\"]\n if shard[\"HashKeyRange\"][\"EndingHashKey\"] > max_hash_key:\n max_hash_key = shard[\"HashKeyRange\"][\"EndingHashKey\"]\n stream_details[\"OpenShards\"].sort(key=lambda shard: long_int(\n shard[\"HashKeyRange\"][\"StartingHashKey\"]))\n return min_hash_key, max_hash_key, stream_details\n",
"def _get_next_open_shard(stream_details, shard_id):\n '''\n Return the next open shard after shard_id\n\n CLI example::\n\n salt myminion boto_kinesis._get_next_open_shard existing_stream_details shard_id\n '''\n found = False\n for shard in stream_details[\"OpenShards\"]:\n current_shard_id = shard[\"ShardId\"]\n if current_shard_id == shard_id:\n found = True\n continue\n if found:\n return current_shard_id\n"
] |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon Kinesis
.. versionadded:: 2017.7.0
:configuration: This module accepts explicit Kinesis credentials but can also
utilize IAM roles assigned to the instance trough Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
kinesis.keyid: GKTADJGHEIQSXMKKRBJ08H
kinesis.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
kinesis.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import random
import sys
# Import Salt libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
import salt.utils.versions
# Import third party libs
# pylint: disable=unused-import
try:
import boto3
import botocore
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=unused-import
log = logging.getLogger(__name__)
__virtualname__ = 'boto_kinesis'
def __virtual__():
'''
Only load if boto3 libraries exist.
'''
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
__utils__['boto3.assign_funcs'](__name__, 'kinesis')
return __virtualname__
return has_boto_reqs
def _get_basic_stream(stream_name, conn):
'''
Stream info from AWS, via describe_stream
Only returns the first "page" of shards (up to 100); use _get_full_stream() for all shards.
CLI example::
salt myminion boto_kinesis._get_basic_stream my_stream existing_conn
'''
return _execute_with_retries(conn, "describe_stream", StreamName=stream_name)
def _get_full_stream(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Get complete stream info from AWS, via describe_stream, including all shards.
CLI example::
salt myminion boto_kinesis._get_full_stream my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
stream = _get_basic_stream(stream_name, conn)['result']
full_stream = stream
# iterate through if there are > 100 shards (max that AWS will return from describe_stream)
while stream["StreamDescription"]["HasMoreShards"]:
stream = _execute_with_retries(conn,
"describe_stream",
StreamName=stream_name,
ExclusiveStartShardId=stream["StreamDescription"]["Shards"][-1]["ShardId"])
stream = stream['result']
full_stream["StreamDescription"]["Shards"] += stream["StreamDescription"]["Shards"]
r['result'] = full_stream
return r
def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Get complete stream info from AWS, returning only when the stream is in the ACTIVE state.
Continues to retry when stream is updating or creating.
If the stream is deleted during retries, the loop will catch the error and break.
CLI example::
salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
stream_status = None
# only get basic stream until it's active,
# so we don't pull the full list of shards repeatedly (in case of very large stream)
attempt = 0
max_retry_delay = 10
while stream_status != "ACTIVE":
time.sleep(_jittered_backoff(attempt, max_retry_delay))
attempt += 1
stream_response = _get_basic_stream(stream_name, conn)
if 'error' in stream_response:
return stream_response
stream_status = stream_response['result']["StreamDescription"]["StreamStatus"]
# now it's active, get the full stream if necessary
if stream_response['result']["StreamDescription"]["HasMoreShards"]:
stream_response = _get_full_stream(stream_name, region, key, keyid, profile)
return stream_response
def exists(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Check if the stream exists. Returns False and the error if it does not.
CLI example::
salt myminion boto_kinesis.exists my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
stream = _get_basic_stream(stream_name, conn)
if 'error' in stream:
r['result'] = False
r['error'] = stream['error']
else:
r['result'] = True
return r
def create_stream(stream_name, num_shards, region=None, key=None, keyid=None, profile=None):
'''
Create a stream with name stream_name and initial number of shards num_shards.
CLI example::
salt myminion boto_kinesis.create_stream my_stream N region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = _execute_with_retries(conn,
"create_stream",
ShardCount=num_shards,
StreamName=stream_name)
if 'error' not in r:
r['result'] = True
return r
def delete_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Delete the stream named stream_name. This cannot be undone! All data will be lost!!

    CLI example::

        salt myminion boto_kinesis.delete_stream my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(conn, 'delete_stream', StreamName=stream_name)
    # Only mark success when the retry helper did not report an AWS error.
    if 'error' not in ret:
        ret['result'] = True
    return ret
def increase_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Raise the stream's data retention period to retention_hours.

    CLI example::

        salt myminion boto_kinesis.increase_stream_retention_period my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        conn,
        'increase_stream_retention_period',
        StreamName=stream_name,
        RetentionPeriodHours=retention_hours,
    )
    # Only mark success when the retry helper did not report an AWS error.
    if 'error' not in ret:
        ret['result'] = True
    return ret
def decrease_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Lower the stream's data retention period to retention_hours.

    CLI example::

        salt myminion boto_kinesis.decrease_stream_retention_period my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        conn,
        'decrease_stream_retention_period',
        StreamName=stream_name,
        RetentionPeriodHours=retention_hours,
    )
    # Only mark success when the retry helper did not report an AWS error.
    if 'error' not in ret:
        ret['result'] = True
    return ret
def enable_enhanced_monitoring(stream_name, metrics,
                               region=None, key=None, keyid=None, profile=None):
    '''
    Enable enhanced monitoring for the given shard-level metrics on stream stream_name.

    CLI example::

        salt myminion boto_kinesis.enable_enhanced_monitoring my_stream ["metrics", "to", "enable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        conn,
        'enable_enhanced_monitoring',
        StreamName=stream_name,
        ShardLevelMetrics=metrics,
    )
    # Only mark success when the retry helper did not report an AWS error.
    if 'error' not in ret:
        ret['result'] = True
    return ret
def disable_enhanced_monitoring(stream_name, metrics,
                                region=None, key=None, keyid=None, profile=None):
    '''
    Disable enhanced monitoring for the given shard-level metrics on stream stream_name.

    CLI example::

        salt myminion boto_kinesis.disable_enhanced_monitoring my_stream ["metrics", "to", "disable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        conn,
        'disable_enhanced_monitoring',
        StreamName=stream_name,
        ShardLevelMetrics=metrics,
    )
    # Only mark success when the retry helper did not report an AWS error.
    if 'error' not in ret:
        ret['result'] = True
    return ret
def get_info_for_reshard(stream_details):
    """
    Collect resharding info: the open shards and the overall hash-key range.

    Mutates stream_details in place, adding an "OpenShards" list sorted by
    StartingHashKey, with each open shard's hash keys normalized to ints.

    Returns (min_hash_key, max_hash_key, stream_details).

    CLI example::

        salt myminion boto_kinesis.get_info_for_reshard existing_stream_details
    """
    min_hash_key = 0
    max_hash_key = 0
    open_shards = []
    for shard in stream_details["Shards"]:
        # Closed shards carry an EndingSequenceNumber; open shards do not.
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            log.debug("skipping closed shard %s", shard["ShardId"])
            continue
        key_range = shard["HashKeyRange"]
        # Normalize the string hash keys to ints so they compare numerically.
        key_range["StartingHashKey"] = long_int(key_range["StartingHashKey"])
        key_range["EndingHashKey"] = long_int(key_range["EndingHashKey"])
        if key_range["StartingHashKey"] < min_hash_key:
            min_hash_key = key_range["StartingHashKey"]
        if key_range["EndingHashKey"] > max_hash_key:
            max_hash_key = key_range["EndingHashKey"]
        open_shards.append(shard)
    open_shards.sort(key=lambda s: long_int(s["HashKeyRange"]["StartingHashKey"]))
    stream_details["OpenShards"] = open_shards
    return min_hash_key, max_hash_key, stream_details
def long_int(hash_key):
    """
    Convert a Kinesis hash key (a 128-bit integer sent as a string) to a
    native integer so it can be compared numerically.

    Handles Python 2/3 differences: on Python 2 the value is returned as a
    ``long``, on Python 3 as an ``int``.

    CLI example::

        salt myminion boto_kinesis.long_int some_MD5_hash_as_string

    :return: long object if python 2.X, int object if python 3.X
    """
    if sys.version_info[0] >= 3:
        return int(hash_key)
    return long(hash_key)  # pylint: disable=incompatible-py3-code
def list_streams(region=None, key=None, keyid=None, profile=None):
    '''
    Return a list of all streams visible to the current account

    CLI example:

    .. code-block:: bash

        salt myminion boto_kinesis.list_streams
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    streams = []
    exclusive_start_stream_name = ''
    while exclusive_start_stream_name is not None:
        # ExclusiveStartStreamName must be omitted on the first request.
        args = {'ExclusiveStartStreamName': exclusive_start_stream_name} if exclusive_start_stream_name else {}
        ret = _execute_with_retries(conn, 'list_streams', **args)
        if 'error' in ret:
            return ret
        ret = ret['result'] if ret and ret.get('result') else {}
        page = ret.get('StreamNames', [])
        streams += page
        # Paginate from the last name of *this* page. Guard against an empty
        # page that still reports HasMoreStreams: the previous code used
        # streams[-1], which raised IndexError when no stream had been seen
        # yet and otherwise re-sent the same cursor forever.
        if ret.get('HasMoreStreams', False) in (True, 'true') and page:
            exclusive_start_stream_name = page[-1]
        else:
            exclusive_start_stream_name = None
    return {'result': streams}
def _get_next_open_shard(stream_details, shard_id):
'''
Return the next open shard after shard_id
CLI example::
salt myminion boto_kinesis._get_next_open_shard existing_stream_details shard_id
'''
found = False
for shard in stream_details["OpenShards"]:
current_shard_id = shard["ShardId"]
if current_shard_id == shard_id:
found = True
continue
if found:
return current_shard_id
def _execute_with_retries(conn, function, **kwargs):
    '''
    Retry if we're rate limited by AWS or blocked by another call.
    Give up and return error message if resource not found or argument is invalid.

    conn
        The connection established by the calling method via _get_conn()

    function
        The name of the method to call on conn, e.g. "create_stream"

    **kwargs
        Any kwargs required by the above function, with their keywords
        i.e. StreamName=stream_name

    Returns:
        The result dict with the HTTP response and JSON data if applicable
        as 'result', or an error as 'error'. Never raises: both retryable
        exhaustion and non-retryable AWS errors are reported via 'error'.

    CLI example::

        salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
    '''
    r = {}
    # Up to 18 attempts; each failed attempt sleeps a jittered, exponentially
    # growing delay capped at max_retry_delay seconds (see _jittered_backoff).
    max_attempts = 18
    max_retry_delay = 10
    for attempt in range(max_attempts):
        log.info("attempt: %s function: %s", attempt, function)
        try:
            # Resolve the boto3 client method by name and invoke it.
            fn = getattr(conn, function)
            r['result'] = fn(**kwargs)
            return r
        except botocore.exceptions.ClientError as e:
            error_code = e.response['Error']['Code']
            if "LimitExceededException" in error_code or "ResourceInUseException" in error_code:
                # could be rate limited by AWS or another command is blocking,
                # retry with exponential backoff
                log.debug("Retrying due to AWS exception", exc_info=True)
                time.sleep(_jittered_backoff(attempt, max_retry_delay))
            else:
                # ResourceNotFoundException or InvalidArgumentException
                # Non-retryable: surface the raw AWS error dict to the caller.
                r['error'] = e.response['Error']
                log.error(r['error'])
                r['result'] = None
                return r

    # Every attempt was throttled/blocked; report failure without raising.
    r['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts)
    log.error(r['error'])
    return r
def _jittered_backoff(attempt, max_retry_delay):
'''
Basic exponential backoff
CLI example::
salt myminion boto_kinesis._jittered_backoff current_attempt_number max_delay_in_seconds
'''
return min(random.random() * (2 ** attempt), max_retry_delay)
|
saltstack/salt
|
salt/modules/boto_kinesis.py
|
list_streams
|
python
|
def list_streams(region=None, key=None, keyid=None, profile=None):
'''
Return a list of all streams visible to the current account
CLI example:
.. code-block:: bash
salt myminion boto_kinesis.list_streams
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
streams = []
exclusive_start_stream_name = ''
while exclusive_start_stream_name is not None:
args = {'ExclusiveStartStreamName': exclusive_start_stream_name} if exclusive_start_stream_name else {}
ret = _execute_with_retries(conn, 'list_streams', **args)
if 'error' in ret:
return ret
ret = ret['result'] if ret and ret.get('result') else {}
streams += ret.get('StreamNames', [])
exclusive_start_stream_name = streams[-1] if ret.get('HasMoreStreams', False) in (True, 'true') else None
return {'result': streams}
|
Return a list of all streams visible to the current account
CLI example:
.. code-block:: bash
salt myminion boto_kinesis.list_streams
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kinesis.py#L452-L473
|
[
"def _execute_with_retries(conn, function, **kwargs):\n '''\n Retry if we're rate limited by AWS or blocked by another call.\n Give up and return error message if resource not found or argument is invalid.\n\n conn\n The connection established by the calling method via _get_conn()\n\n function\n The function to call on conn. i.e. create_stream\n\n **kwargs\n Any kwargs required by the above function, with their keywords\n i.e. StreamName=stream_name\n\n Returns:\n The result dict with the HTTP response and JSON data if applicable\n as 'result', or an error as 'error'\n\n CLI example::\n\n salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs\n\n '''\n r = {}\n max_attempts = 18\n max_retry_delay = 10\n for attempt in range(max_attempts):\n log.info(\"attempt: %s function: %s\", attempt, function)\n try:\n fn = getattr(conn, function)\n r['result'] = fn(**kwargs)\n return r\n except botocore.exceptions.ClientError as e:\n error_code = e.response['Error']['Code']\n if \"LimitExceededException\" in error_code or \"ResourceInUseException\" in error_code:\n # could be rate limited by AWS or another command is blocking,\n # retry with exponential backoff\n log.debug(\"Retrying due to AWS exception\", exc_info=True)\n time.sleep(_jittered_backoff(attempt, max_retry_delay))\n else:\n # ResourceNotFoundException or InvalidArgumentException\n r['error'] = e.response['Error']\n log.error(r['error'])\n r['result'] = None\n return r\n\n r['error'] = \"Tried to execute function {0} {1} times, but was unable\".format(function, max_attempts)\n log.error(r['error'])\n return r\n"
] |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon Kinesis
.. versionadded:: 2017.7.0
:configuration: This module accepts explicit Kinesis credentials but can also
    utilize IAM roles assigned to the instance through Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
kinesis.keyid: GKTADJGHEIQSXMKKRBJ08H
kinesis.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
kinesis.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import random
import sys
# Import Salt libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
import salt.utils.versions
# Import third party libs
# pylint: disable=unused-import
try:
import boto3
import botocore
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=unused-import
log = logging.getLogger(__name__)
__virtualname__ = 'boto_kinesis'
def __virtual__():
'''
Only load if boto3 libraries exist.
'''
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
__utils__['boto3.assign_funcs'](__name__, 'kinesis')
return __virtualname__
return has_boto_reqs
def _get_basic_stream(stream_name, conn):
'''
Stream info from AWS, via describe_stream
Only returns the first "page" of shards (up to 100); use _get_full_stream() for all shards.
CLI example::
salt myminion boto_kinesis._get_basic_stream my_stream existing_conn
'''
return _execute_with_retries(conn, "describe_stream", StreamName=stream_name)
def _get_full_stream(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Get complete stream info from AWS, via describe_stream, including all shards.
CLI example::
salt myminion boto_kinesis._get_full_stream my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
stream = _get_basic_stream(stream_name, conn)['result']
full_stream = stream
# iterate through if there are > 100 shards (max that AWS will return from describe_stream)
while stream["StreamDescription"]["HasMoreShards"]:
stream = _execute_with_retries(conn,
"describe_stream",
StreamName=stream_name,
ExclusiveStartShardId=stream["StreamDescription"]["Shards"][-1]["ShardId"])
stream = stream['result']
full_stream["StreamDescription"]["Shards"] += stream["StreamDescription"]["Shards"]
r['result'] = full_stream
return r
def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Get complete stream info from AWS, returning only when the stream is in the ACTIVE state.
Continues to retry when stream is updating or creating.
If the stream is deleted during retries, the loop will catch the error and break.
CLI example::
salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
stream_status = None
# only get basic stream until it's active,
# so we don't pull the full list of shards repeatedly (in case of very large stream)
attempt = 0
max_retry_delay = 10
while stream_status != "ACTIVE":
time.sleep(_jittered_backoff(attempt, max_retry_delay))
attempt += 1
stream_response = _get_basic_stream(stream_name, conn)
if 'error' in stream_response:
return stream_response
stream_status = stream_response['result']["StreamDescription"]["StreamStatus"]
# now it's active, get the full stream if necessary
if stream_response['result']["StreamDescription"]["HasMoreShards"]:
stream_response = _get_full_stream(stream_name, region, key, keyid, profile)
return stream_response
def exists(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Check if the stream exists. Returns False and the error if it does not.
CLI example::
salt myminion boto_kinesis.exists my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
stream = _get_basic_stream(stream_name, conn)
if 'error' in stream:
r['result'] = False
r['error'] = stream['error']
else:
r['result'] = True
return r
def create_stream(stream_name, num_shards, region=None, key=None, keyid=None, profile=None):
'''
Create a stream with name stream_name and initial number of shards num_shards.
CLI example::
salt myminion boto_kinesis.create_stream my_stream N region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = _execute_with_retries(conn,
"create_stream",
ShardCount=num_shards,
StreamName=stream_name)
if 'error' not in r:
r['result'] = True
return r
def delete_stream(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Delete the stream with name stream_name. This cannot be undone! All data will be lost!!
CLI example::
salt myminion boto_kinesis.delete_stream my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = _execute_with_retries(conn,
"delete_stream",
StreamName=stream_name)
if 'error' not in r:
r['result'] = True
return r
def increase_stream_retention_period(stream_name, retention_hours,
region=None, key=None, keyid=None, profile=None):
'''
Increase stream retention period to retention_hours
CLI example::
salt myminion boto_kinesis.increase_stream_retention_period my_stream N region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = _execute_with_retries(conn,
"increase_stream_retention_period",
StreamName=stream_name,
RetentionPeriodHours=retention_hours)
if 'error' not in r:
r['result'] = True
return r
def decrease_stream_retention_period(stream_name, retention_hours,
region=None, key=None, keyid=None, profile=None):
'''
Decrease stream retention period to retention_hours
CLI example::
salt myminion boto_kinesis.decrease_stream_retention_period my_stream N region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = _execute_with_retries(conn,
"decrease_stream_retention_period",
StreamName=stream_name,
RetentionPeriodHours=retention_hours)
if 'error' not in r:
r['result'] = True
return r
def enable_enhanced_monitoring(stream_name, metrics,
region=None, key=None, keyid=None, profile=None):
'''
Enable enhanced monitoring for the specified shard-level metrics on stream stream_name
CLI example::
salt myminion boto_kinesis.enable_enhanced_monitoring my_stream ["metrics", "to", "enable"] region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = _execute_with_retries(conn,
"enable_enhanced_monitoring",
StreamName=stream_name,
ShardLevelMetrics=metrics)
if 'error' not in r:
r['result'] = True
return r
def disable_enhanced_monitoring(stream_name, metrics,
region=None, key=None, keyid=None, profile=None):
'''
Disable enhanced monitoring for the specified shard-level metrics on stream stream_name
CLI example::
salt myminion boto_kinesis.disable_enhanced_monitoring my_stream ["metrics", "to", "disable"] region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = _execute_with_retries(conn,
"disable_enhanced_monitoring",
StreamName=stream_name,
ShardLevelMetrics=metrics)
if 'error' not in r:
r['result'] = True
return r
def get_info_for_reshard(stream_details):
"""
Collect some data: number of open shards, key range, etc.
Modifies stream_details to add a sorted list of OpenShards.
Returns (min_hash_key, max_hash_key, stream_details)
CLI example::
salt myminion boto_kinesis.get_info_for_reshard existing_stream_details
"""
min_hash_key = 0
max_hash_key = 0
stream_details["OpenShards"] = []
for shard in stream_details["Shards"]:
shard_id = shard["ShardId"]
if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
# EndingSequenceNumber is null for open shards, so this shard must be closed
log.debug("skipping closed shard %s", shard_id)
continue
stream_details["OpenShards"].append(shard)
shard["HashKeyRange"]["StartingHashKey"] = long_int(
shard["HashKeyRange"]["StartingHashKey"])
shard["HashKeyRange"]["EndingHashKey"] = long_int(
shard["HashKeyRange"]["EndingHashKey"])
if shard["HashKeyRange"]["StartingHashKey"] < min_hash_key:
min_hash_key = shard["HashKeyRange"]["StartingHashKey"]
if shard["HashKeyRange"]["EndingHashKey"] > max_hash_key:
max_hash_key = shard["HashKeyRange"]["EndingHashKey"]
stream_details["OpenShards"].sort(key=lambda shard: long_int(
shard["HashKeyRange"]["StartingHashKey"]))
return min_hash_key, max_hash_key, stream_details
def long_int(hash_key):
"""
The hash key is a 128-bit int, sent as a string.
It's necessary to convert to int/long for comparison operations.
This helper method handles python 2/3 incompatibility
CLI example::
salt myminion boto_kinesis.long_int some_MD5_hash_as_string
:return: long object if python 2.X, int object if python 3.X
"""
if sys.version_info < (3,):
return long(hash_key) # pylint: disable=incompatible-py3-code
else:
return int(hash_key)
def reshard(stream_name, desired_size, force=False,
            region=None, key=None, keyid=None, profile=None):
    """
    Reshard a kinesis stream. Each call to this function will wait until the stream is ACTIVE,
    then make a single split or merge operation. This function decides where to split or merge
    with the assumption that the ultimate goal is a balanced partition space.
    For safety, the user must pass in force=True; otherwise, the function will dry run.
    Callers are expected to invoke this repeatedly until it returns False.

    CLI example::

        salt myminion boto_kinesis.reshard my_stream N True region=us-east-1

    :return: True if a split or merge was found/performed, False if nothing is needed
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    # Block until the stream is ACTIVE; split/merge is rejected otherwise.
    stream_response = get_stream_when_active(stream_name, region, key, keyid, profile)
    if 'error' in stream_response:
        return stream_response
    stream_details = stream_response['result']["StreamDescription"]
    # Adds the sorted "OpenShards" list and yields the overall hash-key range.
    min_hash_key, max_hash_key, stream_details = get_info_for_reshard(stream_details)
    log.debug("found %s open shards, min_hash_key %s max_hash_key %s",
              len(stream_details["OpenShards"]), min_hash_key, max_hash_key)
    # find the first open shard that doesn't match the desired pattern. When we find it,
    # either split or merge (depending on if it's too big or too small), and then return.
    for shard_num, shard in enumerate(stream_details["OpenShards"]):
        shard_id = shard["ShardId"]
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            # something went wrong, there's a closed shard in our open shard list
            log.debug("this should never happen! closed shard %s", shard_id)
            continue
        starting_hash_key = shard["HashKeyRange"]["StartingHashKey"]
        ending_hash_key = shard["HashKeyRange"]["EndingHashKey"]
        # this weird math matches what AWS does when you create a kinesis stream
        # with an initial number of shards.
        # NOTE(review): on Python 3 `/` is float division here — presumably the
        # huge-int arithmetic relies on Python 2 integer division; verify.
        expected_starting_hash_key = (
            max_hash_key - min_hash_key) / desired_size * shard_num + shard_num
        expected_ending_hash_key = (
            max_hash_key - min_hash_key) / desired_size * (shard_num + 1) + shard_num
        # fix an off-by-one at the end
        if expected_ending_hash_key > max_hash_key:
            expected_ending_hash_key = max_hash_key
        log.debug(
            "Shard %s (%s) should start at %s: %s",
            shard_num, shard_id, expected_starting_hash_key,
            starting_hash_key == expected_starting_hash_key
        )
        log.debug(
            "Shard %s (%s) should end at %s: %s",
            shard_num, shard_id, expected_ending_hash_key,
            ending_hash_key == expected_ending_hash_key
        )
        # Shards are visited in StartingHashKey order, so by induction each
        # shard must begin exactly where the previous expected range ended.
        if starting_hash_key != expected_starting_hash_key:
            r['error'] = "starting hash keys mismatch, don't know what to do!"
            return r
        # This shard already matches the target layout; check the next one.
        if ending_hash_key == expected_ending_hash_key:
            continue
        if ending_hash_key > expected_ending_hash_key + 1:
            # split at expected_ending_hash_key
            if force:
                log.debug("%s should end at %s, actual %s, splitting",
                          shard_id, expected_ending_hash_key, ending_hash_key)
                r = _execute_with_retries(conn,
                                          "split_shard",
                                          StreamName=stream_name,
                                          ShardToSplit=shard_id,
                                          NewStartingHashKey=str(expected_ending_hash_key + 1))  # future lint: disable=blacklisted-function
            else:
                log.debug("%s should end at %s, actual %s would split",
                          shard_id, expected_ending_hash_key, ending_hash_key)
            if 'error' not in r:
                r['result'] = True
            return r
        else:
            # merge
            next_shard_id = _get_next_open_shard(stream_details, shard_id)
            if not next_shard_id:
                r['error'] = "failed to find next shard after {0}".format(shard_id)
                return r
            if force:
                log.debug("%s should continue past %s, merging with %s",
                          shard_id, ending_hash_key, next_shard_id)
                r = _execute_with_retries(conn,
                                          "merge_shards",
                                          StreamName=stream_name,
                                          ShardToMerge=shard_id,
                                          AdjacentShardToMerge=next_shard_id)
            else:
                log.debug("%s should continue past %s, would merge with %s",
                          shard_id, ending_hash_key, next_shard_id)
            if 'error' not in r:
                r['result'] = True
            return r

    # Every open shard already matches the desired layout.
    log.debug("No split or merge action necessary")
    r['result'] = False
    return r
def _get_next_open_shard(stream_details, shard_id):
'''
Return the next open shard after shard_id
CLI example::
salt myminion boto_kinesis._get_next_open_shard existing_stream_details shard_id
'''
found = False
for shard in stream_details["OpenShards"]:
current_shard_id = shard["ShardId"]
if current_shard_id == shard_id:
found = True
continue
if found:
return current_shard_id
def _execute_with_retries(conn, function, **kwargs):
'''
Retry if we're rate limited by AWS or blocked by another call.
Give up and return error message if resource not found or argument is invalid.
conn
The connection established by the calling method via _get_conn()
function
The function to call on conn. i.e. create_stream
**kwargs
Any kwargs required by the above function, with their keywords
i.e. StreamName=stream_name
Returns:
The result dict with the HTTP response and JSON data if applicable
as 'result', or an error as 'error'
CLI example::
salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
'''
r = {}
max_attempts = 18
max_retry_delay = 10
for attempt in range(max_attempts):
log.info("attempt: %s function: %s", attempt, function)
try:
fn = getattr(conn, function)
r['result'] = fn(**kwargs)
return r
except botocore.exceptions.ClientError as e:
error_code = e.response['Error']['Code']
if "LimitExceededException" in error_code or "ResourceInUseException" in error_code:
# could be rate limited by AWS or another command is blocking,
# retry with exponential backoff
log.debug("Retrying due to AWS exception", exc_info=True)
time.sleep(_jittered_backoff(attempt, max_retry_delay))
else:
# ResourceNotFoundException or InvalidArgumentException
r['error'] = e.response['Error']
log.error(r['error'])
r['result'] = None
return r
r['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts)
log.error(r['error'])
return r
def _jittered_backoff(attempt, max_retry_delay):
'''
Basic exponential backoff
CLI example::
salt myminion boto_kinesis._jittered_backoff current_attempt_number max_delay_in_seconds
'''
return min(random.random() * (2 ** attempt), max_retry_delay)
|
saltstack/salt
|
salt/modules/boto_kinesis.py
|
_get_next_open_shard
|
python
|
def _get_next_open_shard(stream_details, shard_id):
'''
Return the next open shard after shard_id
CLI example::
salt myminion boto_kinesis._get_next_open_shard existing_stream_details shard_id
'''
found = False
for shard in stream_details["OpenShards"]:
current_shard_id = shard["ShardId"]
if current_shard_id == shard_id:
found = True
continue
if found:
return current_shard_id
|
Return the next open shard after shard_id
CLI example::
salt myminion boto_kinesis._get_next_open_shard existing_stream_details shard_id
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kinesis.py#L476-L491
| null |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon Kinesis
.. versionadded:: 2017.7.0
:configuration: This module accepts explicit Kinesis credentials but can also
utilize IAM roles assigned to the instance trough Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
kinesis.keyid: GKTADJGHEIQSXMKKRBJ08H
kinesis.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
kinesis.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import random
import sys
# Import Salt libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
import salt.utils.versions
# Import third party libs
# pylint: disable=unused-import
try:
import boto3
import botocore
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=unused-import
log = logging.getLogger(__name__)
__virtualname__ = 'boto_kinesis'
def __virtual__():
'''
Only load if boto3 libraries exist.
'''
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
__utils__['boto3.assign_funcs'](__name__, 'kinesis')
return __virtualname__
return has_boto_reqs
def _get_basic_stream(stream_name, conn):
'''
Stream info from AWS, via describe_stream
Only returns the first "page" of shards (up to 100); use _get_full_stream() for all shards.
CLI example::
salt myminion boto_kinesis._get_basic_stream my_stream existing_conn
'''
return _execute_with_retries(conn, "describe_stream", StreamName=stream_name)
def _get_full_stream(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Get complete stream info from AWS, via describe_stream, including all shards.
CLI example::
salt myminion boto_kinesis._get_full_stream my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
stream = _get_basic_stream(stream_name, conn)['result']
full_stream = stream
# iterate through if there are > 100 shards (max that AWS will return from describe_stream)
while stream["StreamDescription"]["HasMoreShards"]:
stream = _execute_with_retries(conn,
"describe_stream",
StreamName=stream_name,
ExclusiveStartShardId=stream["StreamDescription"]["Shards"][-1]["ShardId"])
stream = stream['result']
full_stream["StreamDescription"]["Shards"] += stream["StreamDescription"]["Shards"]
r['result'] = full_stream
return r
def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Get complete stream info from AWS, returning only when the stream is in the ACTIVE state.
Continues to retry when stream is updating or creating.
If the stream is deleted during retries, the loop will catch the error and break.
CLI example::
salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
stream_status = None
# only get basic stream until it's active,
# so we don't pull the full list of shards repeatedly (in case of very large stream)
attempt = 0
max_retry_delay = 10
while stream_status != "ACTIVE":
time.sleep(_jittered_backoff(attempt, max_retry_delay))
attempt += 1
stream_response = _get_basic_stream(stream_name, conn)
if 'error' in stream_response:
return stream_response
stream_status = stream_response['result']["StreamDescription"]["StreamStatus"]
# now it's active, get the full stream if necessary
if stream_response['result']["StreamDescription"]["HasMoreShards"]:
stream_response = _get_full_stream(stream_name, region, key, keyid, profile)
return stream_response
def exists(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Check if the stream exists. Returns False and the error if it does not.
CLI example::
salt myminion boto_kinesis.exists my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
stream = _get_basic_stream(stream_name, conn)
if 'error' in stream:
r['result'] = False
r['error'] = stream['error']
else:
r['result'] = True
return r
def create_stream(stream_name, num_shards, region=None, key=None, keyid=None, profile=None):
'''
Create a stream with name stream_name and initial number of shards num_shards.
CLI example::
salt myminion boto_kinesis.create_stream my_stream N region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = _execute_with_retries(conn,
"create_stream",
ShardCount=num_shards,
StreamName=stream_name)
if 'error' not in r:
r['result'] = True
return r
def delete_stream(stream_name, region=None, key=None, keyid=None, profile=None):
'''
Delete the stream with name stream_name. This cannot be undone! All data will be lost!!
CLI example::
salt myminion boto_kinesis.delete_stream my_stream region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = _execute_with_retries(conn,
"delete_stream",
StreamName=stream_name)
if 'error' not in r:
r['result'] = True
return r
def increase_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Increase stream retention period to retention_hours

    CLI example::

        salt myminion boto_kinesis.increase_stream_retention_period my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        conn,
        "increase_stream_retention_period",
        StreamName=stream_name,
        RetentionPeriodHours=retention_hours,
    )
    if 'error' not in ret:
        ret['result'] = True
    return ret
def decrease_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Decrease stream retention period to retention_hours

    CLI example::

        salt myminion boto_kinesis.decrease_stream_retention_period my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        conn,
        "decrease_stream_retention_period",
        StreamName=stream_name,
        RetentionPeriodHours=retention_hours,
    )
    if 'error' not in ret:
        ret['result'] = True
    return ret
def enable_enhanced_monitoring(stream_name, metrics,
                               region=None, key=None, keyid=None, profile=None):
    '''
    Enable enhanced monitoring for the specified shard-level metrics on stream stream_name

    CLI example::

        salt myminion boto_kinesis.enable_enhanced_monitoring my_stream ["metrics", "to", "enable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        conn,
        "enable_enhanced_monitoring",
        StreamName=stream_name,
        ShardLevelMetrics=metrics,
    )
    if 'error' not in ret:
        ret['result'] = True
    return ret
def disable_enhanced_monitoring(stream_name, metrics,
                                region=None, key=None, keyid=None, profile=None):
    '''
    Disable enhanced monitoring for the specified shard-level metrics on stream stream_name

    CLI example::

        salt myminion boto_kinesis.disable_enhanced_monitoring my_stream ["metrics", "to", "disable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = _execute_with_retries(
        conn,
        "disable_enhanced_monitoring",
        StreamName=stream_name,
        ShardLevelMetrics=metrics,
    )
    if 'error' not in ret:
        ret['result'] = True
    return ret
def get_info_for_reshard(stream_details):
    """
    Collect some data: number of open shards, key range, etc.
    Modifies stream_details to add a sorted list of OpenShards.
    Returns (min_hash_key, max_hash_key, stream_details)

    CLI example::

        salt myminion boto_kinesis.get_info_for_reshard existing_stream_details
    """
    min_hash_key = 0
    max_hash_key = 0
    open_shards = []
    for shard in stream_details["Shards"]:
        # EndingSequenceNumber is only present on closed shards
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            log.debug("skipping closed shard %s", shard["ShardId"])
            continue
        open_shards.append(shard)
        key_range = shard["HashKeyRange"]
        # convert the string hash keys in place so later comparisons are numeric
        key_range["StartingHashKey"] = long_int(key_range["StartingHashKey"])
        key_range["EndingHashKey"] = long_int(key_range["EndingHashKey"])
        min_hash_key = min(min_hash_key, key_range["StartingHashKey"])
        max_hash_key = max(max_hash_key, key_range["EndingHashKey"])
    open_shards.sort(key=lambda item: long_int(item["HashKeyRange"]["StartingHashKey"]))
    stream_details["OpenShards"] = open_shards
    return min_hash_key, max_hash_key, stream_details
def long_int(hash_key):
    """
    The hash key is a 128-bit int, sent as a string.
    It's necessary to convert to int/long for comparison operations.
    This helper method handles python 2/3 incompatibility

    CLI example::

        salt myminion boto_kinesis.long_int some_MD5_hash_as_string

    :return: long object if python 2.X, int object if python 3.X
    """
    if sys.version_info >= (3,):
        return int(hash_key)
    return long(hash_key)  # pylint: disable=incompatible-py3-code
def reshard(stream_name, desired_size, force=False,
            region=None, key=None, keyid=None, profile=None):
    """
    Reshard a kinesis stream. Each call to this function will wait until the stream is ACTIVE,
    then make a single split or merge operation. This function decides where to split or merge
    with the assumption that the ultimate goal is a balanced partition space.

    For safety, user must pass in force=True; otherwise, the function will dry run.

    CLI example::

        salt myminion boto_kinesis.reshard my_stream N True region=us-east-1

    :return: True if a split or merge was found/performed, False if nothing is needed
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    # block until the stream is ACTIVE; resharding an updating stream would fail
    stream_response = get_stream_when_active(stream_name, region, key, keyid, profile)
    if 'error' in stream_response:
        return stream_response
    stream_details = stream_response['result']["StreamDescription"]
    min_hash_key, max_hash_key, stream_details = get_info_for_reshard(stream_details)
    log.debug("found %s open shards, min_hash_key %s max_hash_key %s",
              len(stream_details["OpenShards"]), min_hash_key, max_hash_key)

    # find the first open shard that doesn't match the desired pattern. When we find it,
    # either split or merge (depending on if it's too big or too small), and then return.
    for shard_num, shard in enumerate(stream_details["OpenShards"]):
        shard_id = shard["ShardId"]
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            # something went wrong, there's a closed shard in our open shard list
            log.debug("this should never happen! closed shard %s", shard_id)
            continue
        starting_hash_key = shard["HashKeyRange"]["StartingHashKey"]
        ending_hash_key = shard["HashKeyRange"]["EndingHashKey"]
        # this weird math matches what AWS does when you create a kinesis stream
        # with an initial number of shards.
        # NOTE(review): the module does not import __future__.division, so on
        # Python 3 this is float division while on Python 2 it truncates —
        # confirm the comparisons below behave as intended on both.
        expected_starting_hash_key = (
            max_hash_key - min_hash_key) / desired_size * shard_num + shard_num
        expected_ending_hash_key = (
            max_hash_key - min_hash_key) / desired_size * (shard_num + 1) + shard_num
        # fix an off-by-one at the end
        if expected_ending_hash_key > max_hash_key:
            expected_ending_hash_key = max_hash_key
        log.debug(
            "Shard %s (%s) should start at %s: %s",
            shard_num, shard_id, expected_starting_hash_key,
            starting_hash_key == expected_starting_hash_key
        )
        log.debug(
            "Shard %s (%s) should end at %s: %s",
            shard_num, shard_id, expected_ending_hash_key,
            ending_hash_key == expected_ending_hash_key
        )
        if starting_hash_key != expected_starting_hash_key:
            # earlier shards must already be aligned; bail out rather than guess
            r['error'] = "starting hash keys mismatch, don't know what to do!"
            return r
        if ending_hash_key == expected_ending_hash_key:
            # this shard is already the right size; check the next one
            continue
        if ending_hash_key > expected_ending_hash_key + 1:
            # shard is too wide: split at expected_ending_hash_key
            if force:
                log.debug("%s should end at %s, actual %s, splitting",
                          shard_id, expected_ending_hash_key, ending_hash_key)
                r = _execute_with_retries(conn,
                                          "split_shard",
                                          StreamName=stream_name,
                                          ShardToSplit=shard_id,
                                          NewStartingHashKey=str(expected_ending_hash_key + 1))  # future lint: disable=blacklisted-function
            else:
                # dry run: only report what would be done
                log.debug("%s should end at %s, actual %s would split",
                          shard_id, expected_ending_hash_key, ending_hash_key)
            if 'error' not in r:
                r['result'] = True
            return r
        else:
            # shard is too narrow: merge with its neighbor
            next_shard_id = _get_next_open_shard(stream_details, shard_id)
            if not next_shard_id:
                r['error'] = "failed to find next shard after {0}".format(shard_id)
                return r
            if force:
                log.debug("%s should continue past %s, merging with %s",
                          shard_id, ending_hash_key, next_shard_id)
                r = _execute_with_retries(conn,
                                          "merge_shards",
                                          StreamName=stream_name,
                                          ShardToMerge=shard_id,
                                          AdjacentShardToMerge=next_shard_id)
            else:
                log.debug("%s should continue past %s, would merge with %s",
                          shard_id, ending_hash_key, next_shard_id)
            if 'error' not in r:
                r['result'] = True
            return r
    # every open shard matched the desired layout
    log.debug("No split or merge action necessary")
    r['result'] = False
    return r
def list_streams(region=None, key=None, keyid=None, profile=None):
    '''
    Return a list of all streams visible to the current account

    CLI example:

    .. code-block:: bash

        salt myminion boto_kinesis.list_streams
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    streams = []
    marker = ''
    # AWS pages the listing; follow HasMoreStreams until exhausted
    while marker is not None:
        kwargs = {}
        if marker:
            kwargs['ExclusiveStartStreamName'] = marker
        page = _execute_with_retries(conn, 'list_streams', **kwargs)
        if 'error' in page:
            return page
        page = page['result'] if page and page.get('result') else {}
        streams += page.get('StreamNames', [])
        if page.get('HasMoreStreams', False) in (True, 'true'):
            marker = streams[-1]
        else:
            marker = None
    return {'result': streams}
def _execute_with_retries(conn, function, **kwargs):
    '''
    Retry if we're rate limited by AWS or blocked by another call.
    Give up and return error message if resource not found or argument is invalid.

    conn
        The connection established by the calling method via _get_conn()

    function
        The function to call on conn. i.e. create_stream

    **kwargs
        Any kwargs required by the above function, with their keywords
        i.e. StreamName=stream_name

    Returns:
        The result dict with the HTTP response and JSON data if applicable
        as 'result', or an error as 'error'

    CLI example::

        salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
    '''
    result = {}
    max_attempts = 18
    max_retry_delay = 10
    for attempt in range(max_attempts):
        log.info("attempt: %s function: %s", attempt, function)
        try:
            result['result'] = getattr(conn, function)(**kwargs)
            return result
        except botocore.exceptions.ClientError as err:
            code = err.response['Error']['Code']
            retryable = ("LimitExceededException" in code
                         or "ResourceInUseException" in code)
            if not retryable:
                # ResourceNotFoundException or InvalidArgumentException: give up
                result['error'] = err.response['Error']
                log.error(result['error'])
                result['result'] = None
                return result
            # could be rate limited by AWS or another command is blocking,
            # retry with exponential backoff
            log.debug("Retrying due to AWS exception", exc_info=True)
            time.sleep(_jittered_backoff(attempt, max_retry_delay))
    result['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts)
    log.error(result['error'])
    return result
def _jittered_backoff(attempt, max_retry_delay):
    '''
    Basic exponential backoff

    CLI example::

        salt myminion boto_kinesis._jittered_backoff current_attempt_number max_delay_in_seconds
    '''
    # jitter in [0, 2**attempt), never longer than max_retry_delay
    delay = (2 ** attempt) * random.random()
    return delay if delay < max_retry_delay else max_retry_delay
|
saltstack/salt
|
salt/modules/boto_kinesis.py
|
_execute_with_retries
|
python
|
def _execute_with_retries(conn, function, **kwargs):
'''
Retry if we're rate limited by AWS or blocked by another call.
Give up and return error message if resource not found or argument is invalid.
conn
The connection established by the calling method via _get_conn()
function
The function to call on conn. i.e. create_stream
**kwargs
Any kwargs required by the above function, with their keywords
i.e. StreamName=stream_name
Returns:
The result dict with the HTTP response and JSON data if applicable
as 'result', or an error as 'error'
CLI example::
salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
'''
r = {}
max_attempts = 18
max_retry_delay = 10
for attempt in range(max_attempts):
log.info("attempt: %s function: %s", attempt, function)
try:
fn = getattr(conn, function)
r['result'] = fn(**kwargs)
return r
except botocore.exceptions.ClientError as e:
error_code = e.response['Error']['Code']
if "LimitExceededException" in error_code or "ResourceInUseException" in error_code:
# could be rate limited by AWS or another command is blocking,
# retry with exponential backoff
log.debug("Retrying due to AWS exception", exc_info=True)
time.sleep(_jittered_backoff(attempt, max_retry_delay))
else:
# ResourceNotFoundException or InvalidArgumentException
r['error'] = e.response['Error']
log.error(r['error'])
r['result'] = None
return r
r['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts)
log.error(r['error'])
return r
|
Retry if we're rate limited by AWS or blocked by another call.
Give up and return error message if resource not found or argument is invalid.
conn
The connection established by the calling method via _get_conn()
function
The function to call on conn. i.e. create_stream
**kwargs
Any kwargs required by the above function, with their keywords
i.e. StreamName=stream_name
Returns:
The result dict with the HTTP response and JSON data if applicable
as 'result', or an error as 'error'
CLI example::
salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kinesis.py#L494-L543
|
[
"def _jittered_backoff(attempt, max_retry_delay):\n '''\n Basic exponential backoff\n\n CLI example::\n\n salt myminion boto_kinesis._jittered_backoff current_attempt_number max_delay_in_seconds\n '''\n return min(random.random() * (2 ** attempt), max_retry_delay)\n"
] |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon Kinesis
.. versionadded:: 2017.7.0
:configuration: This module accepts explicit Kinesis credentials but can also
utilize IAM roles assigned to the instance trough Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
kinesis.keyid: GKTADJGHEIQSXMKKRBJ08H
kinesis.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
kinesis.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import random
import sys
# Import Salt libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
import salt.utils.versions
# Import third party libs
# pylint: disable=unused-import
try:
import boto3
import botocore
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=unused-import
log = logging.getLogger(__name__)
__virtualname__ = 'boto_kinesis'
def __virtual__():
    '''
    Only load if boto3 libraries exist.

    Returns the module's virtual name on success, otherwise the
    reason string produced by check_boto_reqs().
    '''
    has_boto_reqs = salt.utils.versions.check_boto_reqs()
    if has_boto_reqs is True:
        # attach _get_conn/_cache_id helpers for the kinesis service
        __utils__['boto3.assign_funcs'](__name__, 'kinesis')
        return __virtualname__
    return has_boto_reqs
def _get_basic_stream(stream_name, conn):
    '''
    Stream info from AWS, via describe_stream
    Only returns the first "page" of shards (up to 100); use _get_full_stream() for all shards.

    stream_name
        Name of the Kinesis stream to describe.

    conn
        Existing boto3 kinesis client from _get_conn().

    CLI example::

        salt myminion boto_kinesis._get_basic_stream my_stream existing_conn
    '''
    return _execute_with_retries(conn, "describe_stream", StreamName=stream_name)
def _get_full_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get complete stream info from AWS, via describe_stream, including all shards.

    stream_name
        Name of the Kinesis stream to describe.

    region, key, keyid, profile
        Standard boto connection parameters.

    Returns a dict with the full stream description as 'result', or the AWS
    error as 'error' (previously a failed describe raised KeyError here).

    CLI example::

        salt myminion boto_kinesis._get_full_stream my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    stream = _get_basic_stream(stream_name, conn)
    if 'error' in stream:
        # propagate the AWS error dict like the other module functions do,
        # instead of raising KeyError on the missing 'result' key
        return stream
    stream = stream['result']
    full_stream = stream
    # iterate through if there are > 100 shards (max that AWS will return from describe_stream)
    while stream["StreamDescription"]["HasMoreShards"]:
        stream = _execute_with_retries(conn,
                                       "describe_stream",
                                       StreamName=stream_name,
                                       ExclusiveStartShardId=stream["StreamDescription"]["Shards"][-1]["ShardId"])
        if 'error' in stream:
            return stream
        stream = stream['result']
        full_stream["StreamDescription"]["Shards"] += stream["StreamDescription"]["Shards"]
    return {'result': full_stream}
def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get complete stream info from AWS, returning only when the stream is in the ACTIVE state.
    Continues to retry when stream is updating or creating.
    If the stream is deleted during retries, the loop will catch the error and break.

    stream_name
        Name of the Kinesis stream to wait for.

    region, key, keyid, profile
        Standard boto connection parameters.

    CLI example::

        salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    stream_status = None
    # only get basic stream until it's active,
    # so we don't pull the full list of shards repeatedly (in case of very large stream)
    attempt = 0
    max_retry_delay = 10
    while stream_status != "ACTIVE":
        # backoff grows with each attempt (first sleep is ~0, since 2**0 * jitter)
        time.sleep(_jittered_backoff(attempt, max_retry_delay))
        attempt += 1
        stream_response = _get_basic_stream(stream_name, conn)
        if 'error' in stream_response:
            return stream_response
        stream_status = stream_response['result']["StreamDescription"]["StreamStatus"]

    # now it's active, get the full stream if necessary
    if stream_response['result']["StreamDescription"]["HasMoreShards"]:
        stream_response = _get_full_stream(stream_name, region, key, keyid, profile)
    return stream_response
def exists(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Check if the stream exists. Returns False and the error if it does not.

    stream_name
        Name of the Kinesis stream to look up.

    Returns a dict with 'result' (bool) and, on failure, the AWS 'error'.

    CLI example::

        salt myminion boto_kinesis.exists my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    stream = _get_basic_stream(stream_name, conn)
    if 'error' in stream:
        r['result'] = False
        r['error'] = stream['error']
    else:
        r['result'] = True
    return r
def create_stream(stream_name, num_shards, region=None, key=None, keyid=None, profile=None):
    '''
    Create a stream with name stream_name and initial number of shards num_shards.

    Returns the retry-wrapped AWS response with 'result': True on success.

    CLI example::

        salt myminion boto_kinesis.create_stream my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = _execute_with_retries(conn,
                              "create_stream",
                              ShardCount=num_shards,
                              StreamName=stream_name)
    if 'error' not in r:
        r['result'] = True
    return r
def delete_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''
    Delete the stream with name stream_name. This cannot be undone! All data will be lost!!

    Returns the retry-wrapped AWS response with 'result': True on success.

    CLI example::

        salt myminion boto_kinesis.delete_stream my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = _execute_with_retries(conn,
                              "delete_stream",
                              StreamName=stream_name)
    if 'error' not in r:
        r['result'] = True
    return r
def increase_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Increase stream retention period to retention_hours

    retention_hours
        New retention period in hours; must be greater than the current value.

    CLI example::

        salt myminion boto_kinesis.increase_stream_retention_period my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = _execute_with_retries(conn,
                              "increase_stream_retention_period",
                              StreamName=stream_name,
                              RetentionPeriodHours=retention_hours)
    if 'error' not in r:
        r['result'] = True
    return r
def decrease_stream_retention_period(stream_name, retention_hours,
                                     region=None, key=None, keyid=None, profile=None):
    '''
    Decrease stream retention period to retention_hours

    retention_hours
        New retention period in hours; must be less than the current value.

    CLI example::

        salt myminion boto_kinesis.decrease_stream_retention_period my_stream N region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = _execute_with_retries(conn,
                              "decrease_stream_retention_period",
                              StreamName=stream_name,
                              RetentionPeriodHours=retention_hours)
    if 'error' not in r:
        r['result'] = True
    return r
def enable_enhanced_monitoring(stream_name, metrics,
                               region=None, key=None, keyid=None, profile=None):
    '''
    Enable enhanced monitoring for the specified shard-level metrics on stream stream_name

    metrics
        List of shard-level metric names to enable.

    CLI example::

        salt myminion boto_kinesis.enable_enhanced_monitoring my_stream ["metrics", "to", "enable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = _execute_with_retries(conn,
                              "enable_enhanced_monitoring",
                              StreamName=stream_name,
                              ShardLevelMetrics=metrics)
    if 'error' not in r:
        r['result'] = True
    return r
def disable_enhanced_monitoring(stream_name, metrics,
                                region=None, key=None, keyid=None, profile=None):
    '''
    Disable enhanced monitoring for the specified shard-level metrics on stream stream_name

    metrics
        List of shard-level metric names to disable.

    CLI example::

        salt myminion boto_kinesis.disable_enhanced_monitoring my_stream ["metrics", "to", "disable"] region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = _execute_with_retries(conn,
                              "disable_enhanced_monitoring",
                              StreamName=stream_name,
                              ShardLevelMetrics=metrics)
    if 'error' not in r:
        r['result'] = True
    return r
def get_info_for_reshard(stream_details):
    """
    Collect some data: number of open shards, key range, etc.
    Modifies stream_details to add a sorted list of OpenShards.
    Returns (min_hash_key, max_hash_key, stream_details)

    stream_details
        The "StreamDescription" dict from describe_stream; mutated in place
        (hash keys converted from str to int, "OpenShards" list added).

    CLI example::

        salt myminion boto_kinesis.get_info_for_reshard existing_stream_details
    """
    min_hash_key = 0
    max_hash_key = 0
    stream_details["OpenShards"] = []
    for shard in stream_details["Shards"]:
        shard_id = shard["ShardId"]
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            # EndingSequenceNumber is null for open shards, so this shard must be closed
            log.debug("skipping closed shard %s", shard_id)
            continue
        stream_details["OpenShards"].append(shard)
        # convert string hash keys to ints in place for numeric comparison
        shard["HashKeyRange"]["StartingHashKey"] = long_int(
            shard["HashKeyRange"]["StartingHashKey"])
        shard["HashKeyRange"]["EndingHashKey"] = long_int(
            shard["HashKeyRange"]["EndingHashKey"])
        if shard["HashKeyRange"]["StartingHashKey"] < min_hash_key:
            min_hash_key = shard["HashKeyRange"]["StartingHashKey"]
        if shard["HashKeyRange"]["EndingHashKey"] > max_hash_key:
            max_hash_key = shard["HashKeyRange"]["EndingHashKey"]
    # sort open shards by starting hash key so resharding can walk them in order
    stream_details["OpenShards"].sort(key=lambda shard: long_int(
        shard["HashKeyRange"]["StartingHashKey"]))
    return min_hash_key, max_hash_key, stream_details
def long_int(hash_key):
    """
    The hash key is a 128-bit int, sent as a string.
    It's necessary to convert to int/long for comparison operations.
    This helper method handles python 2/3 incompatibility

    CLI example::

        salt myminion boto_kinesis.long_int some_MD5_hash_as_string

    :return: long object if python 2.X, int object if python 3.X
    """
    if sys.version_info[0] < 3:
        return long(hash_key)  # pylint: disable=incompatible-py3-code
    return int(hash_key)
def reshard(stream_name, desired_size, force=False,
            region=None, key=None, keyid=None, profile=None):
    """
    Reshard a kinesis stream. Each call to this function will wait until the stream is ACTIVE,
    then make a single split or merge operation. This function decides where to split or merge
    with the assumption that the ultimate goal is a balanced partition space.

    For safety, user must pass in force=True; otherwise, the function will dry run.

    CLI example::

        salt myminion boto_kinesis.reshard my_stream N True region=us-east-1

    :return: True if a split or merge was found/performed, False if nothing is needed
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    # wait for ACTIVE: split/merge requests fail while a stream is updating
    stream_response = get_stream_when_active(stream_name, region, key, keyid, profile)
    if 'error' in stream_response:
        return stream_response
    stream_details = stream_response['result']["StreamDescription"]
    min_hash_key, max_hash_key, stream_details = get_info_for_reshard(stream_details)
    log.debug("found %s open shards, min_hash_key %s max_hash_key %s",
              len(stream_details["OpenShards"]), min_hash_key, max_hash_key)

    # find the first open shard that doesn't match the desired pattern. When we find it,
    # either split or merge (depending on if it's too big or too small), and then return.
    for shard_num, shard in enumerate(stream_details["OpenShards"]):
        shard_id = shard["ShardId"]
        if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
            # something went wrong, there's a closed shard in our open shard list
            log.debug("this should never happen! closed shard %s", shard_id)
            continue
        starting_hash_key = shard["HashKeyRange"]["StartingHashKey"]
        ending_hash_key = shard["HashKeyRange"]["EndingHashKey"]
        # this weird math matches what AWS does when you create a kinesis stream
        # with an initial number of shards.
        # NOTE(review): without __future__.division this is float division on
        # Python 3 and integer division on Python 2 — confirm intended.
        expected_starting_hash_key = (
            max_hash_key - min_hash_key) / desired_size * shard_num + shard_num
        expected_ending_hash_key = (
            max_hash_key - min_hash_key) / desired_size * (shard_num + 1) + shard_num
        # fix an off-by-one at the end
        if expected_ending_hash_key > max_hash_key:
            expected_ending_hash_key = max_hash_key
        log.debug(
            "Shard %s (%s) should start at %s: %s",
            shard_num, shard_id, expected_starting_hash_key,
            starting_hash_key == expected_starting_hash_key
        )
        log.debug(
            "Shard %s (%s) should end at %s: %s",
            shard_num, shard_id, expected_ending_hash_key,
            ending_hash_key == expected_ending_hash_key
        )
        if starting_hash_key != expected_starting_hash_key:
            # preceding shards should have been aligned by earlier calls
            r['error'] = "starting hash keys mismatch, don't know what to do!"
            return r
        if ending_hash_key == expected_ending_hash_key:
            # this shard already matches the target layout
            continue
        if ending_hash_key > expected_ending_hash_key + 1:
            # split at expected_ending_hash_key
            if force:
                log.debug("%s should end at %s, actual %s, splitting",
                          shard_id, expected_ending_hash_key, ending_hash_key)
                r = _execute_with_retries(conn,
                                          "split_shard",
                                          StreamName=stream_name,
                                          ShardToSplit=shard_id,
                                          NewStartingHashKey=str(expected_ending_hash_key + 1))  # future lint: disable=blacklisted-function
            else:
                # dry run: report only
                log.debug("%s should end at %s, actual %s would split",
                          shard_id, expected_ending_hash_key, ending_hash_key)
            if 'error' not in r:
                r['result'] = True
            return r
        else:
            # merge
            next_shard_id = _get_next_open_shard(stream_details, shard_id)
            if not next_shard_id:
                r['error'] = "failed to find next shard after {0}".format(shard_id)
                return r
            if force:
                log.debug("%s should continue past %s, merging with %s",
                          shard_id, ending_hash_key, next_shard_id)
                r = _execute_with_retries(conn,
                                          "merge_shards",
                                          StreamName=stream_name,
                                          ShardToMerge=shard_id,
                                          AdjacentShardToMerge=next_shard_id)
            else:
                log.debug("%s should continue past %s, would merge with %s",
                          shard_id, ending_hash_key, next_shard_id)
            if 'error' not in r:
                r['result'] = True
            return r
    # all open shards already match the target layout
    log.debug("No split or merge action necessary")
    r['result'] = False
    return r
def list_streams(region=None, key=None, keyid=None, profile=None):
    '''
    Return a list of all streams visible to the current account

    Follows AWS pagination (HasMoreStreams / ExclusiveStartStreamName) until
    every page has been collected.

    CLI example:

    .. code-block:: bash

        salt myminion boto_kinesis.list_streams
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    streams = []
    exclusive_start_stream_name = ''
    while exclusive_start_stream_name is not None:
        # only send the pagination marker after the first page
        args = {'ExclusiveStartStreamName': exclusive_start_stream_name} if exclusive_start_stream_name else {}
        ret = _execute_with_retries(conn, 'list_streams', **args)
        if 'error' in ret:
            return ret
        ret = ret['result'] if ret and ret.get('result') else {}
        streams += ret.get('StreamNames', [])
        # last collected name is the marker for the next page; None ends the loop
        exclusive_start_stream_name = streams[-1] if ret.get('HasMoreStreams', False) in (True, 'true') else None
    return {'result': streams}
def _get_next_open_shard(stream_details, shard_id):
    '''
    Return the next open shard after shard_id

    Returns None implicitly when shard_id is last or not present.

    CLI example::

        salt myminion boto_kinesis._get_next_open_shard existing_stream_details shard_id
    '''
    seen_target = False
    for shard in stream_details["OpenShards"]:
        current = shard["ShardId"]
        if seen_target:
            return current
        if current == shard_id:
            seen_target = True
def _jittered_backoff(attempt, max_retry_delay):
    '''
    Basic exponential backoff

    CLI example::

        salt myminion boto_kinesis._jittered_backoff current_attempt_number max_delay_in_seconds
    '''
    # random jitter scaled by 2**attempt, capped at max_retry_delay
    return min(max_retry_delay, random.random() * 2 ** attempt)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.