repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
saltstack/salt
|
salt/modules/win_service.py
|
modify
|
python
|
def modify(name,
           bin_path=None,
           exe_args=None,
           display_name=None,
           description=None,
           service_type=None,
           start_type=None,
           start_delayed=None,
           error_control=None,
           load_order_group=None,
           dependencies=None,
           account_name=None,
           account_password=None,
           run_interactive=None):
    # pylint: disable=anomalous-backslash-in-string
    '''
    Modify a service's parameters. Changes will not be made for parameters that
    are not passed.
    .. versionadded:: 2016.11.0
    Args:
        name (str):
            The name of the service. Can be found using the
            ``service.get_service_name`` function
        bin_path (str):
            The path to the service executable. Backslashes must be escaped, eg:
            ``C:\\path\\to\\binary.exe``
        exe_args (str):
            Any arguments required by the service executable
        display_name (str):
            The name to display in the service manager
        description (str):
            The description to display for the service
        service_type (str):
            Specifies the service type. Default is ``own``. Valid options are as
            follows:
            - kernel: Driver service
            - filesystem: File system driver service
            - adapter: Adapter driver service (reserved)
            - recognizer: Recognizer driver service (reserved)
            - own (default): Service runs in its own process
            - share: Service shares a process with one or more other services
        start_type (str):
            Specifies the service start type. Valid options are as follows:
            - boot: Device driver that is loaded by the boot loader
            - system: Device driver that is started during kernel initialization
            - auto: Service that automatically starts
            - manual: Service must be started manually
            - disabled: Service cannot be started
        start_delayed (bool):
            Set the service to Auto(Delayed Start). Only valid if the start_type
            is set to ``Auto``. If service_type is not passed, but the service
            is already set to ``Auto``, then the flag will be set.
        error_control (str):
            The severity of the error, and action taken, if this service fails
            to start. Valid options are as follows:
            - normal: Error is logged and a message box is displayed
            - severe: Error is logged and computer attempts a restart with the
              last known good configuration
            - critical: Error is logged, computer attempts to restart with the
              last known good configuration, system halts on failure
            - ignore: Error is logged and startup continues, no notification is
              given to the user
        load_order_group (str):
            The name of the load order group to which this service belongs
        dependencies (list):
            A list of services or load ordering groups that must start before
            this service
        account_name (str):
            The name of the account under which the service should run. For
            ``own`` type services this should be in the ``domain\\username``
            format. The following are examples of valid built-in service
            accounts:
            - NT Authority\\LocalService
            - NT Authority\\NetworkService
            - NT Authority\\LocalSystem
            - .\\LocalSystem
        account_password (str):
            The password for the account name specified in ``account_name``. For
            the above built-in accounts, this can be None. Otherwise a password
            must be specified.
        run_interactive (bool):
            If this setting is True, the service will be allowed to interact
            with the user. Not recommended for services that run with elevated
            privileges.
    Returns:
        dict: a dictionary of changes made
    CLI Example:
    .. code-block:: bash
        salt '*' service.modify spooler start_type=disabled
    '''
    # pylint: enable=anomalous-backslash-in-string
    # https://msdn.microsoft.com/en-us/library/windows/desktop/ms681987(v=vs.85).aspx
    # https://msdn.microsoft.com/en-us/library/windows/desktop/ms681988(v=vs.85).aspx
    handle_scm = win32service.OpenSCManager(
        None, None, win32service.SC_MANAGER_CONNECT)
    try:
        handle_svc = win32service.OpenService(
            handle_scm,
            name,
            win32service.SERVICE_CHANGE_CONFIG |
            win32service.SERVICE_QUERY_CONFIG)
    except pywintypes.error as exc:
        # Don't leak the SCM handle when the service can't be opened
        win32service.CloseServiceHandle(handle_scm)
        raise CommandExecutionError(
            'Failed To Open {0}: {1}'.format(name, exc.strerror))
    # Close both handles even if one of the win32 calls below raises
    try:
        # Current config is needed to resolve run_interactive / start_delayed
        # when the corresponding parameter is not passed
        config_info = win32service.QueryServiceConfig(handle_svc)
        changes = dict()
        # Input Validation
        if bin_path is not None:
            # shlex.quote the path to the binary
            bin_path = _cmd_quote(bin_path)
            if exe_args is not None:
                bin_path = '{0} {1}'.format(bin_path, exe_args)
            changes['BinaryPath'] = bin_path
        if service_type is not None:
            if service_type.lower() in SERVICE_TYPE:
                # Map the friendly name to its win32 bit value
                service_type = SERVICE_TYPE[service_type.lower()]
                if run_interactive:
                    service_type = service_type | \
                        win32service.SERVICE_INTERACTIVE_PROCESS
            else:
                raise CommandExecutionError(
                    'Invalid Service Type: {0}'.format(service_type))
        else:
            if run_interactive is True:
                # Add the interactive bit to the existing service type
                service_type = config_info[0] | \
                    win32service.SERVICE_INTERACTIVE_PROCESS
            elif run_interactive is False:
                # Remove the interactive bit from the existing service type
                service_type = config_info[0] ^ \
                    win32service.SERVICE_INTERACTIVE_PROCESS
            else:
                service_type = win32service.SERVICE_NO_CHANGE
        # NOTE: use equality, not identity (`is not`), when comparing ints.
        # The original identity check only worked by accident of CPython's
        # small-int caching.
        if service_type != win32service.SERVICE_NO_CHANGE:
            flags = list()
            for bit in SERVICE_TYPE:
                if isinstance(bit, int) and service_type & bit:
                    flags.append(SERVICE_TYPE[bit])
            changes['ServiceType'] = flags if flags else service_type
        if start_type is not None:
            if start_type.lower() in SERVICE_START_TYPE:
                start_type = SERVICE_START_TYPE[start_type.lower()]
            else:
                raise CommandExecutionError(
                    'Invalid Start Type: {0}'.format(start_type))
            changes['StartType'] = SERVICE_START_TYPE[start_type]
        else:
            start_type = win32service.SERVICE_NO_CHANGE
        if error_control is not None:
            if error_control.lower() in SERVICE_ERROR_CONTROL:
                error_control = SERVICE_ERROR_CONTROL[error_control.lower()]
            else:
                raise CommandExecutionError(
                    'Invalid Error Control: {0}'.format(error_control))
            changes['ErrorControl'] = SERVICE_ERROR_CONTROL[error_control]
        else:
            error_control = win32service.SERVICE_NO_CHANGE
        if account_name is not None:
            changes['ServiceAccount'] = account_name
            # Built-in accounts take an empty password
            if account_name in ['LocalSystem', 'LocalService', 'NetworkService']:
                account_password = ''
        if account_password is not None:
            # Never echo the real password back in the changes dict
            changes['ServiceAccountPassword'] = 'XXX-REDACTED-XXX'
        if load_order_group is not None:
            changes['LoadOrderGroup'] = load_order_group
        if dependencies is not None:
            changes['Dependencies'] = dependencies
        if display_name is not None:
            changes['DisplayName'] = display_name
        win32service.ChangeServiceConfig(handle_svc,
                                         service_type,
                                         start_type,
                                         error_control,
                                         bin_path,
                                         load_order_group,
                                         0,
                                         dependencies,
                                         account_name,
                                         account_password,
                                         display_name)
        if description is not None:
            win32service.ChangeServiceConfig2(
                handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION, description)
            changes['Description'] = description
        if start_delayed is not None:
            # You can only set delayed start for services that are set to auto
            # start. Start type 2 is Auto; SERVICE_NO_CHANGE means start_type
            # was not passed, so fall back to the service's current start type.
            # NOTE: compare against the constant itself, not -1 —
            # SERVICE_NO_CHANGE is DWORD 0xFFFFFFFF and is not guaranteed to be
            # exposed as -1 by pywin32.
            if ((start_type == win32service.SERVICE_NO_CHANGE and
                 config_info[1] == 2) or start_type == 2):
                win32service.ChangeServiceConfig2(
                    handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO,
                    start_delayed)
                changes['StartTypeDelayed'] = start_delayed
            else:
                changes['Warning'] = 'start_delayed: Requires start_type "auto"'
        return changes
    finally:
        win32service.CloseServiceHandle(handle_scm)
        win32service.CloseServiceHandle(handle_svc)
|
Modify a service's parameters. Changes will not be made for parameters that
are not passed.
.. versionadded:: 2016.11.0
Args:
name (str):
The name of the service. Can be found using the
``service.get_service_name`` function
bin_path (str):
The path to the service executable. Backslashes must be escaped, eg:
``C:\\path\\to\\binary.exe``
exe_args (str):
Any arguments required by the service executable
display_name (str):
The name to display in the service manager
description (str):
The description to display for the service
service_type (str):
Specifies the service type. Default is ``own``. Valid options are as
follows:
- kernel: Driver service
- filesystem: File system driver service
- adapter: Adapter driver service (reserved)
- recognizer: Recognizer driver service (reserved)
- own (default): Service runs in its own process
- share: Service shares a process with one or more other services
start_type (str):
Specifies the service start type. Valid options are as follows:
- boot: Device driver that is loaded by the boot loader
- system: Device driver that is started during kernel initialization
- auto: Service that automatically starts
- manual: Service must be started manually
- disabled: Service cannot be started
start_delayed (bool):
Set the service to Auto(Delayed Start). Only valid if the start_type
is set to ``Auto``. If service_type is not passed, but the service
is already set to ``Auto``, then the flag will be set.
error_control (str):
The severity of the error, and action taken, if this service fails
to start. Valid options are as follows:
- normal: Error is logged and a message box is displayed
- severe: Error is logged and computer attempts a restart with the
last known good configuration
- critical: Error is logged, computer attempts to restart with the
last known good configuration, system halts on failure
- ignore: Error is logged and startup continues, no notification is
given to the user
load_order_group (str):
The name of the load order group to which this service belongs
dependencies (list):
A list of services or load ordering groups that must start before
this service
account_name (str):
The name of the account under which the service should run. For
``own`` type services this should be in the ``domain\\username``
format. The following are examples of valid built-in service
accounts:
- NT Authority\\LocalService
- NT Authority\\NetworkService
- NT Authority\\LocalSystem
- .\\LocalSystem
account_password (str):
The password for the account name specified in ``account_name``. For
the above built-in accounts, this can be None. Otherwise a password
must be specified.
run_interactive (bool):
If this setting is True, the service will be allowed to interact
with the user. Not recommended for services that run with elevated
privileges.
Returns:
dict: a dictionary of changes made
CLI Example:
.. code-block:: bash
salt '*' service.modify spooler start_type=disabled
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_service.py#L862-L1100
|
[
"def _cmd_quote(cmd):\n r'''\n Helper function to properly format the path to the binary for the service\n Must be wrapped in double quotes to account for paths that have spaces. For\n example:\n\n ``\"C:\\Program Files\\Path\\to\\bin.exe\"``\n\n Args:\n cmd (str): Full path to the binary\n\n Returns:\n str: Properly quoted path to the binary\n '''\n # Remove all single and double quotes from the beginning and the end\n pattern = re.compile('^(\\\\\"|\\').*|.*(\\\\\"|\\')$')\n while pattern.match(cmd) is not None:\n cmd = cmd.strip('\"').strip('\\'')\n # Ensure the path to the binary is wrapped in double quotes to account for\n # spaces in the path\n cmd = '\"{0}\"'.format(cmd)\n return cmd\n"
] |
# -*- coding: utf-8 -*-
'''
Windows Service module.
.. versionchanged:: 2016.11.0 - Rewritten to use PyWin32
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import fnmatch
import logging
import re
import time
# Import Salt libs
import salt.utils.platform
from salt.exceptions import CommandExecutionError
# Import 3rd party libs
try:
import win32security
import win32service
import win32serviceutil
import pywintypes
HAS_WIN32_MODS = True
except ImportError:
HAS_WIN32_MODS = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'service'
# Bidirectional map for service types: win32 bit value -> display string and
# friendly keyword -> bit value (used by modify() in both directions).
# 256 ('Interactive') is decode-only; it is set via run_interactive instead.
SERVICE_TYPE = {1: 'Kernel Driver',
                2: 'File System Driver',
                4: 'Adapter Driver',
                8: 'Recognizer Driver',
                16: 'Win32 Own Process',
                32: 'Win32 Share Process',
                256: 'Interactive',
                'kernel': 1,
                'filesystem': 2,
                'adapter': 4,
                'recognizer': 8,
                'own': 16,
                'share': 32}
# Decode map for the ControlsAccepted bitmask returned by
# QueryServiceStatusEx (see info()).
SERVICE_CONTROLS = {1: 'Stop',
                    2: 'Pause/Continue',
                    4: 'Shutdown',
                    8: 'Change Parameters',
                    16: 'Netbind Change',
                    32: 'Hardware Profile Change',
                    64: 'Power Event',
                    128: 'Session Change',
                    256: 'Pre-Shutdown',
                    512: 'Time Change',
                    1024: 'Trigger Event'}
# Decode map for the CurrentState field of the service status.
SERVICE_STATE = {1: 'Stopped',
                 2: 'Start Pending',
                 3: 'Stop Pending',
                 4: 'Running',
                 5: 'Continue Pending',
                 6: 'Pause Pending',
                 7: 'Paused'}
# Decode map for Win32ExitCode values surfaced by info().
SERVICE_ERRORS = {0: 'No Error',
                  1066: 'Service Specific Error'}
# Bidirectional map for start types: keyword -> numeric start type and back.
SERVICE_START_TYPE = {'boot': 0,
                      'system': 1,
                      'auto': 2,
                      'manual': 3,
                      'disabled': 4,
                      0: 'Boot',
                      1: 'System',
                      2: 'Auto',
                      3: 'Manual',
                      4: 'Disabled'}
# Bidirectional map for error-control levels: numeric -> display string and
# keyword -> numeric (used by modify()).
SERVICE_ERROR_CONTROL = {0: 'Ignore',
                         1: 'Normal',
                         2: 'Severe',
                         3: 'Critical',
                         'ignore': 0,
                         'normal': 1,
                         'severe': 2,
                         'critical': 3}
def __virtual__():
    '''
    Load the module as ``service`` only when running on Windows with the
    PyWin32 modules available; otherwise return (False, reason).
    '''
    if salt.utils.platform.is_windows():
        if HAS_WIN32_MODS:
            return __virtualname__
        return False, 'Module win_service: failed to load win32 modules'
    return False, 'Module win_service: module only works on Windows.'
class ServiceDependencies(object):
    '''
    Helper class which provides functionality to get all dependencies and
    parents of a Windows service
    Args:
        name (str): The name of the service. This is not the display name.
            Use ``get_service_name`` to find the service name.
        all_services (callback): The name of the method which
            provides a list of all available service names as done by
            the ``win_service.get_all()`` method.
        service_info (callback): The name of the method which
            allows to pass the service name and returns a dict with meets
            the requirements ``{service_name: {'Dependencies': []}}`` as
            done by the ``win_service.info(name)`` method
    '''
    def __init__(self, name, all_services, service_info):
        # Sort for predictable behavior
        self._all_services = sorted(all_services())
        # Raises ValueError if the given name matches no known service
        self._name = self._normalize_name(self._all_services, name)
        self._service_info = self._populate_service_info(self._all_services, service_info)
    def _populate_service_info(self, all_services, service_info):
        # Build {service_name: [normalized dependency names]} for every service
        ret = {}
        for name in all_services:
            dependencies = service_info(name).get('Dependencies', [])
            # Sort for predictable behavior
            ret[name] = sorted(self._normalize_multiple_name(all_services, *dependencies))
            log.trace("Added dependencies of %s: %s", name, ret[name])
        return ret
    def _dependencies(self, name):
        # Direct dependencies of `name`, normalized against the known services
        dependencies = self._service_info.get(name, [])
        # Sort for predictable behavior
        ret = sorted(self._normalize_multiple_name(self._all_services, *dependencies))
        log.trace("Added dependencies of %s: %s", name, ret)
        return ret
    def _dependencies_recursion(self, name):
        # Transitive dependencies, deepest first; falls back to an empty
        # list if anything goes wrong while resolving
        # Using a list here to maintain order
        ret = list()
        try:
            dependencies = self._dependencies(name)
            # Indirect dependencies first so they precede their dependents
            for dependency in dependencies:
                indirect_dependencies = self._dependencies_recursion(dependency)
                for indirect_dependency in indirect_dependencies:
                    if indirect_dependency not in ret:
                        ret.append(indirect_dependency)
            for dependency in dependencies:
                if dependency not in ret:
                    ret.append(dependency)
        except Exception as e:
            log.debug(e)
            ret = list()
        return ret
    def _normalize_name(self, references, difference):
        # Resolve a single name case-insensitively against `references`;
        # raises ValueError when there is no match
        # Normalize Input
        normalized = self._normalize_multiple_name(references, difference)
        if not normalized:
            raise ValueError("The provided name '{}' does not exist".format(difference))
        return normalized[0]
    def _normalize_multiple_name(self, references, *differences):
        # Map each input name to its canonical spelling from `references`
        # (case-insensitive); unknown names are silently dropped
        # Normalize Input
        ret = list()
        for difference in differences:
            difference_str = str(difference)
            for reference in references:
                reference_str = str(reference)
                if reference_str.lower() == difference_str.lower() and reference_str not in ret:
                    ret.append(reference_str)
                    break
        return ret
    def dependencies(self, with_indirect=False):
        '''
        Return the services this service depends on; include transitive
        dependencies when ``with_indirect`` is truthy.
        '''
        normalized = self._normalize_name(self._all_services, self._name)
        if bool(with_indirect):
            ret = self._dependencies_recursion(normalized)
        else:
            ret = self._dependencies(normalized)
        log.trace("Dependencies of '%s': '%s'", normalized, ret)
        return ret
    def _parents(self, name):
        # Services that directly depend on `name`
        # Using a list here to maintain order
        ret = list()
        try:
            # Sort for predictable behavior
            for service, dependencies in sorted(self._service_info.items()):
                if name in dependencies:
                    # Move re-encountered services to the end of the list
                    if service in ret:
                        ret.remove(service)
                    ret.append(service)
        except Exception as e:
            log.debug(e)
            ret = list()
        return ret
    def _parents_recursion(self, name):
        # Transitive parents; direct parents first, indirect parents moved
        # towards the end of the list
        # Using a list here to maintain order
        ret = list()
        try:
            parents = self._parents(name)
            for parent in parents:
                if parent not in ret:
                    ret.append(parent)
            for parent in parents:
                indirect_parents = self._parents_recursion(parent)
                for indirect_parent in indirect_parents:
                    if indirect_parent in ret:
                        ret.remove(indirect_parent)
                    ret.append(indirect_parent)
        except Exception as e:
            log.debug(e)
            ret = list()
        return ret
    def parents(self, with_indirect=False):
        '''
        Return the services that depend on this service; include transitive
        parents when ``with_indirect`` is truthy.
        '''
        normalized = self._normalize_name(self._all_services, self._name)
        if bool(with_indirect):
            ret = self._parents_recursion(normalized)
        else:
            ret = self._parents(normalized)
        log.trace("Parents of '%s': '%s'", normalized, ret)
        return ret
    def start_order(self, with_deps=False, with_parents=False):
        '''
        Return the order in which services must be started: dependencies
        (optional), then this service, then parents (optional).
        '''
        ret = []
        if with_deps:
            ret.extend(self.dependencies(with_indirect=True))
        normalized = self._normalize_name(self._all_services, self._name)
        ret.append(normalized)
        if with_parents:
            ret.extend(self.parents(with_indirect=True))
        return ret
    def stop_order(self, with_deps=False, with_parents=False):
        '''
        Return the reverse of :meth:`start_order` — the order in which
        services must be stopped.
        '''
        order = self.start_order(with_deps=with_deps, with_parents=with_parents)
        order.reverse()
        return order
def _status_wait(service_name, end_time, service_states):
    '''
    Poll a service until its status leaves ``service_states`` or ``end_time``
    passes. Used by the service start and stop functions.
    .. versionadded:: 2017.7.9,2018.3.4
    Args:
        service_name (str):
            The name of the service
        end_time (float):
            A future time. e.g. time.time() + 10
        service_states (list):
            Services statuses to wait for as returned by info()
    Returns:
        dict: A dictionary containing information about the service.
    :codeauthor: Damon Atkins <https://github.com/damon-atkins>
    '''
    results = info(service_name)
    while results['Status'] in service_states and time.time() < end_time:
        # Microsoft guidance: sleep roughly one-tenth of the wait hint,
        # clamped between 1 and 10 seconds.
        # https://docs.microsoft.com/en-us/windows/desktop/services/starting-a-service
        # https://docs.microsoft.com/en-us/windows/desktop/services/stopping-a-service
        # Status_WaitHint is reported in milliseconds
        hint = results['Status_WaitHint']
        hint = hint / 1000 if hint else 0
        time.sleep(min(max(hint, 1), 10))
        results = info(service_name)
    return results
def _cmd_quote(cmd):
r'''
Helper function to properly format the path to the binary for the service
Must be wrapped in double quotes to account for paths that have spaces. For
example:
``"C:\Program Files\Path\to\bin.exe"``
Args:
cmd (str): Full path to the binary
Returns:
str: Properly quoted path to the binary
'''
# Remove all single and double quotes from the beginning and the end
pattern = re.compile('^(\\"|\').*|.*(\\"|\')$')
while pattern.match(cmd) is not None:
cmd = cmd.strip('"').strip('\'')
# Ensure the path to the binary is wrapped in double quotes to account for
# spaces in the path
cmd = '"{0}"'.format(cmd)
return cmd
def get_enabled():
    '''
    Return a list of enabled services. Enabled is defined as a service that is
    marked to Auto Start.
    Returns:
        list: A list of enabled services
    CLI Example:
    .. code-block:: bash
        salt '*' service.get_enabled
    '''
    enabled = {
        svc['ServiceName'] for svc in _get_services()
        if info(svc['ServiceName'])['StartType'] == 'Auto'
    }
    return sorted(enabled)
def get_disabled():
    '''
    Return a list of disabled services. Disabled is defined as a service that is
    marked 'Disabled' or 'Manual'.
    Returns:
        list: A list of disabled services.
    CLI Example:
    .. code-block:: bash
        salt '*' service.get_disabled
    '''
    disabled_svcs = {
        svc['ServiceName'] for svc in _get_services()
        if info(svc['ServiceName'])['StartType'] in ('Manual', 'Disabled')
    }
    return sorted(disabled_svcs)
def available(name):
    '''
    Check if a service is available on the system.
    The comparison is case-insensitive.
    Args:
        name (str): The name of the service to check
    Returns:
        bool: ``True`` if the service is available, ``False`` otherwise
    CLI Example:
    .. code-block:: bash
        salt '*' service.available <service name>
    '''
    target = name.lower()
    return any(svc.lower() == target for svc in get_all())
def missing(name):
    '''
    The inverse of service.available.
    Args:
        name (str): The name of the service to check
    Returns:
        bool: ``True`` if the service is missing, ``False`` otherwise
    CLI Example:
    .. code-block:: bash
        salt '*' service.missing <service name>
    '''
    # Delegate to available() so the check is case-insensitive, matching the
    # documented contract ("the inverse of service.available"). The previous
    # `name not in get_all()` was case-sensitive and could disagree with
    # available() for names differing only in case.
    return not available(name)
def _get_services():
    '''
    Enumerate every service registered with the Service Control Manager.
    Returns the raw list of status dicts from pywin32.
    '''
    scm_handle = win32service.OpenSCManager(
        None, None, win32service.SC_MANAGER_ENUMERATE_SERVICE)
    try:
        try:
            # Prefer the extended enumeration when this pywin32 provides it
            return win32service.EnumServicesStatusEx(scm_handle)
        except AttributeError:
            # Older pywin32: fall back to the basic enumeration
            return win32service.EnumServicesStatus(scm_handle)
    finally:
        win32service.CloseServiceHandle(scm_handle)
def get_all():
    '''
    Return all installed services
    Returns:
        list: Returns a list of all services on the system.
    CLI Example:
    .. code-block:: bash
        salt '*' service.get_all
    '''
    return sorted({svc['ServiceName'] for svc in _get_services()})
def get_service_name(*args):
    '''
    The Display Name is what is displayed in Windows when services.msc is
    executed. Each Display Name has an associated Service Name which is the
    actual name of the service. This function allows you to discover the
    Service Name by returning a dictionary of Display Names and Service Names,
    or filter by adding arguments of Display Names.
    If no args are passed, return a dict of all services where the keys are the
    service Display Names and the values are the Service Names.
    If arguments are passed, create a dict of Display Names and Service Names
    Returns:
        dict: A dictionary of display names and service names
    CLI Examples:
    .. code-block:: bash
        salt '*' service.get_service_name
        salt '*' service.get_service_name 'Google Update Service (gupdate)' 'DHCP Client'
    '''
    ret = dict()
    for svc in _get_services():
        display_name = svc['DisplayName']
        svc_name = svc['ServiceName']
        # With no filter args everything matches; otherwise accept a match on
        # the display name, the service name, or its lowercase form
        if (not args or display_name in args or svc_name in args or
                svc_name.lower() in args):
            ret[display_name] = svc_name
    return ret
def info(name):
    '''
    Get information about a service on the system
    Args:
        name (str): The name of the service. This is not the display name. Use
            ``get_service_name`` to find the service name.
    Returns:
        dict: A dictionary containing information about the service.
    Raises:
        CommandExecutionError: If the SCM cannot be reached or the service
            cannot be opened.
    CLI Example:
    .. code-block:: bash
        salt '*' service.info spooler
    '''
    try:
        handle_scm = win32service.OpenSCManager(
            None, None, win32service.SC_MANAGER_CONNECT)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed to connect to the SCM: {0}'.format(exc.strerror))
    try:
        # Open with every access right the queries below require
        handle_svc = win32service.OpenService(
            handle_scm, name,
            win32service.SERVICE_ENUMERATE_DEPENDENTS |
            win32service.SERVICE_INTERROGATE |
            win32service.SERVICE_QUERY_CONFIG |
            win32service.SERVICE_QUERY_STATUS)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed To Open {0}: {1}'.format(name, exc.strerror))
    try:
        # config_info: tuple from QueryServiceConfig, unpacked by index below
        config_info = win32service.QueryServiceConfig(handle_svc)
        # status_info: dict from QueryServiceStatusEx
        status_info = win32service.QueryServiceStatusEx(handle_svc)
        try:
            description = win32service.QueryServiceConfig2(
                handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION)
        except pywintypes.error:
            # Some services have no readable description; report that instead
            description = 'Failed to get description'
        delayed_start = win32service.QueryServiceConfig2(
            handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO)
    finally:
        win32service.CloseServiceHandle(handle_scm)
        win32service.CloseServiceHandle(handle_svc)
    ret = dict()
    try:
        # Per-service SID of the virtual 'NT Service\<name>' account
        sid = win32security.LookupAccountName(
            '', 'NT Service\\{0}'.format(name))[0]
        ret['sid'] = win32security.ConvertSidToStringSid(sid)
    except pywintypes.error:
        ret['sid'] = 'Failed to get SID'
    # Map QueryServiceConfig tuple positions to friendly keys
    ret['BinaryPath'] = config_info[3]
    ret['LoadOrderGroup'] = config_info[4]
    ret['TagID'] = config_info[5]
    ret['Dependencies'] = config_info[6]
    ret['ServiceAccount'] = config_info[7]
    ret['DisplayName'] = config_info[8]
    ret['Description'] = description
    ret['Status_ServiceCode'] = status_info['ServiceSpecificExitCode']
    ret['Status_CheckPoint'] = status_info['CheckPoint']
    ret['Status_WaitHint'] = status_info['WaitHint']
    ret['StartTypeDelayed'] = delayed_start
    # Decode the service-type bitmask; fall back to the raw value if no
    # known bits are set
    flags = list()
    for bit in SERVICE_TYPE:
        if isinstance(bit, int):
            if config_info[0] & bit:
                flags.append(SERVICE_TYPE[bit])
    ret['ServiceType'] = flags if flags else config_info[0]
    # Decode the accepted-controls bitmask the same way
    flags = list()
    for bit in SERVICE_CONTROLS:
        if status_info['ControlsAccepted'] & bit:
            flags.append(SERVICE_CONTROLS[bit])
    ret['ControlsAccepted'] = flags if flags else status_info['ControlsAccepted']
    # For the remaining fields, translate known numeric codes to strings and
    # pass unknown codes through unchanged
    try:
        ret['Status_ExitCode'] = SERVICE_ERRORS[status_info['Win32ExitCode']]
    except KeyError:
        ret['Status_ExitCode'] = status_info['Win32ExitCode']
    try:
        ret['StartType'] = SERVICE_START_TYPE[config_info[1]]
    except KeyError:
        ret['StartType'] = config_info[1]
    try:
        ret['ErrorControl'] = SERVICE_ERROR_CONTROL[config_info[2]]
    except KeyError:
        ret['ErrorControl'] = config_info[2]
    try:
        ret['Status'] = SERVICE_STATE[status_info['CurrentState']]
    except KeyError:
        ret['Status'] = status_info['CurrentState']
    return ret
def start(name, timeout=90, with_deps=False, with_parents=False):
    '''
    Start the specified service.
    .. warning::
        You cannot start a disabled service in Windows. If the service is
        disabled, it will be changed to ``Manual`` start.
    Args:
        name (str): The name of the service to start
        timeout (int):
            The time in seconds to wait for the service to start before
            returning. Default is 90 seconds
            .. versionadded:: 2017.7.9,2018.3.4
        with_deps (bool):
            If enabled start the given service and the services the current
            service depends on.
        with_parents (bool):
            If enabled and in case other running services depend on the to be start
            service, this flag indicates that those other services will be started
            as well.
    Returns:
        bool: ``True`` if successful, otherwise ``False``. Also returns ``True``
        if the service is already started
    CLI Example:
    .. code-block:: bash
        salt '*' service.start <service name>
    '''
    # Set the service to manual if disabled
    if disabled(name):
        modify(name, start_type='Manual')
    ret = set()
    # Using a list here to maintain order
    services = ServiceDependencies(name, get_all, info)
    # Dependencies (and optionally parents) first, in start order
    start = services.start_order(with_deps=with_deps, with_parents=with_parents)
    log.debug("Starting services %s", start)
    for name in start:
        try:
            win32serviceutil.StartService(name)
        except pywintypes.error as exc:
            # winerror 1056: the service is already running — treat as success
            if exc.winerror != 1056:
                raise CommandExecutionError(
                    'Failed To Start {0}: {1}'.format(name, exc.strerror))
            log.debug('Service "%s" is running', name)
        # Wait until the service leaves 'Start Pending'/'Stopped' or the
        # timeout expires
        srv_status = _status_wait(service_name=name,
                                  end_time=time.time() + int(timeout),
                                  service_states=['Start Pending', 'Stopped'])
        ret.add(srv_status['Status'] == 'Running')
    # True only if every service in the chain ended up Running
    return False not in ret
def stop(name, timeout=90, with_deps=False, with_parents=False):
    '''
    Stop the specified service
    Args:
        name (str): The name of the service to stop
        timeout (int):
            The time in seconds to wait for the service to stop before
            returning. Default is 90 seconds
            .. versionadded:: 2017.7.9,2018.3.4
        with_deps (bool):
            If enabled stop the given service and the services
            the current service depends on.
        with_parents (bool):
            If enabled and in case other running services depend on the to be stopped
            service, this flag indicates that those other services will be stopped
            as well.
            If disabled, the service stop will fail in case other running services
            depend on the to be stopped service.
    Returns:
        bool: ``True`` if successful, otherwise ``False``. Also returns ``True``
        if the service is already stopped
    CLI Example:
    .. code-block:: bash
        salt '*' service.stop <service name>
    '''
    ret = set()
    services = ServiceDependencies(name, get_all, info)
    # Parents (and optionally dependencies) first, in stop order
    stop = services.stop_order(with_deps=with_deps, with_parents=with_parents)
    log.debug("Stopping services %s", stop)
    for name in stop:
        try:
            win32serviceutil.StopService(name)
        except pywintypes.error as exc:
            # winerror 1062: the service is not running — treat as success
            if exc.winerror != 1062:
                raise CommandExecutionError(
                    'Failed To Stop {0}: {1}'.format(name, exc.strerror))
            log.debug('Service "%s" is not running', name)
        # Wait until the service leaves 'Running'/'Stop Pending' or the
        # timeout expires
        srv_status = _status_wait(service_name=name,
                                  end_time=time.time() + int(timeout),
                                  service_states=['Running', 'Stop Pending'])
        ret.add(srv_status['Status'] == 'Stopped')
    # True only if every service in the chain ended up Stopped
    return False not in ret
def restart(name, timeout=90, with_deps=False, with_parents=False):
    '''
    Restart the named service. This issues a stop command followed by a start.
    Args:
        name: The name of the service to restart.
            .. note::
                If the name passed is ``salt-minion`` a scheduled task is
                created and executed to restart the salt-minion service.
        timeout (int):
            The time in seconds to wait for the service to stop and start before
            returning. Default is 90 seconds
            .. note::
                The timeout is cumulative meaning it is applied to the stop and
                then to the start command. A timeout of 90 could take up to 180
                seconds if the service is long in stopping and starting
            .. versionadded:: 2017.7.9,2018.3.4
        with_deps (bool):
            If enabled restart the given service and the services
            the current service depends on.
        with_parents (bool):
            If enabled and in case other running services depend on the to be
            restarted service, this flag indicates that those other services
            will be restarted as well.
            If disabled, the service restart will fail in case other running
            services depend on the to be restarted service.
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' service.restart <service name>
    '''
    # Restarting the minion from within the minion would kill this process;
    # hand the job off to a scheduled task instead
    if 'salt-minion' in name:
        create_win_salt_restart_task()
        return execute_salt_restart_task()
    # Always run both phases; report success only if both succeeded
    stopped = stop(name=name, timeout=timeout, with_deps=with_deps,
                   with_parents=with_parents)
    started = start(name=name, timeout=timeout, with_deps=with_deps,
                    with_parents=with_parents)
    return stopped and started
def create_win_salt_restart_task():
    '''
    Create a task in Windows task scheduler to enable restarting the salt-minion
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' service.create_win_salt_restart_task()
    '''
    # The ping gives the calling minion a few seconds to finish before the
    # service is bounced
    task_args = ('/c ping -n 3 127.0.0.1 && net stop salt-minion && net start '
                 'salt-minion')
    return __salt__['task.create_task'](name='restart-salt-minion',
                                        user_name='System',
                                        force=True,
                                        action_type='Execute',
                                        cmd='cmd',
                                        arguments=task_args,
                                        trigger_type='Once',
                                        start_date='1975-01-01',
                                        start_time='01:00')
def execute_salt_restart_task():
    '''
    Run the Windows Salt restart task
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' service.execute_salt_restart_task()
    '''
    # The task itself is created by create_win_salt_restart_task()
    result = __salt__['task.run'](name='restart-salt-minion')
    return result
def status(name, *args, **kwargs):
    '''
    Return the status for a service.
    If the name contains globbing, a dict mapping service name to True/False
    values is returned.
    .. versionchanged:: 2018.3.0
        The service name can now be a glob (e.g. ``salt*``)
    Args:
        name (str): The name of the service to check
    Returns:
        bool: True if running, False otherwise
        dict: Maps service name to True if running, False otherwise
    CLI Example:
    .. code-block:: bash
        salt '*' service.status <service name>
    '''
    all_services = get_all()
    # Glob characters mean the caller wants a dict of matching services
    is_glob = bool(re.search(r'\*|\?|\[.+\]', name))
    targets = fnmatch.filter(all_services, name) if is_glob else [name]
    results = {
        svc: info(svc)['Status'] in ['Running', 'Stop Pending']
        for svc in targets
    }
    return results if is_glob else results[name]
def getsid(name):
    '''
    Return the SID for this windows service
    Args:
        name (str): The name of the service for which to return the SID
    Returns:
        str: A string representing the SID for the service
    CLI Example:
    .. code-block:: bash
        salt '*' service.getsid <service name>
    '''
    service_info = info(name)
    return service_info['sid']
def enable(name, start_type='auto', start_delayed=False, **kwargs):
    '''
    Enable the named service to start at boot
    Args:
        name (str): The name of the service to enable.
        start_type (str): Specifies the service start type. Valid options are
            as follows:
            - boot: Device driver that is loaded by the boot loader
            - system: Device driver that is started during kernel initialization
            - auto: Service that automatically starts
            - manual: Service must be started manually
            - disabled: Service cannot be started
        start_delayed (bool): Set the service to Auto(Delayed Start). Only
            valid if the start_type is set to ``Auto``. If service_type is not
            passed, but the service is already set to ``Auto``, then the flag
            will be set.
    Returns:
        bool: ``True`` if successful, ``False`` otherwise
    CLI Example:
    .. code-block:: bash
        salt '*' service.enable <service name>
    '''
    modify(name, start_type=start_type, start_delayed=start_delayed)
    current = info(name)
    start_type_matches = current['StartType'].lower() == start_type.lower()
    if start_type.lower() != 'auto':
        return start_type_matches
    # For Auto start the delayed-start flag must match as well
    return start_type_matches and current['StartTypeDelayed'] == start_delayed
def disable(name, **kwargs):
    '''
    Disable the named service to start at boot
    Args:
        name (str): The name of the service to disable
    Returns:
        bool: ``True`` if disabled, ``False`` otherwise
    CLI Example:
    .. code-block:: bash
        salt '*' service.disable <service name>
    '''
    modify(name, start_type='Disabled')
    current = info(name)
    return current['StartType'] == 'Disabled'
def enabled(name, **kwargs):
    '''
    Check to see if the named service is enabled to start on boot
    Args:
        name (str): The name of the service to check
    Returns:
        bool: True if the service is set to start
    CLI Example:
    .. code-block:: bash
        salt '*' service.enabled <service name>
    '''
    start_type = info(name)['StartType']
    return start_type == 'Auto'
def disabled(name):
    '''
    Check to see if the named service is disabled to start on boot
    Args:
        name (str): The name of the service to check
    Returns:
        bool: True if the service is disabled
    CLI Example:
    .. code-block:: bash
        salt '*' service.disabled <service name>
    '''
    is_enabled = enabled(name)
    return not is_enabled
def create(name,
           bin_path,
           exe_args=None,
           display_name=None,
           description=None,
           service_type='own',
           start_type='manual',
           start_delayed=False,
           error_control='normal',
           load_order_group=None,
           dependencies=None,
           account_name='.\\LocalSystem',
           account_password=None,
           run_interactive=False,
           **kwargs):
    '''
    Create the named service.
    .. versionadded:: 2015.8.0
    Args:
        name (str):
            Specifies the service name. This is not the display_name
        bin_path (str):
            Specifies the path to the service binary file. Backslashes must be
            escaped, eg: ``C:\\path\\to\\binary.exe``
        exe_args (str):
            Any additional arguments required by the service binary.
        display_name (str):
            The name to be displayed in the service manager. If not passed, the
            ``name`` will be used
        description (str):
            A description of the service
        service_type (str):
            Specifies the service type. Default is ``own``. Valid options are as
            follows:
            - kernel: Driver service
            - filesystem: File system driver service
            - adapter: Adapter driver service (reserved)
            - recognizer: Recognizer driver service (reserved)
            - own (default): Service runs in its own process
            - share: Service shares a process with one or more other services
        start_type (str):
            Specifies the service start type. Valid options are as follows:
            - boot: Device driver that is loaded by the boot loader
            - system: Device driver that is started during kernel initialization
            - auto: Service that automatically starts
            - manual (default): Service must be started manually
            - disabled: Service cannot be started
        start_delayed (bool):
            Set the service to Auto(Delayed Start). Only valid if the start_type
            is set to ``Auto``. If service_type is not passed, but the service
            is already set to ``Auto``, then the flag will be set. Default is
            ``False``
        error_control (str):
            The severity of the error, and action taken, if this service fails
            to start. Valid options are as follows:
            - normal (normal): Error is logged and a message box is displayed
            - severe: Error is logged and computer attempts a restart with the
              last known good configuration
            - critical: Error is logged, computer attempts to restart with the
              last known good configuration, system halts on failure
            - ignore: Error is logged and startup continues, no notification is
              given to the user
        load_order_group (str):
            The name of the load order group to which this service belongs
        dependencies (list):
            A list of services or load ordering groups that must start before
            this service
        account_name (str):
            The name of the account under which the service should run. For
            ``own`` type services this should be in the ``domain\\username``
            format. The following are examples of valid built-in service
            accounts:
            - NT Authority\\LocalService
            - NT Authority\\NetworkService
            - NT Authority\\LocalSystem
            - .\\LocalSystem
        account_password (str):
            The password for the account name specified in ``account_name``. For
            the above built-in accounts, this can be None. Otherwise a password
            must be specified.
        run_interactive (bool):
            If this setting is True, the service will be allowed to interact
            with the user. Not recommended for services that run with elevated
            privileges.
    Returns:
        dict: A dictionary containing information about the new service
    CLI Example:
    .. code-block:: bash
        salt '*' service.create <service name> <path to exe> display_name='<display name>'
    '''
    if display_name is None:
        display_name = name
    # Test if the service already exists
    if name in get_all():
        raise CommandExecutionError('Service Already Exists: {0}'.format(name))
    # shlex.quote the path to the binary
    bin_path = _cmd_quote(bin_path)
    if exe_args is not None:
        bin_path = '{0} {1}'.format(bin_path, exe_args)
    # Map the friendly service_type string to its win32 bit flag; the
    # interactive flag is OR'd on top of the base type
    if service_type.lower() in SERVICE_TYPE:
        service_type = SERVICE_TYPE[service_type.lower()]
        if run_interactive:
            service_type = service_type | \
                win32service.SERVICE_INTERACTIVE_PROCESS
    else:
        raise CommandExecutionError(
            'Invalid Service Type: {0}'.format(service_type))
    # Map the friendly start_type string to its win32 value
    if start_type.lower() in SERVICE_START_TYPE:
        start_type = SERVICE_START_TYPE[start_type.lower()]
    else:
        raise CommandExecutionError(
            'Invalid Start Type: {0}'.format(start_type))
    # Map the friendly error_control string to its win32 value
    if error_control.lower() in SERVICE_ERROR_CONTROL:
        error_control = SERVICE_ERROR_CONTROL[error_control.lower()]
    else:
        raise CommandExecutionError(
            'Invalid Error Control: {0}'.format(error_control))
    # Delayed start is only valid for the Auto start type (mapped value 2)
    if start_delayed:
        if start_type != 2:
            raise CommandExecutionError(
                'Invalid Parameter: start_delayed requires start_type "auto"')
    # The built-in service accounts take an empty password
    if account_name in ['LocalSystem', '.\\LocalSystem',
                        'LocalService', '.\\LocalService',
                        'NetworkService', '.\\NetworkService']:
        account_password = ''
    # Connect to Service Control Manager
    handle_scm = win32service.OpenSCManager(
        None, None, win32service.SC_MANAGER_ALL_ACCESS)
    # Create the service
    handle_svc = win32service.CreateService(handle_scm,
                                            name,
                                            display_name,
                                            win32service.SERVICE_ALL_ACCESS,
                                            service_type,
                                            start_type,
                                            error_control,
                                            bin_path,
                                            load_order_group,
                                            0,
                                            dependencies,
                                            account_name,
                                            account_password)
    # Description and delayed-start cannot be set via CreateService itself;
    # they require separate ChangeServiceConfig2 calls on the new handle
    if description is not None:
        win32service.ChangeServiceConfig2(
            handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION, description)
    if start_delayed is not None:
        # You can only set delayed start for services that are set to auto start
        # Start type 2 is Auto
        if start_type == 2:
            win32service.ChangeServiceConfig2(
                handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO,
                start_delayed)
    win32service.CloseServiceHandle(handle_scm)
    win32service.CloseServiceHandle(handle_svc)
    return info(name)
def delete(name, timeout=90):
    '''
    Delete the named service
    Args:
        name (str): The name of the service to delete
        timeout (int):
            The time in seconds to wait for the service to be deleted before
            returning. This is necessary because a service must be stopped
            before it can be deleted. Default is 90 seconds
            .. versionadded:: 2017.7.9,2018.3.4
    Returns:
        bool: ``True`` if successful, otherwise ``False``. Also returns ``True``
        if the service is not present
    CLI Example:
    .. code-block:: bash
        salt '*' service.delete <service name>
    '''
    handle_scm = win32service.OpenSCManager(
        None, None, win32service.SC_MANAGER_CONNECT)
    try:
        handle_svc = win32service.OpenService(
            handle_scm, name, win32service.SERVICE_ALL_ACCESS)
    except pywintypes.error as exc:
        win32service.CloseServiceHandle(handle_scm)
        # winerror 1060 is ERROR_SERVICE_DOES_NOT_EXIST; anything else is a
        # genuine failure to open the service
        if exc.winerror != 1060:
            raise CommandExecutionError(
                'Failed to open {0}. {1}'.format(name, exc.strerror))
        # The service is already absent, which is the desired end state
        log.debug('Service "%s" is not present', name)
        return True
    try:
        win32service.DeleteService(handle_svc)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed to delete {0}. {1}'.format(name, exc.strerror))
    finally:
        # Always release both handles, even when DeleteService fails
        log.debug('Cleaning up')
        win32service.CloseServiceHandle(handle_scm)
        win32service.CloseServiceHandle(handle_svc)
    # Deletion may not take effect immediately (e.g. while the service is
    # still running), so poll until it disappears or the timeout expires
    end_time = time.time() + int(timeout)
    while name in get_all() and time.time() < end_time:
        time.sleep(1)
    return name not in get_all()
|
saltstack/salt
|
salt/modules/win_service.py
|
enable
|
python
|
def enable(name, start_type='auto', start_delayed=False, **kwargs):
'''
Enable the named service to start at boot
Args:
name (str): The name of the service to enable.
start_type (str): Specifies the service start type. Valid options are as
follows:
- boot: Device driver that is loaded by the boot loader
- system: Device driver that is started during kernel initialization
- auto: Service that automatically starts
- manual: Service must be started manually
- disabled: Service cannot be started
start_delayed (bool): Set the service to Auto(Delayed Start). Only valid
if the start_type is set to ``Auto``. If service_type is not passed,
but the service is already set to ``Auto``, then the flag will be
set.
Returns:
bool: ``True`` if successful, ``False`` otherwise
CLI Example:
.. code-block:: bash
salt '*' service.enable <service name>
'''
modify(name, start_type=start_type, start_delayed=start_delayed)
svcstat = info(name)
if start_type.lower() == 'auto':
return svcstat['StartType'].lower() == start_type.lower() and svcstat['StartTypeDelayed'] == start_delayed
else:
return svcstat['StartType'].lower() == start_type.lower()
|
Enable the named service to start at boot
Args:
name (str): The name of the service to enable.
start_type (str): Specifies the service start type. Valid options are as
follows:
- boot: Device driver that is loaded by the boot loader
- system: Device driver that is started during kernel initialization
- auto: Service that automatically starts
- manual: Service must be started manually
- disabled: Service cannot be started
start_delayed (bool): Set the service to Auto(Delayed Start). Only valid
if the start_type is set to ``Auto``. If service_type is not passed,
but the service is already set to ``Auto``, then the flag will be
set.
Returns:
bool: ``True`` if successful, ``False`` otherwise
CLI Example:
.. code-block:: bash
salt '*' service.enable <service name>
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_service.py#L1103-L1139
|
[
"def info(name):\n '''\n Get information about a service on the system\n\n Args:\n name (str): The name of the service. This is not the display name. Use\n ``get_service_name`` to find the service name.\n\n Returns:\n dict: A dictionary containing information about the service.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.info spooler\n '''\n try:\n handle_scm = win32service.OpenSCManager(\n None, None, win32service.SC_MANAGER_CONNECT)\n except pywintypes.error as exc:\n raise CommandExecutionError(\n 'Failed to connect to the SCM: {0}'.format(exc.strerror))\n\n try:\n handle_svc = win32service.OpenService(\n handle_scm, name,\n win32service.SERVICE_ENUMERATE_DEPENDENTS |\n win32service.SERVICE_INTERROGATE |\n win32service.SERVICE_QUERY_CONFIG |\n win32service.SERVICE_QUERY_STATUS)\n except pywintypes.error as exc:\n raise CommandExecutionError(\n 'Failed To Open {0}: {1}'.format(name, exc.strerror))\n\n try:\n config_info = win32service.QueryServiceConfig(handle_svc)\n status_info = win32service.QueryServiceStatusEx(handle_svc)\n\n try:\n description = win32service.QueryServiceConfig2(\n handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION)\n except pywintypes.error:\n description = 'Failed to get description'\n\n delayed_start = win32service.QueryServiceConfig2(\n handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO)\n finally:\n win32service.CloseServiceHandle(handle_scm)\n win32service.CloseServiceHandle(handle_svc)\n\n ret = dict()\n try:\n sid = win32security.LookupAccountName(\n '', 'NT Service\\\\{0}'.format(name))[0]\n ret['sid'] = win32security.ConvertSidToStringSid(sid)\n except pywintypes.error:\n ret['sid'] = 'Failed to get SID'\n\n ret['BinaryPath'] = config_info[3]\n ret['LoadOrderGroup'] = config_info[4]\n ret['TagID'] = config_info[5]\n ret['Dependencies'] = config_info[6]\n ret['ServiceAccount'] = config_info[7]\n ret['DisplayName'] = config_info[8]\n ret['Description'] = description\n ret['Status_ServiceCode'] = 
status_info['ServiceSpecificExitCode']\n ret['Status_CheckPoint'] = status_info['CheckPoint']\n ret['Status_WaitHint'] = status_info['WaitHint']\n ret['StartTypeDelayed'] = delayed_start\n\n flags = list()\n for bit in SERVICE_TYPE:\n if isinstance(bit, int):\n if config_info[0] & bit:\n flags.append(SERVICE_TYPE[bit])\n\n ret['ServiceType'] = flags if flags else config_info[0]\n\n flags = list()\n for bit in SERVICE_CONTROLS:\n if status_info['ControlsAccepted'] & bit:\n flags.append(SERVICE_CONTROLS[bit])\n\n ret['ControlsAccepted'] = flags if flags else status_info['ControlsAccepted']\n\n try:\n ret['Status_ExitCode'] = SERVICE_ERRORS[status_info['Win32ExitCode']]\n except KeyError:\n ret['Status_ExitCode'] = status_info['Win32ExitCode']\n\n try:\n ret['StartType'] = SERVICE_START_TYPE[config_info[1]]\n except KeyError:\n ret['StartType'] = config_info[1]\n\n try:\n ret['ErrorControl'] = SERVICE_ERROR_CONTROL[config_info[2]]\n except KeyError:\n ret['ErrorControl'] = config_info[2]\n\n try:\n ret['Status'] = SERVICE_STATE[status_info['CurrentState']]\n except KeyError:\n ret['Status'] = status_info['CurrentState']\n\n return ret\n",
"def modify(name,\n bin_path=None,\n exe_args=None,\n display_name=None,\n description=None,\n service_type=None,\n start_type=None,\n start_delayed=None,\n error_control=None,\n load_order_group=None,\n dependencies=None,\n account_name=None,\n account_password=None,\n run_interactive=None):\n # pylint: disable=anomalous-backslash-in-string\n '''\n Modify a service's parameters. Changes will not be made for parameters that\n are not passed.\n\n .. versionadded:: 2016.11.0\n\n Args:\n name (str):\n The name of the service. Can be found using the\n ``service.get_service_name`` function\n\n bin_path (str):\n The path to the service executable. Backslashes must be escaped, eg:\n ``C:\\\\path\\\\to\\\\binary.exe``\n\n exe_args (str):\n Any arguments required by the service executable\n\n display_name (str):\n The name to display in the service manager\n\n description (str):\n The description to display for the service\n\n service_type (str):\n Specifies the service type. Default is ``own``. Valid options are as\n follows:\n\n - kernel: Driver service\n - filesystem: File system driver service\n - adapter: Adapter driver service (reserved)\n - recognizer: Recognizer driver service (reserved)\n - own (default): Service runs in its own process\n - share: Service shares a process with one or more other services\n\n start_type (str):\n Specifies the service start type. Valid options are as follows:\n\n - boot: Device driver that is loaded by the boot loader\n - system: Device driver that is started during kernel initialization\n - auto: Service that automatically starts\n - manual: Service must be started manually\n - disabled: Service cannot be started\n\n start_delayed (bool):\n Set the service to Auto(Delayed Start). Only valid if the start_type\n is set to ``Auto``. 
If service_type is not passed, but the service\n is already set to ``Auto``, then the flag will be set.\n\n error_control (str):\n The severity of the error, and action taken, if this service fails\n to start. Valid options are as follows:\n\n - normal: Error is logged and a message box is displayed\n - severe: Error is logged and computer attempts a restart with the\n last known good configuration\n - critical: Error is logged, computer attempts to restart with the\n last known good configuration, system halts on failure\n - ignore: Error is logged and startup continues, no notification is\n given to the user\n\n load_order_group (str):\n The name of the load order group to which this service belongs\n\n dependencies (list):\n A list of services or load ordering groups that must start before\n this service\n\n account_name (str):\n The name of the account under which the service should run. For\n ``own`` type services this should be in the ``domain\\\\username``\n format. The following are examples of valid built-in service\n accounts:\n\n - NT Authority\\\\LocalService\n - NT Authority\\\\NetworkService\n - NT Authority\\\\LocalSystem\n - .\\LocalSystem\n\n account_password (str):\n The password for the account name specified in ``account_name``. For\n the above built-in accounts, this can be None. Otherwise a password\n must be specified.\n\n run_interactive (bool):\n If this setting is True, the service will be allowed to interact\n with the user. Not recommended for services that run with elevated\n privileges.\n\n Returns:\n dict: a dictionary of changes made\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' service.modify spooler start_type=disabled\n\n '''\n # pylint: enable=anomalous-backslash-in-string\n # https://msdn.microsoft.com/en-us/library/windows/desktop/ms681987(v=vs.85).aspx\n # https://msdn.microsoft.com/en-us/library/windows/desktop/ms681988(v-vs.85).aspx\n handle_scm = win32service.OpenSCManager(\n None, None, win32service.SC_MANAGER_CONNECT)\n\n try:\n handle_svc = win32service.OpenService(\n handle_scm,\n name,\n win32service.SERVICE_CHANGE_CONFIG |\n win32service.SERVICE_QUERY_CONFIG)\n except pywintypes.error as exc:\n raise CommandExecutionError(\n 'Failed To Open {0}: {1}'.format(name, exc.strerror))\n\n config_info = win32service.QueryServiceConfig(handle_svc)\n\n changes = dict()\n\n # Input Validation\n if bin_path is not None:\n # shlex.quote the path to the binary\n bin_path = _cmd_quote(bin_path)\n if exe_args is not None:\n bin_path = '{0} {1}'.format(bin_path, exe_args)\n changes['BinaryPath'] = bin_path\n\n if service_type is not None:\n if service_type.lower() in SERVICE_TYPE:\n service_type = SERVICE_TYPE[service_type.lower()]\n if run_interactive:\n service_type = service_type | \\\n win32service.SERVICE_INTERACTIVE_PROCESS\n else:\n raise CommandExecutionError(\n 'Invalid Service Type: {0}'.format(service_type))\n else:\n if run_interactive is True:\n service_type = config_info[0] | \\\n win32service.SERVICE_INTERACTIVE_PROCESS\n elif run_interactive is False:\n service_type = config_info[0] ^ \\\n win32service.SERVICE_INTERACTIVE_PROCESS\n else:\n service_type = win32service.SERVICE_NO_CHANGE\n\n if service_type is not win32service.SERVICE_NO_CHANGE:\n flags = list()\n for bit in SERVICE_TYPE:\n if isinstance(bit, int) and service_type & bit:\n flags.append(SERVICE_TYPE[bit])\n\n changes['ServiceType'] = flags if flags else service_type\n\n if start_type is not None:\n if start_type.lower() in SERVICE_START_TYPE:\n start_type = SERVICE_START_TYPE[start_type.lower()]\n else:\n raise 
CommandExecutionError(\n 'Invalid Start Type: {0}'.format(start_type))\n changes['StartType'] = SERVICE_START_TYPE[start_type]\n else:\n start_type = win32service.SERVICE_NO_CHANGE\n\n if error_control is not None:\n if error_control.lower() in SERVICE_ERROR_CONTROL:\n error_control = SERVICE_ERROR_CONTROL[error_control.lower()]\n else:\n raise CommandExecutionError(\n 'Invalid Error Control: {0}'.format(error_control))\n changes['ErrorControl'] = SERVICE_ERROR_CONTROL[error_control]\n else:\n error_control = win32service.SERVICE_NO_CHANGE\n\n if account_name is not None:\n changes['ServiceAccount'] = account_name\n if account_name in ['LocalSystem', 'LocalService', 'NetworkService']:\n account_password = ''\n\n if account_password is not None:\n changes['ServiceAccountPassword'] = 'XXX-REDACTED-XXX'\n\n if load_order_group is not None:\n changes['LoadOrderGroup'] = load_order_group\n\n if dependencies is not None:\n changes['Dependencies'] = dependencies\n\n if display_name is not None:\n changes['DisplayName'] = display_name\n\n win32service.ChangeServiceConfig(handle_svc,\n service_type,\n start_type,\n error_control,\n bin_path,\n load_order_group,\n 0,\n dependencies,\n account_name,\n account_password,\n display_name)\n\n if description is not None:\n win32service.ChangeServiceConfig2(\n handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION, description)\n changes['Description'] = description\n\n if start_delayed is not None:\n # You can only set delayed start for services that are set to auto start\n # Start type 2 is Auto\n # Start type -1 is no change\n if (start_type == -1 and config_info[1] == 2) or start_type == 2:\n win32service.ChangeServiceConfig2(\n handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO,\n start_delayed)\n changes['StartTypeDelayed'] = start_delayed\n else:\n changes['Warning'] = 'start_delayed: Requires start_type \"auto\"'\n\n win32service.CloseServiceHandle(handle_scm)\n win32service.CloseServiceHandle(handle_svc)\n\n 
return changes\n"
] |
# -*- coding: utf-8 -*-
'''
Windows Service module.
.. versionchanged:: 2016.11.0 - Rewritten to use PyWin32
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import fnmatch
import logging
import re
import time
# Import Salt libs
import salt.utils.platform
from salt.exceptions import CommandExecutionError
# Import 3rd party libs
try:
import win32security
import win32service
import win32serviceutil
import pywintypes
HAS_WIN32_MODS = True
except ImportError:
HAS_WIN32_MODS = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'service'
# Service type bit masks used by the win32 service API. Integer keys
# translate API values back to display strings (used by info()); the
# lowercase string keys translate the options accepted by create()/modify()
# into API values.
SERVICE_TYPE = {1: 'Kernel Driver',
                2: 'File System Driver',
                4: 'Adapter Driver',
                8: 'Recognizer Driver',
                16: 'Win32 Own Process',
                32: 'Win32 Share Process',
                256: 'Interactive',
                'kernel': 1,
                'filesystem': 2,
                'adapter': 4,
                'recognizer': 8,
                'own': 16,
                'share': 32}
# Bit masks for the controls a service accepts ('ControlsAccepted' from
# QueryServiceStatusEx); info() turns these into a list of flag names
SERVICE_CONTROLS = {1: 'Stop',
                    2: 'Pause/Continue',
                    4: 'Shutdown',
                    8: 'Change Parameters',
                    16: 'Netbind Change',
                    32: 'Hardware Profile Change',
                    64: 'Power Event',
                    128: 'Session Change',
                    256: 'Pre-Shutdown',
                    512: 'Time Change',
                    1024: 'Trigger Event'}
# Service state values ('CurrentState' from QueryServiceStatusEx)
SERVICE_STATE = {1: 'Stopped',
                 2: 'Start Pending',
                 3: 'Stop Pending',
                 4: 'Running',
                 5: 'Continue Pending',
                 6: 'Pause Pending',
                 7: 'Paused'}
# Win32 exit codes given friendly names by info()
SERVICE_ERRORS = {0: 'No Error',
                  1066: 'Service Specific Error'}
# Start type mapping; bidirectional (string option -> API value and
# API value -> display string)
SERVICE_START_TYPE = {'boot': 0,
                      'system': 1,
                      'auto': 2,
                      'manual': 3,
                      'disabled': 4,
                      0: 'Boot',
                      1: 'System',
                      2: 'Auto',
                      3: 'Manual',
                      4: 'Disabled'}
# Error control mapping; also bidirectional
SERVICE_ERROR_CONTROL = {0: 'Ignore',
                         1: 'Normal',
                         2: 'Severe',
                         3: 'Critical',
                         'ignore': 0,
                         'normal': 1,
                         'severe': 2,
                         'critical': 3}
def __virtual__():
    '''
    Only works on Windows systems with PyWin32 installed
    '''
    if salt.utils.platform.is_windows():
        if HAS_WIN32_MODS:
            return __virtualname__
        return False, 'Module win_service: failed to load win32 modules'
    return False, 'Module win_service: module only works on Windows.'
class ServiceDependencies(object):
    '''
    Helper class which provides functionality to get all dependencies and
    parents of a Windows service.
    Names are matched case-insensitively against the real service names and
    the case-correct names are returned.
    Args:
        name (str): The name of the service. This is not the display name.
            Use ``get_service_name`` to find the service name.
        all_services (callback): A callable that provides a list of all
            available service names, as done by the ``win_service.get_all()``
            method.
        service_info (callback): A callable that takes a service name and
            returns a dict that meets the requirements
            ``{service_name: {'Dependencies': []}}``, as done by the
            ``win_service.info(name)`` method.
    '''
    def __init__(self, name, all_services, service_info):
        # Sort for predictable behavior
        self._all_services = sorted(all_services())
        # Resolve the requested name to its case-correct form (raises
        # ValueError if the service does not exist)
        self._name = self._normalize_name(self._all_services, name)
        # Cache of service name -> sorted list of direct dependencies,
        # built once up front for every service on the system
        self._service_info = self._populate_service_info(self._all_services, service_info)
    def _populate_service_info(self, all_services, service_info):
        '''
        Build the name -> direct-dependencies mapping for all services.
        '''
        ret = {}
        for name in all_services:
            dependencies = service_info(name).get('Dependencies', [])
            # Sort for predictable behavior
            ret[name] = sorted(self._normalize_multiple_name(all_services, *dependencies))
            log.trace("Added dependencies of %s: %s", name, ret[name])
        return ret
    def _dependencies(self, name):
        '''
        Return the direct dependencies of ``name``, normalized and sorted.
        '''
        dependencies = self._service_info.get(name, [])
        # Sort for predictable behavior
        ret = sorted(self._normalize_multiple_name(self._all_services, *dependencies))
        log.trace("Added dependencies of %s: %s", name, ret)
        return ret
    def _dependencies_recursion(self, name):
        '''
        Return direct and indirect dependencies of ``name``, deepest first,
        without duplicates.
        '''
        # Using a list here to maintain order
        ret = list()
        try:
            dependencies = self._dependencies(name)
            # Indirect dependencies first so they precede their dependents
            for dependency in dependencies:
                indirect_dependencies = self._dependencies_recursion(dependency)
                for indirect_dependency in indirect_dependencies:
                    if indirect_dependency not in ret:
                        ret.append(indirect_dependency)
            # Then the direct dependencies themselves
            for dependency in dependencies:
                if dependency not in ret:
                    ret.append(dependency)
        except Exception as e:
            # Best effort: any failure yields an empty dependency list
            log.debug(e)
            ret = list()
        return ret
    def _normalize_name(self, references, difference):
        '''
        Resolve ``difference`` to its case-correct form from ``references``;
        raises ValueError when there is no match.
        '''
        # Normalize Input
        normalized = self._normalize_multiple_name(references, difference)
        if not normalized:
            raise ValueError("The provided name '{}' does not exist".format(difference))
        return normalized[0]
    def _normalize_multiple_name(self, references, *differences):
        '''
        Case-insensitively match each of ``differences`` against
        ``references`` and return the matching reference spellings,
        without duplicates.
        '''
        # Normalize Input
        ret = list()
        for difference in differences:
            difference_str = str(difference)
            for reference in references:
                reference_str = str(reference)
                if reference_str.lower() == difference_str.lower() and reference_str not in ret:
                    ret.append(reference_str)
                    break
        return ret
    def dependencies(self, with_indirect=False):
        '''
        Return the dependencies of this service; include indirect
        (transitive) dependencies when ``with_indirect`` is truthy.
        '''
        normalized = self._normalize_name(self._all_services, self._name)
        if bool(with_indirect):
            ret = self._dependencies_recursion(normalized)
        else:
            ret = self._dependencies(normalized)
        log.trace("Dependencies of '%s': '%s'", normalized, ret)
        return ret
    def _parents(self, name):
        '''
        Return the services that directly depend on ``name``.
        '''
        # Using a list here to maintain order
        ret = list()
        try:
            # Sort for predictable behavior
            for service, dependencies in sorted(self._service_info.items()):
                if name in dependencies:
                    if service in ret:
                        ret.remove(service)
                    ret.append(service)
        except Exception as e:
            # Best effort: any failure yields an empty parent list
            log.debug(e)
            ret = list()
        return ret
    def _parents_recursion(self, name):
        '''
        Return direct and indirect parents of ``name``; indirect parents are
        moved after the parents they depend on.
        '''
        # Using a list here to maintain order
        ret = list()
        try:
            parents = self._parents(name)
            for parent in parents:
                if parent not in ret:
                    ret.append(parent)
            for parent in parents:
                indirect_parents = self._parents_recursion(parent)
                for indirect_parent in indirect_parents:
                    # Re-append so an indirect parent always follows the
                    # last parent that leads to it
                    if indirect_parent in ret:
                        ret.remove(indirect_parent)
                    ret.append(indirect_parent)
        except Exception as e:
            log.debug(e)
            ret = list()
        return ret
    def parents(self, with_indirect=False):
        '''
        Return the parents of this service; include indirect (transitive)
        parents when ``with_indirect`` is truthy.
        '''
        normalized = self._normalize_name(self._all_services, self._name)
        if bool(with_indirect):
            ret = self._parents_recursion(normalized)
        else:
            ret = self._parents(normalized)
        log.trace("Parents of '%s': '%s'", normalized, ret)
        return ret
    def start_order(self, with_deps=False, with_parents=False):
        '''
        Return the order in which services must be started: dependencies
        first, then this service, then its parents.
        '''
        ret = []
        if with_deps:
            ret.extend(self.dependencies(with_indirect=True))
        normalized = self._normalize_name(self._all_services, self._name)
        ret.append(normalized)
        if with_parents:
            ret.extend(self.parents(with_indirect=True))
        return ret
    def stop_order(self, with_deps=False, with_parents=False):
        '''
        Return the stop order, which is the reverse of the start order.
        '''
        order = self.start_order(with_deps=with_deps, with_parents=with_parents)
        order.reverse()
        return order
def _status_wait(service_name, end_time, service_states):
    '''
    Helper function that polls the service until its status leaves the
    provided states or ``end_time`` passes. Used for service stop and start.
    .. versionadded:: 2017.7.9,2018.3.4
    Args:
        service_name (str):
            The name of the service
        end_time (float):
            A future time. e.g. time.time() + 10
        service_states (list):
            Service statuses to wait for as returned by info()
    Returns:
        dict: A dictionary containing information about the service.
    :codeauthor: Damon Atkins <https://github.com/damon-atkins>
    '''
    ret = info(service_name)
    while ret['Status'] in service_states and time.time() < end_time:
        # Per Microsoft guidance, sleep according to the service's wait hint,
        # clamped to the range [1, 10] seconds:
        # https://docs.microsoft.com/en-us/windows/desktop/services/starting-a-service
        # https://docs.microsoft.com/en-us/windows/desktop/services/stopping-a-service
        hint = ret['Status_WaitHint']
        # The wait hint is in milliseconds; treat a missing hint as 0
        delay = hint / 1000 if hint else 0
        time.sleep(max(1, min(delay, 10)))
        ret = info(service_name)
    return ret
def _cmd_quote(cmd):
r'''
Helper function to properly format the path to the binary for the service
Must be wrapped in double quotes to account for paths that have spaces. For
example:
``"C:\Program Files\Path\to\bin.exe"``
Args:
cmd (str): Full path to the binary
Returns:
str: Properly quoted path to the binary
'''
# Remove all single and double quotes from the beginning and the end
pattern = re.compile('^(\\"|\').*|.*(\\"|\')$')
while pattern.match(cmd) is not None:
cmd = cmd.strip('"').strip('\'')
# Ensure the path to the binary is wrapped in double quotes to account for
# spaces in the path
cmd = '"{0}"'.format(cmd)
return cmd
def get_enabled():
    '''
    Return a list of enabled services. Enabled is defined as a service that is
    marked to Auto Start.
    Returns:
        list: A list of enabled services
    CLI Example:
    .. code-block:: bash
        salt '*' service.get_enabled
    '''
    return sorted({
        svc['ServiceName'] for svc in _get_services()
        if info(svc['ServiceName'])['StartType'] in ['Auto']
    })
def get_disabled():
    '''
    Return a list of disabled services. Disabled is defined as a service that
    is marked 'Disabled' or 'Manual'.
    Returns:
        list: A list of disabled services.
    CLI Example:
    .. code-block:: bash
        salt '*' service.get_disabled
    '''
    return sorted({
        svc['ServiceName'] for svc in _get_services()
        if info(svc['ServiceName'])['StartType'] in ['Manual', 'Disabled']
    })
def available(name):
    '''
    Check if a service is available on the system.
    Args:
        name (str): The name of the service to check
    Returns:
        bool: ``True`` if the service is available, ``False`` otherwise
    CLI Example:
    .. code-block:: bash
        salt '*' service.available <service name>
    '''
    # Service names are compared case-insensitively
    target = name.lower()
    return any(target == svc.lower() for svc in get_all())
def missing(name):
    '''
    The inverse of service.available.
    Args:
        name (str): The name of the service to check
    Returns:
        bool: ``True`` if the service is missing, ``False`` otherwise
    CLI Example:
    .. code-block:: bash
        salt '*' service.missing <service name>
    '''
    # Delegate to available() so both functions share the same
    # case-insensitive comparison. The previous ``name not in get_all()``
    # check was case-sensitive and could disagree with available() for a
    # name that differs only in case.
    return not available(name)
def _get_services():
    '''
    Returns a list of all services on the system.
    '''
    handle_scm = win32service.OpenSCManager(
        None, None, win32service.SC_MANAGER_ENUMERATE_SERVICE)
    try:
        # EnumServicesStatusEx may be missing from the installed pywin32
        # build (AttributeError); fall back to EnumServicesStatus then
        services = win32service.EnumServicesStatusEx(handle_scm)
    except AttributeError:
        services = win32service.EnumServicesStatus(handle_scm)
    finally:
        # Always release the SCM handle, even on failure
        win32service.CloseServiceHandle(handle_scm)
    return services
def get_all():
    '''
    Return all installed services.

    Returns:
        list: A sorted list of every service name on the system

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    '''
    return sorted({srv['ServiceName'] for srv in _get_services()})
def get_service_name(*args):
    '''
    The Display Name is what is shown when services.msc is executed; each
    Display Name has an associated Service Name which is the actual name of
    the service. This function maps Display Names to Service Names.

    If no args are passed, return a dict of all services where the keys are
    the service Display Names and the values are the Service Names. If
    arguments are passed, only entries matching one of the arguments (by
    display name, service name, or lowercased service name) are returned.

    Returns:
        dict: A dictionary of display names and service names

    CLI Examples:

    .. code-block:: bash

        salt '*' service.get_service_name
        salt '*' service.get_service_name 'Google Update Service (gupdate)' 'DHCP Client'
    '''
    services = dict()
    for raw in _get_services():
        display = raw['DisplayName']
        svc_name = raw['ServiceName']
        # No filters requested -> include everything; otherwise match on
        # display name, service name, or lowercased service name
        if not args or display in args or svc_name in args \
                or svc_name.lower() in args:
            services[display] = svc_name
    return services
def info(name):
    '''
    Get information about a service on the system

    Args:
        name (str): The name of the service. This is not the display name. Use
            ``get_service_name`` to find the service name.

    Returns:
        dict: A dictionary containing information about the service.

    Raises:
        CommandExecutionError: If the SCM cannot be contacted or the service
            cannot be opened.

    CLI Example:

    .. code-block:: bash

        salt '*' service.info spooler
    '''
    # Connect to the Service Control Manager with the minimal access right
    # needed to open individual services
    try:
        handle_scm = win32service.OpenSCManager(
            None, None, win32service.SC_MANAGER_CONNECT)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed to connect to the SCM: {0}'.format(exc.strerror))
    try:
        handle_svc = win32service.OpenService(
            handle_scm, name,
            win32service.SERVICE_ENUMERATE_DEPENDENTS |
            win32service.SERVICE_INTERROGATE |
            win32service.SERVICE_QUERY_CONFIG |
            win32service.SERVICE_QUERY_STATUS)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed To Open {0}: {1}'.format(name, exc.strerror))
    try:
        config_info = win32service.QueryServiceConfig(handle_svc)
        status_info = win32service.QueryServiceStatusEx(handle_svc)
        # The description query can fail independently (e.g. access or
        # driver services); fall back to a placeholder string
        try:
            description = win32service.QueryServiceConfig2(
                handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION)
        except pywintypes.error:
            description = 'Failed to get description'
        delayed_start = win32service.QueryServiceConfig2(
            handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO)
    finally:
        # Always release both handles, even if a query above raised
        win32service.CloseServiceHandle(handle_scm)
        win32service.CloseServiceHandle(handle_svc)
    ret = dict()
    # Per-service SIDs live under the virtual 'NT Service' account namespace
    try:
        sid = win32security.LookupAccountName(
            '', 'NT Service\\{0}'.format(name))[0]
        ret['sid'] = win32security.ConvertSidToStringSid(sid)
    except pywintypes.error:
        ret['sid'] = 'Failed to get SID'
    # QueryServiceConfig returns a positional tuple; map fields by index
    ret['BinaryPath'] = config_info[3]
    ret['LoadOrderGroup'] = config_info[4]
    ret['TagID'] = config_info[5]
    ret['Dependencies'] = config_info[6]
    ret['ServiceAccount'] = config_info[7]
    ret['DisplayName'] = config_info[8]
    ret['Description'] = description
    ret['Status_ServiceCode'] = status_info['ServiceSpecificExitCode']
    ret['Status_CheckPoint'] = status_info['CheckPoint']
    ret['Status_WaitHint'] = status_info['WaitHint']
    ret['StartTypeDelayed'] = delayed_start
    # Decode the service-type bitmask into readable flag names; keep the raw
    # value if no known bit matched. The isinstance guard skips the string
    # keys of the bidirectional SERVICE_TYPE map.
    flags = list()
    for bit in SERVICE_TYPE:
        if isinstance(bit, int):
            if config_info[0] & bit:
                flags.append(SERVICE_TYPE[bit])
    ret['ServiceType'] = flags if flags else config_info[0]
    # Decode the accepted-controls bitmask the same way
    flags = list()
    for bit in SERVICE_CONTROLS:
        if status_info['ControlsAccepted'] & bit:
            flags.append(SERVICE_CONTROLS[bit])
    ret['ControlsAccepted'] = flags if flags else status_info['ControlsAccepted']
    # For each lookup below, fall back to the raw numeric value when the
    # code is not present in the corresponding map
    try:
        ret['Status_ExitCode'] = SERVICE_ERRORS[status_info['Win32ExitCode']]
    except KeyError:
        ret['Status_ExitCode'] = status_info['Win32ExitCode']
    try:
        ret['StartType'] = SERVICE_START_TYPE[config_info[1]]
    except KeyError:
        ret['StartType'] = config_info[1]
    try:
        ret['ErrorControl'] = SERVICE_ERROR_CONTROL[config_info[2]]
    except KeyError:
        ret['ErrorControl'] = config_info[2]
    try:
        ret['Status'] = SERVICE_STATE[status_info['CurrentState']]
    except KeyError:
        ret['Status'] = status_info['CurrentState']
    return ret
def start(name, timeout=90, with_deps=False, with_parents=False):
    '''
    Start the specified service.

    .. warning::
        You cannot start a disabled service in Windows. If the service is
        disabled, it will be changed to ``Manual`` start.

    Args:
        name (str): The name of the service to start
        timeout (int):
            The time in seconds to wait for the service to start before
            returning. Default is 90 seconds

            .. versionadded:: 2017.7.9,2018.3.4
        with_deps (bool):
            If enabled start the given service and the services the current
            service depends on.
        with_parents (bool):
            If enabled and in case other running services depend on the to be
            start service, this flag indicates that those other services will
            be started as well.

    Returns:
        bool: ``True`` if successful, otherwise ``False``. Also returns
        ``True`` if the service is already started

    CLI Example:

    .. code-block:: bash

        salt '*' service.start <service name>
    '''
    # Set the service to manual if disabled
    if disabled(name):
        modify(name, start_type='Manual')
    ret = set()
    # Using a list here to maintain order
    services = ServiceDependencies(name, get_all, info)
    start = services.start_order(with_deps=with_deps, with_parents=with_parents)
    log.debug("Starting services %s", start)
    # NOTE: the loop variable deliberately rebinds ``name`` to each service
    # in the computed start order
    for name in start:
        try:
            win32serviceutil.StartService(name)
        except pywintypes.error as exc:
            # winerror 1056: service already running -- not a failure
            if exc.winerror != 1056:
                raise CommandExecutionError(
                    'Failed To Start {0}: {1}'.format(name, exc.strerror))
            log.debug('Service "%s" is running', name)
        # Block until the service leaves the transitional states or the
        # timeout expires, then record whether it ended up Running
        srv_status = _status_wait(service_name=name,
                                  end_time=time.time() + int(timeout),
                                  service_states=['Start Pending', 'Stopped'])
        ret.add(srv_status['Status'] == 'Running')
    # True only if every service in the start order reached Running
    return False not in ret
def stop(name, timeout=90, with_deps=False, with_parents=False):
    '''
    Stop the specified service

    Args:
        name (str): The name of the service to stop
        timeout (int):
            The time in seconds to wait for the service to stop before
            returning. Default is 90 seconds

            .. versionadded:: 2017.7.9,2018.3.4
        with_deps (bool):
            If enabled stop the given service and the services
            the current service depends on.
        with_parents (bool):
            If enabled and in case other running services depend on the to be
            stopped service, this flag indicates that those other services
            will be stopped as well.
            If disabled, the service stop will fail in case other running
            services depend on the to be stopped service.

    Returns:
        bool: ``True`` if successful, otherwise ``False``. Also returns
        ``True`` if the service is already stopped

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop <service name>
    '''
    ret = set()
    services = ServiceDependencies(name, get_all, info)
    stop = services.stop_order(with_deps=with_deps, with_parents=with_parents)
    log.debug("Stopping services %s", stop)
    # NOTE: the loop variable deliberately rebinds ``name`` to each service
    # in the computed stop order
    for name in stop:
        try:
            win32serviceutil.StopService(name)
        except pywintypes.error as exc:
            # winerror 1062: service has not been started -- not a failure
            if exc.winerror != 1062:
                raise CommandExecutionError(
                    'Failed To Stop {0}: {1}'.format(name, exc.strerror))
            log.debug('Service "%s" is not running', name)
        # Block until the service leaves the transitional states or the
        # timeout expires, then record whether it ended up Stopped
        srv_status = _status_wait(service_name=name,
                                  end_time=time.time() + int(timeout),
                                  service_states=['Running', 'Stop Pending'])
        ret.add(srv_status['Status'] == 'Stopped')
    # True only if every service in the stop order reached Stopped
    return False not in ret
def restart(name, timeout=90, with_deps=False, with_parents=False):
    '''
    Restart the named service. This issues a stop command followed by a
    start.

    Args:
        name: The name of the service to restart.

            .. note::
                If the name passed is ``salt-minion`` a scheduled task is
                created and executed to restart the salt-minion service.
        timeout (int):
            The time in seconds to wait for the service to stop and start
            before returning. Default is 90 seconds

            .. note::
                The timeout is cumulative meaning it is applied to the stop
                and then to the start command. A timeout of 90 could take up
                to 180 seconds if the service is long in stopping and
                starting

            .. versionadded:: 2017.7.9,2018.3.4
        with_deps (bool):
            If enabled restart the given service and the services the current
            service depends on.
        with_parents (bool):
            If enabled and in case other running services depend on the to be
            restarted service, this flag indicates that those other services
            will be restarted as well.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart <service name>
    '''
    # The salt-minion service cannot restart itself in-process; a scheduled
    # task performs the stop/start out-of-band instead
    if 'salt-minion' in name:
        create_win_salt_restart_task()
        return execute_salt_restart_task()
    stopped = stop(name=name, timeout=timeout,
                   with_deps=with_deps, with_parents=with_parents)
    started = start(name=name, timeout=timeout,
                    with_deps=with_deps, with_parents=with_parents)
    return stopped and started
def create_win_salt_restart_task():
    '''
    Create a task in Windows task scheduler to enable restarting the
    salt-minion

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' service.create_win_salt_restart_task()
    '''
    # The ping gives the minion a few seconds to finish its current job
    # before the service is bounced
    task_args = ('/c ping -n 3 127.0.0.1 && net stop salt-minion '
                 '&& net start salt-minion')
    return __salt__['task.create_task'](
        name='restart-salt-minion',
        user_name='System',
        force=True,
        action_type='Execute',
        cmd='cmd',
        arguments=task_args,
        trigger_type='Once',
        start_date='1975-01-01',
        start_time='01:00')
def execute_salt_restart_task():
    '''
    Run the previously created Windows Salt restart task.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' service.execute_salt_restart_task()
    '''
    # Trigger the task created by create_win_salt_restart_task
    return __salt__['task.run'](name='restart-salt-minion')
def status(name, *args, **kwargs):
    '''
    Return the status for a service.

    If the name contains globbing, a dict mapping service name to True/False
    values is returned.

    .. versionchanged:: 2018.3.0
        The service name can now be a glob (e.g. ``salt*``)

    Args:
        name (str): The name of the service to check

    Returns:
        bool: True if running, False otherwise
        dict: Maps service name to True if running, False otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.status <service name>
    '''
    all_services = get_all()
    # Treat the name as a glob if it contains fnmatch metacharacters
    is_glob = re.search(r'\*|\?|\[.+\]', name) is not None
    if is_glob:
        matched = fnmatch.filter(all_services, name)
    else:
        matched = [name]
    # 'Stop Pending' counts as running: the service has not stopped yet
    results = {
        svc: info(svc)['Status'] in ('Running', 'Stop Pending')
        for svc in matched
    }
    return results if is_glob else results[name]
def getsid(name):
    '''
    Return the SID for this windows service

    Args:
        name (str): The name of the service for which to return the SID

    Returns:
        str: A string representing the SID for the service

    CLI Example:

    .. code-block:: bash

        salt '*' service.getsid <service name>
    '''
    service_info = info(name)
    return service_info['sid']
def modify(name,
           bin_path=None,
           exe_args=None,
           display_name=None,
           description=None,
           service_type=None,
           start_type=None,
           start_delayed=None,
           error_control=None,
           load_order_group=None,
           dependencies=None,
           account_name=None,
           account_password=None,
           run_interactive=None):
    # pylint: disable=anomalous-backslash-in-string
    '''
    Modify a service's parameters. Changes will not be made for parameters
    that are not passed.

    .. versionadded:: 2016.11.0

    Args:
        name (str):
            The name of the service. Can be found using the
            ``service.get_service_name`` function
        bin_path (str):
            The path to the service executable. Backslashes must be escaped,
            eg: ``C:\\path\\to\\binary.exe``
        exe_args (str):
            Any arguments required by the service executable
        display_name (str):
            The name to display in the service manager
        description (str):
            The description to display for the service
        service_type (str):
            Specifies the service type. Default is ``own``. Valid options are
            as follows:
            - kernel: Driver service
            - filesystem: File system driver service
            - adapter: Adapter driver service (reserved)
            - recognizer: Recognizer driver service (reserved)
            - own (default): Service runs in its own process
            - share: Service shares a process with one or more other services
        start_type (str):
            Specifies the service start type. Valid options are as follows:
            - boot: Device driver that is loaded by the boot loader
            - system: Device driver that is started during kernel initialization
            - auto: Service that automatically starts
            - manual: Service must be started manually
            - disabled: Service cannot be started
        start_delayed (bool):
            Set the service to Auto(Delayed Start). Only valid if the
            start_type is set to ``Auto``. If service_type is not passed, but
            the service is already set to ``Auto``, then the flag will be set.
        error_control (str):
            The severity of the error, and action taken, if this service
            fails to start. Valid options are as follows:
            - normal: Error is logged and a message box is displayed
            - severe: Error is logged and computer attempts a restart with the
              last known good configuration
            - critical: Error is logged, computer attempts to restart with the
              last known good configuration, system halts on failure
            - ignore: Error is logged and startup continues, no notification
              is given to the user
        load_order_group (str):
            The name of the load order group to which this service belongs
        dependencies (list):
            A list of services or load ordering groups that must start before
            this service
        account_name (str):
            The name of the account under which the service should run. For
            ``own`` type services this should be in the ``domain\\username``
            format. The following are examples of valid built-in service
            accounts:
            - NT Authority\\LocalService
            - NT Authority\\NetworkService
            - NT Authority\\LocalSystem
            - .\LocalSystem
        account_password (str):
            The password for the account name specified in ``account_name``.
            For the above built-in accounts, this can be None. Otherwise a
            password must be specified.
        run_interactive (bool):
            If this setting is True, the service will be allowed to interact
            with the user. Not recommended for services that run with
            elevated privileges.

    Returns:
        dict: a dictionary of changes made

    CLI Example:

    .. code-block:: bash

        salt '*' service.modify spooler start_type=disabled
    '''
    # pylint: enable=anomalous-backslash-in-string
    # https://msdn.microsoft.com/en-us/library/windows/desktop/ms681987(v=vs.85).aspx
    # https://msdn.microsoft.com/en-us/library/windows/desktop/ms681988(v-vs.85).aspx
    handle_scm = win32service.OpenSCManager(
        None, None, win32service.SC_MANAGER_CONNECT)
    try:
        handle_svc = win32service.OpenService(
            handle_scm,
            name,
            win32service.SERVICE_CHANGE_CONFIG |
            win32service.SERVICE_QUERY_CONFIG)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed To Open {0}: {1}'.format(name, exc.strerror))
    # Current config is needed to merge the run_interactive flag and to
    # decide whether delayed start is applicable
    config_info = win32service.QueryServiceConfig(handle_svc)
    changes = dict()
    # Input Validation
    if bin_path is not None:
        # shlex.quote the path to the binary
        bin_path = _cmd_quote(bin_path)
        if exe_args is not None:
            bin_path = '{0} {1}'.format(bin_path, exe_args)
        changes['BinaryPath'] = bin_path
    if service_type is not None:
        if service_type.lower() in SERVICE_TYPE:
            service_type = SERVICE_TYPE[service_type.lower()]
            if run_interactive:
                service_type = service_type | \
                               win32service.SERVICE_INTERACTIVE_PROCESS
        else:
            raise CommandExecutionError(
                'Invalid Service Type: {0}'.format(service_type))
    else:
        if run_interactive is True:
            # OR sets the interactive bit on the existing type
            service_type = config_info[0] | \
                           win32service.SERVICE_INTERACTIVE_PROCESS
        elif run_interactive is False:
            # NOTE(review): XOR toggles the interactive bit rather than
            # clearing it -- if the bit was already clear this would SET it.
            # Presumably the bit is expected to be set here; verify.
            service_type = config_info[0] ^ \
                           win32service.SERVICE_INTERACTIVE_PROCESS
        else:
            service_type = win32service.SERVICE_NO_CHANGE
        if service_type is not win32service.SERVICE_NO_CHANGE:
            flags = list()
            for bit in SERVICE_TYPE:
                if isinstance(bit, int) and service_type & bit:
                    flags.append(SERVICE_TYPE[bit])
            changes['ServiceType'] = flags if flags else service_type
    if start_type is not None:
        if start_type.lower() in SERVICE_START_TYPE:
            # Map the name to its numeric code for ChangeServiceConfig
            start_type = SERVICE_START_TYPE[start_type.lower()]
        else:
            raise CommandExecutionError(
                'Invalid Start Type: {0}'.format(start_type))
        # SERVICE_START_TYPE maps in both directions (info() indexes it with
        # numeric codes), so this converts the code back to a display name
        changes['StartType'] = SERVICE_START_TYPE[start_type]
    else:
        start_type = win32service.SERVICE_NO_CHANGE
    if error_control is not None:
        if error_control.lower() in SERVICE_ERROR_CONTROL:
            error_control = SERVICE_ERROR_CONTROL[error_control.lower()]
        else:
            raise CommandExecutionError(
                'Invalid Error Control: {0}'.format(error_control))
        # Bidirectional map: code back to display name (same as StartType)
        changes['ErrorControl'] = SERVICE_ERROR_CONTROL[error_control]
    else:
        error_control = win32service.SERVICE_NO_CHANGE
    if account_name is not None:
        changes['ServiceAccount'] = account_name
    if account_name in ['LocalSystem', 'LocalService', 'NetworkService']:
        # Built-in accounts take an empty password
        account_password = ''
    if account_password is not None:
        # Never put the real password in the returned changes dict
        changes['ServiceAccountPassword'] = 'XXX-REDACTED-XXX'
    if load_order_group is not None:
        changes['LoadOrderGroup'] = load_order_group
    if dependencies is not None:
        changes['Dependencies'] = dependencies
    if display_name is not None:
        changes['DisplayName'] = display_name
    # Unset parameters are passed as None / SERVICE_NO_CHANGE so the SCM
    # leaves them untouched
    win32service.ChangeServiceConfig(handle_svc,
                                     service_type,
                                     start_type,
                                     error_control,
                                     bin_path,
                                     load_order_group,
                                     0,
                                     dependencies,
                                     account_name,
                                     account_password,
                                     display_name)
    if description is not None:
        win32service.ChangeServiceConfig2(
            handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION, description)
        changes['Description'] = description
    if start_delayed is not None:
        # You can only set delayed start for services that are set to auto start
        # Start type 2 is Auto
        # Start type -1 is no change
        if (start_type == -1 and config_info[1] == 2) or start_type == 2:
            win32service.ChangeServiceConfig2(
                handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO,
                start_delayed)
            changes['StartTypeDelayed'] = start_delayed
        else:
            changes['Warning'] = 'start_delayed: Requires start_type "auto"'
    win32service.CloseServiceHandle(handle_scm)
    win32service.CloseServiceHandle(handle_svc)
    return changes
def disable(name, **kwargs):
    '''
    Disable the named service to start at boot

    Args:
        name (str): The name of the service to disable

    Returns:
        bool: ``True`` if disabled, ``False`` otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.disable <service name>
    '''
    modify(name, start_type='Disabled')
    # Re-query the service to confirm the change actually took effect
    current = info(name)['StartType']
    return current == 'Disabled'
def enabled(name, **kwargs):
    '''
    Check to see if the named service is enabled to start on boot

    Args:
        name (str): The name of the service to check

    Returns:
        bool: True if the service is set to start

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled <service name>
    '''
    start_type = info(name)['StartType']
    return start_type == 'Auto'
def disabled(name):
    '''
    Check to see if the named service is disabled to start on boot

    Args:
        name (str): The name of the service to check

    Returns:
        bool: True if the service is disabled

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled <service name>
    '''
    # 'Disabled' here means anything that is not set to auto-start
    return not enabled(name)
def create(name,
           bin_path,
           exe_args=None,
           display_name=None,
           description=None,
           service_type='own',
           start_type='manual',
           start_delayed=False,
           error_control='normal',
           load_order_group=None,
           dependencies=None,
           account_name='.\\LocalSystem',
           account_password=None,
           run_interactive=False,
           **kwargs):
    '''
    Create the named service.

    .. versionadded:: 2015.8.0

    Args:
        name (str):
            Specifies the service name. This is not the display_name
        bin_path (str):
            Specifies the path to the service binary file. Backslashes must
            be escaped, eg: ``C:\\path\\to\\binary.exe``
        exe_args (str):
            Any additional arguments required by the service binary.
        display_name (str):
            The name to be displayed in the service manager. If not passed,
            the ``name`` will be used
        description (str):
            A description of the service
        service_type (str):
            Specifies the service type. Default is ``own``. Valid options are
            as follows:
            - kernel: Driver service
            - filesystem: File system driver service
            - adapter: Adapter driver service (reserved)
            - recognizer: Recognizer driver service (reserved)
            - own (default): Service runs in its own process
            - share: Service shares a process with one or more other services
        start_type (str):
            Specifies the service start type. Valid options are as follows:
            - boot: Device driver that is loaded by the boot loader
            - system: Device driver that is started during kernel initialization
            - auto: Service that automatically starts
            - manual (default): Service must be started manually
            - disabled: Service cannot be started
        start_delayed (bool):
            Set the service to Auto(Delayed Start). Only valid if the
            start_type is set to ``Auto``. If service_type is not passed, but
            the service is already set to ``Auto``, then the flag will be
            set. Default is ``False``
        error_control (str):
            The severity of the error, and action taken, if this service
            fails to start. Valid options are as follows:
            - normal (normal): Error is logged and a message box is displayed
            - severe: Error is logged and computer attempts a restart with
              the last known good configuration
            - critical: Error is logged, computer attempts to restart with
              the last known good configuration, system halts on failure
            - ignore: Error is logged and startup continues, no notification
              is given to the user
        load_order_group (str):
            The name of the load order group to which this service belongs
        dependencies (list):
            A list of services or load ordering groups that must start before
            this service
        account_name (str):
            The name of the account under which the service should run. For
            ``own`` type services this should be in the ``domain\\username``
            format. The following are examples of valid built-in service
            accounts:
            - NT Authority\\LocalService
            - NT Authority\\NetworkService
            - NT Authority\\LocalSystem
            - .\\LocalSystem
        account_password (str):
            The password for the account name specified in ``account_name``.
            For the above built-in accounts, this can be None. Otherwise a
            password must be specified.
        run_interactive (bool):
            If this setting is True, the service will be allowed to interact
            with the user. Not recommended for services that run with
            elevated privileges.

    Returns:
        dict: A dictionary containing information about the new service

    Raises:
        CommandExecutionError: If the service already exists or a parameter
            fails validation.

    CLI Example:

    .. code-block:: bash

        salt '*' service.create <service name> <path to exe> display_name='<display name>'
    '''
    if display_name is None:
        display_name = name
    # Test if the service already exists
    if name in get_all():
        raise CommandExecutionError('Service Already Exists: {0}'.format(name))
    # shlex.quote the path to the binary
    bin_path = _cmd_quote(bin_path)
    if exe_args is not None:
        bin_path = '{0} {1}'.format(bin_path, exe_args)
    # Map the human-readable names to the numeric codes CreateService expects
    if service_type.lower() in SERVICE_TYPE:
        service_type = SERVICE_TYPE[service_type.lower()]
        if run_interactive:
            service_type = service_type | \
                           win32service.SERVICE_INTERACTIVE_PROCESS
    else:
        raise CommandExecutionError(
            'Invalid Service Type: {0}'.format(service_type))
    if start_type.lower() in SERVICE_START_TYPE:
        start_type = SERVICE_START_TYPE[start_type.lower()]
    else:
        raise CommandExecutionError(
            'Invalid Start Type: {0}'.format(start_type))
    if error_control.lower() in SERVICE_ERROR_CONTROL:
        error_control = SERVICE_ERROR_CONTROL[error_control.lower()]
    else:
        raise CommandExecutionError(
            'Invalid Error Control: {0}'.format(error_control))
    # Delayed start is only meaningful for auto-start (code 2) services
    if start_delayed:
        if start_type != 2:
            raise CommandExecutionError(
                'Invalid Parameter: start_delayed requires start_type "auto"')
    # Built-in accounts take an empty password
    if account_name in ['LocalSystem', '.\\LocalSystem',
                        'LocalService', '.\\LocalService',
                        'NetworkService', '.\\NetworkService']:
        account_password = ''
    # Connect to Service Control Manager
    handle_scm = win32service.OpenSCManager(
        None, None, win32service.SC_MANAGER_ALL_ACCESS)
    # Create the service
    handle_svc = win32service.CreateService(handle_scm,
                                            name,
                                            display_name,
                                            win32service.SERVICE_ALL_ACCESS,
                                            service_type,
                                            start_type,
                                            error_control,
                                            bin_path,
                                            load_order_group,
                                            0,
                                            dependencies,
                                            account_name,
                                            account_password)
    # Description and delayed-start cannot be set via CreateService; they
    # require separate ChangeServiceConfig2 calls
    if description is not None:
        win32service.ChangeServiceConfig2(
            handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION, description)
    if start_delayed is not None:
        # You can only set delayed start for services that are set to auto start
        # Start type 2 is Auto
        if start_type == 2:
            win32service.ChangeServiceConfig2(
                handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO,
                start_delayed)
    win32service.CloseServiceHandle(handle_scm)
    win32service.CloseServiceHandle(handle_svc)
    # Return the freshly-created service's full configuration
    return info(name)
def delete(name, timeout=90):
    '''
    Delete the named service

    Args:
        name (str): The name of the service to delete
        timeout (int):
            The time in seconds to wait for the service to be deleted before
            returning. This is necessary because a service must be stopped
            before it can be deleted. Default is 90 seconds

            .. versionadded:: 2017.7.9,2018.3.4

    Returns:
        bool: ``True`` if successful, otherwise ``False``. Also returns
        ``True`` if the service is not present

    CLI Example:

    .. code-block:: bash

        salt '*' service.delete <service name>
    '''
    handle_scm = win32service.OpenSCManager(
        None, None, win32service.SC_MANAGER_CONNECT)
    try:
        handle_svc = win32service.OpenService(
            handle_scm, name, win32service.SERVICE_ALL_ACCESS)
    except pywintypes.error as exc:
        # The service handle was never opened, so only the SCM handle
        # needs to be released on this path
        win32service.CloseServiceHandle(handle_scm)
        # winerror 1060: the service does not exist -- deletion is a no-op
        if exc.winerror != 1060:
            raise CommandExecutionError(
                'Failed to open {0}. {1}'.format(name, exc.strerror))
        log.debug('Service "%s" is not present', name)
        return True
    try:
        # Marks the service for deletion; actual removal happens once all
        # open handles are closed and the service is stopped
        win32service.DeleteService(handle_svc)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed to delete {0}. {1}'.format(name, exc.strerror))
    finally:
        # Close both handles whether or not DeleteService succeeded
        log.debug('Cleaning up')
        win32service.CloseServiceHandle(handle_scm)
        win32service.CloseServiceHandle(handle_svc)
    # Poll until the service disappears from the SCM or the timeout expires
    end_time = time.time() + int(timeout)
    while name in get_all() and time.time() < end_time:
        time.sleep(1)
    return name not in get_all()
|
saltstack/salt
|
salt/modules/win_service.py
|
create
|
python
|
def create(name,
bin_path,
exe_args=None,
display_name=None,
description=None,
service_type='own',
start_type='manual',
start_delayed=False,
error_control='normal',
load_order_group=None,
dependencies=None,
account_name='.\\LocalSystem',
account_password=None,
run_interactive=False,
**kwargs):
'''
Create the named service.
.. versionadded:: 2015.8.0
Args:
name (str):
Specifies the service name. This is not the display_name
bin_path (str):
Specifies the path to the service binary file. Backslashes must be
escaped, eg: ``C:\\path\\to\\binary.exe``
exe_args (str):
Any additional arguments required by the service binary.
display_name (str):
The name to be displayed in the service manager. If not passed, the
``name`` will be used
description (str):
A description of the service
service_type (str):
Specifies the service type. Default is ``own``. Valid options are as
follows:
- kernel: Driver service
- filesystem: File system driver service
- adapter: Adapter driver service (reserved)
- recognizer: Recognizer driver service (reserved)
- own (default): Service runs in its own process
- share: Service shares a process with one or more other services
start_type (str):
Specifies the service start type. Valid options are as follows:
- boot: Device driver that is loaded by the boot loader
- system: Device driver that is started during kernel initialization
- auto: Service that automatically starts
- manual (default): Service must be started manually
- disabled: Service cannot be started
start_delayed (bool):
Set the service to Auto(Delayed Start). Only valid if the start_type
is set to ``Auto``. If service_type is not passed, but the service
is already set to ``Auto``, then the flag will be set. Default is
``False``
error_control (str):
The severity of the error, and action taken, if this service fails
to start. Valid options are as follows:
- normal (normal): Error is logged and a message box is displayed
- severe: Error is logged and computer attempts a restart with the
last known good configuration
- critical: Error is logged, computer attempts to restart with the
last known good configuration, system halts on failure
- ignore: Error is logged and startup continues, no notification is
given to the user
load_order_group (str):
The name of the load order group to which this service belongs
dependencies (list):
A list of services or load ordering groups that must start before
this service
account_name (str):
The name of the account under which the service should run. For
``own`` type services this should be in the ``domain\\username``
format. The following are examples of valid built-in service
accounts:
- NT Authority\\LocalService
- NT Authority\\NetworkService
- NT Authority\\LocalSystem
- .\\LocalSystem
account_password (str):
The password for the account name specified in ``account_name``. For
the above built-in accounts, this can be None. Otherwise a password
must be specified.
run_interactive (bool):
If this setting is True, the service will be allowed to interact
with the user. Not recommended for services that run with elevated
privileges.
Returns:
dict: A dictionary containing information about the new service
CLI Example:
.. code-block:: bash
salt '*' service.create <service name> <path to exe> display_name='<display name>'
'''
if display_name is None:
display_name = name
# Test if the service already exists
if name in get_all():
raise CommandExecutionError('Service Already Exists: {0}'.format(name))
# shlex.quote the path to the binary
bin_path = _cmd_quote(bin_path)
if exe_args is not None:
bin_path = '{0} {1}'.format(bin_path, exe_args)
if service_type.lower() in SERVICE_TYPE:
service_type = SERVICE_TYPE[service_type.lower()]
if run_interactive:
service_type = service_type | \
win32service.SERVICE_INTERACTIVE_PROCESS
else:
raise CommandExecutionError(
'Invalid Service Type: {0}'.format(service_type))
if start_type.lower() in SERVICE_START_TYPE:
start_type = SERVICE_START_TYPE[start_type.lower()]
else:
raise CommandExecutionError(
'Invalid Start Type: {0}'.format(start_type))
if error_control.lower() in SERVICE_ERROR_CONTROL:
error_control = SERVICE_ERROR_CONTROL[error_control.lower()]
else:
raise CommandExecutionError(
'Invalid Error Control: {0}'.format(error_control))
if start_delayed:
if start_type != 2:
raise CommandExecutionError(
'Invalid Parameter: start_delayed requires start_type "auto"')
if account_name in ['LocalSystem', '.\\LocalSystem',
'LocalService', '.\\LocalService',
'NetworkService', '.\\NetworkService']:
account_password = ''
# Connect to Service Control Manager
handle_scm = win32service.OpenSCManager(
None, None, win32service.SC_MANAGER_ALL_ACCESS)
# Create the service
handle_svc = win32service.CreateService(handle_scm,
name,
display_name,
win32service.SERVICE_ALL_ACCESS,
service_type,
start_type,
error_control,
bin_path,
load_order_group,
0,
dependencies,
account_name,
account_password)
if description is not None:
win32service.ChangeServiceConfig2(
handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION, description)
if start_delayed is not None:
# You can only set delayed start for services that are set to auto start
# Start type 2 is Auto
if start_type == 2:
win32service.ChangeServiceConfig2(
handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO,
start_delayed)
win32service.CloseServiceHandle(handle_scm)
win32service.CloseServiceHandle(handle_svc)
return info(name)
|
Create the named service.
.. versionadded:: 2015.8.0
Args:
name (str):
Specifies the service name. This is not the display_name
bin_path (str):
Specifies the path to the service binary file. Backslashes must be
escaped, eg: ``C:\\path\\to\\binary.exe``
exe_args (str):
Any additional arguments required by the service binary.
display_name (str):
The name to be displayed in the service manager. If not passed, the
``name`` will be used
description (str):
A description of the service
service_type (str):
Specifies the service type. Default is ``own``. Valid options are as
follows:
- kernel: Driver service
- filesystem: File system driver service
- adapter: Adapter driver service (reserved)
- recognizer: Recognizer driver service (reserved)
- own (default): Service runs in its own process
- share: Service shares a process with one or more other services
start_type (str):
Specifies the service start type. Valid options are as follows:
- boot: Device driver that is loaded by the boot loader
- system: Device driver that is started during kernel initialization
- auto: Service that automatically starts
- manual (default): Service must be started manually
- disabled: Service cannot be started
start_delayed (bool):
Set the service to Auto(Delayed Start). Only valid if the start_type
is set to ``Auto``. If service_type is not passed, but the service
is already set to ``Auto``, then the flag will be set. Default is
``False``
error_control (str):
The severity of the error, and action taken, if this service fails
to start. Valid options are as follows:
- normal (normal): Error is logged and a message box is displayed
- severe: Error is logged and computer attempts a restart with the
last known good configuration
- critical: Error is logged, computer attempts to restart with the
last known good configuration, system halts on failure
- ignore: Error is logged and startup continues, no notification is
given to the user
load_order_group (str):
The name of the load order group to which this service belongs
dependencies (list):
A list of services or load ordering groups that must start before
this service
account_name (str):
The name of the account under which the service should run. For
``own`` type services this should be in the ``domain\\username``
format. The following are examples of valid built-in service
accounts:
- NT Authority\\LocalService
- NT Authority\\NetworkService
- NT Authority\\LocalSystem
- .\\LocalSystem
account_password (str):
The password for the account name specified in ``account_name``. For
the above built-in accounts, this can be None. Otherwise a password
must be specified.
run_interactive (bool):
If this setting is True, the service will be allowed to interact
with the user. Not recommended for services that run with elevated
privileges.
Returns:
dict: A dictionary containing information about the new service
CLI Example:
.. code-block:: bash
salt '*' service.create <service name> <path to exe> display_name='<display name>'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_service.py#L1200-L1391
|
[
"def info(name):\n '''\n Get information about a service on the system\n\n Args:\n name (str): The name of the service. This is not the display name. Use\n ``get_service_name`` to find the service name.\n\n Returns:\n dict: A dictionary containing information about the service.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.info spooler\n '''\n try:\n handle_scm = win32service.OpenSCManager(\n None, None, win32service.SC_MANAGER_CONNECT)\n except pywintypes.error as exc:\n raise CommandExecutionError(\n 'Failed to connect to the SCM: {0}'.format(exc.strerror))\n\n try:\n handle_svc = win32service.OpenService(\n handle_scm, name,\n win32service.SERVICE_ENUMERATE_DEPENDENTS |\n win32service.SERVICE_INTERROGATE |\n win32service.SERVICE_QUERY_CONFIG |\n win32service.SERVICE_QUERY_STATUS)\n except pywintypes.error as exc:\n raise CommandExecutionError(\n 'Failed To Open {0}: {1}'.format(name, exc.strerror))\n\n try:\n config_info = win32service.QueryServiceConfig(handle_svc)\n status_info = win32service.QueryServiceStatusEx(handle_svc)\n\n try:\n description = win32service.QueryServiceConfig2(\n handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION)\n except pywintypes.error:\n description = 'Failed to get description'\n\n delayed_start = win32service.QueryServiceConfig2(\n handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO)\n finally:\n win32service.CloseServiceHandle(handle_scm)\n win32service.CloseServiceHandle(handle_svc)\n\n ret = dict()\n try:\n sid = win32security.LookupAccountName(\n '', 'NT Service\\\\{0}'.format(name))[0]\n ret['sid'] = win32security.ConvertSidToStringSid(sid)\n except pywintypes.error:\n ret['sid'] = 'Failed to get SID'\n\n ret['BinaryPath'] = config_info[3]\n ret['LoadOrderGroup'] = config_info[4]\n ret['TagID'] = config_info[5]\n ret['Dependencies'] = config_info[6]\n ret['ServiceAccount'] = config_info[7]\n ret['DisplayName'] = config_info[8]\n ret['Description'] = description\n ret['Status_ServiceCode'] = 
status_info['ServiceSpecificExitCode']\n ret['Status_CheckPoint'] = status_info['CheckPoint']\n ret['Status_WaitHint'] = status_info['WaitHint']\n ret['StartTypeDelayed'] = delayed_start\n\n flags = list()\n for bit in SERVICE_TYPE:\n if isinstance(bit, int):\n if config_info[0] & bit:\n flags.append(SERVICE_TYPE[bit])\n\n ret['ServiceType'] = flags if flags else config_info[0]\n\n flags = list()\n for bit in SERVICE_CONTROLS:\n if status_info['ControlsAccepted'] & bit:\n flags.append(SERVICE_CONTROLS[bit])\n\n ret['ControlsAccepted'] = flags if flags else status_info['ControlsAccepted']\n\n try:\n ret['Status_ExitCode'] = SERVICE_ERRORS[status_info['Win32ExitCode']]\n except KeyError:\n ret['Status_ExitCode'] = status_info['Win32ExitCode']\n\n try:\n ret['StartType'] = SERVICE_START_TYPE[config_info[1]]\n except KeyError:\n ret['StartType'] = config_info[1]\n\n try:\n ret['ErrorControl'] = SERVICE_ERROR_CONTROL[config_info[2]]\n except KeyError:\n ret['ErrorControl'] = config_info[2]\n\n try:\n ret['Status'] = SERVICE_STATE[status_info['CurrentState']]\n except KeyError:\n ret['Status'] = status_info['CurrentState']\n\n return ret\n",
"def get_all():\n '''\n Return all installed services\n\n Returns:\n list: Returns a list of all services on the system.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.get_all\n '''\n services = _get_services()\n\n ret = set()\n for service in services:\n ret.add(service['ServiceName'])\n\n return sorted(ret)\n",
"def _cmd_quote(cmd):\n r'''\n Helper function to properly format the path to the binary for the service\n Must be wrapped in double quotes to account for paths that have spaces. For\n example:\n\n ``\"C:\\Program Files\\Path\\to\\bin.exe\"``\n\n Args:\n cmd (str): Full path to the binary\n\n Returns:\n str: Properly quoted path to the binary\n '''\n # Remove all single and double quotes from the beginning and the end\n pattern = re.compile('^(\\\\\"|\\').*|.*(\\\\\"|\\')$')\n while pattern.match(cmd) is not None:\n cmd = cmd.strip('\"').strip('\\'')\n # Ensure the path to the binary is wrapped in double quotes to account for\n # spaces in the path\n cmd = '\"{0}\"'.format(cmd)\n return cmd\n"
] |
# -*- coding: utf-8 -*-
'''
Windows Service module.
.. versionchanged:: 2016.11.0 - Rewritten to use PyWin32
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import fnmatch
import logging
import re
import time
# Import Salt libs
import salt.utils.platform
from salt.exceptions import CommandExecutionError
# Import 3rd party libs
try:
import win32security
import win32service
import win32serviceutil
import pywintypes
HAS_WIN32_MODS = True
except ImportError:
HAS_WIN32_MODS = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'service'
# Bidirectional map: SCM service-type bit flags -> display strings, and
# lowercase user-input strings -> flag values (used by modify()/info())
SERVICE_TYPE = {1: 'Kernel Driver',
                2: 'File System Driver',
                4: 'Adapter Driver',
                8: 'Recognizer Driver',
                16: 'Win32 Own Process',
                32: 'Win32 Share Process',
                256: 'Interactive',
                'kernel': 1,
                'filesystem': 2,
                'adapter': 4,
                'recognizer': 8,
                'own': 16,
                'share': 32}
# Bit flags reported in a service's ControlsAccepted status field
SERVICE_CONTROLS = {1: 'Stop',
                    2: 'Pause/Continue',
                    4: 'Shutdown',
                    8: 'Change Parameters',
                    16: 'Netbind Change',
                    32: 'Hardware Profile Change',
                    64: 'Power Event',
                    128: 'Session Change',
                    256: 'Pre-Shutdown',
                    512: 'Time Change',
                    1024: 'Trigger Event'}
# Service CurrentState codes -> display strings
SERVICE_STATE = {1: 'Stopped',
                 2: 'Start Pending',
                 3: 'Stop Pending',
                 4: 'Running',
                 5: 'Continue Pending',
                 6: 'Pause Pending',
                 7: 'Paused'}
# Win32ExitCode values with a friendly name; anything else is shown raw
SERVICE_ERRORS = {0: 'No Error',
                  1066: 'Service Specific Error'}
# Bidirectional map for service start types (strings are user input)
SERVICE_START_TYPE = {'boot': 0,
                      'system': 1,
                      'auto': 2,
                      'manual': 3,
                      'disabled': 4,
                      0: 'Boot',
                      1: 'System',
                      2: 'Auto',
                      3: 'Manual',
                      4: 'Disabled'}
# Bidirectional map for service error-control settings
SERVICE_ERROR_CONTROL = {0: 'Ignore',
                         1: 'Normal',
                         2: 'Severe',
                         3: 'Critical',
                         'ignore': 0,
                         'normal': 1,
                         'severe': 2,
                         'critical': 3}
def __virtual__():
    '''
    Load only on Windows hosts where the PyWin32 modules imported cleanly.
    '''
    if salt.utils.platform.is_windows():
        if HAS_WIN32_MODS:
            return __virtualname__
        return False, 'Module win_service: failed to load win32 modules'
    return False, 'Module win_service: module only works on Windows.'
class ServiceDependencies(object):
    '''
    Helper class which provides functionality to get all dependencies and
    parents of a Windows service

    Args:
        name (str): The name of the service. This is not the display name.
            Use ``get_service_name`` to find the service name.
        all_services (callback): A callable which provides a list of all
            available service names, as done by the ``win_service.get_all()``
            method.
        service_info (callback): A callable which accepts a service name and
            returns a dict that meets the requirement
            ``{service_name: {'Dependencies': []}}``, as done by the
            ``win_service.info(name)`` method
    '''
    def __init__(self, name, all_services, service_info):
        # Sort for predictable behavior
        self._all_services = sorted(all_services())
        # Normalizing also validates that the provided name exists
        self._name = self._normalize_name(self._all_services, name)
        self._service_info = self._populate_service_info(self._all_services, service_info)
    def _populate_service_info(self, all_services, service_info):
        # Build a map of service name -> sorted list of direct dependencies
        ret = {}
        for name in all_services:
            dependencies = service_info(name).get('Dependencies', [])
            # Sort for predictable behavior
            ret[name] = sorted(self._normalize_multiple_name(all_services, *dependencies))
            log.trace("Added dependencies of %s: %s", name, ret[name])
        return ret
    def _dependencies(self, name):
        # Direct dependencies of ``name``, normalized against known services
        dependencies = self._service_info.get(name, [])
        # Sort for predictable behavior
        ret = sorted(self._normalize_multiple_name(self._all_services, *dependencies))
        log.trace("Added dependencies of %s: %s", name, ret)
        return ret
    def _dependencies_recursion(self, name):
        # Depth-first walk: indirect dependencies come before direct ones so
        # the resulting list is a valid start order.
        # Using a list here to maintain order
        ret = list()
        try:
            dependencies = self._dependencies(name)
            for dependency in dependencies:
                indirect_dependencies = self._dependencies_recursion(dependency)
                for indirect_dependency in indirect_dependencies:
                    if indirect_dependency not in ret:
                        ret.append(indirect_dependency)
            for dependency in dependencies:
                if dependency not in ret:
                    ret.append(dependency)
        except Exception as e:
            # Best effort: on any failure fall back to an empty list
            log.debug(e)
            ret = list()
        return ret
    def _normalize_name(self, references, difference):
        # Normalize Input
        normalized = self._normalize_multiple_name(references, difference)
        if not normalized:
            raise ValueError("The provided name '{}' does not exist".format(difference))
        return normalized[0]
    def _normalize_multiple_name(self, references, *differences):
        # Case-insensitively match each input against ``references`` and
        # return the reference spelling, preserving order and dropping
        # duplicates and unknown names.
        # Normalize Input
        ret = list()
        for difference in differences:
            difference_str = str(difference)
            for reference in references:
                reference_str = str(reference)
                if reference_str.lower() == difference_str.lower() and reference_str not in ret:
                    ret.append(reference_str)
                    break
        return ret
    def dependencies(self, with_indirect=False):
        # Services this service depends on; set ``with_indirect`` to include
        # transitive dependencies.
        normalized = self._normalize_name(self._all_services, self._name)
        if bool(with_indirect):
            ret = self._dependencies_recursion(normalized)
        else:
            ret = self._dependencies(normalized)
        log.trace("Dependencies of '%s': '%s'", normalized, ret)
        return ret
    def _parents(self, name):
        # Services that list ``name`` among their dependencies
        # Using a list here to maintain order
        ret = list()
        try:
            # Sort for predictable behavior
            for service, dependencies in sorted(self._service_info.items()):
                if name in dependencies:
                    if service in ret:
                        ret.remove(service)
                    ret.append(service)
        except Exception as e:
            log.debug(e)
            ret = list()
        return ret
    def _parents_recursion(self, name):
        # Depth-first walk of dependents: direct parents first, then their
        # parents, with later occurrences moved to the end of the list.
        # Using a list here to maintain order
        ret = list()
        try:
            parents = self._parents(name)
            for parent in parents:
                if parent not in ret:
                    ret.append(parent)
            for parent in parents:
                indirect_parents = self._parents_recursion(parent)
                for indirect_parent in indirect_parents:
                    if indirect_parent in ret:
                        ret.remove(indirect_parent)
                    ret.append(indirect_parent)
        except Exception as e:
            log.debug(e)
            ret = list()
        return ret
    def parents(self, with_indirect=False):
        # Services that depend on this service; set ``with_indirect`` to
        # include transitive dependents.
        normalized = self._normalize_name(self._all_services, self._name)
        if bool(with_indirect):
            ret = self._parents_recursion(normalized)
        else:
            ret = self._parents(normalized)
        log.trace("Parents of '%s': '%s'", normalized, ret)
        return ret
    def start_order(self, with_deps=False, with_parents=False):
        # Order in which services must be started: dependencies first, the
        # service itself, then its dependents.
        ret = []
        if with_deps:
            ret.extend(self.dependencies(with_indirect=True))
        normalized = self._normalize_name(self._all_services, self._name)
        ret.append(normalized)
        if with_parents:
            ret.extend(self.parents(with_indirect=True))
        return ret
    def stop_order(self, with_deps=False, with_parents=False):
        # Stop order is simply the reverse of the start order
        order = self.start_order(with_deps=with_deps, with_parents=with_parents)
        order.reverse()
        return order
def _status_wait(service_name, end_time, service_states):
    '''
    Poll ``info()`` until the service's status leaves ``service_states`` or
    ``end_time`` passes. Used when starting and stopping services.

    .. versionadded:: 2017.7.9,2018.3.4

    Args:
        service_name (str):
            The name of the service

        end_time (float):
            A future time. e.g. time.time() + 10

        service_states (list):
            Statuses (as returned by ``info()``) that mean the service is
            still transitioning

    Returns:
        dict: A dictionary containing information about the service.

    :codeauthor: Damon Atkins <https://github.com/damon-atkins>
    '''
    results = info(service_name)
    while results['Status'] in service_states and time.time() < end_time:
        # Per Microsoft guidance, base the polling delay on the wait hint
        # (reported in milliseconds) and keep it between 1 and 10 seconds:
        # https://docs.microsoft.com/en-us/windows/desktop/services/starting-a-service
        # https://docs.microsoft.com/en-us/windows/desktop/services/stopping-a-service
        hint_ms = results['Status_WaitHint']
        delay = hint_ms / 1000 if hint_ms else 0
        delay = max(1, min(delay, 10))
        time.sleep(delay)
        results = info(service_name)
    return results
def _cmd_quote(cmd):
r'''
Helper function to properly format the path to the binary for the service
Must be wrapped in double quotes to account for paths that have spaces. For
example:
``"C:\Program Files\Path\to\bin.exe"``
Args:
cmd (str): Full path to the binary
Returns:
str: Properly quoted path to the binary
'''
# Remove all single and double quotes from the beginning and the end
pattern = re.compile('^(\\"|\').*|.*(\\"|\')$')
while pattern.match(cmd) is not None:
cmd = cmd.strip('"').strip('\'')
# Ensure the path to the binary is wrapped in double quotes to account for
# spaces in the path
cmd = '"{0}"'.format(cmd)
return cmd
def get_enabled():
    '''
    List the services that are enabled, i.e. whose start type is Auto.

    Returns:
        list: A list of enabled services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_enabled
    '''
    auto_start = {
        svc['ServiceName'] for svc in _get_services()
        if info(svc['ServiceName'])['StartType'] in ['Auto']}
    return sorted(auto_start)
def get_disabled():
    '''
    List the services that are disabled, i.e. whose start type is either
    Disabled or Manual.

    Returns:
        list: A list of disabled services.

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_disabled
    '''
    not_auto = {
        svc['ServiceName'] for svc in _get_services()
        if info(svc['ServiceName'])['StartType'] in ['Manual', 'Disabled']}
    return sorted(not_auto)
def available(name):
    '''
    Check if a service is available on the system. The comparison is
    case-insensitive.

    Args:
        name (str): The name of the service to check

    Returns:
        bool: ``True`` if the service is available, ``False`` otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.available <service name>
    '''
    wanted = name.lower()
    return any(wanted == existing.lower() for existing in get_all())
def missing(name):
    '''
    The inverse of service.available.

    Args:
        name (str): The name of the service to check

    Returns:
        bool: ``True`` if the service is missing, ``False`` otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.missing <service name>
    '''
    # Delegate to available() so both functions use the same case-insensitive
    # comparison. The previous ``name not in get_all()`` check was
    # case-sensitive and could report a service as missing even though
    # available() reported it as present.
    return not available(name)
def _get_services():
    '''
    Return the raw status structures for every service on the system.
    '''
    scm_handle = win32service.OpenSCManager(
        None, None, win32service.SC_MANAGER_ENUMERATE_SERVICE)
    try:
        # EnumServicesStatusEx is not present in older pywin32 builds
        return win32service.EnumServicesStatusEx(scm_handle)
    except AttributeError:
        return win32service.EnumServicesStatus(scm_handle)
    finally:
        win32service.CloseServiceHandle(scm_handle)
def get_all():
    '''
    Return the names of all installed services.

    Returns:
        list: Returns a list of all services on the system.

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    '''
    return sorted({svc['ServiceName'] for svc in _get_services()})
def get_service_name(*args):
    '''
    Map Display Names to Service Names. The Display Name is what is shown in
    services.msc; the Service Name is the actual name used by the SCM.

    If no args are passed, return a dict of all services where the keys are
    the service Display Names and the values are the Service Names. If
    arguments are passed, only matching services are included; a service
    matches if its Display Name, Service Name, or lowercased Service Name
    appears in the arguments.

    Returns:
        dict: A dictionary of display names and service names

    CLI Examples:

    .. code-block:: bash

        salt '*' service.get_service_name
        salt '*' service.get_service_name 'Google Update Service (gupdate)' 'DHCP Client'
    '''
    mapping = dict()
    for svc in _get_services():
        display_name = svc['DisplayName']
        service_name = svc['ServiceName']
        if not args or display_name in args or service_name in args \
                or service_name.lower() in args:
            mapping[display_name] = service_name
    return mapping
def info(name):
    '''
    Get information about a service on the system

    Args:
        name (str): The name of the service. This is not the display name. Use
            ``get_service_name`` to find the service name.

    Returns:
        dict: A dictionary containing information about the service.

    Raises:
        CommandExecutionError: If the SCM cannot be reached or the service
            cannot be opened

    CLI Example:

    .. code-block:: bash

        salt '*' service.info spooler
    '''
    # Connect to the Service Control Manager
    try:
        handle_scm = win32service.OpenSCManager(
            None, None, win32service.SC_MANAGER_CONNECT)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed to connect to the SCM: {0}'.format(exc.strerror))
    # Open the service with just enough rights to query it
    try:
        handle_svc = win32service.OpenService(
            handle_scm, name,
            win32service.SERVICE_ENUMERATE_DEPENDENTS |
            win32service.SERVICE_INTERROGATE |
            win32service.SERVICE_QUERY_CONFIG |
            win32service.SERVICE_QUERY_STATUS)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed To Open {0}: {1}'.format(name, exc.strerror))
    # Query everything we need before closing the handles
    try:
        config_info = win32service.QueryServiceConfig(handle_svc)
        status_info = win32service.QueryServiceStatusEx(handle_svc)
        try:
            description = win32service.QueryServiceConfig2(
                handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION)
        except pywintypes.error:
            # Description is best-effort; don't fail the whole call
            description = 'Failed to get description'
        delayed_start = win32service.QueryServiceConfig2(
            handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO)
    finally:
        win32service.CloseServiceHandle(handle_scm)
        win32service.CloseServiceHandle(handle_svc)
    ret = dict()
    # The per-service SID is looked up separately via the security API
    try:
        sid = win32security.LookupAccountName(
            '', 'NT Service\\{0}'.format(name))[0]
        ret['sid'] = win32security.ConvertSidToStringSid(sid)
    except pywintypes.error:
        ret['sid'] = 'Failed to get SID'
    # config_info is the QUERY_SERVICE_CONFIG tuple; indices map as below
    ret['BinaryPath'] = config_info[3]
    ret['LoadOrderGroup'] = config_info[4]
    ret['TagID'] = config_info[5]
    ret['Dependencies'] = config_info[6]
    ret['ServiceAccount'] = config_info[7]
    ret['DisplayName'] = config_info[8]
    ret['Description'] = description
    ret['Status_ServiceCode'] = status_info['ServiceSpecificExitCode']
    ret['Status_CheckPoint'] = status_info['CheckPoint']
    ret['Status_WaitHint'] = status_info['WaitHint']
    ret['StartTypeDelayed'] = delayed_start
    # Decode the service-type bitmask into readable flag names
    flags = list()
    for bit in SERVICE_TYPE:
        if isinstance(bit, int):
            if config_info[0] & bit:
                flags.append(SERVICE_TYPE[bit])
    ret['ServiceType'] = flags if flags else config_info[0]
    # Decode the accepted-controls bitmask into readable flag names
    flags = list()
    for bit in SERVICE_CONTROLS:
        if status_info['ControlsAccepted'] & bit:
            flags.append(SERVICE_CONTROLS[bit])
    ret['ControlsAccepted'] = flags if flags else status_info['ControlsAccepted']
    # For the remaining fields, fall back to the raw value when the code is
    # not in the lookup table
    try:
        ret['Status_ExitCode'] = SERVICE_ERRORS[status_info['Win32ExitCode']]
    except KeyError:
        ret['Status_ExitCode'] = status_info['Win32ExitCode']
    try:
        ret['StartType'] = SERVICE_START_TYPE[config_info[1]]
    except KeyError:
        ret['StartType'] = config_info[1]
    try:
        ret['ErrorControl'] = SERVICE_ERROR_CONTROL[config_info[2]]
    except KeyError:
        ret['ErrorControl'] = config_info[2]
    try:
        ret['Status'] = SERVICE_STATE[status_info['CurrentState']]
    except KeyError:
        ret['Status'] = status_info['CurrentState']
    return ret
def start(name, timeout=90, with_deps=False, with_parents=False):
    '''
    Start the specified service.

    .. warning::
        You cannot start a disabled service in Windows. If the service is
        disabled, it will be changed to ``Manual`` start.

    Args:
        name (str): The name of the service to start

        timeout (int): Seconds to wait for the service to start before
            returning. Default is 90.

            .. versionadded:: 2017.7.9,2018.3.4

        with_deps (bool): Also start the services this service depends on.

        with_parents (bool): Also start the services that depend on this
            service.

    Returns:
        bool: ``True`` if successful (or already started), otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' service.start <service name>
    '''
    # Windows refuses to start a disabled service, so switch it to Manual
    if disabled(name):
        modify(name, start_type='Manual')
    results = set()
    graph = ServiceDependencies(name, get_all, info)
    ordered = graph.start_order(with_deps=with_deps, with_parents=with_parents)
    log.debug("Starting services %s", ordered)
    for service_name in ordered:
        try:
            win32serviceutil.StartService(service_name)
        except pywintypes.error as exc:
            # winerror 1056: an instance of the service is already running
            if exc.winerror != 1056:
                raise CommandExecutionError(
                    'Failed To Start {0}: {1}'.format(service_name, exc.strerror))
            log.debug('Service "%s" is running', service_name)
        wait_result = _status_wait(service_name=service_name,
                                   end_time=time.time() + int(timeout),
                                   service_states=['Start Pending', 'Stopped'])
        results.add(wait_result['Status'] == 'Running')
    return False not in results
def stop(name, timeout=90, with_deps=False, with_parents=False):
    '''
    Stop the specified service.

    Args:
        name (str): The name of the service to stop

        timeout (int): Seconds to wait for the service to stop before
            returning. Default is 90.

            .. versionadded:: 2017.7.9,2018.3.4

        with_deps (bool): Also stop the services this service depends on.

        with_parents (bool): Also stop the running services that depend on
            this service. If disabled, the stop will fail when other running
            services depend on the service being stopped.

    Returns:
        bool: ``True`` if successful (or already stopped), otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop <service name>
    '''
    results = set()
    graph = ServiceDependencies(name, get_all, info)
    ordered = graph.stop_order(with_deps=with_deps, with_parents=with_parents)
    log.debug("Stopping services %s", ordered)
    for service_name in ordered:
        try:
            win32serviceutil.StopService(service_name)
        except pywintypes.error as exc:
            # winerror 1062: the service has not been started
            if exc.winerror != 1062:
                raise CommandExecutionError(
                    'Failed To Stop {0}: {1}'.format(service_name, exc.strerror))
            log.debug('Service "%s" is not running', service_name)
        wait_result = _status_wait(service_name=service_name,
                                   end_time=time.time() + int(timeout),
                                   service_states=['Running', 'Stop Pending'])
        results.add(wait_result['Status'] == 'Stopped')
    return False not in results
def restart(name, timeout=90, with_deps=False, with_parents=False):
    '''
    Restart the named service by stopping it and then starting it again.

    Args:
        name: The name of the service to restart.

            .. note::
                If the name passed is ``salt-minion`` a scheduled task is
                created and executed to restart the salt-minion service.

        timeout (int): Seconds to wait for each of the stop and start phases.
            Default is 90, so a full restart may take up to twice that.

            .. versionadded:: 2017.7.9,2018.3.4

        with_deps (bool): Also restart the services this service depends on.

        with_parents (bool): Also restart the running services that depend on
            this service. If disabled, the restart will fail when other
            running services depend on the service being restarted.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart <service name>
    '''
    # The minion cannot stop its own service directly; hand off to a
    # scheduled task instead
    if 'salt-minion' in name:
        create_win_salt_restart_task()
        return execute_salt_restart_task()
    stopped = stop(name=name, timeout=timeout, with_deps=with_deps, with_parents=with_parents)
    started = start(name=name, timeout=timeout, with_deps=with_deps, with_parents=with_parents)
    return stopped and started
def create_win_salt_restart_task():
    '''
    Register a Windows scheduled task that restarts the salt-minion service.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' service.create_win_salt_restart_task()
    '''
    # Ping gives the minion a moment to finish the current job before the
    # service is bounced
    arguments = ('/c ping -n 3 127.0.0.1 && net stop salt-minion '
                 '&& net start salt-minion')
    return __salt__['task.create_task'](name='restart-salt-minion',
                                        user_name='System',
                                        force=True,
                                        action_type='Execute',
                                        cmd='cmd',
                                        arguments=arguments,
                                        trigger_type='Once',
                                        start_date='1975-01-01',
                                        start_time='01:00')
def execute_salt_restart_task():
    '''
    Run the previously created Windows salt-minion restart task.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' service.execute_salt_restart_task()
    '''
    run_task = __salt__['task.run']
    return run_task(name='restart-salt-minion')
def status(name, *args, **kwargs):
    '''
    Return the status for a service. If the name contains globbing, a dict
    mapping service names to True/False values is returned.

    .. versionchanged:: 2018.3.0
        The service name can now be a glob (e.g. ``salt*``)

    Args:
        name (str): The name of the service to check

    Returns:
        bool: True if running, False otherwise
        dict: Maps service name to True if running, False otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.status <service name>
    '''
    def _is_up(svc):
        # A service winding down is still considered "up"
        return info(svc)['Status'] in ['Running', 'Stop Pending']

    all_services = get_all()
    if re.search(r'\*|\?|\[.+\]', name):
        matched = fnmatch.filter(all_services, name)
        return {svc: _is_up(svc) for svc in matched}
    return _is_up(name)
def getsid(name):
    '''
    Return the SID for this windows service.

    Args:
        name (str): The name of the service for which to return the SID

    Returns:
        str: A string representing the SID for the service

    CLI Example:

    .. code-block:: bash

        salt '*' service.getsid <service name>
    '''
    service_info = info(name)
    return service_info['sid']
def modify(name,
           bin_path=None,
           exe_args=None,
           display_name=None,
           description=None,
           service_type=None,
           start_type=None,
           start_delayed=None,
           error_control=None,
           load_order_group=None,
           dependencies=None,
           account_name=None,
           account_password=None,
           run_interactive=None):
    # pylint: disable=anomalous-backslash-in-string
    '''
    Modify a service's parameters. Changes will not be made for parameters
    that are not passed.

    .. versionadded:: 2016.11.0

    Args:
        name (str):
            The name of the service. Can be found using the
            ``service.get_service_name`` function

        bin_path (str):
            The path to the service executable. Backslashes must be escaped,
            eg: ``C:\\path\\to\\binary.exe``

        exe_args (str):
            Any arguments required by the service executable

        display_name (str):
            The name to display in the service manager

        description (str):
            The description to display for the service

        service_type (str):
            Specifies the service type. Default is ``own``. Valid options are
            as follows:

            - kernel: Driver service
            - filesystem: File system driver service
            - adapter: Adapter driver service (reserved)
            - recognizer: Recognizer driver service (reserved)
            - own (default): Service runs in its own process
            - share: Service shares a process with one or more other services

        start_type (str):
            Specifies the service start type. Valid options are as follows:

            - boot: Device driver that is loaded by the boot loader
            - system: Device driver that is started during kernel
              initialization
            - auto: Service that automatically starts
            - manual: Service must be started manually
            - disabled: Service cannot be started

        start_delayed (bool):
            Set the service to Auto(Delayed Start). Only valid if the
            start_type is set to ``Auto``. If service_type is not passed, but
            the service is already set to ``Auto``, then the flag will be set.

        error_control (str):
            The severity of the error, and action taken, if this service
            fails to start. Valid options are as follows:

            - normal: Error is logged and a message box is displayed
            - severe: Error is logged and computer attempts a restart with
              the last known good configuration
            - critical: Error is logged, computer attempts to restart with
              the last known good configuration, system halts on failure
            - ignore: Error is logged and startup continues, no notification
              is given to the user

        load_order_group (str):
            The name of the load order group to which this service belongs

        dependencies (list):
            A list of services or load ordering groups that must start before
            this service

        account_name (str):
            The name of the account under which the service should run. For
            ``own`` type services this should be in the ``domain\\username``
            format. The following are examples of valid built-in service
            accounts:

            - NT Authority\\LocalService
            - NT Authority\\NetworkService
            - NT Authority\\LocalSystem
            - .\LocalSystem

        account_password (str):
            The password for the account name specified in ``account_name``.
            For the above built-in accounts, this can be None. Otherwise a
            password must be specified.

        run_interactive (bool):
            If this setting is True, the service will be allowed to interact
            with the user. Not recommended for services that run with
            elevated privileges.

    Returns:
        dict: a dictionary of changes made

    CLI Example:

    .. code-block:: bash

        salt '*' service.modify spooler start_type=disabled
    '''
    # pylint: enable=anomalous-backslash-in-string
    # https://msdn.microsoft.com/en-us/library/windows/desktop/ms681987(v=vs.85).aspx
    # https://msdn.microsoft.com/en-us/library/windows/desktop/ms681988(v-vs.85).aspx
    handle_scm = win32service.OpenSCManager(
        None, None, win32service.SC_MANAGER_CONNECT)
    try:
        handle_svc = win32service.OpenService(
            handle_scm,
            name,
            win32service.SERVICE_CHANGE_CONFIG |
            win32service.SERVICE_QUERY_CONFIG)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed To Open {0}: {1}'.format(name, exc.strerror))
    # Current configuration; used for SERVICE_NO_CHANGE defaults below
    config_info = win32service.QueryServiceConfig(handle_svc)
    changes = dict()
    # Input Validation
    if bin_path is not None:
        # shlex.quote the path to the binary
        bin_path = _cmd_quote(bin_path)
        if exe_args is not None:
            bin_path = '{0} {1}'.format(bin_path, exe_args)
        changes['BinaryPath'] = bin_path
    if service_type is not None:
        if service_type.lower() in SERVICE_TYPE:
            service_type = SERVICE_TYPE[service_type.lower()]
            if run_interactive:
                service_type = service_type | \
                               win32service.SERVICE_INTERACTIVE_PROCESS
        else:
            raise CommandExecutionError(
                'Invalid Service Type: {0}'.format(service_type))
    else:
        # No explicit type requested: only toggle the interactive bit on the
        # existing type, or leave the type untouched entirely
        if run_interactive is True:
            service_type = config_info[0] | \
                           win32service.SERVICE_INTERACTIVE_PROCESS
        elif run_interactive is False:
            service_type = config_info[0] ^ \
                           win32service.SERVICE_INTERACTIVE_PROCESS
        else:
            service_type = win32service.SERVICE_NO_CHANGE
    # NOTE(review): identity comparison works here because service_type is
    # assigned the exact SERVICE_NO_CHANGE object in the branch above
    if service_type is not win32service.SERVICE_NO_CHANGE:
        # Record the new type as readable flag names for the changes dict
        flags = list()
        for bit in SERVICE_TYPE:
            if isinstance(bit, int) and service_type & bit:
                flags.append(SERVICE_TYPE[bit])
        changes['ServiceType'] = flags if flags else service_type
    if start_type is not None:
        if start_type.lower() in SERVICE_START_TYPE:
            start_type = SERVICE_START_TYPE[start_type.lower()]
        else:
            raise CommandExecutionError(
                'Invalid Start Type: {0}'.format(start_type))
        # Reverse lookup turns the numeric code back into its display string
        changes['StartType'] = SERVICE_START_TYPE[start_type]
    else:
        start_type = win32service.SERVICE_NO_CHANGE
    if error_control is not None:
        if error_control.lower() in SERVICE_ERROR_CONTROL:
            error_control = SERVICE_ERROR_CONTROL[error_control.lower()]
        else:
            raise CommandExecutionError(
                'Invalid Error Control: {0}'.format(error_control))
        # Reverse lookup turns the numeric code back into its display string
        changes['ErrorControl'] = SERVICE_ERROR_CONTROL[error_control]
    else:
        error_control = win32service.SERVICE_NO_CHANGE
    if account_name is not None:
        changes['ServiceAccount'] = account_name
    # Built-in accounts take an empty password
    if account_name in ['LocalSystem', 'LocalService', 'NetworkService']:
        account_password = ''
    if account_password is not None:
        # Never expose the real password in the returned changes
        changes['ServiceAccountPassword'] = 'XXX-REDACTED-XXX'
    if load_order_group is not None:
        changes['LoadOrderGroup'] = load_order_group
    if dependencies is not None:
        changes['Dependencies'] = dependencies
    if display_name is not None:
        changes['DisplayName'] = display_name
    win32service.ChangeServiceConfig(handle_svc,
                                     service_type,
                                     start_type,
                                     error_control,
                                     bin_path,
                                     load_order_group,
                                     0,
                                     dependencies,
                                     account_name,
                                     account_password,
                                     display_name)
    if description is not None:
        win32service.ChangeServiceConfig2(
            handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION, description)
        changes['Description'] = description
    if start_delayed is not None:
        # You can only set delayed start for services that are set to auto start
        # Start type 2 is Auto
        # Start type -1 is no change
        if (start_type == -1 and config_info[1] == 2) or start_type == 2:
            win32service.ChangeServiceConfig2(
                handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO,
                start_delayed)
            changes['StartTypeDelayed'] = start_delayed
        else:
            changes['Warning'] = 'start_delayed: Requires start_type "auto"'
    win32service.CloseServiceHandle(handle_scm)
    win32service.CloseServiceHandle(handle_svc)
    return changes
def enable(name, start_type='auto', start_delayed=False, **kwargs):
    '''
    Enable the named service to start at boot
    Args:
        name (str): The name of the service to enable.
        start_type (str): Specifies the service start type. Valid options are as
            follows:
            - boot: Device driver that is loaded by the boot loader
            - system: Device driver that is started during kernel initialization
            - auto: Service that automatically starts
            - manual: Service must be started manually
            - disabled: Service cannot be started
        start_delayed (bool): Set the service to Auto(Delayed Start). Only valid
            if the start_type is set to ``Auto``. If service_type is not passed,
            but the service is already set to ``Auto``, then the flag will be
            set.
    Returns:
        bool: ``True`` if successful, ``False`` otherwise
    CLI Example:
    .. code-block:: bash
        salt '*' service.enable <service name>
    '''
    modify(name, start_type=start_type, start_delayed=start_delayed)
    current = info(name)
    type_matches = current['StartType'].lower() == start_type.lower()
    if start_type.lower() != 'auto':
        return type_matches
    # Delayed start only applies to automatic services, so it is only
    # verified when start_type is 'auto'
    return type_matches and current['StartTypeDelayed'] == start_delayed
def disable(name, **kwargs):
    '''
    Disable the named service from starting at boot
    Args:
        name (str): The name of the service to disable
    Returns:
        bool: ``True`` if disabled, ``False`` otherwise
    CLI Example:
    .. code-block:: bash
        salt '*' service.disable <service name>
    '''
    modify(name, start_type='Disabled')
    # Re-read the config to confirm the change actually took effect
    svc_info = info(name)
    return svc_info['StartType'] == 'Disabled'
def enabled(name, **kwargs):
    '''
    Check to see if the named service is enabled to start on boot
    Args:
        name (str): The name of the service to check
    Returns:
        bool: True if the service is set to start
    CLI Example:
    .. code-block:: bash
        salt '*' service.enabled <service name>
    '''
    start_type = info(name)['StartType']
    return start_type == 'Auto'
def disabled(name):
    '''
    Check to see if the named service is disabled to start on boot
    Args:
        name (str): The name of the service to check
    Returns:
        bool: True if the service is disabled
    CLI Example:
    .. code-block:: bash
        salt '*' service.disabled <service name>
    '''
    if enabled(name):
        return False
    return True
def delete(name, timeout=90):
    '''
    Delete the named service
    Args:
        name (str): The name of the service to delete
        timeout (int):
            The time in seconds to wait for the service to be deleted before
            returning. This is necessary because a service must be stopped
            before it can be deleted. Default is 90 seconds
        .. versionadded:: 2017.7.9,2018.3.4
    Returns:
        bool: ``True`` if successful, otherwise ``False``. Also returns ``True``
            if the service is not present
    CLI Example:
    .. code-block:: bash
        salt '*' service.delete <service name>
    '''
    handle_scm = win32service.OpenSCManager(
        None, None, win32service.SC_MANAGER_CONNECT)
    try:
        handle_svc = win32service.OpenService(
            handle_scm, name, win32service.SERVICE_ALL_ACCESS)
    except pywintypes.error as exc:
        # Close the SCM handle before deciding how to report the failure
        win32service.CloseServiceHandle(handle_scm)
        # winerror 1060: the service does not exist — treated as success
        # since the desired end state (service absent) already holds
        if exc.winerror != 1060:
            raise CommandExecutionError(
                'Failed to open {0}. {1}'.format(name, exc.strerror))
        log.debug('Service "%s" is not present', name)
        return True
    try:
        win32service.DeleteService(handle_svc)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed to delete {0}. {1}'.format(name, exc.strerror))
    finally:
        # Handles are closed on both success and failure paths.
        # NOTE(review): Windows only completes the deletion once all open
        # handles to the service are closed — confirm before reordering.
        log.debug('Cleaning up')
        win32service.CloseServiceHandle(handle_scm)
        win32service.CloseServiceHandle(handle_svc)
    # Poll until the service disappears from the service list or the
    # timeout expires (deletion is asynchronous)
    end_time = time.time() + int(timeout)
    while name in get_all() and time.time() < end_time:
        time.sleep(1)
    return name not in get_all()
|
saltstack/salt
|
salt/modules/win_service.py
|
delete
|
python
|
def delete(name, timeout=90):
'''
Delete the named service
Args:
name (str): The name of the service to delete
timeout (int):
The time in seconds to wait for the service to be deleted before
returning. This is necessary because a service must be stopped
before it can be deleted. Default is 90 seconds
.. versionadded:: 2017.7.9,2018.3.4
Returns:
bool: ``True`` if successful, otherwise ``False``. Also returns ``True``
if the service is not present
CLI Example:
.. code-block:: bash
salt '*' service.delete <service name>
'''
handle_scm = win32service.OpenSCManager(
None, None, win32service.SC_MANAGER_CONNECT)
try:
handle_svc = win32service.OpenService(
handle_scm, name, win32service.SERVICE_ALL_ACCESS)
except pywintypes.error as exc:
win32service.CloseServiceHandle(handle_scm)
if exc.winerror != 1060:
raise CommandExecutionError(
'Failed to open {0}. {1}'.format(name, exc.strerror))
log.debug('Service "%s" is not present', name)
return True
try:
win32service.DeleteService(handle_svc)
except pywintypes.error as exc:
raise CommandExecutionError(
'Failed to delete {0}. {1}'.format(name, exc.strerror))
finally:
log.debug('Cleaning up')
win32service.CloseServiceHandle(handle_scm)
win32service.CloseServiceHandle(handle_svc)
end_time = time.time() + int(timeout)
while name in get_all() and time.time() < end_time:
time.sleep(1)
return name not in get_all()
|
Delete the named service
Args:
name (str): The name of the service to delete
timeout (int):
The time in seconds to wait for the service to be deleted before
returning. This is necessary because a service must be stopped
before it can be deleted. Default is 90 seconds
.. versionadded:: 2017.7.9,2018.3.4
Returns:
bool: ``True`` if successful, otherwise ``False``. Also returns ``True``
if the service is not present
CLI Example:
.. code-block:: bash
salt '*' service.delete <service name>
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_service.py#L1394-L1447
|
[
"def get_all():\n '''\n Return all installed services\n\n Returns:\n list: Returns a list of all services on the system.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.get_all\n '''\n services = _get_services()\n\n ret = set()\n for service in services:\n ret.add(service['ServiceName'])\n\n return sorted(ret)\n"
] |
# -*- coding: utf-8 -*-
'''
Windows Service module.
.. versionchanged:: 2016.11.0 - Rewritten to use PyWin32
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import fnmatch
import logging
import re
import time
# Import Salt libs
import salt.utils.platform
from salt.exceptions import CommandExecutionError
# Import 3rd party libs
try:
import win32security
import win32service
import win32serviceutil
import pywintypes
HAS_WIN32_MODS = True
except ImportError:
HAS_WIN32_MODS = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'service'
# Bidirectional map between service-type bit flags and their display names.
# Integer keys translate flags read from QueryServiceConfig into readable
# strings; lowercase string keys translate user input back into flag values.
SERVICE_TYPE = {1: 'Kernel Driver',
                2: 'File System Driver',
                4: 'Adapter Driver',
                8: 'Recognizer Driver',
                16: 'Win32 Own Process',
                32: 'Win32 Share Process',
                256: 'Interactive',
                'kernel': 1,
                'filesystem': 2,
                'adapter': 4,
                'recognizer': 8,
                'own': 16,
                'share': 32}
# Bit flags reported in a service status 'ControlsAccepted' field, mapped to
# readable names (used by info())
SERVICE_CONTROLS = {1: 'Stop',
                    2: 'Pause/Continue',
                    4: 'Shutdown',
                    8: 'Change Parameters',
                    16: 'Netbind Change',
                    32: 'Hardware Profile Change',
                    64: 'Power Event',
                    128: 'Session Change',
                    256: 'Pre-Shutdown',
                    512: 'Time Change',
                    1024: 'Trigger Event'}
# Service 'CurrentState' codes mapped to readable names
SERVICE_STATE = {1: 'Stopped',
                 2: 'Start Pending',
                 3: 'Stop Pending',
                 4: 'Running',
                 5: 'Continue Pending',
                 6: 'Pause Pending',
                 7: 'Paused'}
# Win32ExitCode values that get a friendly name; anything else is passed
# through numerically by info()
SERVICE_ERRORS = {0: 'No Error',
                  1066: 'Service Specific Error'}
# Bidirectional map for the service start type: lowercase strings for user
# input, integers for values read from QueryServiceConfig
SERVICE_START_TYPE = {'boot': 0,
                      'system': 1,
                      'auto': 2,
                      'manual': 3,
                      'disabled': 4,
                      0: 'Boot',
                      1: 'System',
                      2: 'Auto',
                      3: 'Manual',
                      4: 'Disabled'}
# Bidirectional map for the service error-control setting
SERVICE_ERROR_CONTROL = {0: 'Ignore',
                         1: 'Normal',
                         2: 'Severe',
                         3: 'Critical',
                         'ignore': 0,
                         'normal': 1,
                         'severe': 2,
                         'critical': 3}
def __virtual__():
    '''
    Load only on Windows systems where the PyWin32 modules imported cleanly
    '''
    if salt.utils.platform.is_windows():
        if HAS_WIN32_MODS:
            return __virtualname__
        return False, 'Module win_service: failed to load win32 modules'
    return False, 'Module win_service: module only works on Windows.'
class ServiceDependencies(object):
    '''
    Helper class which provides functionality to get all dependencies and
    parents of a Windows service.
    The dependency graph is snapshotted once at construction time by querying
    every service's ``Dependencies`` field; later queries operate on that
    snapshot. Service name matching is case-insensitive throughout.
    Args:
        name (str): The name of the service. This is not the display name.
            Use ``get_service_name`` to find the service name.
        all_services (callback): The name of the method which
            provides a list of all available service names as done by
            the ``win_service.get_all()`` method.
        service_info (callback): The name of the method which
            allows to pass the service name and returns a dict with meets
            the requirements ``{service_name: {'Dependencies': []}}`` as
            done by the ``win_service.info(name)`` method
    '''
    def __init__(self, name, all_services, service_info):
        # Sort for predictable behavior
        self._all_services = sorted(all_services())
        # Raises ValueError (via _normalize_name) if the name is unknown
        self._name = self._normalize_name(self._all_services, name)
        self._service_info = self._populate_service_info(self._all_services, service_info)
    def _populate_service_info(self, all_services, service_info):
        # Build {service_name: [normalized direct dependencies]} for every
        # service on the system
        ret = {}
        for name in all_services:
            dependencies = service_info(name).get('Dependencies', [])
            # Sort for predictable behavior
            ret[name] = sorted(self._normalize_multiple_name(all_services, *dependencies))
            log.trace("Added dependencies of %s: %s", name, ret[name])
        return ret
    def _dependencies(self, name):
        # Direct (first-level) dependencies of ``name``, normalized and sorted
        dependencies = self._service_info.get(name, [])
        # Sort for predictable behavior
        ret = sorted(self._normalize_multiple_name(self._all_services, *dependencies))
        log.trace("Added dependencies of %s: %s", name, ret)
        return ret
    def _dependencies_recursion(self, name):
        # Transitive dependencies, ordered so that deeper dependencies come
        # before the services that depend on them (a start order)
        # Using a list here to maintain order
        ret = list()
        try:
            dependencies = self._dependencies(name)
            for dependency in dependencies:
                indirect_dependencies = self._dependencies_recursion(dependency)
                for indirect_dependency in indirect_dependencies:
                    if indirect_dependency not in ret:
                        ret.append(indirect_dependency)
            for dependency in dependencies:
                if dependency not in ret:
                    ret.append(dependency)
        except Exception as e:
            # NOTE(review): any failure silently resets the result to an
            # empty list (only logged at debug) — confirm this best-effort
            # behavior is intended
            log.debug(e)
            ret = list()
        return ret
    def _normalize_name(self, references, difference):
        # Resolve a single name (case-insensitively) against ``references``;
        # raises ValueError if there is no match
        # Normalize Input
        normalized = self._normalize_multiple_name(references, difference)
        if not normalized:
            raise ValueError("The provided name '{}' does not exist".format(difference))
        return normalized[0]
    def _normalize_multiple_name(self, references, *differences):
        # Map each given name onto its canonical spelling in ``references``
        # (case-insensitive); unknown names are silently dropped and
        # duplicates are removed while preserving order
        # Normalize Input
        ret = list()
        for difference in differences:
            difference_str = str(difference)
            for reference in references:
                reference_str = str(reference)
                if reference_str.lower() == difference_str.lower() and reference_str not in ret:
                    ret.append(reference_str)
                    break
        return ret
    def dependencies(self, with_indirect=False):
        # Public accessor: direct dependencies, or the full transitive set
        # when ``with_indirect`` is truthy
        normalized = self._normalize_name(self._all_services, self._name)
        if bool(with_indirect):
            ret = self._dependencies_recursion(normalized)
        else:
            ret = self._dependencies(normalized)
        log.trace("Dependencies of '%s': '%s'", normalized, ret)
        return ret
    def _parents(self, name):
        # Services whose dependency list contains ``name`` (direct parents)
        # Using a list here to maintain order
        ret = list()
        try:
            # Sort for predictable behavior
            for service, dependencies in sorted(self._service_info.items()):
                if name in dependencies:
                    # Re-append so the most recently seen occurrence wins its
                    # position at the end of the list
                    if service in ret:
                        ret.remove(service)
                    ret.append(service)
        except Exception as e:
            # NOTE(review): failures silently yield an empty parent list —
            # confirm this best-effort behavior is intended
            log.debug(e)
            ret = list()
        return ret
    def _parents_recursion(self, name):
        # Transitive parents, ordered so that services closest to ``name``
        # come first and the outermost parents last
        # Using a list here to maintain order
        ret = list()
        try:
            parents = self._parents(name)
            for parent in parents:
                if parent not in ret:
                    ret.append(parent)
            for parent in parents:
                indirect_parents = self._parents_recursion(parent)
                for indirect_parent in indirect_parents:
                    # Move already-seen entries to the end so ordering
                    # reflects the deepest discovery
                    if indirect_parent in ret:
                        ret.remove(indirect_parent)
                    ret.append(indirect_parent)
        except Exception as e:
            log.debug(e)
            ret = list()
        return ret
    def parents(self, with_indirect=False):
        # Public accessor: direct parents, or the full transitive set when
        # ``with_indirect`` is truthy
        normalized = self._normalize_name(self._all_services, self._name)
        if bool(with_indirect):
            ret = self._parents_recursion(normalized)
        else:
            ret = self._parents(normalized)
        log.trace("Parents of '%s': '%s'", normalized, ret)
        return ret
    def start_order(self, with_deps=False, with_parents=False):
        # Order in which services must be started: dependencies first, then
        # the service itself, then its parents
        ret = []
        if with_deps:
            ret.extend(self.dependencies(with_indirect=True))
        normalized = self._normalize_name(self._all_services, self._name)
        ret.append(normalized)
        if with_parents:
            ret.extend(self.parents(with_indirect=True))
        return ret
    def stop_order(self, with_deps=False, with_parents=False):
        # Stopping is simply the reverse of the start order
        order = self.start_order(with_deps=with_deps, with_parents=with_parents)
        order.reverse()
        return order
def _status_wait(service_name, end_time, service_states):
    '''
    Poll a service until its status leaves ``service_states`` or ``end_time``
    passes. Used by the service stop and start functions.
    .. versionadded:: 2017.7.9,2018.3.4
    Args:
        service_name (str):
            The name of the service
        end_time (float):
            A future time. e.g. time.time() + 10
        service_states (list):
            Services statuses to wait for as returned by info()
    Returns:
        dict: A dictionary containing information about the service.
    :codeauthor: Damon Atkins <https://github.com/damon-atkins>
    '''
    info_results = info(service_name)
    while info_results['Status'] in service_states and time.time() < end_time:
        # Sleep according to the wait hint reported by the service (ms),
        # clamped between 1 and 10 seconds per Microsoft's guidance:
        # https://docs.microsoft.com/en-us/windows/desktop/services/starting-a-service
        # https://docs.microsoft.com/en-us/windows/desktop/services/stopping-a-service
        hint_ms = info_results['Status_WaitHint']
        sleep_for = hint_ms / 1000 if hint_ms else 0
        sleep_for = min(max(sleep_for, 1), 10)
        time.sleep(sleep_for)
        info_results = info(service_name)
    return info_results
def _cmd_quote(cmd):
r'''
Helper function to properly format the path to the binary for the service
Must be wrapped in double quotes to account for paths that have spaces. For
example:
``"C:\Program Files\Path\to\bin.exe"``
Args:
cmd (str): Full path to the binary
Returns:
str: Properly quoted path to the binary
'''
# Remove all single and double quotes from the beginning and the end
pattern = re.compile('^(\\"|\').*|.*(\\"|\')$')
while pattern.match(cmd) is not None:
cmd = cmd.strip('"').strip('\'')
# Ensure the path to the binary is wrapped in double quotes to account for
# spaces in the path
cmd = '"{0}"'.format(cmd)
return cmd
def get_enabled():
    '''
    Return a list of enabled services. Enabled is defined as a service that is
    marked to Auto Start.
    Returns:
        list: A list of enabled services
    CLI Example:
    .. code-block:: bash
        salt '*' service.get_enabled
    '''
    enabled_services = {
        svc['ServiceName'] for svc in _get_services()
        if info(svc['ServiceName'])['StartType'] in ['Auto']}
    return sorted(enabled_services)
def get_disabled():
    '''
    Return a list of disabled services. Disabled is defined as a service that is
    marked 'Disabled' or 'Manual'.
    Returns:
        list: A list of disabled services.
    CLI Example:
    .. code-block:: bash
        salt '*' service.get_disabled
    '''
    disabled_services = {
        svc['ServiceName'] for svc in _get_services()
        if info(svc['ServiceName'])['StartType'] in ['Manual', 'Disabled']}
    return sorted(disabled_services)
def available(name):
    '''
    Check if a service is available on the system.
    Args:
        name (str): The name of the service to check
    Returns:
        bool: ``True`` if the service is available, ``False`` otherwise
    CLI Example:
    .. code-block:: bash
        salt '*' service.available <service name>
    '''
    # Comparison is case-insensitive
    target = name.lower()
    return any(service.lower() == target for service in get_all())
def missing(name):
    '''
    The inverse of service.available.
    Args:
        name (str): The name of the service to check
    Returns:
        bool: ``True`` if the service is missing, ``False`` otherwise
    CLI Example:
    .. code-block:: bash
        salt '*' service.missing <service name>
    '''
    # Delegate to ``available`` so the check is case-insensitive. The
    # previous direct membership test (``name not in get_all()``) was
    # case-sensitive, so the two functions could disagree for a name that
    # differed only in case from the installed service.
    return not available(name)
def _get_services():
    '''
    Returns a list of all services on the system.
    Each entry is a dict containing at least 'ServiceName' and 'DisplayName'
    keys (as consumed by get_all/get_service_name).
    '''
    # SC_MANAGER_ENUMERATE_SERVICE is the minimal access right needed to
    # list services
    handle_scm = win32service.OpenSCManager(
        None, None, win32service.SC_MANAGER_ENUMERATE_SERVICE)
    try:
        services = win32service.EnumServicesStatusEx(handle_scm)
    except AttributeError:
        # Fall back when EnumServicesStatusEx is not exposed by the
        # installed pywin32 build (presumably an older release — the
        # AttributeError guard handles it either way)
        services = win32service.EnumServicesStatus(handle_scm)
    finally:
        # Always release the SCM handle, even if enumeration fails
        win32service.CloseServiceHandle(handle_scm)
    return services
def get_all():
    '''
    Return all installed services
    Returns:
        list: Returns a list of all services on the system.
    CLI Example:
    .. code-block:: bash
        salt '*' service.get_all
    '''
    # De-duplicate via a set comprehension, then return in sorted order
    return sorted({service['ServiceName'] for service in _get_services()})
def get_service_name(*args):
    '''
    The Display Name is what is displayed in Windows when services.msc is
    executed. Each Display Name has an associated Service Name which is the
    actual name of the service. This function allows you to discover the
    Service Name by returning a dictionary of Display Names and Service Names,
    or filter by adding arguments of Display Names.
    If no args are passed, return a dict of all services where the keys are the
    service Display Names and the values are the Service Names.
    If arguments are passed, create a dict of Display Names and Service Names
    Returns:
        dict: A dictionary of display names and service names
    CLI Examples:
    .. code-block:: bash
        salt '*' service.get_service_name
        salt '*' service.get_service_name 'Google Update Service (gupdate)' 'DHCP Client'
    '''
    services = dict()
    for raw_service in _get_services():
        display_name = raw_service['DisplayName']
        service_name = raw_service['ServiceName']
        # With no filter args every service is included; otherwise match on
        # display name, service name, or lowercased service name
        if not args or \
                display_name in args or \
                service_name in args or \
                service_name.lower() in args:
            services[display_name] = service_name
    return services
def info(name):
    '''
    Get information about a service on the system
    Args:
        name (str): The name of the service. This is not the display name. Use
            ``get_service_name`` to find the service name.
    Returns:
        dict: A dictionary containing information about the service.
    CLI Example:
    .. code-block:: bash
        salt '*' service.info spooler
    '''
    try:
        handle_scm = win32service.OpenSCManager(
            None, None, win32service.SC_MANAGER_CONNECT)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed to connect to the SCM: {0}'.format(exc.strerror))
    try:
        # Open with only the query/interrogate rights needed, so this works
        # without full service access
        handle_svc = win32service.OpenService(
            handle_scm, name,
            win32service.SERVICE_ENUMERATE_DEPENDENTS |
            win32service.SERVICE_INTERROGATE |
            win32service.SERVICE_QUERY_CONFIG |
            win32service.SERVICE_QUERY_STATUS)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed To Open {0}: {1}'.format(name, exc.strerror))
    try:
        config_info = win32service.QueryServiceConfig(handle_svc)
        status_info = win32service.QueryServiceStatusEx(handle_svc)
        try:
            description = win32service.QueryServiceConfig2(
                handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION)
        except pywintypes.error:
            # Best effort: some services do not expose a description
            description = 'Failed to get description'
        delayed_start = win32service.QueryServiceConfig2(
            handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO)
    finally:
        # Always release both handles regardless of query failures
        win32service.CloseServiceHandle(handle_scm)
        win32service.CloseServiceHandle(handle_svc)
    ret = dict()
    try:
        # Per-service virtual account SID (NT Service\<name>)
        sid = win32security.LookupAccountName(
            '', 'NT Service\\{0}'.format(name))[0]
        ret['sid'] = win32security.ConvertSidToStringSid(sid)
    except pywintypes.error:
        ret['sid'] = 'Failed to get SID'
    # QueryServiceConfig returns a tuple; indices below follow the
    # QUERY_SERVICE_CONFIG structure layout
    ret['BinaryPath'] = config_info[3]
    ret['LoadOrderGroup'] = config_info[4]
    ret['TagID'] = config_info[5]
    ret['Dependencies'] = config_info[6]
    ret['ServiceAccount'] = config_info[7]
    ret['DisplayName'] = config_info[8]
    ret['Description'] = description
    ret['Status_ServiceCode'] = status_info['ServiceSpecificExitCode']
    ret['Status_CheckPoint'] = status_info['CheckPoint']
    ret['Status_WaitHint'] = status_info['WaitHint']
    ret['StartTypeDelayed'] = delayed_start
    # Decode the service-type bitmask into readable flag names; fall back to
    # the raw value if no known bit matched
    flags = list()
    for bit in SERVICE_TYPE:
        if isinstance(bit, int):
            if config_info[0] & bit:
                flags.append(SERVICE_TYPE[bit])
    ret['ServiceType'] = flags if flags else config_info[0]
    # Same treatment for the accepted-controls bitmask
    flags = list()
    for bit in SERVICE_CONTROLS:
        if status_info['ControlsAccepted'] & bit:
            flags.append(SERVICE_CONTROLS[bit])
    ret['ControlsAccepted'] = flags if flags else status_info['ControlsAccepted']
    # For scalar codes, translate to a friendly name when known, otherwise
    # pass the raw numeric value through
    try:
        ret['Status_ExitCode'] = SERVICE_ERRORS[status_info['Win32ExitCode']]
    except KeyError:
        ret['Status_ExitCode'] = status_info['Win32ExitCode']
    try:
        ret['StartType'] = SERVICE_START_TYPE[config_info[1]]
    except KeyError:
        ret['StartType'] = config_info[1]
    try:
        ret['ErrorControl'] = SERVICE_ERROR_CONTROL[config_info[2]]
    except KeyError:
        ret['ErrorControl'] = config_info[2]
    try:
        ret['Status'] = SERVICE_STATE[status_info['CurrentState']]
    except KeyError:
        ret['Status'] = status_info['CurrentState']
    return ret
def start(name, timeout=90, with_deps=False, with_parents=False):
    '''
    Start the specified service.
    .. warning::
        You cannot start a disabled service in Windows. If the service is
        disabled, it will be changed to ``Manual`` start.
    Args:
        name (str): The name of the service to start
        timeout (int):
            The time in seconds to wait for the service to start before
            returning. Default is 90 seconds
            .. versionadded:: 2017.7.9,2018.3.4
        with_deps (bool):
            If enabled start the given service and the services the current
            service depends on.
        with_parents (bool):
            If enabled and in case other running services depend on the to be start
            service, this flag indicates that those other services will be started
            as well.
    Returns:
        bool: ``True`` if successful, otherwise ``False``. Also returns ``True``
            if the service is already started
    CLI Example:
    .. code-block:: bash
        salt '*' service.start <service name>
    '''
    # Set the service to manual if disabled
    if disabled(name):
        modify(name, start_type='Manual')
    ret = set()
    # Using a list here to maintain order
    services = ServiceDependencies(name, get_all, info)
    start = services.start_order(with_deps=with_deps, with_parents=with_parents)
    log.debug("Starting services %s", start)
    # NOTE: the loop variable deliberately rebinds ``name`` to each service
    # in the computed start order
    for name in start:
        try:
            win32serviceutil.StartService(name)
        except pywintypes.error as exc:
            # winerror 1056: the service is already running — not an error
            if exc.winerror != 1056:
                raise CommandExecutionError(
                    'Failed To Start {0}: {1}'.format(name, exc.strerror))
            log.debug('Service "%s" is running', name)
        # Wait until the service leaves a transitional/stopped state or the
        # timeout expires, then record whether it ended up Running
        srv_status = _status_wait(service_name=name,
                                  end_time=time.time() + int(timeout),
                                  service_states=['Start Pending', 'Stopped'])
        ret.add(srv_status['Status'] == 'Running')
    # True only if every service in the start order came up
    return False not in ret
def stop(name, timeout=90, with_deps=False, with_parents=False):
    '''
    Stop the specified service
    Args:
        name (str): The name of the service to stop
        timeout (int):
            The time in seconds to wait for the service to stop before
            returning. Default is 90 seconds
            .. versionadded:: 2017.7.9,2018.3.4
        with_deps (bool):
            If enabled stop the given service and the services
            the current service depends on.
        with_parents (bool):
            If enabled and in case other running services depend on the to be stopped
            service, this flag indicates that those other services will be stopped
            as well.
            If disabled, the service stop will fail in case other running services
            depend on the to be stopped service.
    Returns:
        bool: ``True`` if successful, otherwise ``False``. Also returns ``True``
            if the service is already stopped
    CLI Example:
    .. code-block:: bash
        salt '*' service.stop <service name>
    '''
    ret = set()
    # Parents must be stopped before the services they depend on, so use the
    # reversed start order
    services = ServiceDependencies(name, get_all, info)
    stop = services.stop_order(with_deps=with_deps, with_parents=with_parents)
    log.debug("Stopping services %s", stop)
    # NOTE: the loop variable deliberately rebinds ``name`` to each service
    # in the computed stop order
    for name in stop:
        try:
            win32serviceutil.StopService(name)
        except pywintypes.error as exc:
            # winerror 1062: the service is not running — not an error
            if exc.winerror != 1062:
                raise CommandExecutionError(
                    'Failed To Stop {0}: {1}'.format(name, exc.strerror))
            log.debug('Service "%s" is not running', name)
        # Wait until the service leaves a running/transitional state or the
        # timeout expires, then record whether it ended up Stopped
        srv_status = _status_wait(service_name=name,
                                  end_time=time.time() + int(timeout),
                                  service_states=['Running', 'Stop Pending'])
        ret.add(srv_status['Status'] == 'Stopped')
    # True only if every service in the stop order actually stopped
    return False not in ret
def restart(name, timeout=90, with_deps=False, with_parents=False):
    '''
    Restart the named service. This issues a stop command followed by a start.
    Args:
        name: The name of the service to restart.
            .. note::
                If the name passed is ``salt-minion`` a scheduled task is
                created and executed to restart the salt-minion service.
        timeout (int):
            The time in seconds to wait for the service to stop and start before
            returning. Default is 90 seconds
            .. note::
                The timeout is cumulative meaning it is applied to the stop and
                then to the start command. A timeout of 90 could take up to 180
                seconds if the service is long in stopping and starting
            .. versionadded:: 2017.7.9,2018.3.4
        with_deps (bool):
            If enabled restart the given service and the services
            the current service depends on.
        with_parents (bool):
            If enabled and in case other running services depend on the to be
            restarted service, this flag indicates that those other services
            will be restarted as well.
            If disabled, the service restart will fail in case other running
            services depend on the to be restarted service.
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' service.restart <service name>
    '''
    # Restarting the minion itself would kill this process, so hand the
    # restart off to a scheduled task instead
    if 'salt-minion' in name:
        create_win_salt_restart_task()
        return execute_salt_restart_task()
    stopped = stop(name=name, timeout=timeout, with_deps=with_deps, with_parents=with_parents)
    started = start(name=name, timeout=timeout, with_deps=with_deps, with_parents=with_parents)
    return stopped and started
def create_win_salt_restart_task():
    '''
    Create a task in Windows task scheduler to enable restarting the salt-minion
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' service.create_win_salt_restart_task()
    '''
    # The ping gives the minion a few seconds to finish the current job
    # before the service is bounced
    task_kwargs = {
        'name': 'restart-salt-minion',
        'user_name': 'System',
        'force': True,
        'action_type': 'Execute',
        'cmd': 'cmd',
        'arguments': '/c ping -n 3 127.0.0.1 && net stop salt-minion '
                     '&& net start salt-minion',
        'trigger_type': 'Once',
        'start_date': '1975-01-01',
        'start_time': '01:00',
    }
    return __salt__['task.create_task'](**task_kwargs)
def execute_salt_restart_task():
    '''
    Run the Windows Salt restart task
    Returns:
        bool: ``True`` if successful, otherwise ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' service.execute_salt_restart_task()
    '''
    task_runner = __salt__['task.run']
    return task_runner(name='restart-salt-minion')
def status(name, *args, **kwargs):
    '''
    Return the status for a service.
    If the name contains globbing, a dict mapping service name to True/False
    values is returned.
    .. versionchanged:: 2018.3.0
        The service name can now be a glob (e.g. ``salt*``)
    Args:
        name (str): The name of the service to check
    Returns:
        bool: True if running, False otherwise
        dict: Maps service name to True if running, False otherwise
    CLI Example:
    .. code-block:: bash
        salt '*' service.status <service name>
    '''
    all_services = get_all()
    is_glob = re.search(r'\*|\?|\[.+\]', name) is not None
    # A glob expands to every matching service; otherwise check just the one
    targets = fnmatch.filter(all_services, name) if is_glob else [name]
    results = {
        service: info(service)['Status'] in ['Running', 'Stop Pending']
        for service in targets}
    # Globs return the whole mapping; a plain name returns its single bool
    return results if is_glob else results[name]
def getsid(name):
    '''
    Return the SID for this windows service
    Args:
        name (str): The name of the service for which to return the SID
    Returns:
        str: A string representing the SID for the service
    CLI Example:
    .. code-block:: bash
        salt '*' service.getsid <service name>
    '''
    service_info = info(name)
    return service_info['sid']
def modify(name,
           bin_path=None,
           exe_args=None,
           display_name=None,
           description=None,
           service_type=None,
           start_type=None,
           start_delayed=None,
           error_control=None,
           load_order_group=None,
           dependencies=None,
           account_name=None,
           account_password=None,
           run_interactive=None):
    # pylint: disable=anomalous-backslash-in-string
    '''
    Modify a service's parameters. Changes will not be made for parameters that
    are not passed.
    .. versionadded:: 2016.11.0
    Args:
        name (str):
            The name of the service. Can be found using the
            ``service.get_service_name`` function
        bin_path (str):
            The path to the service executable. Backslashes must be escaped, eg:
            ``C:\\path\\to\\binary.exe``
        exe_args (str):
            Any arguments required by the service executable
        display_name (str):
            The name to display in the service manager
        description (str):
            The description to display for the service
        service_type (str):
            Specifies the service type. Default is ``own``. Valid options are as
            follows:
            - kernel: Driver service
            - filesystem: File system driver service
            - adapter: Adapter driver service (reserved)
            - recognizer: Recognizer driver service (reserved)
            - own (default): Service runs in its own process
            - share: Service shares a process with one or more other services
        start_type (str):
            Specifies the service start type. Valid options are as follows:
            - boot: Device driver that is loaded by the boot loader
            - system: Device driver that is started during kernel initialization
            - auto: Service that automatically starts
            - manual: Service must be started manually
            - disabled: Service cannot be started
        start_delayed (bool):
            Set the service to Auto(Delayed Start). Only valid if the start_type
            is set to ``Auto``. If service_type is not passed, but the service
            is already set to ``Auto``, then the flag will be set.
        error_control (str):
            The severity of the error, and action taken, if this service fails
            to start. Valid options are as follows:
            - normal: Error is logged and a message box is displayed
            - severe: Error is logged and computer attempts a restart with the
              last known good configuration
            - critical: Error is logged, computer attempts to restart with the
              last known good configuration, system halts on failure
            - ignore: Error is logged and startup continues, no notification is
              given to the user
        load_order_group (str):
            The name of the load order group to which this service belongs
        dependencies (list):
            A list of services or load ordering groups that must start before
            this service
        account_name (str):
            The name of the account under which the service should run. For
            ``own`` type services this should be in the ``domain\\username``
            format. The following are examples of valid built-in service
            accounts:
            - NT Authority\\LocalService
            - NT Authority\\NetworkService
            - NT Authority\\LocalSystem
            - .\LocalSystem
        account_password (str):
            The password for the account name specified in ``account_name``. For
            the above built-in accounts, this can be None. Otherwise a password
            must be specified.
        run_interactive (bool):
            If this setting is True, the service will be allowed to interact
            with the user. Not recommended for services that run with elevated
            privileges.
    Returns:
        dict: a dictionary of changes made
    CLI Example:
    .. code-block:: bash
        salt '*' service.modify spooler start_type=disabled
    '''
    # pylint: enable=anomalous-backslash-in-string
    # https://msdn.microsoft.com/en-us/library/windows/desktop/ms681987(v=vs.85).aspx
    # https://msdn.microsoft.com/en-us/library/windows/desktop/ms681988(v-vs.85).aspx
    handle_scm = win32service.OpenSCManager(
        None, None, win32service.SC_MANAGER_CONNECT)
    try:
        handle_svc = win32service.OpenService(
            handle_scm,
            name,
            win32service.SERVICE_CHANGE_CONFIG |
            win32service.SERVICE_QUERY_CONFIG)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed To Open {0}: {1}'.format(name, exc.strerror))
    # Current configuration; used as the baseline for toggling the
    # interactive flag and for the delayed-start check below
    config_info = win32service.QueryServiceConfig(handle_svc)
    changes = dict()
    # Input Validation
    if bin_path is not None:
        # shlex.quote the path to the binary
        bin_path = _cmd_quote(bin_path)
        if exe_args is not None:
            bin_path = '{0} {1}'.format(bin_path, exe_args)
        changes['BinaryPath'] = bin_path
    if service_type is not None:
        if service_type.lower() in SERVICE_TYPE:
            # Translate the string to its bit-flag value
            service_type = SERVICE_TYPE[service_type.lower()]
            if run_interactive:
                service_type = service_type | \
                    win32service.SERVICE_INTERACTIVE_PROCESS
        else:
            raise CommandExecutionError(
                'Invalid Service Type: {0}'.format(service_type))
    else:
        if run_interactive is True:
            service_type = config_info[0] | \
                win32service.SERVICE_INTERACTIVE_PROCESS
        elif run_interactive is False:
            # NOTE(review): XOR toggles rather than clears the flag — if the
            # interactive bit were not already set this would set it.
            # Confirm whether ``& ~SERVICE_INTERACTIVE_PROCESS`` was intended.
            service_type = config_info[0] ^ \
                win32service.SERVICE_INTERACTIVE_PROCESS
        else:
            service_type = win32service.SERVICE_NO_CHANGE
    if service_type is not win32service.SERVICE_NO_CHANGE:
        # Report the new type as readable flag names where possible
        flags = list()
        for bit in SERVICE_TYPE:
            if isinstance(bit, int) and service_type & bit:
                flags.append(SERVICE_TYPE[bit])
        changes['ServiceType'] = flags if flags else service_type
    if start_type is not None:
        if start_type.lower() in SERVICE_START_TYPE:
            # Translate the string to its numeric code
            start_type = SERVICE_START_TYPE[start_type.lower()]
        else:
            raise CommandExecutionError(
                'Invalid Start Type: {0}'.format(start_type))
        # The map is bidirectional, so indexing with the numeric code yields
        # the display name for the changes dict
        changes['StartType'] = SERVICE_START_TYPE[start_type]
    else:
        start_type = win32service.SERVICE_NO_CHANGE
    if error_control is not None:
        if error_control.lower() in SERVICE_ERROR_CONTROL:
            error_control = SERVICE_ERROR_CONTROL[error_control.lower()]
        else:
            raise CommandExecutionError(
                'Invalid Error Control: {0}'.format(error_control))
        # Bidirectional map: numeric code back to display name
        changes['ErrorControl'] = SERVICE_ERROR_CONTROL[error_control]
    else:
        error_control = win32service.SERVICE_NO_CHANGE
    if account_name is not None:
        changes['ServiceAccount'] = account_name
        # Built-in accounts take an empty password
        if account_name in ['LocalSystem', 'LocalService', 'NetworkService']:
            account_password = ''
    if account_password is not None:
        # Never expose the real password in the returned changes
        changes['ServiceAccountPassword'] = 'XXX-REDACTED-XXX'
    if load_order_group is not None:
        changes['LoadOrderGroup'] = load_order_group
    if dependencies is not None:
        changes['Dependencies'] = dependencies
    if display_name is not None:
        changes['DisplayName'] = display_name
    # None/SERVICE_NO_CHANGE arguments leave the respective setting untouched
    win32service.ChangeServiceConfig(handle_svc,
                                     service_type,
                                     start_type,
                                     error_control,
                                     bin_path,
                                     load_order_group,
                                     0,
                                     dependencies,
                                     account_name,
                                     account_password,
                                     display_name)
    if description is not None:
        win32service.ChangeServiceConfig2(
            handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION, description)
        changes['Description'] = description
    if start_delayed is not None:
        # You can only set delayed start for services that are set to auto start
        # Start type 2 is Auto
        # Start type -1 is no change
        # NOTE(review): the Win32 SERVICE_NO_CHANGE constant is 0xffffffff;
        # this comparison assumes pywin32 exposes it as -1 — verify against
        # the installed pywin32's win32service.SERVICE_NO_CHANGE value.
        if (start_type == -1 and config_info[1] == 2) or start_type == 2:
            win32service.ChangeServiceConfig2(
                handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO,
                start_delayed)
            changes['StartTypeDelayed'] = start_delayed
        else:
            changes['Warning'] = 'start_delayed: Requires start_type "auto"'
    win32service.CloseServiceHandle(handle_scm)
    win32service.CloseServiceHandle(handle_svc)
    return changes
def enable(name, start_type='auto', start_delayed=False, **kwargs):
    '''
    Enable the named service to start at boot

    Args:
        name (str): The name of the service to enable.

        start_type (str): Specifies the service start type. Valid options are
            as follows:

            - boot: Device driver that is loaded by the boot loader
            - system: Device driver that is started during kernel initialization
            - auto: Service that automatically starts
            - manual: Service must be started manually
            - disabled: Service cannot be started

        start_delayed (bool): Set the service to Auto(Delayed Start). Only
            valid if the start_type is set to ``Auto``. If service_type is not
            passed, but the service is already set to ``Auto``, then the flag
            will be set.

    Returns:
        bool: ``True`` if successful, ``False`` otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable <service name>
    '''
    modify(name, start_type=start_type, start_delayed=start_delayed)
    # Re-read the service configuration to verify the change took effect.
    current = info(name)
    type_matches = current['StartType'].lower() == start_type.lower()
    if start_type.lower() != 'auto':
        return type_matches
    # For Auto start the delayed-start flag has to match as well.
    return type_matches and current['StartTypeDelayed'] == start_delayed
def disable(name, **kwargs):
    '''
    Disable the named service to start at boot

    Args:
        name (str): The name of the service to disable

    Returns:
        bool: ``True`` if disabled, ``False`` otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.disable <service name>
    '''
    modify(name, start_type='Disabled')
    # Confirm the start type actually changed before reporting success.
    current_start_type = info(name)['StartType']
    return current_start_type == 'Disabled'
def enabled(name, **kwargs):
    '''
    Check to see if the named service is enabled to start on boot

    Args:
        name (str): The name of the service to check

    Returns:
        bool: True if the service is set to start

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled <service name>
    '''
    # Only an 'Auto' start type counts as enabled-at-boot.
    start_type = info(name)['StartType']
    return start_type == 'Auto'
def disabled(name):
    '''
    Check to see if the named service is disabled to start on boot

    Args:
        name (str): The name of the service to check

    Returns:
        bool: True if the service is disabled

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled <service name>
    '''
    # NOTE(review): this is the strict complement of ``enabled`` (which tests
    # StartType == 'Auto'), so services with a 'Manual' start type also report
    # as "disabled" here -- confirm that is the intended semantics before
    # relying on it to mean StartType == 'Disabled'.
    return not enabled(name)
def create(name,
           bin_path,
           exe_args=None,
           display_name=None,
           description=None,
           service_type='own',
           start_type='manual',
           start_delayed=False,
           error_control='normal',
           load_order_group=None,
           dependencies=None,
           account_name='.\\LocalSystem',
           account_password=None,
           run_interactive=False,
           **kwargs):
    '''
    Create the named service.

    .. versionadded:: 2015.8.0

    Args:

        name (str):
            Specifies the service name. This is not the display_name

        bin_path (str):
            Specifies the path to the service binary file. Backslashes must be
            escaped, eg: ``C:\\path\\to\\binary.exe``

        exe_args (str):
            Any additional arguments required by the service binary.

        display_name (str):
            The name to be displayed in the service manager. If not passed, the
            ``name`` will be used

        description (str):
            A description of the service

        service_type (str):
            Specifies the service type. Default is ``own``. Valid options are as
            follows:

            - kernel: Driver service
            - filesystem: File system driver service
            - adapter: Adapter driver service (reserved)
            - recognizer: Recognizer driver service (reserved)
            - own (default): Service runs in its own process
            - share: Service shares a process with one or more other services

        start_type (str):
            Specifies the service start type. Valid options are as follows:

            - boot: Device driver that is loaded by the boot loader
            - system: Device driver that is started during kernel initialization
            - auto: Service that automatically starts
            - manual (default): Service must be started manually
            - disabled: Service cannot be started

        start_delayed (bool):
            Set the service to Auto(Delayed Start). Only valid if the start_type
            is set to ``Auto``. If service_type is not passed, but the service
            is already set to ``Auto``, then the flag will be set. Default is
            ``False``

        error_control (str):
            The severity of the error, and action taken, if this service fails
            to start. Valid options are as follows:

            - normal (normal): Error is logged and a message box is displayed
            - severe: Error is logged and computer attempts a restart with the
              last known good configuration
            - critical: Error is logged, computer attempts to restart with the
              last known good configuration, system halts on failure
            - ignore: Error is logged and startup continues, no notification is
              given to the user

        load_order_group (str):
            The name of the load order group to which this service belongs

        dependencies (list):
            A list of services or load ordering groups that must start before
            this service

        account_name (str):
            The name of the account under which the service should run. For
            ``own`` type services this should be in the ``domain\\username``
            format. The following are examples of valid built-in service
            accounts:

            - NT Authority\\LocalService
            - NT Authority\\NetworkService
            - NT Authority\\LocalSystem
            - .\\LocalSystem

        account_password (str):
            The password for the account name specified in ``account_name``. For
            the above built-in accounts, this can be None. Otherwise a password
            must be specified.

        run_interactive (bool):
            If this setting is True, the service will be allowed to interact
            with the user. Not recommended for services that run with elevated
            privileges.

    Returns:
        dict: A dictionary containing information about the new service

    CLI Example:

    .. code-block:: bash

        salt '*' service.create <service name> <path to exe> display_name='<display name>'
    '''
    if display_name is None:
        display_name = name
    # Test if the service already exists
    if name in get_all():
        raise CommandExecutionError('Service Already Exists: {0}'.format(name))
    # shlex.quote the path to the binary
    bin_path = _cmd_quote(bin_path)
    if exe_args is not None:
        bin_path = '{0} {1}'.format(bin_path, exe_args)
    # Translate the human-readable option strings to the win32service constants
    if service_type.lower() in SERVICE_TYPE:
        service_type = SERVICE_TYPE[service_type.lower()]
        if run_interactive:
            # The interactive flag is OR'd into the service type bitmask
            service_type = service_type | \
                           win32service.SERVICE_INTERACTIVE_PROCESS
    else:
        raise CommandExecutionError(
            'Invalid Service Type: {0}'.format(service_type))
    if start_type.lower() in SERVICE_START_TYPE:
        start_type = SERVICE_START_TYPE[start_type.lower()]
    else:
        raise CommandExecutionError(
            'Invalid Start Type: {0}'.format(start_type))
    if error_control.lower() in SERVICE_ERROR_CONTROL:
        error_control = SERVICE_ERROR_CONTROL[error_control.lower()]
    else:
        raise CommandExecutionError(
            'Invalid Error Control: {0}'.format(error_control))
    if start_delayed:
        # Delayed start is only meaningful for the Auto (2) start type
        if start_type != 2:
            raise CommandExecutionError(
                'Invalid Parameter: start_delayed requires start_type "auto"')
    if account_name in ['LocalSystem', '.\\LocalSystem',
                        'LocalService', '.\\LocalService',
                        'NetworkService', '.\\NetworkService']:
        # Built-in service accounts take an empty password
        account_password = ''
    # Connect to Service Control Manager
    handle_scm = win32service.OpenSCManager(
        None, None, win32service.SC_MANAGER_ALL_ACCESS)
    # Create the service
    handle_svc = win32service.CreateService(handle_scm,
                                            name,
                                            display_name,
                                            win32service.SERVICE_ALL_ACCESS,
                                            service_type,
                                            start_type,
                                            error_control,
                                            bin_path,
                                            load_order_group,
                                            0,
                                            dependencies,
                                            account_name,
                                            account_password)
    # Description and delayed-start cannot be set at creation time; they
    # require follow-up ChangeServiceConfig2 calls.
    if description is not None:
        win32service.ChangeServiceConfig2(
            handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION, description)
    if start_delayed is not None:
        # You can only set delayed start for services that are set to auto start
        # Start type 2 is Auto
        if start_type == 2:
            win32service.ChangeServiceConfig2(
                handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO,
                start_delayed)
    win32service.CloseServiceHandle(handle_scm)
    win32service.CloseServiceHandle(handle_svc)
    return info(name)
|
saltstack/salt
|
salt/cloud/clouds/cloudstack.py
|
get_conn
|
python
|
def get_conn():
    '''
    Return a conn object for the passed VM data

    Builds a libcloud CloudStack driver instance from the provider
    configuration (apikey/secretkey/host/path plus optional secure/port).
    '''
    driver = get_driver(Provider.CLOUDSTACK)
    verify_ssl_cert = config.get_cloud_config_value('verify_ssl_cert',
                                                    get_configured_provider(),
                                                    __opts__,
                                                    default=True,
                                                    search_global=False)
    if verify_ssl_cert is False:
        # Disabling verification is global to the libcloud.security module,
        # not per-connection.
        try:
            import libcloud.security
            libcloud.security.VERIFY_SSL_CERT = False
        except (ImportError, AttributeError):
            raise SaltCloudSystemExit(
                'Could not disable SSL certificate verification. '
                'Not loading module.'
            )
    return driver(
        key=config.get_cloud_config_value(
            'apikey', get_configured_provider(), __opts__, search_global=False
        ),
        secret=config.get_cloud_config_value(
            'secretkey', get_configured_provider(), __opts__,
            search_global=False
        ),
        secure=config.get_cloud_config_value(
            'secure', get_configured_provider(), __opts__,
            default=True, search_global=False
        ),
        host=config.get_cloud_config_value(
            'host', get_configured_provider(), __opts__, search_global=False
        ),
        path=config.get_cloud_config_value(
            'path', get_configured_provider(), __opts__, search_global=False
        ),
        port=config.get_cloud_config_value(
            'port', get_configured_provider(), __opts__,
            default=None, search_global=False
        )
    )
|
Return a conn object for the passed VM data
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/cloudstack.py#L112-L156
|
[
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n",
"def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n ('apikey', 'secretkey', 'host', 'path')\n )\n"
] |
# -*- coding: utf-8 -*-
'''
CloudStack Cloud Module
=======================
The CloudStack cloud module is used to control access to a CloudStack based
Public Cloud.
:depends: libcloud >= 0.15
Use of this module requires the ``apikey``, ``secretkey``, ``host`` and
``path`` parameters.
.. code-block:: yaml
my-cloudstack-cloud-config:
apikey: <your api key >
secretkey: <your secret key >
host: localhost
path: /client/api
driver: cloudstack
'''
# pylint: disable=invalid-name,function-redefined
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import pprint
import logging
# Import salt cloud libs
import salt.config as config
import salt.utils.cloud
import salt.utils.event
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
from salt.exceptions import SaltCloudSystemExit
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.ext import six
# CloudStackNetwork will be needed during creation of a new node
# pylint: disable=import-error
try:
from libcloud.compute.drivers.cloudstack import CloudStackNetwork
# This work-around for Issue #32743 is no longer needed for libcloud >=
# 1.4.0. However, older versions of libcloud must still be supported with
# this work-around. This work-around can be removed when the required
# minimum version of libcloud is 2.0.0 (See PR #40837 - which is
# implemented in Salt 2018.3.0).
if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'):
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
# Get logging started
log = logging.getLogger(__name__)
# Redirect CloudStack functions to this module namespace. These generic
# implementations come in via the wildcard import of salt.cloud.libcloudfuncs
# and are re-bound here with this module's globals() -- presumably so they
# resolve names like __opts__ and get_conn from this module; see
# salt.utils.functools.namespaced_function.
get_node = namespaced_function(get_node, globals())
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
# The name under which this driver is exposed to salt-cloud.
__virtualname__ = 'cloudstack'
# Only load in this module if the CLOUDSTACK configurations are in place
def __virtual__():
    '''
    Set up the libcloud functions and check for CloudStack configurations.
    '''
    # Load only when a provider is configured and libcloud is importable.
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # The provider must define all four required options to be considered
    # configured.
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'secretkey', 'host', 'path')
    )
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    '''
    # HAS_LIBS is set at import time according to whether libcloud imported.
    return config.check_driver_dependencies(
        __virtualname__,
        {'libcloud': HAS_LIBS}
    )
def get_location(conn, vm_):
    '''
    Return the node location to use
    '''
    # Default to location id 2 (Dallas) when not configured.
    configured = config.get_cloud_config_value('location', vm_, __opts__, default=2)
    wanted = six.text_type(configured)
    # Match against either the location id or its name; implicitly returns
    # None when the configured location is not offered by the provider.
    for candidate in conn.list_locations():
        if wanted in (six.text_type(candidate.id), six.text_type(candidate.name)):
            return candidate
def get_security_groups(conn, vm_):
    '''
    Return the configured security groups, defaulting to ``['default']``;
    return ``False`` when security groups are disabled.
    '''
    # Guard clause: security groups can be switched off entirely.
    if not config.get_cloud_config_value(
            'securitygroup_enabled', vm_, __opts__, default=True):
        return False
    return config.get_cloud_config_value(
        'securitygroup', vm_, __opts__, default=['default']
    )
def get_password(vm_):
    '''
    Return the password to use
    '''
    # Prefer 'password'; fall back to the legacy 'passwd' option. Neither
    # lookup searches the global cloud configuration.
    return config.get_cloud_config_value(
        'password', vm_, __opts__, default=config.get_cloud_config_value(
            'passwd', vm_, __opts__, search_global=False
        ), search_global=False
    )
def get_key():
    '''
    Returns the ssh private key for VM access
    '''
    # Read 'private_key' from the provider configuration only (no global
    # search).
    return config.get_cloud_config_value(
        'private_key', get_configured_provider(), __opts__, search_global=False
    )
def get_keypair(vm_):
    '''
    Return the keypair to use, or ``False`` when none is configured.
    '''
    # Any falsy configured value (None, empty string) means "no keypair".
    return config.get_cloud_config_value('keypair', vm_, __opts__) or False
def get_ip(data):
    '''
    Return the IP address of the VM

    If the VM has a public IP as reported by libcloud, use the first one.
    Otherwise fall back to the first private IP.

    Args:
        data: A libcloud node-like object with ``public_ips`` and
            ``private_ips`` list attributes.

    Returns:
        str: The chosen IP address.
    '''
    # The original code indexed public_ips inside a bare ``except Exception``,
    # which also swallowed unrelated errors (e.g. typo'd attributes). Test the
    # cheap condition explicitly instead: an empty/None/missing public_ips
    # falls through to the private address.
    public_ips = getattr(data, 'public_ips', None)
    if public_ips:
        return public_ips[0]
    return data.private_ips[0]
def get_networkid(vm_):
    '''
    Return the networkid to use, only valid for Advanced Zone
    '''
    # ``None`` means the option is absent; any configured value is passed
    # through unchanged.
    configured = config.get_cloud_config_value('networkid', vm_, __opts__)
    return False if configured is None else configured
def get_project(conn, vm_):
    '''
    Return the project to use.

    Returns ``False`` when projects are unsupported, no projectid is
    configured, or the configured project cannot be found.
    '''
    try:
        projects = conn.ex_list_projects()
    except AttributeError:
        # with versions <0.15 of libcloud this is causing an AttributeError.
        log.warning('Cannot get projects, you may need to update libcloud to 0.15 or later')
        return False
    projid = config.get_cloud_config_value('projectid', vm_, __opts__)
    if not projid:
        return False
    # Match on either the project id or its name.
    for project in projects:
        if six.text_type(projid) in (six.text_type(project.id), six.text_type(project.name)):
            return project
    log.warning("Couldn't find project %s in projects", projid)
    return False
def create(vm_):
    '''
    Create a single VM from a data dict

    Provisions the node (and any requested data volumes) via libcloud,
    fires the salt-cloud lifecycle events, then bootstraps the minion.
    Returns the bootstrap result dict, or ``False`` on failure.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'cloudstack',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', vm_['name'])
    conn = get_conn()
    # pylint: disable=not-callable
    kwargs = {
        'name': vm_['name'],
        'image': get_image(conn, vm_),
        'size': get_size(conn, vm_),
        'location': get_location(conn, vm_),
    }
    # pylint: enable=not-callable
    # Optional settings are only added to kwargs when actually configured.
    sg = get_security_groups(conn, vm_)
    if sg is not False:
        kwargs['ex_security_groups'] = sg
    if get_keypair(vm_) is not False:
        kwargs['ex_keyname'] = get_keypair(vm_)
    if get_networkid(vm_) is not False:
        kwargs['networkids'] = get_networkid(vm_)
        kwargs['networks'] = (  # The only attr that is used is 'id'.
            CloudStackNetwork(None, None, None,
                              kwargs['networkids'],
                              None, None),
        )
    if get_project(conn, vm_) is not False:
        kwargs['project'] = get_project(conn, vm_)
    # The event payload uses the human-readable names, not the objects.
    event_data = kwargs.copy()
    event_data['image'] = kwargs['image'].name
    event_data['size'] = kwargs['size'].name
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args={
            'kwargs': __utils__['cloud.filter_event'](
                'requesting',
                event_data,
                ['name', 'profile', 'provider', 'driver', 'image', 'size'],
            ),
        },
        transport=__opts__['transport']
    )
    displayname = cloudstack_displayname(vm_)
    if displayname:
        kwargs['ex_displayname'] = displayname
    else:
        kwargs['ex_displayname'] = kwargs['name']
    # Create any configured data volumes before the node so they can be
    # attached right after creation.
    volumes = {}
    ex_blockdevicemappings = block_device_mappings(vm_)
    if ex_blockdevicemappings:
        for ex_blockdevicemapping in ex_blockdevicemappings:
            if 'VirtualName' not in ex_blockdevicemapping:
                ex_blockdevicemapping['VirtualName'] = '{0}-{1}'.format(vm_['name'], len(volumes))
            __utils__['cloud.fire_event'](
                'event',
                'requesting volume',
                'salt/cloud/{0}/requesting'.format(ex_blockdevicemapping['VirtualName']),
                sock_dir=__opts__['sock_dir'],
                args={'kwargs': {'name': ex_blockdevicemapping['VirtualName'],
                                 'device': ex_blockdevicemapping['DeviceName'],
                                 'size': ex_blockdevicemapping['VolumeSize']}},
            )
            try:
                volumes[ex_blockdevicemapping['DeviceName']] = conn.create_volume(
                    ex_blockdevicemapping['VolumeSize'],
                    ex_blockdevicemapping['VirtualName']
                )
            except Exception as exc:
                log.error(
                    'Error creating volume %s on CLOUDSTACK\n\n'
                    'The following exception was thrown by libcloud when trying to '
                    'requesting a volume: \n%s',
                    ex_blockdevicemapping['VirtualName'], exc,
                    # Show the traceback if the debug logging level is enabled
                    exc_info_on_loglevel=logging.DEBUG
                )
                return False
    else:
        ex_blockdevicemapping = {}
    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating %s on CLOUDSTACK\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return False
    for device_name in six.iterkeys(volumes):
        try:
            conn.attach_volume(data, volumes[device_name], device_name)
        except Exception as exc:
            log.error(
                'Error attaching volume %s on CLOUDSTACK\n\n'
                'The following exception was thrown by libcloud when trying to '
                'attach a volume: \n%s',
                ex_blockdevicemapping.get('VirtualName', 'UNKNOWN'), exc,
                # Show the traceback if the debug logging level is enabled
                exc_info=log.isEnabledFor(logging.DEBUG)
            )
            return False
    # NOTE(review): ssh_username is computed but never used below -- verify.
    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root'
    )
    vm_['ssh_host'] = get_ip(data)
    vm_['password'] = data.extra['password']
    vm_['key_filename'] = get_key()
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data.__dict__)
    # Strip the password before logging the node details below.
    if 'password' in data.extra:
        del data.extra['password']
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data.__dict__)
    )
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        transport=__opts__['transport']
    )
    return ret
def destroy(name, conn=None, call=None):
    '''
    Delete a single VM, and all of its volumes

    Detaches and destroys each DATADISK volume (other volume types are
    skipped), destroys the node, and optionally removes the stored ssh key.
    Returns ``True`` on success, ``False`` when a volume or the node fails
    to be destroyed.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'name': name},
    )
    if not conn:
        conn = get_conn()   # pylint: disable=E0602
    node = get_node(conn, name)
    if node is None:
        log.error('Unable to find the VM %s', name)
    volumes = conn.list_volumes(node)
    if volumes is None:
        log.error('Unable to find volumes of the VM %s', name)
    # TODO add an option like 'delete_sshkeys' below
    for volume in volumes:
        # Only data disks are detached/destroyed; ROOT and other volume
        # types are left for the node destroy below.
        if volume.extra['volume_type'] != 'DATADISK':
            log.info(
                'Ignoring volume type %s: %s',
                volume.extra['volume_type'], volume.name
            )
            continue
        log.info('Detaching volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'detaching volume',
            'salt/cloud/{0}/detaching'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        if not conn.detach_volume(volume):
            log.error('Failed to Detach volume: %s', volume.name)
            return False
        log.info('Detached volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'detached volume',
            'salt/cloud/{0}/detached'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        log.info('Destroying volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'destroying volume',
            'salt/cloud/{0}/destroying'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        if not conn.destroy_volume(volume):
            log.error('Failed to Destroy volume: %s', volume.name)
            return False
        log.info('Destroyed volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'destroyed volume',
            'salt/cloud/{0}/destroyed'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
    log.info('Destroying VM: %s', name)
    ret = conn.destroy_node(node)
    if not ret:
        log.error('Failed to Destroy VM: %s', name)
        return False
    log.info('Destroyed VM: %s', name)
    # Fire destroy action
    # NOTE(review): 'event' appears unused -- verify whether constructing the
    # SaltEvent has a required side effect before removing it.
    event = salt.utils.event.SaltEvent('master', __opts__['sock_dir'])
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'name': name},
    )
    if __opts__['delete_sshkeys'] is True:
        salt.utils.cloud.remove_sshkey(node.public_ips[0])
    return True
def block_device_mappings(vm_):
    '''
    Return the block device mapping:

    ::

        [{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
         {'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}]
    '''
    # Falls back to the global cloud configuration when not set on the VM.
    return config.get_cloud_config_value(
        'block_device_mappings', vm_, __opts__, search_global=True
    )
def cloudstack_displayname(vm_):
    '''
    Return display name of VM:

    ::

        "minion1"
    '''
    # Falls back to the global cloud configuration when not set on the VM.
    return config.get_cloud_config_value(
        'cloudstack_displayname', vm_, __opts__, search_global=True
    )
|
saltstack/salt
|
salt/cloud/clouds/cloudstack.py
|
get_location
|
python
|
def get_location(conn, vm_):
    '''
    Return the node location to use
    '''
    locations = conn.list_locations()
    # Default to Dallas if not otherwise set
    loc = config.get_cloud_config_value('location', vm_, __opts__, default=2)
    # Match against either the location id or its name; implicitly returns
    # None when the configured location is not offered by the provider.
    for location in locations:
        if six.text_type(loc) in (six.text_type(location.id), six.text_type(location.name)):
            return location
|
Return the node location to use
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/cloudstack.py#L159-L168
|
[
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n"
] |
# -*- coding: utf-8 -*-
'''
CloudStack Cloud Module
=======================
The CloudStack cloud module is used to control access to a CloudStack based
Public Cloud.
:depends: libcloud >= 0.15
Use of this module requires the ``apikey``, ``secretkey``, ``host`` and
``path`` parameters.
.. code-block:: yaml
my-cloudstack-cloud-config:
apikey: <your api key >
secretkey: <your secret key >
host: localhost
path: /client/api
driver: cloudstack
'''
# pylint: disable=invalid-name,function-redefined
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import pprint
import logging
# Import salt cloud libs
import salt.config as config
import salt.utils.cloud
import salt.utils.event
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
from salt.exceptions import SaltCloudSystemExit
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.ext import six
# CloudStackNetwork will be needed during creation of a new node
# pylint: disable=import-error
try:
from libcloud.compute.drivers.cloudstack import CloudStackNetwork
# This work-around for Issue #32743 is no longer needed for libcloud >=
# 1.4.0. However, older versions of libcloud must still be supported with
# this work-around. This work-around can be removed when the required
# minimum version of libcloud is 2.0.0 (See PR #40837 - which is
# implemented in Salt 2018.3.0).
if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'):
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
# Get logging started
log = logging.getLogger(__name__)
# Redirect CloudStack functions to this module namespace. These generic
# implementations come in via the wildcard import of salt.cloud.libcloudfuncs
# and are re-bound here with this module's globals() -- presumably so they
# resolve names like __opts__ and get_conn from this module; see
# salt.utils.functools.namespaced_function.
get_node = namespaced_function(get_node, globals())
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
# The name under which this driver is exposed to salt-cloud.
__virtualname__ = 'cloudstack'
# Only load in this module if the CLOUDSTACK configurations are in place
def __virtual__():
    '''
    Set up the libcloud functions and check for CloudStack configurations.
    '''
    # Load only when a provider is configured and libcloud is importable.
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # The provider must define all four required options to be considered
    # configured.
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'secretkey', 'host', 'path')
    )
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    '''
    # HAS_LIBS is set at import time according to whether libcloud imported.
    return config.check_driver_dependencies(
        __virtualname__,
        {'libcloud': HAS_LIBS}
    )
def get_conn():
    '''
    Return a conn object for the passed VM data

    Builds a libcloud CloudStack driver instance from the provider
    configuration (apikey/secretkey/host/path plus optional secure/port).
    '''
    driver = get_driver(Provider.CLOUDSTACK)
    verify_ssl_cert = config.get_cloud_config_value('verify_ssl_cert',
                                                    get_configured_provider(),
                                                    __opts__,
                                                    default=True,
                                                    search_global=False)
    if verify_ssl_cert is False:
        # Disabling verification is global to the libcloud.security module,
        # not per-connection.
        try:
            import libcloud.security
            libcloud.security.VERIFY_SSL_CERT = False
        except (ImportError, AttributeError):
            raise SaltCloudSystemExit(
                'Could not disable SSL certificate verification. '
                'Not loading module.'
            )
    return driver(
        key=config.get_cloud_config_value(
            'apikey', get_configured_provider(), __opts__, search_global=False
        ),
        secret=config.get_cloud_config_value(
            'secretkey', get_configured_provider(), __opts__,
            search_global=False
        ),
        secure=config.get_cloud_config_value(
            'secure', get_configured_provider(), __opts__,
            default=True, search_global=False
        ),
        host=config.get_cloud_config_value(
            'host', get_configured_provider(), __opts__, search_global=False
        ),
        path=config.get_cloud_config_value(
            'path', get_configured_provider(), __opts__, search_global=False
        ),
        port=config.get_cloud_config_value(
            'port', get_configured_provider(), __opts__,
            default=None, search_global=False
        )
    )
def get_security_groups(conn, vm_):
    '''
    Return the list of security groups to use, or False when security
    groups are disabled for this profile.
    '''
    if not config.get_cloud_config_value(
            'securitygroup_enabled', vm_, __opts__, default=True):
        return False
    # Fall back to CloudStack's 'default' group when none is configured.
    return config.get_cloud_config_value(
        'securitygroup', vm_, __opts__, default=['default']
    )
def get_password(vm_):
    '''
    Return the password to use for the VM.
    '''
    # The legacy 'passwd' key acts as the fallback when 'password' is unset.
    fallback = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__, default=fallback, search_global=False
    )
def get_key():
    '''
    Returns the ssh private key for VM access
    '''
    # The key is read from the provider configuration, never globally.
    provider = get_configured_provider()
    return config.get_cloud_config_value(
        'private_key', provider, __opts__, search_global=False
    )
def get_keypair(vm_):
    '''
    Return the keypair to use, or False when none is configured.
    '''
    # Normalise any falsy configured value (None, empty string) to False.
    return config.get_cloud_config_value('keypair', vm_, __opts__) or False
def get_ip(data):
    '''
    Return the IP address of the VM.

    Prefer the public IP as reported by libcloud when one exists;
    otherwise fall back to the first private IP.

    Args:
        data: a node object exposing ``public_ips`` and ``private_ips``
            list attributes.
    '''
    try:
        return data.public_ips[0]
    except IndexError:
        # No public address assigned -- use the private one. Only
        # IndexError is caught so genuine bugs (e.g. a node object missing
        # the expected attributes) are no longer silently hidden, as the
        # previous blanket ``except Exception`` did.
        return data.private_ips[0]
def get_networkid(vm_):
    '''
    Return the networkid to use, only valid for Advanced Zone
    '''
    netid = config.get_cloud_config_value('networkid', vm_, __opts__)
    # An explicit None means "not configured"; any other value is passed on.
    return False if netid is None else netid
def get_project(conn, vm_):
    '''
    Return the configured project, or False when unset or not found.
    '''
    try:
        projects = conn.ex_list_projects()
    except AttributeError:
        # libcloud < 0.15 does not provide ex_list_projects().
        log.warning('Cannot get projects, you may need to update libcloud to 0.15 or later')
        return False
    projid = config.get_cloud_config_value('projectid', vm_, __opts__)
    if not projid:
        return False
    wanted = six.text_type(projid)
    # The configured value may be either the project id or its name.
    for candidate in projects:
        if wanted in (six.text_type(candidate.id), six.text_type(candidate.name)):
            return candidate
    log.warning("Couldn't find project %s in projects", projid)
    return False
def create(vm_):
    '''
    Create a single VM from a data dict.

    Fires the salt-cloud lifecycle events (creating/requesting/created),
    provisions any configured block-device volumes, attaches them to the
    new node, and finally bootstraps it with Salt.

    Returns the bootstrap result dict merged with the node data, or
    False on any provisioning failure.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'cloudstack',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', vm_['name'])
    conn = get_conn()
    # pylint: disable=not-callable
    kwargs = {
        'name': vm_['name'],
        'image': get_image(conn, vm_),
        'size': get_size(conn, vm_),
        'location': get_location(conn, vm_),
    }
    # pylint: enable=not-callable
    # Optional extras -- security groups, keypair, network and project are
    # only added to the request when configured.
    sg = get_security_groups(conn, vm_)
    if sg is not False:
        kwargs['ex_security_groups'] = sg
    if get_keypair(vm_) is not False:
        kwargs['ex_keyname'] = get_keypair(vm_)
    if get_networkid(vm_) is not False:
        kwargs['networkids'] = get_networkid(vm_)
        kwargs['networks'] = (  # The only attr that is used is 'id'.
            CloudStackNetwork(None, None, None,
                              kwargs['networkids'],
                              None, None),
        )
    if get_project(conn, vm_) is not False:
        kwargs['project'] = get_project(conn, vm_)
    # The fired event must carry plain data, so replace the libcloud image
    # and size objects with their names.
    event_data = kwargs.copy()
    event_data['image'] = kwargs['image'].name
    event_data['size'] = kwargs['size'].name
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args={
            'kwargs': __utils__['cloud.filter_event'](
                'requesting',
                event_data,
                ['name', 'profile', 'provider', 'driver', 'image', 'size'],
            ),
        },
        transport=__opts__['transport']
    )
    displayname = cloudstack_displayname(vm_)
    if displayname:
        kwargs['ex_displayname'] = displayname
    else:
        kwargs['ex_displayname'] = kwargs['name']
    volumes = {}
    ex_blockdevicemappings = block_device_mappings(vm_)
    if ex_blockdevicemappings:
        # Create every configured data volume up front; they are attached
        # only after the node itself exists.
        for ex_blockdevicemapping in ex_blockdevicemappings:
            if 'VirtualName' not in ex_blockdevicemapping:
                ex_blockdevicemapping['VirtualName'] = '{0}-{1}'.format(vm_['name'], len(volumes))
            __utils__['cloud.fire_event'](
                'event',
                'requesting volume',
                'salt/cloud/{0}/requesting'.format(ex_blockdevicemapping['VirtualName']),
                sock_dir=__opts__['sock_dir'],
                args={'kwargs': {'name': ex_blockdevicemapping['VirtualName'],
                                 'device': ex_blockdevicemapping['DeviceName'],
                                 'size': ex_blockdevicemapping['VolumeSize']}},
            )
            try:
                volumes[ex_blockdevicemapping['DeviceName']] = conn.create_volume(
                    ex_blockdevicemapping['VolumeSize'],
                    ex_blockdevicemapping['VirtualName']
                )
            except Exception as exc:
                log.error(
                    'Error creating volume %s on CLOUDSTACK\n\n'
                    'The following exception was thrown by libcloud when trying to '
                    'requesting a volume: \n%s',
                    ex_blockdevicemapping['VirtualName'], exc,
                    # Show the traceback if the debug logging level is enabled
                    exc_info_on_loglevel=logging.DEBUG
                )
                return False
    else:
        ex_blockdevicemapping = {}
    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating %s on CLOUDSTACK\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return False
    for device_name in six.iterkeys(volumes):
        try:
            conn.attach_volume(data, volumes[device_name], device_name)
        except Exception as exc:
            log.error(
                'Error attaching volume %s on CLOUDSTACK\n\n'
                'The following exception was thrown by libcloud when trying to '
                'attach a volume: \n%s',
                ex_blockdevicemapping.get('VirtualName', 'UNKNOWN'), exc,
                # Show the traceback if the debug logging level is enabled
                exc_info=log.isEnabledFor(logging.DEBUG)
            )
            return False
    # NOTE(review): ssh_username is looked up but never used below --
    # presumably the bootstrap helper reads it from vm_; confirm before
    # removing.
    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root'
    )
    vm_['ssh_host'] = get_ip(data)
    vm_['password'] = data.extra['password']
    vm_['key_filename'] = get_key()
    # Deploy Salt onto the freshly created node.
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data.__dict__)
    # Scrub the clear-text password before the node data is logged below.
    if 'password' in data.extra:
        del data.extra['password']
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data.__dict__)
    )
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        transport=__opts__['transport']
    )
    return ret
def destroy(name, conn=None, call=None):
    '''
    Delete a single VM, and all of its volumes.

    Only DATADISK volumes are explicitly detached and destroyed; other
    volume types are skipped (logged and ignored).

    Args:
        name: the name of the VM to destroy.
        conn: optional existing libcloud connection; created when omitted.
        call: salt-cloud invocation type; must not be 'function'.

    Raises:
        SaltCloudSystemExit: when invoked as --function instead of an
            action/destroy call.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'name': name},
    )
    if not conn:
        conn = get_conn()  # pylint: disable=E0602
    node = get_node(conn, name)
    if node is None:
        log.error('Unable to find the VM %s', name)
    volumes = conn.list_volumes(node)
    if volumes is None:
        log.error('Unable to find volumes of the VM %s', name)
    # TODO add an option like 'delete_sshkeys' below
    for volume in volumes:
        # Skip everything that is not a data disk.
        if volume.extra['volume_type'] != 'DATADISK':
            log.info(
                'Ignoring volume type %s: %s',
                volume.extra['volume_type'], volume.name
            )
            continue
        # Detach first; a volume cannot be destroyed while attached.
        log.info('Detaching volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'detaching volume',
            'salt/cloud/{0}/detaching'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        if not conn.detach_volume(volume):
            log.error('Failed to Detach volume: %s', volume.name)
            return False
        log.info('Detached volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'detached volume',
            'salt/cloud/{0}/detached'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        log.info('Destroying volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'destroying volume',
            'salt/cloud/{0}/destroying'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        if not conn.destroy_volume(volume):
            log.error('Failed to Destroy volume: %s', volume.name)
            return False
        log.info('Destroyed volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'destroyed volume',
            'salt/cloud/{0}/destroyed'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
    log.info('Destroying VM: %s', name)
    ret = conn.destroy_node(node)
    if not ret:
        log.error('Failed to Destroy VM: %s', name)
        return False
    log.info('Destroyed VM: %s', name)
    # Fire destroy action
    # NOTE(review): 'event' is assigned but never used; the fire_event call
    # below is what actually emits -- confirm before removing.
    event = salt.utils.event.SaltEvent('master', __opts__['sock_dir'])
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'name': name},
    )
    if __opts__['delete_sshkeys'] is True:
        salt.utils.cloud.remove_sshkey(node.public_ips[0])
    return True
def block_device_mappings(vm_):
    '''
    Return the block device mapping, e.g.::

        [{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
         {'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}]
    '''
    mappings = config.get_cloud_config_value(
        'block_device_mappings', vm_, __opts__, search_global=True)
    return mappings
def cloudstack_displayname(vm_):
    '''
    Return the configured display name of the VM, e.g. ``"minion1"``.
    '''
    displayname = config.get_cloud_config_value(
        'cloudstack_displayname', vm_, __opts__, search_global=True)
    return displayname
|
saltstack/salt
|
salt/cloud/clouds/cloudstack.py
|
get_security_groups
|
python
|
def get_security_groups(conn, vm_):
'''
Return a list of security groups to use, defaulting to ['default']
'''
securitygroup_enabled = config.get_cloud_config_value(
'securitygroup_enabled', vm_, __opts__, default=True
)
if securitygroup_enabled:
return config.get_cloud_config_value(
'securitygroup', vm_, __opts__, default=['default']
)
else:
return False
|
Return a list of security groups to use, defaulting to ['default']
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/cloudstack.py#L171-L183
|
[
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n"
] |
# -*- coding: utf-8 -*-
'''
CloudStack Cloud Module
=======================
The CloudStack cloud module is used to control access to a CloudStack based
Public Cloud.
:depends: libcloud >= 0.15
Use of this module requires the ``apikey``, ``secretkey``, ``host`` and
``path`` parameters.
.. code-block:: yaml
my-cloudstack-cloud-config:
apikey: <your api key >
secretkey: <your secret key >
host: localhost
path: /client/api
driver: cloudstack
'''
# pylint: disable=invalid-name,function-redefined
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import pprint
import logging
# Import salt cloud libs
import salt.config as config
import salt.utils.cloud
import salt.utils.event
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
from salt.exceptions import SaltCloudSystemExit
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.ext import six
# CloudStackNetwork will be needed during creation of a new node
# pylint: disable=import-error
try:
    # Import the top-level package as well: the version check below reads
    # ``libcloud.__version__``, and ``from libcloud.compute... import``
    # alone does not bind the name ``libcloud`` in this namespace -- that
    # raised a NameError, which the ImportError handler did not catch.
    import libcloud
    from libcloud.compute.drivers.cloudstack import CloudStackNetwork
    # This work-around for Issue #32743 is no longer needed for libcloud >=
    # 1.4.0. However, older versions of libcloud must still be supported with
    # this work-around. This work-around can be removed when the required
    # minimum version of libcloud is 2.0.0 (See PR #40837 - which is
    # implemented in Salt 2018.3.0).
    if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'):
        # See https://github.com/saltstack/salt/issues/32743
        import libcloud.security
        libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
    HAS_LIBS = True
except ImportError:
    HAS_LIBS = False
# Get logging started
log = logging.getLogger(__name__)
# Redirect CloudStack functions to this module namespace.
# namespaced_function() rebinds each generic helper pulled in by the
# ``from salt.cloud.libcloudfuncs import *`` above so it executes against
# THIS module's globals (get_conn, __opts__, ...).
get_node = namespaced_function(get_node, globals())
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
# Name under which salt-cloud loads this driver.
__virtualname__ = 'cloudstack'
# Only load in this module if the CLOUDSTACK configurations are in place
def __virtual__():
    '''
    Set up the libcloud functions and check for CloudStack configurations.
    '''
    # Refuse to load unless both a provider entry and the libcloud
    # dependency are available.
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # Look the provider up under its active alias (falling back to the
    # driver name) and require the four mandatory connection settings.
    provider_name = __active_provider_name__ or __virtualname__
    required_keys = ('apikey', 'secretkey', 'host', 'path')
    return config.is_provider_configured(__opts__, provider_name, required_keys)
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    '''
    # Delegate the actual check (and the user-facing warning) to salt.config.
    deps = {'libcloud': HAS_LIBS}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_conn():
    '''
    Return a libcloud connection object built from the configured
    CloudStack provider settings.

    Raises:
        SaltCloudSystemExit: if disabling SSL certificate verification was
            requested but could not be performed.
    '''
    driver = get_driver(Provider.CLOUDSTACK)
    # Look the provider configuration up once instead of once per setting.
    provider = get_configured_provider()
    verify_ssl_cert = config.get_cloud_config_value('verify_ssl_cert',
                                                    provider,
                                                    __opts__,
                                                    default=True,
                                                    search_global=False)
    if verify_ssl_cert is False:
        try:
            import libcloud.security
            libcloud.security.VERIFY_SSL_CERT = False
        except (ImportError, AttributeError):
            raise SaltCloudSystemExit(
                'Could not disable SSL certificate verification. '
                'Not loading module.'
            )
    return driver(
        key=config.get_cloud_config_value(
            'apikey', provider, __opts__, search_global=False
        ),
        secret=config.get_cloud_config_value(
            'secretkey', provider, __opts__, search_global=False
        ),
        secure=config.get_cloud_config_value(
            'secure', provider, __opts__, default=True, search_global=False
        ),
        host=config.get_cloud_config_value(
            'host', provider, __opts__, search_global=False
        ),
        path=config.get_cloud_config_value(
            'path', provider, __opts__, search_global=False
        ),
        port=config.get_cloud_config_value(
            'port', provider, __opts__, default=None, search_global=False
        )
    )
def get_location(conn, vm_):
    '''
    Return the node location to use.

    NOTE(review): implicitly returns None when the configured location
    matches neither an id nor a name reported by the driver -- confirm
    callers tolerate that.
    '''
    # Location id 2 is used when nothing is configured.
    loc = config.get_cloud_config_value('location', vm_, __opts__, default=2)
    wanted = six.text_type(loc)
    # The configured value may be either the location id or its name.
    for location in conn.list_locations():
        if wanted in (six.text_type(location.id), six.text_type(location.name)):
            return location
def get_password(vm_):
    '''
    Return the password to use for the VM.
    '''
    # The legacy 'passwd' key acts as the fallback when 'password' is unset.
    fallback = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__, default=fallback, search_global=False
    )
def get_key():
    '''
    Returns the ssh private key for VM access
    '''
    # The key is read from the provider configuration, never globally.
    provider = get_configured_provider()
    return config.get_cloud_config_value(
        'private_key', provider, __opts__, search_global=False
    )
def get_keypair(vm_):
    '''
    Return the keypair to use, or False when none is configured.
    '''
    # Normalise any falsy configured value (None, empty string) to False.
    return config.get_cloud_config_value('keypair', vm_, __opts__) or False
def get_ip(data):
    '''
    Return the IP address of the VM.

    Prefer the public IP as reported by libcloud when one exists;
    otherwise fall back to the first private IP.

    Args:
        data: a node object exposing ``public_ips`` and ``private_ips``
            list attributes.
    '''
    try:
        return data.public_ips[0]
    except IndexError:
        # No public address assigned -- use the private one. Only
        # IndexError is caught so genuine bugs (e.g. a node object missing
        # the expected attributes) are no longer silently hidden, as the
        # previous blanket ``except Exception`` did.
        return data.private_ips[0]
def get_networkid(vm_):
    '''
    Return the networkid to use, only valid for Advanced Zone
    '''
    netid = config.get_cloud_config_value('networkid', vm_, __opts__)
    # An explicit None means "not configured"; any other value is passed on.
    return False if netid is None else netid
def get_project(conn, vm_):
    '''
    Return the configured project, or False when unset or not found.
    '''
    try:
        projects = conn.ex_list_projects()
    except AttributeError:
        # libcloud < 0.15 does not provide ex_list_projects().
        log.warning('Cannot get projects, you may need to update libcloud to 0.15 or later')
        return False
    projid = config.get_cloud_config_value('projectid', vm_, __opts__)
    if not projid:
        return False
    wanted = six.text_type(projid)
    # The configured value may be either the project id or its name.
    for candidate in projects:
        if wanted in (six.text_type(candidate.id), six.text_type(candidate.name)):
            return candidate
    log.warning("Couldn't find project %s in projects", projid)
    return False
def create(vm_):
    '''
    Create a single VM from a data dict.

    Fires the salt-cloud lifecycle events (creating/requesting/created),
    provisions any configured block-device volumes, attaches them to the
    new node, and finally bootstraps it with Salt.

    Returns the bootstrap result dict merged with the node data, or
    False on any provisioning failure.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'cloudstack',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', vm_['name'])
    conn = get_conn()
    # pylint: disable=not-callable
    kwargs = {
        'name': vm_['name'],
        'image': get_image(conn, vm_),
        'size': get_size(conn, vm_),
        'location': get_location(conn, vm_),
    }
    # pylint: enable=not-callable
    # Optional extras -- security groups, keypair, network and project are
    # only added to the request when configured.
    sg = get_security_groups(conn, vm_)
    if sg is not False:
        kwargs['ex_security_groups'] = sg
    if get_keypair(vm_) is not False:
        kwargs['ex_keyname'] = get_keypair(vm_)
    if get_networkid(vm_) is not False:
        kwargs['networkids'] = get_networkid(vm_)
        kwargs['networks'] = (  # The only attr that is used is 'id'.
            CloudStackNetwork(None, None, None,
                              kwargs['networkids'],
                              None, None),
        )
    if get_project(conn, vm_) is not False:
        kwargs['project'] = get_project(conn, vm_)
    # The fired event must carry plain data, so replace the libcloud image
    # and size objects with their names.
    event_data = kwargs.copy()
    event_data['image'] = kwargs['image'].name
    event_data['size'] = kwargs['size'].name
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args={
            'kwargs': __utils__['cloud.filter_event'](
                'requesting',
                event_data,
                ['name', 'profile', 'provider', 'driver', 'image', 'size'],
            ),
        },
        transport=__opts__['transport']
    )
    displayname = cloudstack_displayname(vm_)
    if displayname:
        kwargs['ex_displayname'] = displayname
    else:
        kwargs['ex_displayname'] = kwargs['name']
    volumes = {}
    ex_blockdevicemappings = block_device_mappings(vm_)
    if ex_blockdevicemappings:
        # Create every configured data volume up front; they are attached
        # only after the node itself exists.
        for ex_blockdevicemapping in ex_blockdevicemappings:
            if 'VirtualName' not in ex_blockdevicemapping:
                ex_blockdevicemapping['VirtualName'] = '{0}-{1}'.format(vm_['name'], len(volumes))
            __utils__['cloud.fire_event'](
                'event',
                'requesting volume',
                'salt/cloud/{0}/requesting'.format(ex_blockdevicemapping['VirtualName']),
                sock_dir=__opts__['sock_dir'],
                args={'kwargs': {'name': ex_blockdevicemapping['VirtualName'],
                                 'device': ex_blockdevicemapping['DeviceName'],
                                 'size': ex_blockdevicemapping['VolumeSize']}},
            )
            try:
                volumes[ex_blockdevicemapping['DeviceName']] = conn.create_volume(
                    ex_blockdevicemapping['VolumeSize'],
                    ex_blockdevicemapping['VirtualName']
                )
            except Exception as exc:
                log.error(
                    'Error creating volume %s on CLOUDSTACK\n\n'
                    'The following exception was thrown by libcloud when trying to '
                    'requesting a volume: \n%s',
                    ex_blockdevicemapping['VirtualName'], exc,
                    # Show the traceback if the debug logging level is enabled
                    exc_info_on_loglevel=logging.DEBUG
                )
                return False
    else:
        ex_blockdevicemapping = {}
    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating %s on CLOUDSTACK\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return False
    for device_name in six.iterkeys(volumes):
        try:
            conn.attach_volume(data, volumes[device_name], device_name)
        except Exception as exc:
            log.error(
                'Error attaching volume %s on CLOUDSTACK\n\n'
                'The following exception was thrown by libcloud when trying to '
                'attach a volume: \n%s',
                ex_blockdevicemapping.get('VirtualName', 'UNKNOWN'), exc,
                # Show the traceback if the debug logging level is enabled
                exc_info=log.isEnabledFor(logging.DEBUG)
            )
            return False
    # NOTE(review): ssh_username is looked up but never used below --
    # presumably the bootstrap helper reads it from vm_; confirm before
    # removing.
    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root'
    )
    vm_['ssh_host'] = get_ip(data)
    vm_['password'] = data.extra['password']
    vm_['key_filename'] = get_key()
    # Deploy Salt onto the freshly created node.
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data.__dict__)
    # Scrub the clear-text password before the node data is logged below.
    if 'password' in data.extra:
        del data.extra['password']
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data.__dict__)
    )
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        transport=__opts__['transport']
    )
    return ret
def destroy(name, conn=None, call=None):
    '''
    Delete a single VM, and all of its volumes.

    Only DATADISK volumes are explicitly detached and destroyed; other
    volume types are skipped (logged and ignored).

    Args:
        name: the name of the VM to destroy.
        conn: optional existing libcloud connection; created when omitted.
        call: salt-cloud invocation type; must not be 'function'.

    Raises:
        SaltCloudSystemExit: when invoked as --function instead of an
            action/destroy call.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'name': name},
    )
    if not conn:
        conn = get_conn()  # pylint: disable=E0602
    node = get_node(conn, name)
    if node is None:
        log.error('Unable to find the VM %s', name)
    volumes = conn.list_volumes(node)
    if volumes is None:
        log.error('Unable to find volumes of the VM %s', name)
    # TODO add an option like 'delete_sshkeys' below
    for volume in volumes:
        # Skip everything that is not a data disk.
        if volume.extra['volume_type'] != 'DATADISK':
            log.info(
                'Ignoring volume type %s: %s',
                volume.extra['volume_type'], volume.name
            )
            continue
        # Detach first; a volume cannot be destroyed while attached.
        log.info('Detaching volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'detaching volume',
            'salt/cloud/{0}/detaching'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        if not conn.detach_volume(volume):
            log.error('Failed to Detach volume: %s', volume.name)
            return False
        log.info('Detached volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'detached volume',
            'salt/cloud/{0}/detached'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        log.info('Destroying volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'destroying volume',
            'salt/cloud/{0}/destroying'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        if not conn.destroy_volume(volume):
            log.error('Failed to Destroy volume: %s', volume.name)
            return False
        log.info('Destroyed volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'destroyed volume',
            'salt/cloud/{0}/destroyed'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
    log.info('Destroying VM: %s', name)
    ret = conn.destroy_node(node)
    if not ret:
        log.error('Failed to Destroy VM: %s', name)
        return False
    log.info('Destroyed VM: %s', name)
    # Fire destroy action
    # NOTE(review): 'event' is assigned but never used; the fire_event call
    # below is what actually emits -- confirm before removing.
    event = salt.utils.event.SaltEvent('master', __opts__['sock_dir'])
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'name': name},
    )
    if __opts__['delete_sshkeys'] is True:
        salt.utils.cloud.remove_sshkey(node.public_ips[0])
    return True
def block_device_mappings(vm_):
    '''
    Return the block device mapping, e.g.::

        [{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
         {'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}]
    '''
    mappings = config.get_cloud_config_value(
        'block_device_mappings', vm_, __opts__, search_global=True)
    return mappings
def cloudstack_displayname(vm_):
    '''
    Return the configured display name of the VM, e.g. ``"minion1"``.
    '''
    displayname = config.get_cloud_config_value(
        'cloudstack_displayname', vm_, __opts__, search_global=True)
    return displayname
|
saltstack/salt
|
salt/cloud/clouds/cloudstack.py
|
get_keypair
|
python
|
def get_keypair(vm_):
'''
Return the keypair to use
'''
keypair = config.get_cloud_config_value('keypair', vm_, __opts__)
if keypair:
return keypair
else:
return False
|
Return the keypair to use
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/cloudstack.py#L206-L215
|
[
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n"
] |
# -*- coding: utf-8 -*-
'''
CloudStack Cloud Module
=======================
The CloudStack cloud module is used to control access to a CloudStack based
Public Cloud.
:depends: libcloud >= 0.15
Use of this module requires the ``apikey``, ``secretkey``, ``host`` and
``path`` parameters.
.. code-block:: yaml
my-cloudstack-cloud-config:
apikey: <your api key >
secretkey: <your secret key >
host: localhost
path: /client/api
driver: cloudstack
'''
# pylint: disable=invalid-name,function-redefined
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import pprint
import logging
# Import salt cloud libs
import salt.config as config
import salt.utils.cloud
import salt.utils.event
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
from salt.exceptions import SaltCloudSystemExit
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.ext import six
# CloudStackNetwork will be needed during creation of a new node
# pylint: disable=import-error
try:
    # Import the top-level package as well: the version check below reads
    # ``libcloud.__version__``, and ``from libcloud.compute... import``
    # alone does not bind the name ``libcloud`` in this namespace -- that
    # raised a NameError, which the ImportError handler did not catch.
    import libcloud
    from libcloud.compute.drivers.cloudstack import CloudStackNetwork
    # This work-around for Issue #32743 is no longer needed for libcloud >=
    # 1.4.0. However, older versions of libcloud must still be supported with
    # this work-around. This work-around can be removed when the required
    # minimum version of libcloud is 2.0.0 (See PR #40837 - which is
    # implemented in Salt 2018.3.0).
    if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'):
        # See https://github.com/saltstack/salt/issues/32743
        import libcloud.security
        libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
    HAS_LIBS = True
except ImportError:
    HAS_LIBS = False
# Get logging started
log = logging.getLogger(__name__)
# Redirect CloudStack functions to this module namespace
get_node = namespaced_function(get_node, globals())
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
__virtualname__ = 'cloudstack'
# Only load in this module if the CLOUDSTACK configurations are in place
def __virtual__():
    '''
    Load this module only when a CloudStack provider is configured and the
    libcloud dependency check passes.
    '''
    # Short-circuit: the dependency check only runs when a provider exists.
    if get_configured_provider() is not False and get_dependencies() is not False:
        return __virtualname__
    return False
def get_configured_provider():
    '''
    Return the first configured CloudStack provider block, or ``False`` when
    any of the required keys is missing.
    '''
    required_keys = ('apikey', 'secretkey', 'host', 'path')
    provider_name = __active_provider_name__ or __virtualname__
    return config.is_provider_configured(__opts__, provider_name, required_keys)
def get_dependencies():
    '''
    Warn when the libcloud dependency is not importable.
    '''
    deps = {'libcloud': HAS_LIBS}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_conn():
    '''
    Build and return a libcloud CloudStack driver connection from the
    configured provider credentials.
    '''
    driver = get_driver(Provider.CLOUDSTACK)
    provider = get_configured_provider()
    verify_ssl_cert = config.get_cloud_config_value(
        'verify_ssl_cert', provider, __opts__, default=True, search_global=False
    )
    if verify_ssl_cert is False:
        try:
            import libcloud.security
            libcloud.security.VERIFY_SSL_CERT = False
        except (ImportError, AttributeError):
            raise SaltCloudSystemExit(
                'Could not disable SSL certificate verification. '
                'Not loading module.'
            )
    def _setting(name, **extra):
        # Look up *name* in the provider configuration only (no global search).
        return config.get_cloud_config_value(
            name, provider, __opts__, search_global=False, **extra
        )
    return driver(
        key=_setting('apikey'),
        secret=_setting('secretkey'),
        secure=_setting('secure', default=True),
        host=_setting('host'),
        path=_setting('path'),
        port=_setting('port', default=None),
    )
def get_location(conn, vm_):
    '''
    Return the libcloud location matching the configured ``location`` by id
    or name.  Defaults to location id ``2`` (Dallas); returns ``None`` when
    nothing matches.
    '''
    wanted = six.text_type(
        config.get_cloud_config_value('location', vm_, __opts__, default=2)
    )
    for candidate in conn.list_locations():
        if wanted in (six.text_type(candidate.id), six.text_type(candidate.name)):
            return candidate
def get_security_groups(conn, vm_):
    '''
    Return the configured security group list (default ``['default']``), or
    ``False`` when security groups are disabled for this VM.
    '''
    enabled = config.get_cloud_config_value(
        'securitygroup_enabled', vm_, __opts__, default=True
    )
    if not enabled:
        return False
    return config.get_cloud_config_value(
        'securitygroup', vm_, __opts__, default=['default']
    )
def get_password(vm_):
    '''
    Return the password to use, preferring ``password`` over the legacy
    ``passwd`` setting.
    '''
    legacy = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__, default=legacy, search_global=False
    )
def get_key():
    '''
    Return the path of the ssh private key used for VM access.
    '''
    provider = get_configured_provider()
    return config.get_cloud_config_value(
        'private_key', provider, __opts__, search_global=False
    )
def get_ip(data):
    '''
    Return the IP address of the VM.
    If the VM has a public IP as reported by libcloud, use it; otherwise
    fall back to the first private IP.
    Args:
        data: A libcloud node object exposing ``public_ips`` and
            ``private_ips`` lists.
    Returns:
        str: The address to use for bootstrapping the VM.
    '''
    # Test explicitly for a public address instead of the previous bare
    # ``except Exception`` fallback, which also swallowed unrelated errors.
    public_ips = getattr(data, 'public_ips', None)
    if public_ips:
        return public_ips[0]
    return data.private_ips[0]
def get_networkid(vm_):
    '''
    Return the configured network id (only valid for Advanced Zone), or
    ``False`` when the setting is absent.
    '''
    networkid = config.get_cloud_config_value('networkid', vm_, __opts__)
    return False if networkid is None else networkid
def get_project(conn, vm_):
    '''
    Return the configured CloudStack project object, or ``False`` when no
    project is configured, none matches, or the installed libcloud cannot
    list projects.
    '''
    try:
        projects = conn.ex_list_projects()
    except AttributeError:
        # libcloud < 0.15 does not implement ex_list_projects().
        log.warning('Cannot get projects, you may need to update libcloud to 0.15 or later')
        return False
    projid = config.get_cloud_config_value('projectid', vm_, __opts__)
    if not projid:
        return False
    wanted = six.text_type(projid)
    for project in projects:
        if wanted in (six.text_type(project.id), six.text_type(project.name)):
            return project
    log.warning("Couldn't find project %s in projects", projid)
    return False
def create(vm_):
    '''
    Create a single VM from a data dict.
    ``vm_`` is the profile/VM definition dictionary assembled by salt-cloud;
    it must contain at least ``name``.  Returns the bootstrap result dict
    merged with the node attributes, or ``False`` when profile validation,
    volume creation, node creation or volume attachment fails.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'cloudstack',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    # Announce the creation on the salt event bus before doing any work.
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', vm_['name'])
    conn = get_conn()
    # pylint: disable=not-callable
    kwargs = {
        'name': vm_['name'],
        'image': get_image(conn, vm_),
        'size': get_size(conn, vm_),
        'location': get_location(conn, vm_),
    }
    # pylint: enable=not-callable
    # Optional arguments are only added when the matching helper resolves to
    # something other than False.
    sg = get_security_groups(conn, vm_)
    if sg is not False:
        kwargs['ex_security_groups'] = sg
    if get_keypair(vm_) is not False:
        kwargs['ex_keyname'] = get_keypair(vm_)
    if get_networkid(vm_) is not False:
        kwargs['networkids'] = get_networkid(vm_)
        kwargs['networks'] = (  # The only attr that is used is 'id'.
            CloudStackNetwork(None, None, None,
                              kwargs['networkids'],
                              None, None),
        )
    if get_project(conn, vm_) is not False:
        kwargs['project'] = get_project(conn, vm_)
    # Use plain image/size names in the event payload rather than the
    # libcloud objects so it stays serializable.
    event_data = kwargs.copy()
    event_data['image'] = kwargs['image'].name
    event_data['size'] = kwargs['size'].name
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args={
            'kwargs': __utils__['cloud.filter_event'](
                'requesting',
                event_data,
                ['name', 'profile', 'provider', 'driver', 'image', 'size'],
            ),
        },
        transport=__opts__['transport']
    )
    displayname = cloudstack_displayname(vm_)
    if displayname:
        kwargs['ex_displayname'] = displayname
    else:
        kwargs['ex_displayname'] = kwargs['name']
    # Pre-create any configured data volumes so they can be attached right
    # after the node comes up.
    volumes = {}
    ex_blockdevicemappings = block_device_mappings(vm_)
    if ex_blockdevicemappings:
        for ex_blockdevicemapping in ex_blockdevicemappings:
            if 'VirtualName' not in ex_blockdevicemapping:
                ex_blockdevicemapping['VirtualName'] = '{0}-{1}'.format(vm_['name'], len(volumes))
            __utils__['cloud.fire_event'](
                'event',
                'requesting volume',
                'salt/cloud/{0}/requesting'.format(ex_blockdevicemapping['VirtualName']),
                sock_dir=__opts__['sock_dir'],
                args={'kwargs': {'name': ex_blockdevicemapping['VirtualName'],
                                 'device': ex_blockdevicemapping['DeviceName'],
                                 'size': ex_blockdevicemapping['VolumeSize']}},
            )
            try:
                volumes[ex_blockdevicemapping['DeviceName']] = conn.create_volume(
                    ex_blockdevicemapping['VolumeSize'],
                    ex_blockdevicemapping['VirtualName']
                )
            except Exception as exc:
                log.error(
                    'Error creating volume %s on CLOUDSTACK\n\n'
                    'The following exception was thrown by libcloud when trying to '
                    'requesting a volume: \n%s',
                    ex_blockdevicemapping['VirtualName'], exc,
                    # Show the traceback if the debug logging level is enabled
                    exc_info_on_loglevel=logging.DEBUG
                )
                return False
    else:
        ex_blockdevicemapping = {}
    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating %s on CLOUDSTACK\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return False
    for device_name in six.iterkeys(volumes):
        try:
            conn.attach_volume(data, volumes[device_name], device_name)
        except Exception as exc:
            log.error(
                'Error attaching volume %s on CLOUDSTACK\n\n'
                'The following exception was thrown by libcloud when trying to '
                'attach a volume: \n%s',
                ex_blockdevicemapping.get('VirtualName', 'UNKNOWN'), exc,
                # Show the traceback if the debug logging level is enabled
                exc_info=log.isEnabledFor(logging.DEBUG)
            )
            return False
    # NOTE(review): ssh_username is looked up but never used below; the
    # bootstrap helper presumably reads it from vm_/__opts__ itself — confirm
    # before removing.
    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root'
    )
    vm_['ssh_host'] = get_ip(data)
    vm_['password'] = data.extra['password']
    vm_['key_filename'] = get_key()
    # Deploy salt onto the new node and merge the node attributes into the
    # returned info dict.
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data.__dict__)
    # Remove the clear-text password from the node's extra data before the
    # node details are logged below.
    if 'password' in data.extra:
        del data.extra['password']
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data.__dict__)
    )
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        transport=__opts__['transport']
    )
    return ret
def destroy(name, conn=None, call=None):
    '''
    Delete a single VM, and all of its DATADISK volumes.
    name
        The name of the VM to destroy.
    conn
        Optional existing libcloud connection; a new one is created when
        omitted.
    call
        The salt-cloud invocation type; ``'function'`` is rejected.
    Returns ``True`` on success and ``False`` when the node cannot be found
    or any detach/destroy step fails.  Raises SaltCloudSystemExit when
    invoked with ``--function``.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'name': name},
    )
    if not conn:
        conn = get_conn()  # pylint: disable=E0602
    node = get_node(conn, name)
    if node is None:
        # Bail out early: every remaining call needs a valid node object,
        # and passing None into the libcloud API would raise.
        log.error('Unable to find the VM %s', name)
        return False
    volumes = conn.list_volumes(node)
    if volumes is None:
        # Carry on with an empty list so the node itself is still destroyed
        # (previously this crashed when iterating None).
        log.error('Unable to find volumes of the VM %s', name)
        volumes = []
    # TODO add an option like 'delete_sshkeys' below
    for volume in volumes:
        if volume.extra['volume_type'] != 'DATADISK':
            # Non-data disks (e.g. ROOT) are removed with the node itself.
            log.info(
                'Ignoring volume type %s: %s',
                volume.extra['volume_type'], volume.name
            )
            continue
        log.info('Detaching volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'detaching volume',
            'salt/cloud/{0}/detaching'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        if not conn.detach_volume(volume):
            log.error('Failed to Detach volume: %s', volume.name)
            return False
        log.info('Detached volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'detached volume',
            'salt/cloud/{0}/detached'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        log.info('Destroying volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'destroying volume',
            'salt/cloud/{0}/destroying'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        if not conn.destroy_volume(volume):
            log.error('Failed to Destroy volume: %s', volume.name)
            return False
        log.info('Destroyed volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'destroyed volume',
            'salt/cloud/{0}/destroyed'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
    log.info('Destroying VM: %s', name)
    ret = conn.destroy_node(node)
    if not ret:
        log.error('Failed to Destroy VM: %s', name)
        return False
    log.info('Destroyed VM: %s', name)
    # The unused ``event = salt.utils.event.SaltEvent(...)`` object that was
    # created here has been dropped: it was never referenced.
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'name': name},
    )
    if __opts__['delete_sshkeys'] is True:
        salt.utils.cloud.remove_sshkey(node.public_ips[0])
    return True
def block_device_mappings(vm_):
    '''
    Return the configured block device mapping for the VM, e.g.::
        [{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
         {'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}]
    '''
    return config.get_cloud_config_value(
        'block_device_mappings',
        vm_,
        __opts__,
        search_global=True,
    )
def cloudstack_displayname(vm_):
    '''
    Return the display name configured for the VM, e.g. ``"minion1"``.
    '''
    return config.get_cloud_config_value(
        'cloudstack_displayname',
        vm_,
        __opts__,
        search_global=True,
    )
|
saltstack/salt
|
salt/cloud/clouds/cloudstack.py
|
get_ip
|
python
|
def get_ip(data):
'''
Return the IP address of the VM
If the VM has public IP as defined by libcloud module then use it
Otherwise try to extract the private IP and use that one.
'''
try:
ip = data.public_ips[0]
except Exception:
ip = data.private_ips[0]
return ip
|
Return the IP address of the VM
If the VM has public IP as defined by libcloud module then use it
Otherwise try to extract the private IP and use that one.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/cloudstack.py#L218-L228
| null |
# -*- coding: utf-8 -*-
'''
CloudStack Cloud Module
=======================
The CloudStack cloud module is used to control access to a CloudStack based
Public Cloud.
:depends: libcloud >= 0.15
Use of this module requires the ``apikey``, ``secretkey``, ``host`` and
``path`` parameters.
.. code-block:: yaml
my-cloudstack-cloud-config:
apikey: <your api key >
secretkey: <your secret key >
host: localhost
path: /client/api
driver: cloudstack
'''
# pylint: disable=invalid-name,function-redefined
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import pprint
import logging
# Import salt cloud libs
import salt.config as config
import salt.utils.cloud
import salt.utils.event
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
from salt.exceptions import SaltCloudSystemExit
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.ext import six
# CloudStackNetwork will be needed during creation of a new node
# pylint: disable=import-error
try:
from libcloud.compute.drivers.cloudstack import CloudStackNetwork
# This work-around for Issue #32743 is no longer needed for libcloud >=
# 1.4.0. However, older versions of libcloud must still be supported with
# this work-around. This work-around can be removed when the required
# minimum version of libcloud is 2.0.0 (See PR #40837 - which is
# implemented in Salt 2018.3.0).
if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'):
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
# Get logging started
log = logging.getLogger(__name__)
# Redirect CloudStack functions to this module namespace
get_node = namespaced_function(get_node, globals())
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
__virtualname__ = 'cloudstack'
# Only load in this module if the CLOUDSTACK configurations are in place
def __virtual__():
    '''
    Load this module only when a CloudStack provider is configured and the
    libcloud dependency check passes.
    '''
    # Short-circuit: the dependency check only runs when a provider exists.
    if get_configured_provider() is not False and get_dependencies() is not False:
        return __virtualname__
    return False
def get_configured_provider():
    '''
    Return the first configured CloudStack provider block, or ``False`` when
    any of the required keys is missing.
    '''
    required_keys = ('apikey', 'secretkey', 'host', 'path')
    provider_name = __active_provider_name__ or __virtualname__
    return config.is_provider_configured(__opts__, provider_name, required_keys)
def get_dependencies():
    '''
    Warn when the libcloud dependency is not importable.
    '''
    deps = {'libcloud': HAS_LIBS}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_conn():
    '''
    Build and return a libcloud CloudStack driver connection from the
    configured provider credentials.
    '''
    driver = get_driver(Provider.CLOUDSTACK)
    provider = get_configured_provider()
    verify_ssl_cert = config.get_cloud_config_value(
        'verify_ssl_cert', provider, __opts__, default=True, search_global=False
    )
    if verify_ssl_cert is False:
        try:
            import libcloud.security
            libcloud.security.VERIFY_SSL_CERT = False
        except (ImportError, AttributeError):
            raise SaltCloudSystemExit(
                'Could not disable SSL certificate verification. '
                'Not loading module.'
            )
    def _setting(name, **extra):
        # Look up *name* in the provider configuration only (no global search).
        return config.get_cloud_config_value(
            name, provider, __opts__, search_global=False, **extra
        )
    return driver(
        key=_setting('apikey'),
        secret=_setting('secretkey'),
        secure=_setting('secure', default=True),
        host=_setting('host'),
        path=_setting('path'),
        port=_setting('port', default=None),
    )
def get_location(conn, vm_):
    '''
    Return the libcloud location matching the configured ``location`` by id
    or name.  Defaults to location id ``2`` (Dallas); returns ``None`` when
    nothing matches.
    '''
    wanted = six.text_type(
        config.get_cloud_config_value('location', vm_, __opts__, default=2)
    )
    for candidate in conn.list_locations():
        if wanted in (six.text_type(candidate.id), six.text_type(candidate.name)):
            return candidate
def get_security_groups(conn, vm_):
    '''
    Return the configured security group list (default ``['default']``), or
    ``False`` when security groups are disabled for this VM.
    '''
    enabled = config.get_cloud_config_value(
        'securitygroup_enabled', vm_, __opts__, default=True
    )
    if not enabled:
        return False
    return config.get_cloud_config_value(
        'securitygroup', vm_, __opts__, default=['default']
    )
def get_password(vm_):
    '''
    Return the password to use, preferring ``password`` over the legacy
    ``passwd`` setting.
    '''
    legacy = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__, default=legacy, search_global=False
    )
def get_key():
    '''
    Return the path of the ssh private key used for VM access.
    '''
    provider = get_configured_provider()
    return config.get_cloud_config_value(
        'private_key', provider, __opts__, search_global=False
    )
def get_keypair(vm_):
    '''
    Return the configured keypair name, or ``False`` when none is set.
    '''
    keypair = config.get_cloud_config_value('keypair', vm_, __opts__)
    return keypair if keypair else False
def get_networkid(vm_):
    '''
    Return the configured network id (only valid for Advanced Zone), or
    ``False`` when the setting is absent.
    '''
    networkid = config.get_cloud_config_value('networkid', vm_, __opts__)
    return False if networkid is None else networkid
def get_project(conn, vm_):
    '''
    Return the configured CloudStack project object, or ``False`` when no
    project is configured, none matches, or the installed libcloud cannot
    list projects.
    '''
    try:
        projects = conn.ex_list_projects()
    except AttributeError:
        # libcloud < 0.15 does not implement ex_list_projects().
        log.warning('Cannot get projects, you may need to update libcloud to 0.15 or later')
        return False
    projid = config.get_cloud_config_value('projectid', vm_, __opts__)
    if not projid:
        return False
    wanted = six.text_type(projid)
    for project in projects:
        if wanted in (six.text_type(project.id), six.text_type(project.name)):
            return project
    log.warning("Couldn't find project %s in projects", projid)
    return False
def create(vm_):
    '''
    Create a single VM from a data dict.
    ``vm_`` is the profile/VM definition dictionary assembled by salt-cloud;
    it must contain at least ``name``.  Returns the bootstrap result dict
    merged with the node attributes, or ``False`` when profile validation,
    volume creation, node creation or volume attachment fails.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'cloudstack',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    # Announce the creation on the salt event bus before doing any work.
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', vm_['name'])
    conn = get_conn()
    # pylint: disable=not-callable
    kwargs = {
        'name': vm_['name'],
        'image': get_image(conn, vm_),
        'size': get_size(conn, vm_),
        'location': get_location(conn, vm_),
    }
    # pylint: enable=not-callable
    # Optional arguments are only added when the matching helper resolves to
    # something other than False.
    sg = get_security_groups(conn, vm_)
    if sg is not False:
        kwargs['ex_security_groups'] = sg
    if get_keypair(vm_) is not False:
        kwargs['ex_keyname'] = get_keypair(vm_)
    if get_networkid(vm_) is not False:
        kwargs['networkids'] = get_networkid(vm_)
        kwargs['networks'] = (  # The only attr that is used is 'id'.
            CloudStackNetwork(None, None, None,
                              kwargs['networkids'],
                              None, None),
        )
    if get_project(conn, vm_) is not False:
        kwargs['project'] = get_project(conn, vm_)
    # Use plain image/size names in the event payload rather than the
    # libcloud objects so it stays serializable.
    event_data = kwargs.copy()
    event_data['image'] = kwargs['image'].name
    event_data['size'] = kwargs['size'].name
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args={
            'kwargs': __utils__['cloud.filter_event'](
                'requesting',
                event_data,
                ['name', 'profile', 'provider', 'driver', 'image', 'size'],
            ),
        },
        transport=__opts__['transport']
    )
    displayname = cloudstack_displayname(vm_)
    if displayname:
        kwargs['ex_displayname'] = displayname
    else:
        kwargs['ex_displayname'] = kwargs['name']
    # Pre-create any configured data volumes so they can be attached right
    # after the node comes up.
    volumes = {}
    ex_blockdevicemappings = block_device_mappings(vm_)
    if ex_blockdevicemappings:
        for ex_blockdevicemapping in ex_blockdevicemappings:
            if 'VirtualName' not in ex_blockdevicemapping:
                ex_blockdevicemapping['VirtualName'] = '{0}-{1}'.format(vm_['name'], len(volumes))
            __utils__['cloud.fire_event'](
                'event',
                'requesting volume',
                'salt/cloud/{0}/requesting'.format(ex_blockdevicemapping['VirtualName']),
                sock_dir=__opts__['sock_dir'],
                args={'kwargs': {'name': ex_blockdevicemapping['VirtualName'],
                                 'device': ex_blockdevicemapping['DeviceName'],
                                 'size': ex_blockdevicemapping['VolumeSize']}},
            )
            try:
                volumes[ex_blockdevicemapping['DeviceName']] = conn.create_volume(
                    ex_blockdevicemapping['VolumeSize'],
                    ex_blockdevicemapping['VirtualName']
                )
            except Exception as exc:
                log.error(
                    'Error creating volume %s on CLOUDSTACK\n\n'
                    'The following exception was thrown by libcloud when trying to '
                    'requesting a volume: \n%s',
                    ex_blockdevicemapping['VirtualName'], exc,
                    # Show the traceback if the debug logging level is enabled
                    exc_info_on_loglevel=logging.DEBUG
                )
                return False
    else:
        ex_blockdevicemapping = {}
    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating %s on CLOUDSTACK\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return False
    for device_name in six.iterkeys(volumes):
        try:
            conn.attach_volume(data, volumes[device_name], device_name)
        except Exception as exc:
            log.error(
                'Error attaching volume %s on CLOUDSTACK\n\n'
                'The following exception was thrown by libcloud when trying to '
                'attach a volume: \n%s',
                ex_blockdevicemapping.get('VirtualName', 'UNKNOWN'), exc,
                # Show the traceback if the debug logging level is enabled
                exc_info=log.isEnabledFor(logging.DEBUG)
            )
            return False
    # NOTE(review): ssh_username is looked up but never used below; the
    # bootstrap helper presumably reads it from vm_/__opts__ itself — confirm
    # before removing.
    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root'
    )
    vm_['ssh_host'] = get_ip(data)
    vm_['password'] = data.extra['password']
    vm_['key_filename'] = get_key()
    # Deploy salt onto the new node and merge the node attributes into the
    # returned info dict.
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data.__dict__)
    # Remove the clear-text password from the node's extra data before the
    # node details are logged below.
    if 'password' in data.extra:
        del data.extra['password']
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data.__dict__)
    )
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        transport=__opts__['transport']
    )
    return ret
def destroy(name, conn=None, call=None):
    '''
    Delete a single VM, and all of its DATADISK volumes.
    name
        The name of the VM to destroy.
    conn
        Optional existing libcloud connection; a new one is created when
        omitted.
    call
        The salt-cloud invocation type; ``'function'`` is rejected.
    Returns ``True`` on success and ``False`` when the node cannot be found
    or any detach/destroy step fails.  Raises SaltCloudSystemExit when
    invoked with ``--function``.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'name': name},
    )
    if not conn:
        conn = get_conn()  # pylint: disable=E0602
    node = get_node(conn, name)
    if node is None:
        # Bail out early: every remaining call needs a valid node object,
        # and passing None into the libcloud API would raise.
        log.error('Unable to find the VM %s', name)
        return False
    volumes = conn.list_volumes(node)
    if volumes is None:
        # Carry on with an empty list so the node itself is still destroyed
        # (previously this crashed when iterating None).
        log.error('Unable to find volumes of the VM %s', name)
        volumes = []
    # TODO add an option like 'delete_sshkeys' below
    for volume in volumes:
        if volume.extra['volume_type'] != 'DATADISK':
            # Non-data disks (e.g. ROOT) are removed with the node itself.
            log.info(
                'Ignoring volume type %s: %s',
                volume.extra['volume_type'], volume.name
            )
            continue
        log.info('Detaching volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'detaching volume',
            'salt/cloud/{0}/detaching'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        if not conn.detach_volume(volume):
            log.error('Failed to Detach volume: %s', volume.name)
            return False
        log.info('Detached volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'detached volume',
            'salt/cloud/{0}/detached'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        log.info('Destroying volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'destroying volume',
            'salt/cloud/{0}/destroying'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        if not conn.destroy_volume(volume):
            log.error('Failed to Destroy volume: %s', volume.name)
            return False
        log.info('Destroyed volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'destroyed volume',
            'salt/cloud/{0}/destroyed'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
    log.info('Destroying VM: %s', name)
    ret = conn.destroy_node(node)
    if not ret:
        log.error('Failed to Destroy VM: %s', name)
        return False
    log.info('Destroyed VM: %s', name)
    # The unused ``event = salt.utils.event.SaltEvent(...)`` object that was
    # created here has been dropped: it was never referenced.
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'name': name},
    )
    if __opts__['delete_sshkeys'] is True:
        salt.utils.cloud.remove_sshkey(node.public_ips[0])
    return True
def block_device_mappings(vm_):
    '''
    Return the configured block device mapping for the VM, e.g.::
        [{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
         {'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}]
    '''
    return config.get_cloud_config_value(
        'block_device_mappings',
        vm_,
        __opts__,
        search_global=True,
    )
def cloudstack_displayname(vm_):
    '''
    Return the display name configured for the VM, e.g. ``"minion1"``.
    '''
    return config.get_cloud_config_value(
        'cloudstack_displayname',
        vm_,
        __opts__,
        search_global=True,
    )
|
saltstack/salt
|
salt/cloud/clouds/cloudstack.py
|
get_networkid
|
python
|
def get_networkid(vm_):
'''
Return the networkid to use, only valid for Advanced Zone
'''
networkid = config.get_cloud_config_value('networkid', vm_, __opts__)
if networkid is not None:
return networkid
else:
return False
|
Return the networkid to use, only valid for Advanced Zone
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/cloudstack.py#L231-L240
|
[
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n"
] |
# -*- coding: utf-8 -*-
'''
CloudStack Cloud Module
=======================
The CloudStack cloud module is used to control access to a CloudStack based
Public Cloud.
:depends: libcloud >= 0.15
Use of this module requires the ``apikey``, ``secretkey``, ``host`` and
``path`` parameters.
.. code-block:: yaml
my-cloudstack-cloud-config:
apikey: <your api key >
secretkey: <your secret key >
host: localhost
path: /client/api
driver: cloudstack
'''
# pylint: disable=invalid-name,function-redefined
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import pprint
import logging
# Import salt cloud libs
import salt.config as config
import salt.utils.cloud
import salt.utils.event
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
from salt.exceptions import SaltCloudSystemExit
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.ext import six
# CloudStackNetwork will be needed during creation of a new node
# pylint: disable=import-error
try:
from libcloud.compute.drivers.cloudstack import CloudStackNetwork
# This work-around for Issue #32743 is no longer needed for libcloud >=
# 1.4.0. However, older versions of libcloud must still be supported with
# this work-around. This work-around can be removed when the required
# minimum version of libcloud is 2.0.0 (See PR #40837 - which is
# implemented in Salt 2018.3.0).
if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'):
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
# Get logging started
log = logging.getLogger(__name__)
# Redirect CloudStack functions to this module namespace
# Redirect CloudStack functions to this module namespace
# The generic libcloud-backed implementations below come from the wildcard
# import of salt.cloud.libcloudfuncs; namespaced_function rebinds each one so
# it executes against THIS module's globals (get_conn, __opts__, etc.) rather
# than those of the module it was defined in.
get_node = namespaced_function(get_node, globals())
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
# Name under which this driver is exposed to salt-cloud configuration.
__virtualname__ = 'cloudstack'
# Only load in this module if the CLOUDSTACK configurations are in place
def __virtual__():
    '''
    Set up the libcloud functions and check for CloudStack configurations.
    Returns the virtual module name when both the provider configuration and
    the libcloud dependency are present, otherwise False.
    '''
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    The provider is considered configured when all required keys are present.
    '''
    required_keys = ('apikey', 'secretkey', 'host', 'path')
    provider_name = __active_provider_name__ or __virtualname__
    return config.is_provider_configured(__opts__, provider_name, required_keys)
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    '''
    deps = {'libcloud': HAS_LIBS}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_conn():
    '''
    Return a conn object for the passed VM data

    Builds a libcloud CloudStack driver instance from the provider
    configuration; may disable SSL certificate verification when the
    ``verify_ssl_cert`` provider option is False.
    '''
    driver = get_driver(Provider.CLOUDSTACK)
    provider = get_configured_provider()

    def _opt(key, **extra):
        # Provider-scoped config lookup; never searches global cloud config.
        return config.get_cloud_config_value(
            key, provider, __opts__, search_global=False, **extra)

    if _opt('verify_ssl_cert', default=True) is False:
        try:
            import libcloud.security
            libcloud.security.VERIFY_SSL_CERT = False
        except (ImportError, AttributeError):
            raise SaltCloudSystemExit(
                'Could not disable SSL certificate verification. '
                'Not loading module.'
            )
    return driver(
        key=_opt('apikey'),
        secret=_opt('secretkey'),
        secure=_opt('secure', default=True),
        host=_opt('host'),
        path=_opt('path'),
        port=_opt('port', default=None),
    )
def get_location(conn, vm_):
    '''
    Return the node location to use

    Matches the configured ``location`` (default 2, i.e. Dallas) against
    either the id or the name of the locations reported by the driver.
    Returns None implicitly when no location matches.
    '''
    # Default to Dallas if not otherwise set
    wanted = six.text_type(
        config.get_cloud_config_value('location', vm_, __opts__, default=2))
    for candidate in conn.list_locations():
        if wanted in (six.text_type(candidate.id), six.text_type(candidate.name)):
            return candidate
def get_security_groups(conn, vm_):
    '''
    Return a list of security groups to use, defaulting to ['default']

    Returns False when ``securitygroup_enabled`` is turned off in the
    configuration.
    '''
    enabled = config.get_cloud_config_value(
        'securitygroup_enabled', vm_, __opts__, default=True
    )
    if not enabled:
        return False
    return config.get_cloud_config_value(
        'securitygroup', vm_, __opts__, default=['default']
    )
def get_password(vm_):
    '''
    Return the password to use

    Looks up ``password`` first, falling back to the legacy ``passwd`` key.
    '''
    legacy = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False)
    return config.get_cloud_config_value(
        'password', vm_, __opts__, default=legacy, search_global=False)
def get_key():
    '''
    Returns the ssh private key for VM access
    '''
    provider = get_configured_provider()
    return config.get_cloud_config_value(
        'private_key', provider, __opts__, search_global=False)
def get_keypair(vm_):
    '''
    Return the keypair to use

    Returns False when no keypair is configured.
    '''
    return config.get_cloud_config_value('keypair', vm_, __opts__) or False
def get_ip(data):
    '''
    Return the IP address of the VM
    If the VM has public IP as defined by libcloud module then use it
    Otherwise try to extract the private IP and use that one.

    data
        A libcloud node object exposing ``public_ips`` and ``private_ips``
        list attributes.
    '''
    try:
        return data.public_ips[0]
    except (IndexError, TypeError):
        # No public IP available (empty list or None); the original caught a
        # bare Exception, which also hid genuine bugs such as AttributeError.
        return data.private_ips[0]
def get_project(conn, vm_):
    '''
    Return the project to use.

    Matches the configured ``projectid`` against either the id or the name
    of the projects the driver reports; returns False when projects cannot
    be listed or no match is found.
    '''
    try:
        available = conn.ex_list_projects()
    except AttributeError:
        # with versions <0.15 of libcloud this is causing an AttributeError.
        log.warning('Cannot get projects, you may need to update libcloud to 0.15 or later')
        return False
    projid = config.get_cloud_config_value('projectid', vm_, __opts__)
    if not projid:
        return False
    wanted = six.text_type(projid)
    for candidate in available:
        if wanted in (six.text_type(candidate.id), six.text_type(candidate.name)):
            return candidate
    log.warning("Couldn't find project %s in projects", projid)
    return False
def create(vm_):
    '''
    Create a single VM from a data dict

    vm_
        The VM configuration dictionary. Must contain ``name``; image, size,
        location, security group, keypair, network and project settings are
        looked up through the config helpers above.

    Returns the bootstrap result dict merged with the node's attributes, or
    False when profile validation, volume creation, node creation or volume
    attachment fails.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'cloudstack',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', vm_['name'])
    conn = get_conn()
    # pylint: disable=not-callable
    kwargs = {
        'name': vm_['name'],
        'image': get_image(conn, vm_),
        'size': get_size(conn, vm_),
        'location': get_location(conn, vm_),
    }
    # pylint: enable=not-callable
    sg = get_security_groups(conn, vm_)
    if sg is not False:
        kwargs['ex_security_groups'] = sg
    if get_keypair(vm_) is not False:
        kwargs['ex_keyname'] = get_keypair(vm_)
    if get_networkid(vm_) is not False:
        kwargs['networkids'] = get_networkid(vm_)
        kwargs['networks'] = (  # The only attr that is used is 'id'.
            CloudStackNetwork(None, None, None,
                              kwargs['networkids'],
                              None, None),
        )
    if get_project(conn, vm_) is not False:
        kwargs['project'] = get_project(conn, vm_)
    # The event payload must be JSON-serializable, so replace the image/size
    # objects with their names before firing.
    event_data = kwargs.copy()
    event_data['image'] = kwargs['image'].name
    event_data['size'] = kwargs['size'].name
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args={
            'kwargs': __utils__['cloud.filter_event'](
                'requesting',
                event_data,
                ['name', 'profile', 'provider', 'driver', 'image', 'size'],
            ),
        },
        transport=__opts__['transport']
    )
    displayname = cloudstack_displayname(vm_)
    if displayname:
        kwargs['ex_displayname'] = displayname
    else:
        kwargs['ex_displayname'] = kwargs['name']
    # Create any configured data volumes up-front; they are attached to the
    # node after it has been created.
    volumes = {}
    ex_blockdevicemappings = block_device_mappings(vm_)
    if ex_blockdevicemappings:
        for ex_blockdevicemapping in ex_blockdevicemappings:
            if 'VirtualName' not in ex_blockdevicemapping:
                ex_blockdevicemapping['VirtualName'] = '{0}-{1}'.format(vm_['name'], len(volumes))
            __utils__['cloud.fire_event'](
                'event',
                'requesting volume',
                'salt/cloud/{0}/requesting'.format(ex_blockdevicemapping['VirtualName']),
                sock_dir=__opts__['sock_dir'],
                args={'kwargs': {'name': ex_blockdevicemapping['VirtualName'],
                                 'device': ex_blockdevicemapping['DeviceName'],
                                 'size': ex_blockdevicemapping['VolumeSize']}},
            )
            try:
                volumes[ex_blockdevicemapping['DeviceName']] = conn.create_volume(
                    ex_blockdevicemapping['VolumeSize'],
                    ex_blockdevicemapping['VirtualName']
                )
            except Exception as exc:
                log.error(
                    'Error creating volume %s on CLOUDSTACK\n\n'
                    'The following exception was thrown by libcloud when trying to '
                    'requesting a volume: \n%s',
                    ex_blockdevicemapping['VirtualName'], exc,
                    # Show the traceback if the debug logging level is enabled
                    exc_info_on_loglevel=logging.DEBUG
                )
                return False
    else:
        ex_blockdevicemapping = {}
    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating %s on CLOUDSTACK\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return False
    for device_name in six.iterkeys(volumes):
        try:
            conn.attach_volume(data, volumes[device_name], device_name)
        except Exception as exc:
            log.error(
                'Error attaching volume %s on CLOUDSTACK\n\n'
                'The following exception was thrown by libcloud when trying to '
                'attach a volume: \n%s',
                ex_blockdevicemapping.get('VirtualName', 'UNKNOWN'), exc,
                # Show the traceback if the debug logging level is enabled
                exc_info=log.isEnabledFor(logging.DEBUG)
            )
            return False
    # NOTE(review): the original also looked up 'ssh_username' here but never
    # used it; the dead config lookup was removed.
    vm_['ssh_host'] = get_ip(data)
    vm_['password'] = data.extra['password']
    vm_['key_filename'] = get_key()
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data.__dict__)
    # ret shares data.__dict__ references, so scrubbing the password from
    # data.extra also removes it from the returned dict.
    if 'password' in data.extra:
        del data.extra['password']
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data.__dict__)
    )
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        transport=__opts__['transport']
    )
    return ret
def destroy(name, conn=None, call=None):
    '''
    Delete a single VM, and all of its volumes

    name
        The name of the VM to destroy.
    conn
        Optional existing libcloud connection; one is created when omitted.
    call
        CLI invocation kind; must not be 'function' (destroy is an action).

    Returns True on success, False on any failure.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'name': name},
    )
    if not conn:
        conn = get_conn()  # pylint: disable=E0602
    node = get_node(conn, name)
    if node is None:
        # The original only logged here and then crashed on the None node
        # (conn.list_volumes(None)); there is nothing to destroy, so fail
        # explicitly instead.
        log.error('Unable to find the VM %s', name)
        return False
    volumes = conn.list_volumes(node)
    if volumes is None:
        # Volume lookup failed; still proceed to destroy the node itself
        # rather than crashing while iterating None.
        log.error('Unable to find volumes of the VM %s', name)
        volumes = []
    # TODO add an option like 'delete_sshkeys' below
    for volume in volumes:
        # Only data disks are detached/destroyed; ROOT disks go with the node.
        if volume.extra['volume_type'] != 'DATADISK':
            log.info(
                'Ignoring volume type %s: %s',
                volume.extra['volume_type'], volume.name
            )
            continue
        log.info('Detaching volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'detaching volume',
            'salt/cloud/{0}/detaching'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        if not conn.detach_volume(volume):
            log.error('Failed to Detach volume: %s', volume.name)
            return False
        log.info('Detached volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'detached volume',
            'salt/cloud/{0}/detached'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        log.info('Destroying volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'destroying volume',
            'salt/cloud/{0}/destroying'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        if not conn.destroy_volume(volume):
            log.error('Failed to Destroy volume: %s', volume.name)
            return False
        log.info('Destroyed volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'destroyed volume',
            'salt/cloud/{0}/destroyed'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
    log.info('Destroying VM: %s', name)
    ret = conn.destroy_node(node)
    if not ret:
        log.error('Failed to Destroy VM: %s', name)
        return False
    log.info('Destroyed VM: %s', name)
    # Fire destroy action.  NOTE(review): the original also constructed an
    # unused salt.utils.event.SaltEvent instance here; the dead assignment
    # (and its event-bus connection side effect) was removed.
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'name': name},
    )
    if __opts__['delete_sshkeys'] is True:
        salt.utils.cloud.remove_sshkey(node.public_ips[0])
    return True
def block_device_mappings(vm_):
    '''
    Return the block device mapping:
    ::
        [{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
         {'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}]
    '''
    mappings = config.get_cloud_config_value(
        'block_device_mappings', vm_, __opts__, search_global=True)
    return mappings
def cloudstack_displayname(vm_):
    '''
    Return display name of VM:
    ::
        "minion1"
    '''
    displayname = config.get_cloud_config_value(
        'cloudstack_displayname', vm_, __opts__, search_global=True)
    return displayname
|
saltstack/salt
|
salt/cloud/clouds/cloudstack.py
|
get_project
|
python
|
def get_project(conn, vm_):
'''
Return the project to use.
'''
try:
projects = conn.ex_list_projects()
except AttributeError:
# with versions <0.15 of libcloud this is causing an AttributeError.
log.warning('Cannot get projects, you may need to update libcloud to 0.15 or later')
return False
projid = config.get_cloud_config_value('projectid', vm_, __opts__)
if not projid:
return False
for project in projects:
if six.text_type(projid) in (six.text_type(project.id), six.text_type(project.name)):
return project
log.warning("Couldn't find project %s in projects", projid)
return False
|
Return the project to use.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/cloudstack.py#L243-L263
|
[
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n"
] |
# -*- coding: utf-8 -*-
'''
CloudStack Cloud Module
=======================
The CloudStack cloud module is used to control access to a CloudStack based
Public Cloud.
:depends: libcloud >= 0.15
Use of this module requires the ``apikey``, ``secretkey``, ``host`` and
``path`` parameters.
.. code-block:: yaml
my-cloudstack-cloud-config:
apikey: <your api key >
secretkey: <your secret key >
host: localhost
path: /client/api
driver: cloudstack
'''
# pylint: disable=invalid-name,function-redefined
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import pprint
import logging
# Import salt cloud libs
import salt.config as config
import salt.utils.cloud
import salt.utils.event
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
from salt.exceptions import SaltCloudSystemExit
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.ext import six
# CloudStackNetwork will be needed during creation of a new node
# pylint: disable=import-error
try:
from libcloud.compute.drivers.cloudstack import CloudStackNetwork
# This work-around for Issue #32743 is no longer needed for libcloud >=
# 1.4.0. However, older versions of libcloud must still be supported with
# this work-around. This work-around can be removed when the required
# minimum version of libcloud is 2.0.0 (See PR #40837 - which is
# implemented in Salt 2018.3.0).
if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'):
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
# Get logging started
log = logging.getLogger(__name__)
# Redirect CloudStack functions to this module namespace
get_node = namespaced_function(get_node, globals())
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
__virtualname__ = 'cloudstack'
# Only load in this module if the CLOUDSTACK configurations are in place
def __virtual__():
'''
Set up the libcloud functions and check for CloudStack configurations.
'''
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return False
return __virtualname__
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('apikey', 'secretkey', 'host', 'path')
)
def get_dependencies():
'''
Warn if dependencies aren't met.
'''
return config.check_driver_dependencies(
__virtualname__,
{'libcloud': HAS_LIBS}
)
def get_conn():
'''
Return a conn object for the passed VM data
'''
driver = get_driver(Provider.CLOUDSTACK)
verify_ssl_cert = config.get_cloud_config_value('verify_ssl_cert',
get_configured_provider(),
__opts__,
default=True,
search_global=False)
if verify_ssl_cert is False:
try:
import libcloud.security
libcloud.security.VERIFY_SSL_CERT = False
except (ImportError, AttributeError):
raise SaltCloudSystemExit(
'Could not disable SSL certificate verification. '
'Not loading module.'
)
return driver(
key=config.get_cloud_config_value(
'apikey', get_configured_provider(), __opts__, search_global=False
),
secret=config.get_cloud_config_value(
'secretkey', get_configured_provider(), __opts__,
search_global=False
),
secure=config.get_cloud_config_value(
'secure', get_configured_provider(), __opts__,
default=True, search_global=False
),
host=config.get_cloud_config_value(
'host', get_configured_provider(), __opts__, search_global=False
),
path=config.get_cloud_config_value(
'path', get_configured_provider(), __opts__, search_global=False
),
port=config.get_cloud_config_value(
'port', get_configured_provider(), __opts__,
default=None, search_global=False
)
)
def get_location(conn, vm_):
'''
Return the node location to use
'''
locations = conn.list_locations()
# Default to Dallas if not otherwise set
loc = config.get_cloud_config_value('location', vm_, __opts__, default=2)
for location in locations:
if six.text_type(loc) in (six.text_type(location.id), six.text_type(location.name)):
return location
def get_security_groups(conn, vm_):
'''
Return a list of security groups to use, defaulting to ['default']
'''
securitygroup_enabled = config.get_cloud_config_value(
'securitygroup_enabled', vm_, __opts__, default=True
)
if securitygroup_enabled:
return config.get_cloud_config_value(
'securitygroup', vm_, __opts__, default=['default']
)
else:
return False
def get_password(vm_):
'''
Return the password to use
'''
return config.get_cloud_config_value(
'password', vm_, __opts__, default=config.get_cloud_config_value(
'passwd', vm_, __opts__, search_global=False
), search_global=False
)
def get_key():
'''
Returns the ssh private key for VM access
'''
return config.get_cloud_config_value(
'private_key', get_configured_provider(), __opts__, search_global=False
)
def get_keypair(vm_):
'''
Return the keypair to use
'''
keypair = config.get_cloud_config_value('keypair', vm_, __opts__)
if keypair:
return keypair
else:
return False
def get_ip(data):
'''
Return the IP address of the VM
If the VM has public IP as defined by libcloud module then use it
Otherwise try to extract the private IP and use that one.
'''
try:
ip = data.public_ips[0]
except Exception:
ip = data.private_ips[0]
return ip
def get_networkid(vm_):
    '''
    Return the networkid to use, only valid for Advanced Zone

    Returns False when no ``networkid`` is configured; any non-None value
    (including falsy ones) is passed through unchanged.
    '''
    networkid = config.get_cloud_config_value('networkid', vm_, __opts__)
    return False if networkid is None else networkid
def create(vm_):
'''
Create a single VM from a data dict
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'cloudstack',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
sock_dir=__opts__['sock_dir'],
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
transport=__opts__['transport']
)
log.info('Creating Cloud VM %s', vm_['name'])
conn = get_conn()
# pylint: disable=not-callable
kwargs = {
'name': vm_['name'],
'image': get_image(conn, vm_),
'size': get_size(conn, vm_),
'location': get_location(conn, vm_),
}
# pylint: enable=not-callable
sg = get_security_groups(conn, vm_)
if sg is not False:
kwargs['ex_security_groups'] = sg
if get_keypair(vm_) is not False:
kwargs['ex_keyname'] = get_keypair(vm_)
if get_networkid(vm_) is not False:
kwargs['networkids'] = get_networkid(vm_)
kwargs['networks'] = ( # The only attr that is used is 'id'.
CloudStackNetwork(None, None, None,
kwargs['networkids'],
None, None),
)
if get_project(conn, vm_) is not False:
kwargs['project'] = get_project(conn, vm_)
event_data = kwargs.copy()
event_data['image'] = kwargs['image'].name
event_data['size'] = kwargs['size'].name
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
sock_dir=__opts__['sock_dir'],
args={
'kwargs': __utils__['cloud.filter_event'](
'requesting',
event_data,
['name', 'profile', 'provider', 'driver', 'image', 'size'],
),
},
transport=__opts__['transport']
)
displayname = cloudstack_displayname(vm_)
if displayname:
kwargs['ex_displayname'] = displayname
else:
kwargs['ex_displayname'] = kwargs['name']
volumes = {}
ex_blockdevicemappings = block_device_mappings(vm_)
if ex_blockdevicemappings:
for ex_blockdevicemapping in ex_blockdevicemappings:
if 'VirtualName' not in ex_blockdevicemapping:
ex_blockdevicemapping['VirtualName'] = '{0}-{1}'.format(vm_['name'], len(volumes))
__utils__['cloud.fire_event'](
'event',
'requesting volume',
'salt/cloud/{0}/requesting'.format(ex_blockdevicemapping['VirtualName']),
sock_dir=__opts__['sock_dir'],
args={'kwargs': {'name': ex_blockdevicemapping['VirtualName'],
'device': ex_blockdevicemapping['DeviceName'],
'size': ex_blockdevicemapping['VolumeSize']}},
)
try:
volumes[ex_blockdevicemapping['DeviceName']] = conn.create_volume(
ex_blockdevicemapping['VolumeSize'],
ex_blockdevicemapping['VirtualName']
)
except Exception as exc:
log.error(
'Error creating volume %s on CLOUDSTACK\n\n'
'The following exception was thrown by libcloud when trying to '
'requesting a volume: \n%s',
ex_blockdevicemapping['VirtualName'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
else:
ex_blockdevicemapping = {}
try:
data = conn.create_node(**kwargs)
except Exception as exc:
log.error(
'Error creating %s on CLOUDSTACK\n\n'
'The following exception was thrown by libcloud when trying to '
'run the initial deployment: \n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
for device_name in six.iterkeys(volumes):
try:
conn.attach_volume(data, volumes[device_name], device_name)
except Exception as exc:
log.error(
'Error attaching volume %s on CLOUDSTACK\n\n'
'The following exception was thrown by libcloud when trying to '
'attach a volume: \n%s',
ex_blockdevicemapping.get('VirtualName', 'UNKNOWN'), exc,
# Show the traceback if the debug logging level is enabled
exc_info=log.isEnabledFor(logging.DEBUG)
)
return False
ssh_username = config.get_cloud_config_value(
'ssh_username', vm_, __opts__, default='root'
)
vm_['ssh_host'] = get_ip(data)
vm_['password'] = data.extra['password']
vm_['key_filename'] = get_key()
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data.__dict__)
if 'password' in data.extra:
del data.extra['password']
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data.__dict__)
)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
sock_dir=__opts__['sock_dir'],
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
transport=__opts__['transport']
)
return ret
def destroy(name, conn=None, call=None):
'''
Delete a single VM, and all of its volumes
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
sock_dir=__opts__['sock_dir'],
args={'name': name},
)
if not conn:
conn = get_conn() # pylint: disable=E0602
node = get_node(conn, name)
if node is None:
log.error('Unable to find the VM %s', name)
volumes = conn.list_volumes(node)
if volumes is None:
log.error('Unable to find volumes of the VM %s', name)
# TODO add an option like 'delete_sshkeys' below
for volume in volumes:
if volume.extra['volume_type'] != 'DATADISK':
log.info(
'Ignoring volume type %s: %s',
volume.extra['volume_type'], volume.name
)
continue
log.info('Detaching volume: %s', volume.name)
__utils__['cloud.fire_event'](
'event',
'detaching volume',
'salt/cloud/{0}/detaching'.format(volume.name),
sock_dir=__opts__['sock_dir'],
args={'name': volume.name},
)
if not conn.detach_volume(volume):
log.error('Failed to Detach volume: %s', volume.name)
return False
log.info('Detached volume: %s', volume.name)
__utils__['cloud.fire_event'](
'event',
'detached volume',
'salt/cloud/{0}/detached'.format(volume.name),
sock_dir=__opts__['sock_dir'],
args={'name': volume.name},
)
log.info('Destroying volume: %s', volume.name)
__utils__['cloud.fire_event'](
'event',
'destroying volume',
'salt/cloud/{0}/destroying'.format(volume.name),
sock_dir=__opts__['sock_dir'],
args={'name': volume.name},
)
if not conn.destroy_volume(volume):
log.error('Failed to Destroy volume: %s', volume.name)
return False
log.info('Destroyed volume: %s', volume.name)
__utils__['cloud.fire_event'](
'event',
'destroyed volume',
'salt/cloud/{0}/destroyed'.format(volume.name),
sock_dir=__opts__['sock_dir'],
args={'name': volume.name},
)
log.info('Destroying VM: %s', name)
ret = conn.destroy_node(node)
if not ret:
log.error('Failed to Destroy VM: %s', name)
return False
log.info('Destroyed VM: %s', name)
# Fire destroy action
event = salt.utils.event.SaltEvent('master', __opts__['sock_dir'])
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
sock_dir=__opts__['sock_dir'],
args={'name': name},
)
if __opts__['delete_sshkeys'] is True:
salt.utils.cloud.remove_sshkey(node.public_ips[0])
return True
def block_device_mappings(vm_):
'''
Return the block device mapping:
::
[{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
{'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}]
'''
return config.get_cloud_config_value(
'block_device_mappings', vm_, __opts__, search_global=True
)
def cloudstack_displayname(vm_):
'''
Return display name of VM:
::
"minion1"
'''
return config.get_cloud_config_value(
'cloudstack_displayname', vm_, __opts__, search_global=True
)
|
saltstack/salt
|
salt/cloud/clouds/cloudstack.py
|
create
|
python
|
def create(vm_):
    '''
    Create a single VM from a data dict

    Fires salt-cloud lifecycle events, builds the node via libcloud
    (optionally creating and attaching data volumes first), then bootstraps
    Salt onto the new machine.

    Returns the bootstrap result dict merged with the node's attributes,
    or False on any failure.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'cloudstack',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', vm_['name'])
    conn = get_conn()
    # Base libcloud create_node arguments; the extended (ex_*) ones below
    # are only added when the corresponding setting is configured.
    # pylint: disable=not-callable
    kwargs = {
        'name': vm_['name'],
        'image': get_image(conn, vm_),
        'size': get_size(conn, vm_),
        'location': get_location(conn, vm_),
    }
    # pylint: enable=not-callable
    sg = get_security_groups(conn, vm_)
    if sg is not False:
        kwargs['ex_security_groups'] = sg
    if get_keypair(vm_) is not False:
        kwargs['ex_keyname'] = get_keypair(vm_)
    if get_networkid(vm_) is not False:
        kwargs['networkids'] = get_networkid(vm_)
        kwargs['networks'] = (  # The only attr that is used is 'id'.
            CloudStackNetwork(None, None, None,
                              kwargs['networkids'],
                              None, None),
        )
    if get_project(conn, vm_) is not False:
        kwargs['project'] = get_project(conn, vm_)
    # The event payload must be JSON-friendly, so replace the image/size
    # objects with their names.
    event_data = kwargs.copy()
    event_data['image'] = kwargs['image'].name
    event_data['size'] = kwargs['size'].name
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args={
            'kwargs': __utils__['cloud.filter_event'](
                'requesting',
                event_data,
                ['name', 'profile', 'provider', 'driver', 'image', 'size'],
            ),
        },
        transport=__opts__['transport']
    )
    displayname = cloudstack_displayname(vm_)
    if displayname:
        kwargs['ex_displayname'] = displayname
    else:
        kwargs['ex_displayname'] = kwargs['name']
    # Data volumes requested via block_device_mappings are created before
    # the node and attached after it exists.
    volumes = {}
    ex_blockdevicemappings = block_device_mappings(vm_)
    if ex_blockdevicemappings:
        for ex_blockdevicemapping in ex_blockdevicemappings:
            if 'VirtualName' not in ex_blockdevicemapping:
                ex_blockdevicemapping['VirtualName'] = '{0}-{1}'.format(vm_['name'], len(volumes))
            __utils__['cloud.fire_event'](
                'event',
                'requesting volume',
                'salt/cloud/{0}/requesting'.format(ex_blockdevicemapping['VirtualName']),
                sock_dir=__opts__['sock_dir'],
                args={'kwargs': {'name': ex_blockdevicemapping['VirtualName'],
                                 'device': ex_blockdevicemapping['DeviceName'],
                                 'size': ex_blockdevicemapping['VolumeSize']}},
            )
            try:
                volumes[ex_blockdevicemapping['DeviceName']] = conn.create_volume(
                    ex_blockdevicemapping['VolumeSize'],
                    ex_blockdevicemapping['VirtualName']
                )
            except Exception as exc:
                log.error(
                    'Error creating volume %s on CLOUDSTACK\n\n'
                    'The following exception was thrown by libcloud when trying to '
                    'requesting a volume: \n%s',
                    ex_blockdevicemapping['VirtualName'], exc,
                    # Show the traceback if the debug logging level is enabled
                    exc_info_on_loglevel=logging.DEBUG
                )
                return False
    else:
        ex_blockdevicemapping = {}
    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating %s on CLOUDSTACK\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return False
    for device_name in six.iterkeys(volumes):
        try:
            conn.attach_volume(data, volumes[device_name], device_name)
        except Exception as exc:
            log.error(
                'Error attaching volume %s on CLOUDSTACK\n\n'
                'The following exception was thrown by libcloud when trying to '
                'attach a volume: \n%s',
                # NOTE(review): on failure this logs the *last* mapping seen in
                # the creation loop, not necessarily the one being attached.
                ex_blockdevicemapping.get('VirtualName', 'UNKNOWN'), exc,
                # Show the traceback if the debug logging level is enabled
                exc_info=log.isEnabledFor(logging.DEBUG)
            )
            return False
    # NOTE(review): ssh_username is looked up here but never used below —
    # confirm whether cloud.bootstrap reads it from vm_ instead.
    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root'
    )
    vm_['ssh_host'] = get_ip(data)
    # NOTE(review): 'password' is read unguarded here but membership-checked
    # below — presumably CloudStack always returns it; verify.
    vm_['password'] = data.extra['password']
    vm_['key_filename'] = get_key()
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data.__dict__)
    # Scrub the password so it does not end up in logs/events below.
    if 'password' in data.extra:
        del data.extra['password']
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data.__dict__)
    )
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        transport=__opts__['transport']
    )
    return ret
|
Create a single VM from a data dict
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/cloudstack.py#L266-L431
|
[
"def iterkeys(d, **kw):\n return d.iterkeys(**kw)\n",
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n",
"def is_profile_configured(opts, provider, profile_name, vm_=None):\n '''\n Check if the requested profile contains the minimum required parameters for\n a profile.\n\n Required parameters include image and provider for all drivers, while some\n drivers also require size keys.\n\n .. versionadded:: 2015.8.0\n '''\n # Standard dict keys required by all drivers.\n required_keys = ['provider']\n alias, driver = provider.split(':')\n\n # Most drivers need an image to be specified, but some do not.\n non_image_drivers = ['nova', 'virtualbox', 'libvirt', 'softlayer', 'oneandone', 'profitbricks']\n\n # Most drivers need a size, but some do not.\n non_size_drivers = ['opennebula', 'parallels', 'proxmox', 'scaleway',\n 'softlayer', 'softlayer_hw', 'vmware', 'vsphere',\n 'virtualbox', 'libvirt', 'oneandone', 'profitbricks']\n\n provider_key = opts['providers'][alias][driver]\n profile_key = opts['providers'][alias][driver]['profiles'][profile_name]\n\n # If cloning on Linode, size and image are not necessary.\n # They are obtained from the to-be-cloned VM.\n if driver == 'linode' and profile_key.get('clonefrom', False):\n non_image_drivers.append('linode')\n non_size_drivers.append('linode')\n elif driver == 'gce' and 'sourceImage' in six.text_type(vm_.get('ex_disks_gce_struct')):\n non_image_drivers.append('gce')\n\n # If cloning on VMware, specifying image is not necessary.\n if driver == 'vmware' and 'image' not in list(profile_key.keys()):\n non_image_drivers.append('vmware')\n\n if driver not in non_image_drivers:\n required_keys.append('image')\n if driver == 'vmware':\n required_keys.append('datastore')\n elif driver in ['linode', 'virtualbox']:\n required_keys.append('clonefrom')\n elif driver == 'nova':\n nova_image_keys = ['image', 'block_device_mapping', 'block_device', 'boot_volume']\n if not any([key in provider_key for key in nova_image_keys]) and not any([key in profile_key for key in nova_image_keys]):\n required_keys.extend(nova_image_keys)\n\n if driver not 
in non_size_drivers:\n required_keys.append('size')\n\n # Check if required fields are supplied in the provider config. If they\n # are present, remove it from the required_keys list.\n for item in list(required_keys):\n if item in provider_key:\n required_keys.remove(item)\n\n # If a vm_ dict was passed in, use that information to get any other configs\n # that we might have missed thus far, such as a option provided in a map file.\n if vm_:\n for item in list(required_keys):\n if item in vm_:\n required_keys.remove(item)\n\n # Check for remaining required parameters in the profile config.\n for item in required_keys:\n if profile_key.get(item, None) is None:\n # There's at least one required configuration item which is not set.\n log.error(\n \"The required '%s' configuration setting is missing from \"\n \"the '%s' profile, which is configured under the '%s' alias.\",\n item, profile_name, alias\n )\n return False\n\n return True\n",
"def get_location(conn, vm_):\n '''\n Return the node location to use\n '''\n locations = conn.list_locations()\n # Default to Dallas if not otherwise set\n loc = config.get_cloud_config_value('location', vm_, __opts__, default=2)\n for location in locations:\n if six.text_type(loc) in (six.text_type(location.id), six.text_type(location.name)):\n return location\n",
"def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n driver = get_driver(Provider.CLOUDSTACK)\n\n verify_ssl_cert = config.get_cloud_config_value('verify_ssl_cert',\n get_configured_provider(),\n __opts__,\n default=True,\n search_global=False)\n\n if verify_ssl_cert is False:\n try:\n import libcloud.security\n libcloud.security.VERIFY_SSL_CERT = False\n except (ImportError, AttributeError):\n raise SaltCloudSystemExit(\n 'Could not disable SSL certificate verification. '\n 'Not loading module.'\n )\n\n return driver(\n key=config.get_cloud_config_value(\n 'apikey', get_configured_provider(), __opts__, search_global=False\n ),\n secret=config.get_cloud_config_value(\n 'secretkey', get_configured_provider(), __opts__,\n search_global=False\n ),\n secure=config.get_cloud_config_value(\n 'secure', get_configured_provider(), __opts__,\n default=True, search_global=False\n ),\n host=config.get_cloud_config_value(\n 'host', get_configured_provider(), __opts__, search_global=False\n ),\n path=config.get_cloud_config_value(\n 'path', get_configured_provider(), __opts__, search_global=False\n ),\n port=config.get_cloud_config_value(\n 'port', get_configured_provider(), __opts__,\n default=None, search_global=False\n )\n )\n",
"def block_device_mappings(vm_):\n '''\n Return the block device mapping:\n\n ::\n\n [{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},\n {'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}]\n '''\n return config.get_cloud_config_value(\n 'block_device_mappings', vm_, __opts__, search_global=True\n )\n",
"def get_key():\n '''\n Returns the ssh private key for VM access\n '''\n return config.get_cloud_config_value(\n 'private_key', get_configured_provider(), __opts__, search_global=False\n )\n",
"def get_security_groups(conn, vm_):\n '''\n Return a list of security groups to use, defaulting to ['default']\n '''\n securitygroup_enabled = config.get_cloud_config_value(\n 'securitygroup_enabled', vm_, __opts__, default=True\n )\n if securitygroup_enabled:\n return config.get_cloud_config_value(\n 'securitygroup', vm_, __opts__, default=['default']\n )\n else:\n return False\n",
"def get_keypair(vm_):\n '''\n Return the keypair to use\n '''\n keypair = config.get_cloud_config_value('keypair', vm_, __opts__)\n\n if keypair:\n return keypair\n else:\n return False\n",
"def get_ip(data):\n '''\n Return the IP address of the VM\n If the VM has public IP as defined by libcloud module then use it\n Otherwise try to extract the private IP and use that one.\n '''\n try:\n ip = data.public_ips[0]\n except Exception:\n ip = data.private_ips[0]\n return ip\n",
"def get_networkid(vm_):\n '''\n Return the networkid to use, only valid for Advanced Zone\n '''\n networkid = config.get_cloud_config_value('networkid', vm_, __opts__)\n\n if networkid is not None:\n return networkid\n else:\n return False\n",
"def get_project(conn, vm_):\n '''\n Return the project to use.\n '''\n try:\n projects = conn.ex_list_projects()\n except AttributeError:\n # with versions <0.15 of libcloud this is causing an AttributeError.\n log.warning('Cannot get projects, you may need to update libcloud to 0.15 or later')\n return False\n projid = config.get_cloud_config_value('projectid', vm_, __opts__)\n\n if not projid:\n return False\n\n for project in projects:\n if six.text_type(projid) in (six.text_type(project.id), six.text_type(project.name)):\n return project\n\n log.warning(\"Couldn't find project %s in projects\", projid)\n return False\n",
"def cloudstack_displayname(vm_):\n '''\n Return display name of VM:\n\n ::\n \"minion1\"\n '''\n return config.get_cloud_config_value(\n 'cloudstack_displayname', vm_, __opts__, search_global=True\n )\n"
] |
# -*- coding: utf-8 -*-
'''
CloudStack Cloud Module
=======================
The CloudStack cloud module is used to control access to a CloudStack based
Public Cloud.
:depends: libcloud >= 0.15
Use of this module requires the ``apikey``, ``secretkey``, ``host`` and
``path`` parameters.
.. code-block:: yaml
my-cloudstack-cloud-config:
apikey: <your api key >
secretkey: <your secret key >
host: localhost
path: /client/api
driver: cloudstack
'''
# pylint: disable=invalid-name,function-redefined
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import pprint
import logging
# Import salt cloud libs
import salt.config as config
import salt.utils.cloud
import salt.utils.event
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
from salt.exceptions import SaltCloudSystemExit
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.ext import six
# CloudStackNetwork will be needed during creation of a new node
# pylint: disable=import-error
try:
from libcloud.compute.drivers.cloudstack import CloudStackNetwork
# This work-around for Issue #32743 is no longer needed for libcloud >=
# 1.4.0. However, older versions of libcloud must still be supported with
# this work-around. This work-around can be removed when the required
# minimum version of libcloud is 2.0.0 (See PR #40837 - which is
# implemented in Salt 2018.3.0).
if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'):
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
# Get logging started
log = logging.getLogger(__name__)
# Redirect CloudStack functions to this module namespace
# Each generic libcloud helper is re-bound so it executes against this
# module's globals (its get_conn, __opts__, etc.) rather than its own.
get_node = namespaced_function(get_node, globals())
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
# Name under which salt-cloud loads and addresses this driver.
__virtualname__ = 'cloudstack'
# Only load in this module if the CLOUDSTACK configurations are in place
def __virtual__():
    '''
    Set up the libcloud functions and check for CloudStack configurations.
    '''
    # Refuse to load unless both the provider config and libcloud are present.
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # All four connection settings must be present for the provider to count.
    required = ('apikey', 'secretkey', 'host', 'path')
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        required,
    )
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    '''
    deps = {'libcloud': HAS_LIBS}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_conn():
    '''
    Return a conn object for the passed VM data
    '''
    driver = get_driver(Provider.CLOUDSTACK)
    provider = get_configured_provider()

    def _setting(name, **extra):
        # Provider-level settings are looked up without global search.
        return config.get_cloud_config_value(
            name, provider, __opts__, search_global=False, **extra)

    if _setting('verify_ssl_cert', default=True) is False:
        try:
            import libcloud.security
            libcloud.security.VERIFY_SSL_CERT = False
        except (ImportError, AttributeError):
            raise SaltCloudSystemExit(
                'Could not disable SSL certificate verification. '
                'Not loading module.'
            )

    return driver(
        key=_setting('apikey'),
        secret=_setting('secretkey'),
        secure=_setting('secure', default=True),
        host=_setting('host'),
        path=_setting('path'),
        port=_setting('port', default=None),
    )
def get_location(conn, vm_):
    '''
    Return the node location to use
    '''
    # Default to Dallas if not otherwise set
    wanted = six.text_type(
        config.get_cloud_config_value('location', vm_, __opts__, default=2))
    for candidate in conn.list_locations():
        # Match on either the location id or its display name.
        if wanted in (six.text_type(candidate.id), six.text_type(candidate.name)):
            return candidate
def get_security_groups(conn, vm_):
    '''
    Return a list of security groups to use, defaulting to ['default']
    '''
    # Security groups can be disabled per-VM; signal that with False.
    if not config.get_cloud_config_value(
            'securitygroup_enabled', vm_, __opts__, default=True):
        return False
    return config.get_cloud_config_value(
        'securitygroup', vm_, __opts__, default=['default']
    )
def get_password(vm_):
    '''
    Return the password to use
    '''
    # Fall back to the legacy 'passwd' key when 'password' is not set.
    fallback = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__, default=fallback, search_global=False
    )
def get_key():
    '''
    Returns the ssh private key for VM access
    '''
    provider = get_configured_provider()
    return config.get_cloud_config_value(
        'private_key', provider, __opts__, search_global=False
    )
def get_keypair(vm_):
    '''
    Return the keypair to use
    '''
    # A falsy (unset/empty) keypair is normalised to False.
    keypair = config.get_cloud_config_value('keypair', vm_, __opts__)
    return keypair if keypair else False
def get_ip(data):
    '''
    Return the IP address of the VM

    If the VM has a public IP as defined by the libcloud module then use it.
    Otherwise try to extract the private IP and use that one.

    data
        A libcloud node object exposing ``public_ips`` / ``private_ips``
        list attributes.
    '''
    try:
        ip = data.public_ips[0]
    except (IndexError, TypeError, AttributeError):
        # No usable public address (empty list, None, or missing attribute):
        # fall back to the first private address. Previously this caught
        # *any* Exception, which could mask unrelated errors.
        ip = data.private_ips[0]
    return ip
def get_networkid(vm_):
    '''
    Return the networkid to use, only valid for Advanced Zone
    '''
    networkid = config.get_cloud_config_value('networkid', vm_, __opts__)
    # None means the setting is absent; report that as False.
    return False if networkid is None else networkid
def get_project(conn, vm_):
    '''
    Return the project to use.
    '''
    try:
        projects = conn.ex_list_projects()
    except AttributeError:
        # with versions <0.15 of libcloud this is causing an AttributeError.
        log.warning('Cannot get projects, you may need to update libcloud to 0.15 or later')
        return False
    projid = config.get_cloud_config_value('projectid', vm_, __opts__)
    if not projid:
        return False
    wanted = six.text_type(projid)
    for project in projects:
        # Match on either the project id or its display name.
        if wanted in (six.text_type(project.id), six.text_type(project.name)):
            return project
    log.warning("Couldn't find project %s in projects", projid)
    return False
def destroy(name, conn=None, call=None):
    '''
    Delete a single VM, and all of its volumes

    name
        Name of the VM to destroy.
    conn
        Optional pre-built libcloud connection; created when omitted.
    call
        Must not be ``function``; destroy is an instance action.

    Returns True on success, False on any failure.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'name': name},
    )
    if not conn:
        conn = get_conn()  # pylint: disable=E0602
    node = get_node(conn, name)
    if node is None:
        # Bail out early: without a node object every later call
        # (list_volumes, destroy_node) would raise.
        log.error('Unable to find the VM %s', name)
        return False
    volumes = conn.list_volumes(node)
    if volumes is None:
        # Treat an unknown volume list as empty instead of crashing in the
        # loop below; the node itself can still be destroyed.
        log.error('Unable to find volumes of the VM %s', name)
        volumes = []
    # TODO add an option like 'delete_sshkeys' below
    for volume in volumes:
        if volume.extra['volume_type'] != 'DATADISK':
            # Only data disks are detached/destroyed here; the root disk is
            # removed together with the VM.
            log.info(
                'Ignoring volume type %s: %s',
                volume.extra['volume_type'], volume.name
            )
            continue
        log.info('Detaching volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'detaching volume',
            'salt/cloud/{0}/detaching'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        if not conn.detach_volume(volume):
            log.error('Failed to Detach volume: %s', volume.name)
            return False
        log.info('Detached volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'detached volume',
            'salt/cloud/{0}/detached'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        log.info('Destroying volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'destroying volume',
            'salt/cloud/{0}/destroying'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        if not conn.destroy_volume(volume):
            log.error('Failed to Destroy volume: %s', volume.name)
            return False
        log.info('Destroyed volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'destroyed volume',
            'salt/cloud/{0}/destroyed'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
    log.info('Destroying VM: %s', name)
    ret = conn.destroy_node(node)
    if not ret:
        log.error('Failed to Destroy VM: %s', name)
        return False
    log.info('Destroyed VM: %s', name)
    # Fire destroy action.  (An unused SaltEvent object was previously
    # constructed here; the event is actually sent via cloud.fire_event.)
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'name': name},
    )
    if __opts__['delete_sshkeys'] is True:
        salt.utils.cloud.remove_sshkey(node.public_ips[0])
    return True
def block_device_mappings(vm_):
    '''
    Return the block device mapping:
    ::
    [{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
    {'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}]
    '''
    # Resolved across vm config, profile, provider and global scope.
    mappings = config.get_cloud_config_value(
        'block_device_mappings', vm_, __opts__, search_global=True
    )
    return mappings
def cloudstack_displayname(vm_):
    '''
    Return display name of VM:
    ::
    "minion1"
    '''
    # Resolved across vm config, profile, provider and global scope.
    displayname = config.get_cloud_config_value(
        'cloudstack_displayname', vm_, __opts__, search_global=True
    )
    return displayname
|
saltstack/salt
|
salt/cloud/clouds/cloudstack.py
|
destroy
|
python
|
def destroy(name, conn=None, call=None):
    '''
    Delete a single VM, and all of its volumes

    name
        Name of the VM to destroy.
    conn
        Optional pre-built libcloud connection; created when omitted.
    call
        Must not be ``function``; destroy is an instance action.

    Returns True on success, False on any failure.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'name': name},
    )
    if not conn:
        conn = get_conn()  # pylint: disable=E0602
    node = get_node(conn, name)
    if node is None:
        # Bail out early: without a node object every later call
        # (list_volumes, destroy_node) would raise.
        log.error('Unable to find the VM %s', name)
        return False
    volumes = conn.list_volumes(node)
    if volumes is None:
        # Treat an unknown volume list as empty instead of crashing in the
        # loop below; the node itself can still be destroyed.
        log.error('Unable to find volumes of the VM %s', name)
        volumes = []
    # TODO add an option like 'delete_sshkeys' below
    for volume in volumes:
        if volume.extra['volume_type'] != 'DATADISK':
            # Only data disks are detached/destroyed here; the root disk is
            # removed together with the VM.
            log.info(
                'Ignoring volume type %s: %s',
                volume.extra['volume_type'], volume.name
            )
            continue
        log.info('Detaching volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'detaching volume',
            'salt/cloud/{0}/detaching'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        if not conn.detach_volume(volume):
            log.error('Failed to Detach volume: %s', volume.name)
            return False
        log.info('Detached volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'detached volume',
            'salt/cloud/{0}/detached'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        log.info('Destroying volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'destroying volume',
            'salt/cloud/{0}/destroying'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
        if not conn.destroy_volume(volume):
            log.error('Failed to Destroy volume: %s', volume.name)
            return False
        log.info('Destroyed volume: %s', volume.name)
        __utils__['cloud.fire_event'](
            'event',
            'destroyed volume',
            'salt/cloud/{0}/destroyed'.format(volume.name),
            sock_dir=__opts__['sock_dir'],
            args={'name': volume.name},
        )
    log.info('Destroying VM: %s', name)
    ret = conn.destroy_node(node)
    if not ret:
        log.error('Failed to Destroy VM: %s', name)
        return False
    log.info('Destroyed VM: %s', name)
    # Fire destroy action.  (An unused SaltEvent object was previously
    # constructed here; the event is actually sent via cloud.fire_event.)
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'name': name},
    )
    if __opts__['delete_sshkeys'] is True:
        salt.utils.cloud.remove_sshkey(node.public_ips[0])
    return True
|
Delete a single VM, and all of its volumes
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/cloudstack.py#L434-L525
|
[
"def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n driver = get_driver(Provider.CLOUDSTACK)\n\n verify_ssl_cert = config.get_cloud_config_value('verify_ssl_cert',\n get_configured_provider(),\n __opts__,\n default=True,\n search_global=False)\n\n if verify_ssl_cert is False:\n try:\n import libcloud.security\n libcloud.security.VERIFY_SSL_CERT = False\n except (ImportError, AttributeError):\n raise SaltCloudSystemExit(\n 'Could not disable SSL certificate verification. '\n 'Not loading module.'\n )\n\n return driver(\n key=config.get_cloud_config_value(\n 'apikey', get_configured_provider(), __opts__, search_global=False\n ),\n secret=config.get_cloud_config_value(\n 'secretkey', get_configured_provider(), __opts__,\n search_global=False\n ),\n secure=config.get_cloud_config_value(\n 'secure', get_configured_provider(), __opts__,\n default=True, search_global=False\n ),\n host=config.get_cloud_config_value(\n 'host', get_configured_provider(), __opts__, search_global=False\n ),\n path=config.get_cloud_config_value(\n 'path', get_configured_provider(), __opts__, search_global=False\n ),\n port=config.get_cloud_config_value(\n 'port', get_configured_provider(), __opts__,\n default=None, search_global=False\n )\n )\n",
"def remove_sshkey(host, known_hosts=None):\n '''\n Remove a host from the known_hosts file\n '''\n if known_hosts is None:\n if 'HOME' in os.environ:\n known_hosts = '{0}/.ssh/known_hosts'.format(os.environ['HOME'])\n else:\n try:\n known_hosts = '{0}/.ssh/known_hosts'.format(\n pwd.getpwuid(os.getuid()).pwd_dir\n )\n except Exception:\n pass\n\n if known_hosts is not None:\n log.debug(\n 'Removing ssh key for %s from known hosts file %s',\n host, known_hosts\n )\n else:\n log.debug('Removing ssh key for %s from known hosts file', host)\n\n cmd = 'ssh-keygen -R {0}'.format(host)\n subprocess.call(cmd, shell=True)\n"
] |
# -*- coding: utf-8 -*-
'''
CloudStack Cloud Module
=======================
The CloudStack cloud module is used to control access to a CloudStack based
Public Cloud.
:depends: libcloud >= 0.15
Use of this module requires the ``apikey``, ``secretkey``, ``host`` and
``path`` parameters.
.. code-block:: yaml
my-cloudstack-cloud-config:
apikey: <your api key >
secretkey: <your secret key >
host: localhost
path: /client/api
driver: cloudstack
'''
# pylint: disable=invalid-name,function-redefined
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import pprint
import logging
# Import salt cloud libs
import salt.config as config
import salt.utils.cloud
import salt.utils.event
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
from salt.exceptions import SaltCloudSystemExit
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.ext import six
# CloudStackNetwork will be needed during creation of a new node
# pylint: disable=import-error
try:
from libcloud.compute.drivers.cloudstack import CloudStackNetwork
# This work-around for Issue #32743 is no longer needed for libcloud >=
# 1.4.0. However, older versions of libcloud must still be supported with
# this work-around. This work-around can be removed when the required
# minimum version of libcloud is 2.0.0 (See PR #40837 - which is
# implemented in Salt 2018.3.0).
if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'):
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
# Get logging started
log = logging.getLogger(__name__)
# Redirect CloudStack functions to this module namespace
# Each generic libcloud helper is re-bound so it executes against this
# module's globals (its get_conn, __opts__, etc.) rather than its own.
get_node = namespaced_function(get_node, globals())
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
# Name under which salt-cloud loads and addresses this driver.
__virtualname__ = 'cloudstack'
# Only load in this module if the CLOUDSTACK configurations are in place
def __virtual__():
    '''
    Set up the libcloud functions and check for CloudStack configurations.
    '''
    # Refuse to load unless both the provider config and libcloud are present.
    if get_configured_provider() is False or get_dependencies() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # All four connection settings must be present for the provider to count.
    required = ('apikey', 'secretkey', 'host', 'path')
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        required,
    )
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    '''
    deps = {'libcloud': HAS_LIBS}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_conn():
    '''
    Return a conn object for the passed VM data
    '''
    driver_cls = get_driver(Provider.CLOUDSTACK)
    # Resolve the provider config once instead of on every option lookup.
    provider = get_configured_provider()

    verify_ssl_cert = config.get_cloud_config_value('verify_ssl_cert',
                                                    provider,
                                                    __opts__,
                                                    default=True,
                                                    search_global=False)

    if verify_ssl_cert is False:
        try:
            import libcloud.security
            libcloud.security.VERIFY_SSL_CERT = False
        except (ImportError, AttributeError):
            raise SaltCloudSystemExit(
                'Could not disable SSL certificate verification. '
                'Not loading module.'
            )

    def _provider_opt(name, **extra):
        # Read a single provider-level option (never the global scope).
        return config.get_cloud_config_value(
            name, provider, __opts__, search_global=False, **extra)

    return driver_cls(
        key=_provider_opt('apikey'),
        secret=_provider_opt('secretkey'),
        secure=_provider_opt('secure', default=True),
        host=_provider_opt('host'),
        path=_provider_opt('path'),
        port=_provider_opt('port', default=None),
    )
def get_location(conn, vm_):
    '''
    Return the libcloud location object matching the profile's ``location``
    setting, compared against both the location id and its name.
    Falls back to location id 2 when unset.
    '''
    wanted = six.text_type(
        config.get_cloud_config_value('location', vm_, __opts__, default=2))
    for candidate in conn.list_locations():
        if wanted in (six.text_type(candidate.id),
                      six.text_type(candidate.name)):
            return candidate
    # NOTE(review): implicitly returns None when nothing matches -- callers
    # then receive location=None; confirm that is intended.
def get_security_groups(conn, vm_):
    '''
    Return the configured list of security groups (defaulting to
    ``['default']``), or ``False`` when security groups are disabled.
    '''
    enabled = config.get_cloud_config_value(
        'securitygroup_enabled', vm_, __opts__, default=True
    )
    if not enabled:
        return False
    return config.get_cloud_config_value(
        'securitygroup', vm_, __opts__, default=['default']
    )
def get_password(vm_):
    '''
    Return the password to use, honouring both the ``password`` setting and
    the legacy ``passwd`` spelling.
    '''
    legacy = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False)
    return config.get_cloud_config_value(
        'password', vm_, __opts__, default=legacy, search_global=False)
def get_key():
    '''
    Returns the ssh private key for VM access
    '''
    provider = get_configured_provider()
    return config.get_cloud_config_value(
        'private_key', provider, __opts__, search_global=False)
def get_keypair(vm_):
    '''
    Return the configured keypair name, or ``False`` when none is set.
    '''
    # Any falsy value (None, empty string) collapses to False, matching the
    # "is not False" checks done by callers.
    return config.get_cloud_config_value('keypair', vm_, __opts__) or False
def get_ip(data):
    '''
    Return the IP address of the VM.

    Prefers the first public IP reported by libcloud; falls back to the
    first private IP when no public address is available.

    data
        A libcloud node object exposing ``public_ips`` and ``private_ips``
        list attributes.
    '''
    try:
        return data.public_ips[0]
    except (AttributeError, IndexError, TypeError):
        # No usable public address (missing attribute, empty list, or None)
        # -- fall back to the private address. Previously a bare
        # ``except Exception`` was used here, which also masked unrelated
        # errors; the narrowed tuple keeps the intended fallback only.
        return data.private_ips[0]
def get_networkid(vm_):
    '''
    Return the networkid to use (only valid for Advanced Zone), or
    ``False`` when the option is not set.
    '''
    networkid = config.get_cloud_config_value('networkid', vm_, __opts__)
    # Explicit None check: any configured value (even falsy) is passed on.
    return False if networkid is None else networkid
def get_project(conn, vm_):
    '''
    Return the project object matching the profile's ``projectid`` setting
    (by id or name), or ``False`` when projects are unavailable, unset, or
    not found.
    '''
    try:
        projects = conn.ex_list_projects()
    except AttributeError:
        # with versions <0.15 of libcloud this is causing an AttributeError.
        log.warning('Cannot get projects, you may need to update libcloud to 0.15 or later')
        return False

    projid = config.get_cloud_config_value('projectid', vm_, __opts__)
    if not projid:
        return False

    wanted = six.text_type(projid)
    for candidate in projects:
        if wanted in (six.text_type(candidate.id),
                      six.text_type(candidate.name)):
            return candidate

    log.warning("Couldn't find project %s in projects", projid)
    return False
def create(vm_):
    '''
    Create a single VM from a data dict
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'cloudstack',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # Announce the start of provisioning on the Salt event bus.
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', vm_['name'])
    conn = get_conn()

    # Resolve image, size and location objects from the profile settings.
    # pylint: disable=not-callable
    kwargs = {
        'name': vm_['name'],
        'image': get_image(conn, vm_),
        'size': get_size(conn, vm_),
        'location': get_location(conn, vm_),
    }
    # pylint: enable=not-callable

    # Optional settings are only added to kwargs when configured.
    sg = get_security_groups(conn, vm_)
    if sg is not False:
        kwargs['ex_security_groups'] = sg

    if get_keypair(vm_) is not False:
        kwargs['ex_keyname'] = get_keypair(vm_)

    if get_networkid(vm_) is not False:
        kwargs['networkids'] = get_networkid(vm_)
        kwargs['networks'] = (  # The only attr that is used is 'id'.
            CloudStackNetwork(None, None, None,
                              kwargs['networkids'],
                              None, None),
        )

    if get_project(conn, vm_) is not False:
        kwargs['project'] = get_project(conn, vm_)

    # Replace the libcloud image/size objects with their names so the
    # event payload is serialisable.
    event_data = kwargs.copy()
    event_data['image'] = kwargs['image'].name
    event_data['size'] = kwargs['size'].name

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args={
            'kwargs': __utils__['cloud.filter_event'](
                'requesting',
                event_data,
                ['name', 'profile', 'provider', 'driver', 'image', 'size'],
            ),
        },
        transport=__opts__['transport']
    )

    displayname = cloudstack_displayname(vm_)
    if displayname:
        kwargs['ex_displayname'] = displayname
    else:
        kwargs['ex_displayname'] = kwargs['name']

    # Create any requested data volumes before creating the node itself.
    volumes = {}
    ex_blockdevicemappings = block_device_mappings(vm_)
    if ex_blockdevicemappings:
        for ex_blockdevicemapping in ex_blockdevicemappings:
            if 'VirtualName' not in ex_blockdevicemapping:
                # Generate a per-VM volume name when none is given.
                ex_blockdevicemapping['VirtualName'] = '{0}-{1}'.format(vm_['name'], len(volumes))
            __utils__['cloud.fire_event'](
                'event',
                'requesting volume',
                'salt/cloud/{0}/requesting'.format(ex_blockdevicemapping['VirtualName']),
                sock_dir=__opts__['sock_dir'],
                args={'kwargs': {'name': ex_blockdevicemapping['VirtualName'],
                                 'device': ex_blockdevicemapping['DeviceName'],
                                 'size': ex_blockdevicemapping['VolumeSize']}},
            )
            try:
                volumes[ex_blockdevicemapping['DeviceName']] = conn.create_volume(
                    ex_blockdevicemapping['VolumeSize'],
                    ex_blockdevicemapping['VirtualName']
                )
            except Exception as exc:
                log.error(
                    'Error creating volume %s on CLOUDSTACK\n\n'
                    'The following exception was thrown by libcloud when trying to '
                    'requesting a volume: \n%s',
                    ex_blockdevicemapping['VirtualName'], exc,
                    # Show the traceback if the debug logging level is enabled
                    exc_info_on_loglevel=logging.DEBUG
                )
                return False
    else:
        ex_blockdevicemapping = {}

    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating %s on CLOUDSTACK\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return False

    # Attach each volume created above to the freshly created node.
    for device_name in six.iterkeys(volumes):
        try:
            conn.attach_volume(data, volumes[device_name], device_name)
        except Exception as exc:
            log.error(
                'Error attaching volume %s on CLOUDSTACK\n\n'
                'The following exception was thrown by libcloud when trying to '
                'attach a volume: \n%s',
                ex_blockdevicemapping.get('VirtualName', 'UNKNOWN'), exc,
                # Show the traceback if the debug logging level is enabled
                exc_info=log.isEnabledFor(logging.DEBUG)
            )
            return False

    # NOTE(review): ssh_username is read but never used below -- presumably
    # cloud.bootstrap reads it from vm_/config itself; confirm.
    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root'
    )

    # Hand off to salt.utils.cloud bootstrap to deploy the minion.
    vm_['ssh_host'] = get_ip(data)
    vm_['password'] = data.extra['password']
    vm_['key_filename'] = get_key()
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data.__dict__)

    # Strip the generated password out of the node data before logging it.
    if 'password' in data.extra:
        del data.extra['password']

    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data.__dict__)
    )

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        sock_dir=__opts__['sock_dir'],
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        transport=__opts__['transport']
    )

    return ret
def block_device_mappings(vm_):
    '''
    Return the configured block device mappings, e.g.::

        [{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'},
         {'DeviceName': '/dev/sdc', 'VirtualName': 'ephemeral1'}]
    '''
    return config.get_cloud_config_value('block_device_mappings',
                                         vm_,
                                         __opts__,
                                         search_global=True)
def cloudstack_displayname(vm_):
    '''
    Return the configured display name of the VM, e.g. ``"minion1"``.
    '''
    return config.get_cloud_config_value('cloudstack_displayname',
                                         vm_,
                                         __opts__,
                                         search_global=True)
|
saltstack/salt
|
salt/modules/sysrc.py
|
get
|
python
|
def get(**kwargs):
    '''
    Return system rc configuration variables

    CLI Example:

    .. code-block:: bash

        salt '*' sysrc.get includeDefaults=True
    '''
    cmd = 'sysrc -v'
    if 'file' in kwargs:
        cmd += ' -f '+kwargs['file']
    if 'jail' in kwargs:
        cmd += ' -j '+kwargs['jail']
    if 'name' in kwargs:
        cmd += ' '+kwargs['name']
    else:
        # -A includes variables that only have default values; -a lists
        # only explicitly set ones.
        cmd += ' -A' if kwargs.get('includeDefaults', False) else ' -a'

    output = __salt__['cmd.run'](cmd)
    if "sysrc: unknown variable" in output:
        # The requested variable does not exist; report "nothing found"
        # rather than raising.
        return None

    ret = {}
    for line in output.split("\n"):
        fields = line.split(': ')
        rcfile = fields[0]
        if len(fields) > 2:
            # Normal "<file>: <var>: <value>" line.
            var, val = fields[1], fields[2]
        else:
            # "<file>: <var>:" -- variable is present but empty.
            var, val = fields[1].rstrip(':'), ''
        ret.setdefault(rcfile, {})[var] = val
    return ret
|
Return system rc configuration variables
CLI Example:
.. code-block:: bash
salt '*' sysrc.get includeDefaults=True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/sysrc.py#L30-L74
| null |
# -*- coding: utf-8 -*-
'''
sysrc module for FreeBSD
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt libs
import salt.utils.path
from salt.exceptions import CommandExecutionError
__virtualname__ = 'sysrc'
__func_alias__ = {
'set_': 'set'
}
def __virtual__():
    '''
    Only runs if sysrc exists
    '''
    if salt.utils.path.which('sysrc') is None:
        return (False, 'The sysrc execution module failed to load: the sysrc binary is not in the path.')
    return True
def set_(name, value, **kwargs):
    '''
    Set system rc configuration variables

    CLI Example:

    .. code-block:: bash

        salt '*' sysrc.set name=sshd_flags value="-p 2222"
    '''
    cmd = 'sysrc -v'
    if 'file' in kwargs:
        cmd += ' -f '+kwargs['file']
    if 'jail' in kwargs:
        cmd += ' -j '+kwargs['jail']
    # The YAML parser converts literals such as YES/NO/True/False to
    # booleans, but sysrc only accepts the strings "YES"/"NO", so map
    # booleans back. isinstance replaces the previous exact type() checks.
    if isinstance(value, bool):
        value = 'YES' if value else 'NO'
    # Same issue for numbers: sysrc deals only in strings. (bool is handled
    # above, so the elif never re-converts a boolean.)
    elif isinstance(value, int):
        value = str(value)
    cmd += ' '+name+"=\""+value+"\""

    sysrcs = __salt__['cmd.run'](cmd)
    ret = {}
    for line in sysrcs.split("\n"):
        # Expected format: "<rcfile>: <var>: <oldval> -> <newval>"
        fields = line.split(': ')
        if len(fields) < 3:
            # Skip diagnostics or otherwise malformed lines instead of
            # raising IndexError (as the previous implementation did).
            continue
        rcfile, var = fields[0], fields[1]
        change = fields[2].split("->")
        if len(change) < 2:
            continue
        # Keep only the new value; strip() fixes the leading space the old
        # code left on every value (" YES" instead of "YES").
        newval = change[1].strip()
        ret.setdefault(rcfile, {})[var] = newval
    return ret
def remove(name, **kwargs):
    '''
    Remove system rc configuration variables

    CLI Example:

    .. code-block:: bash

        salt '*' sysrc.remove name=sshd_enable
    '''
    cmd = 'sysrc -v'
    if 'file' in kwargs:
        cmd += ' -f '+kwargs['file']
    if 'jail' in kwargs:
        cmd += ' -j '+kwargs['jail']
    cmd += ' -x '+name

    output = __salt__['cmd.run'](cmd)
    if "sysrc: unknown variable" in output:
        # Nothing to remove -- surface sysrc's own error text.
        raise CommandExecutionError(output)
    return name+" removed"
|
saltstack/salt
|
salt/modules/sysrc.py
|
set_
|
python
|
def set_(name, value, **kwargs):
'''
Set system rc configuration variables
CLI Example:
.. code-block:: bash
salt '*' sysrc.set name=sshd_flags value="-p 2222"
'''
cmd = 'sysrc -v'
if 'file' in kwargs:
cmd += ' -f '+kwargs['file']
if 'jail' in kwargs:
cmd += ' -j '+kwargs['jail']
# This is here because the YAML parser likes to convert the string literals
# YES, NO, Yes, No, True, False, etc. to boolean types. However, in this case,
# we will check to see if that happened and replace it with "YES" or "NO" because
# those items are accepted in sysrc.
if type(value) == bool:
if value:
value = "YES"
else:
value = "NO"
# This is here for the same reason, except for numbers
if type(value) == int:
value = str(value)
cmd += ' '+name+"=\""+value+"\""
sysrcs = __salt__['cmd.run'](cmd)
ret = {}
for sysrc in sysrcs.split("\n"):
rcfile = sysrc.split(': ')[0]
var = sysrc.split(': ')[1]
oldval = sysrc.split(': ')[2].strip().split("->")[0]
newval = sysrc.split(': ')[2].strip().split("->")[1]
if rcfile not in ret:
ret[rcfile] = {}
ret[rcfile][var] = newval
return ret
|
Set system rc configuration variables
CLI Example:
.. code-block:: bash
salt '*' sysrc.set name=sshd_flags value="-p 2222"
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/sysrc.py#L77-L123
| null |
# -*- coding: utf-8 -*-
'''
sysrc module for FreeBSD
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt libs
import salt.utils.path
from salt.exceptions import CommandExecutionError
__virtualname__ = 'sysrc'
__func_alias__ = {
'set_': 'set'
}
def __virtual__():
'''
Only runs if sysrc exists
'''
if salt.utils.path.which('sysrc') is not None:
return True
return (False, 'The sysrc execution module failed to load: the sysrc binary is not in the path.')
def get(**kwargs):
'''
Return system rc configuration variables
CLI Example:
.. code-block:: bash
salt '*' sysrc.get includeDefaults=True
'''
cmd = 'sysrc -v'
if 'file' in kwargs:
cmd += ' -f '+kwargs['file']
if 'jail' in kwargs:
cmd += ' -j '+kwargs['jail']
if 'name' in kwargs:
cmd += ' '+kwargs['name']
elif kwargs.get('includeDefaults', False):
cmd += ' -A'
else:
cmd += ' -a'
sysrcs = __salt__['cmd.run'](cmd)
if "sysrc: unknown variable" in sysrcs:
# raise CommandExecutionError(sysrcs)
return None
ret = {}
for sysrc in sysrcs.split("\n"):
line_components = sysrc.split(': ')
rcfile = line_components[0]
if len(line_components) > 2:
var = line_components[1]
val = line_components[2]
else:
var = line_components[1].rstrip(':')
val = ''
if rcfile not in ret:
ret[rcfile] = {}
ret[rcfile][var] = val
return ret
def remove(name, **kwargs):
'''
Remove system rc configuration variables
CLI Example:
.. code-block:: bash
salt '*' sysrc.remove name=sshd_enable
'''
cmd = 'sysrc -v'
if 'file' in kwargs:
cmd += ' -f '+kwargs['file']
if 'jail' in kwargs:
cmd += ' -j '+kwargs['jail']
cmd += ' -x '+name
sysrcs = __salt__['cmd.run'](cmd)
if "sysrc: unknown variable" in sysrcs:
raise CommandExecutionError(sysrcs)
else:
return name+" removed"
|
saltstack/salt
|
salt/modules/sysrc.py
|
remove
|
python
|
def remove(name, **kwargs):
'''
Remove system rc configuration variables
CLI Example:
.. code-block:: bash
salt '*' sysrc.remove name=sshd_enable
'''
cmd = 'sysrc -v'
if 'file' in kwargs:
cmd += ' -f '+kwargs['file']
if 'jail' in kwargs:
cmd += ' -j '+kwargs['jail']
cmd += ' -x '+name
sysrcs = __salt__['cmd.run'](cmd)
if "sysrc: unknown variable" in sysrcs:
raise CommandExecutionError(sysrcs)
else:
return name+" removed"
|
Remove system rc configuration variables
CLI Example:
.. code-block:: bash
salt '*' sysrc.remove name=sshd_enable
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/sysrc.py#L126-L151
| null |
# -*- coding: utf-8 -*-
'''
sysrc module for FreeBSD
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt libs
import salt.utils.path
from salt.exceptions import CommandExecutionError
__virtualname__ = 'sysrc'
__func_alias__ = {
'set_': 'set'
}
def __virtual__():
'''
Only runs if sysrc exists
'''
if salt.utils.path.which('sysrc') is not None:
return True
return (False, 'The sysrc execution module failed to load: the sysrc binary is not in the path.')
def get(**kwargs):
'''
Return system rc configuration variables
CLI Example:
.. code-block:: bash
salt '*' sysrc.get includeDefaults=True
'''
cmd = 'sysrc -v'
if 'file' in kwargs:
cmd += ' -f '+kwargs['file']
if 'jail' in kwargs:
cmd += ' -j '+kwargs['jail']
if 'name' in kwargs:
cmd += ' '+kwargs['name']
elif kwargs.get('includeDefaults', False):
cmd += ' -A'
else:
cmd += ' -a'
sysrcs = __salt__['cmd.run'](cmd)
if "sysrc: unknown variable" in sysrcs:
# raise CommandExecutionError(sysrcs)
return None
ret = {}
for sysrc in sysrcs.split("\n"):
line_components = sysrc.split(': ')
rcfile = line_components[0]
if len(line_components) > 2:
var = line_components[1]
val = line_components[2]
else:
var = line_components[1].rstrip(':')
val = ''
if rcfile not in ret:
ret[rcfile] = {}
ret[rcfile][var] = val
return ret
def set_(name, value, **kwargs):
'''
Set system rc configuration variables
CLI Example:
.. code-block:: bash
salt '*' sysrc.set name=sshd_flags value="-p 2222"
'''
cmd = 'sysrc -v'
if 'file' in kwargs:
cmd += ' -f '+kwargs['file']
if 'jail' in kwargs:
cmd += ' -j '+kwargs['jail']
# This is here because the YAML parser likes to convert the string literals
# YES, NO, Yes, No, True, False, etc. to boolean types. However, in this case,
# we will check to see if that happened and replace it with "YES" or "NO" because
# those items are accepted in sysrc.
if type(value) == bool:
if value:
value = "YES"
else:
value = "NO"
# This is here for the same reason, except for numbers
if type(value) == int:
value = str(value)
cmd += ' '+name+"=\""+value+"\""
sysrcs = __salt__['cmd.run'](cmd)
ret = {}
for sysrc in sysrcs.split("\n"):
rcfile = sysrc.split(': ')[0]
var = sysrc.split(': ')[1]
oldval = sysrc.split(': ')[2].strip().split("->")[0]
newval = sysrc.split(': ')[2].strip().split("->")[1]
if rcfile not in ret:
ret[rcfile] = {}
ret[rcfile][var] = newval
return ret
|
saltstack/salt
|
salt/cloud/clouds/dimensiondata.py
|
create
|
python
|
def create(vm_):
    '''
    Create a single VM from a data dict
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'dimensiondata',
                vm_['profile']) is False:
            return False
    except AttributeError:
        pass

    # Announce the start of provisioning on the Salt event bus.
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', vm_['name'])
    conn = get_conn()

    # Resolve the target location and the requested image within it.
    location = conn.ex_get_location_by_id(vm_['location'])
    images = conn.list_images(location=location)
    image = [x for x in images if x.id == vm_['image']][0]

    # Find the requested network domain, creating it when absent.
    network_domains = conn.ex_list_network_domains(location=location)
    try:
        network_domain = [y for y in network_domains
                          if y.name == vm_['network_domain']][0]
    except IndexError:
        network_domain = conn.ex_create_network_domain(
            location=location,
            name=vm_['network_domain'],
            plan='ADVANCED',
            description=''
        )

    try:
        vlan = [y for y in conn.ex_list_vlans(
            location=location,
            network_domain=network_domain)
            if y.name == vm_['vlan']][0]
    except (IndexError, KeyError):
        # Use the first VLAN in the network domain
        vlan = conn.ex_list_vlans(
            location=location,
            network_domain=network_domain)[0]

    kwargs = {
        'name': vm_['name'],
        'image': image,
        'ex_description': vm_['description'],
        'ex_network_domain': network_domain,
        'ex_vlan': vlan,
        'ex_is_started': vm_['is_started']
    }

    # Convert libcloud objects into serialisable data for the event bus.
    event_data = _to_event_data(kwargs)

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('requesting', event_data, list(event_data)),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # Initial password (excluded from event payload)
    initial_password = NodeAuthPassword(vm_['auth'])
    kwargs['auth'] = initial_password

    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating %s on DIMENSIONDATA\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            exc_info_on_loglevel=logging.DEBUG
        )
        return False

    try:
        # Poll until the node reports a usable IP address.
        data = __utils__['cloud.wait_for_ip'](
            _query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=25 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=30),
            max_failures=config.get_cloud_config_value(
                'wait_for_ip_max_failures', vm_, __opts__, default=60),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])  # pylint: disable=not-callable
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))

    log.debug('VM is now running')

    # Address used for the SSH bootstrap connection.
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address %s', ip_address)

    # Address the minion will use to reach the master.
    if __utils__['cloud.get_salt_interface'](vm_, __opts__) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: %s', salt_ip_address)
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: %s', salt_ip_address)

    if not ip_address:
        raise SaltCloudSystemExit(
            'No IP addresses could be found.'
        )

    vm_['salt_host'] = salt_ip_address
    vm_['ssh_host'] = ip_address
    vm_['password'] = vm_['auth']

    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data.__dict__)

    # Strip the password out of the node data before logging it.
    if 'password' in data.extra:
        del data.extra['password']

    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data.__dict__)
    )

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
|
Create a single VM from a data dict
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/dimensiondata.py#L193-L346
|
[
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n",
"def ssh_interface(vm_):\n '''\n Return the ssh_interface type to connect to. Either 'public_ips' (default)\n or 'private_ips'.\n '''\n return config.get_cloud_config_value(\n 'ssh_interface', vm_, __opts__, default='public_ips',\n search_global=False\n )\n",
"def is_profile_configured(opts, provider, profile_name, vm_=None):\n '''\n Check if the requested profile contains the minimum required parameters for\n a profile.\n\n Required parameters include image and provider for all drivers, while some\n drivers also require size keys.\n\n .. versionadded:: 2015.8.0\n '''\n # Standard dict keys required by all drivers.\n required_keys = ['provider']\n alias, driver = provider.split(':')\n\n # Most drivers need an image to be specified, but some do not.\n non_image_drivers = ['nova', 'virtualbox', 'libvirt', 'softlayer', 'oneandone', 'profitbricks']\n\n # Most drivers need a size, but some do not.\n non_size_drivers = ['opennebula', 'parallels', 'proxmox', 'scaleway',\n 'softlayer', 'softlayer_hw', 'vmware', 'vsphere',\n 'virtualbox', 'libvirt', 'oneandone', 'profitbricks']\n\n provider_key = opts['providers'][alias][driver]\n profile_key = opts['providers'][alias][driver]['profiles'][profile_name]\n\n # If cloning on Linode, size and image are not necessary.\n # They are obtained from the to-be-cloned VM.\n if driver == 'linode' and profile_key.get('clonefrom', False):\n non_image_drivers.append('linode')\n non_size_drivers.append('linode')\n elif driver == 'gce' and 'sourceImage' in six.text_type(vm_.get('ex_disks_gce_struct')):\n non_image_drivers.append('gce')\n\n # If cloning on VMware, specifying image is not necessary.\n if driver == 'vmware' and 'image' not in list(profile_key.keys()):\n non_image_drivers.append('vmware')\n\n if driver not in non_image_drivers:\n required_keys.append('image')\n if driver == 'vmware':\n required_keys.append('datastore')\n elif driver in ['linode', 'virtualbox']:\n required_keys.append('clonefrom')\n elif driver == 'nova':\n nova_image_keys = ['image', 'block_device_mapping', 'block_device', 'boot_volume']\n if not any([key in provider_key for key in nova_image_keys]) and not any([key in profile_key for key in nova_image_keys]):\n required_keys.extend(nova_image_keys)\n\n if driver not 
in non_size_drivers:\n required_keys.append('size')\n\n # Check if required fields are supplied in the provider config. If they\n # are present, remove it from the required_keys list.\n for item in list(required_keys):\n if item in provider_key:\n required_keys.remove(item)\n\n # If a vm_ dict was passed in, use that information to get any other configs\n # that we might have missed thus far, such as a option provided in a map file.\n if vm_:\n for item in list(required_keys):\n if item in vm_:\n required_keys.remove(item)\n\n # Check for remaining required parameters in the profile config.\n for item in required_keys:\n if profile_key.get(item, None) is None:\n # There's at least one required configuration item which is not set.\n log.error(\n \"The required '%s' configuration setting is missing from \"\n \"the '%s' profile, which is configured under the '%s' alias.\",\n item, profile_name, alias\n )\n return False\n\n return True\n",
"def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n driver = get_driver(Provider.DIMENSIONDATA)\n\n region = config.get_cloud_config_value(\n 'region', vm_, __opts__\n )\n\n user_id = config.get_cloud_config_value(\n 'user_id', vm_, __opts__\n )\n key = config.get_cloud_config_value(\n 'key', vm_, __opts__\n )\n\n if key is not None:\n log.debug('DimensionData authenticating using password')\n\n return driver(\n user_id,\n key,\n region=region\n )\n",
"def preferred_ip(vm_, ips):\n '''\n Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'.\n '''\n proto = config.get_cloud_config_value(\n 'protocol', vm_, __opts__, default='ipv4', search_global=False\n )\n family = socket.AF_INET\n if proto == 'ipv6':\n family = socket.AF_INET6\n for ip in ips:\n try:\n socket.inet_pton(family, ip)\n return ip\n except Exception:\n continue\n return False\n",
"def _to_event_data(obj):\n '''\n Convert the specified object into a form that can be serialised by msgpack as event data.\n\n :param obj: The object to convert.\n '''\n\n if obj is None:\n return None\n if isinstance(obj, bool):\n return obj\n if isinstance(obj, int):\n return obj\n if isinstance(obj, float):\n return obj\n if isinstance(obj, str):\n return obj\n if isinstance(obj, bytes):\n return obj\n if isinstance(obj, dict):\n return obj\n\n if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references)\n return obj.name\n\n if isinstance(obj, list):\n return [_to_event_data(item) for item in obj]\n\n event_data = {}\n for attribute_name in dir(obj):\n if attribute_name.startswith('_'):\n continue\n\n attribute_value = getattr(obj, attribute_name)\n\n if callable(attribute_value): # Strip out methods\n continue\n\n event_data[attribute_name] = _to_event_data(attribute_value)\n\n return event_data\n"
] |
# -*- coding: utf-8 -*-
'''
Dimension Data Cloud Module
===========================
This is a cloud module for the Dimension Data Cloud,
using the existing Libcloud driver for Dimension Data.
.. code-block:: yaml
# Note: This example is for /etc/salt/cloud.providers
# or any file in the
# /etc/salt/cloud.providers.d/ directory.
my-dimensiondata-config:
user_id: my_username
key: myPassword!
region: dd-na
driver: dimensiondata
:maintainer: Anthony Shaw <anthonyshaw@apache.org>
:depends: libcloud >= 1.2.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import pprint
from salt.utils.versions import LooseVersion as _LooseVersion
# Import libcloud
try:
import libcloud
from libcloud.compute.base import NodeDriver, NodeState
from libcloud.compute.base import NodeAuthPassword
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.loadbalancer.base import Member
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
# This work-around for Issue #32743 is no longer needed for libcloud >=
# 1.4.0. However, older versions of libcloud must still be supported with
# this work-around. This work-around can be removed when the required
# minimum version of libcloud is 2.0.0 (See PR #40837 - which is
# implemented in Salt 2018.3.0).
if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'):
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
# Import salt.cloud libs
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
import salt.utils.cloud
import salt.config as config
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
try:
from netaddr import all_matching_cidrs # pylint: disable=unused-import
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
# Some of the libcloud functions need to be in the same namespace as the
# functions defined in the module, so we create new function objects inside
# this module namespace
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
destroy = namespaced_function(destroy, globals())
reboot = namespaced_function(reboot, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
get_node = namespaced_function(get_node, globals())
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'dimensiondata'
def __virtual__():
    '''
    Set up the libcloud functions and check for dimensiondata configurations.

    Returns the module's virtual name when a provider is configured and all
    dependencies are present, otherwise ``False`` so the loader skips it.
    '''
    if get_configured_provider() is False:
        return False
    if get_dependencies() is False:
        return False
    # NOTE(review): this loop has no observable effect — each iteration either
    # continues or falls through, and the same value is returned regardless.
    # Presumably left over from earlier per-provider setup; confirm before
    # removing.
    for provider, details in six.iteritems(__opts__['providers']):
        if 'dimensiondata' not in details:
            continue
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance of the dimensiondata provider.
    '''
    provider_name = __active_provider_name__ or 'dimensiondata'
    required_keys = ('user_id', 'key', 'region')
    return config.is_provider_configured(__opts__, provider_name, required_keys)
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    '''
    return config.check_driver_dependencies(
        __virtualname__,
        {'libcloud': HAS_LIBCLOUD, 'netaddr': HAS_NETADDR},
    )
def _query_node_data(vm_, data):
    '''
    Poll the provider for the node's state and discovered IP addresses.

    Used as the callback for ``cloud.wait_for_ip``: a falsy return triggers
    another polling iteration, while returning the (possibly updated) ``data``
    object signals that an address suitable for the configured ssh interface
    is available.

    :param vm_: The VM configuration dict.
    :param data: The libcloud node object whose IP lists are updated in place.
    '''
    running = False
    try:
        node = show_instance(vm_['name'], 'action')  # pylint: disable=not-callable
        running = (node['state'] == NodeState.RUNNING)
        log.debug('Loaded node data for %s:\nname: %s\nstate: %s',
                  vm_['name'], pprint.pformat(node['name']), node['state'])
    except Exception as err:
        log.error(
            'Failed to get nodes list: %s', err,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        # Trigger a failure in the wait for IP function
        return running
    if not running:
        # Still not running, trigger another iteration
        return
    private = node['private_ips']
    public = node['public_ips']
    if private and not public:
        log.warning('Private IPs returned, but not public. Checking for misidentified IPs.')
        for private_ip in private:
            # preferred_ip filters by the configured protocol family and
            # returns False for addresses that don't parse.
            private_ip = preferred_ip(vm_, [private_ip])
            if private_ip is False:
                continue
            if salt.utils.cloud.is_public_ip(private_ip):
                log.warning('%s is a public IP', private_ip)
                data.public_ips.append(private_ip)
            else:
                log.warning('%s is a private IP', private_ip)
                if private_ip not in data.private_ips:
                    data.private_ips.append(private_ip)
        # Only finish here when the caller actually wants private IPs;
        # otherwise fall through to the general checks below.
        if ssh_interface(vm_) == 'private_ips' and data.private_ips:
            return data
    if private:
        data.private_ips = private
        if ssh_interface(vm_) == 'private_ips':
            return data
    if public:
        data.public_ips = public
        if ssh_interface(vm_) != 'private_ips':
            return data
    # No suitable address yet: log what we have and poll again (implicit None).
    log.debug('Contents of the node data:')
    log.debug(data)
def create_lb(kwargs=None, call=None):
    r'''
    Create a load-balancer configuration.

    :param kwargs: Function arguments: ``name`` (required), ``port``
        (required), ``networkdomain`` (required), ``members`` (optional,
        comma-separated node names), ``protocol`` and ``algorithm``
        (optional, passed straight through to libcloud).
    :param call: Must be ``'function'`` (invoked with ``-f``/``--function``).
    :return: A dict describing the created balancer, or ``False`` when a
        required argument is missing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f create_lb dimensiondata \
            name=dev-lb port=80 protocol=http \
            members=w1,w2,w3 algorithm=ROUND_ROBIN
    '''
    conn = get_conn()
    if call != 'function':
        raise SaltCloudSystemExit(
            'The create_lb function must be called with -f or --function.'
        )
    if not kwargs or 'name' not in kwargs:
        log.error(
            'A name must be specified when creating a health check.'
        )
        return False
    if 'port' not in kwargs:
        log.error(
            'A port or port-range must be specified for the load-balancer.'
        )
        return False
    if 'networkdomain' not in kwargs:
        log.error(
            'A network domain must be specified for the load-balancer.'
        )
        return False
    if 'members' in kwargs:
        # Resolve each named node to its first private IP and wrap it as a
        # load-balancer Member on the requested port.
        members = []
        ip = ""
        membersList = kwargs.get('members').split(',')
        log.debug('MemberList: %s', membersList)
        for member in membersList:
            try:
                log.debug('Member: %s', member)
                node = get_node(conn, member)  # pylint: disable=not-callable
                log.debug('Node: %s', node)
                ip = node.private_ips[0]
            except Exception as err:
                log.error(
                    'Failed to get node ip: %s', err,
                    # Show the traceback if the debug logging level is enabled
                    exc_info_on_loglevel=logging.DEBUG
                )
            # NOTE(review): when the lookup above fails, ``ip`` still holds
            # the previous member's address (or "") and is appended anyway —
            # confirm whether failed members should be skipped instead.
            members.append(Member(ip, ip, kwargs['port']))
    else:
        members = None
    log.debug('Members: %s', members)
    networkdomain = kwargs['networkdomain']
    name = kwargs['name']
    port = kwargs['port']
    protocol = kwargs.get('protocol', None)
    algorithm = kwargs.get('algorithm', None)
    lb_conn = get_lb_conn(conn)
    network_domains = conn.ex_list_network_domains()
    # Scope the load-balancer driver to the requested network domain before
    # creating the balancer (raises IndexError if the domain doesn't exist).
    network_domain = [y for y in network_domains if y.name == networkdomain][0]
    log.debug('Network Domain: %s', network_domain.id)
    lb_conn.ex_set_current_network_domain(network_domain.id)
    event_data = _to_event_data(kwargs)
    __utils__['cloud.fire_event'](
        'event',
        'create load_balancer',
        'salt/cloud/loadbalancer/creating',
        args=event_data,
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    lb = lb_conn.create_balancer(
        name, port, protocol, algorithm, members
    )
    event_data = _to_event_data(kwargs)
    __utils__['cloud.fire_event'](
        'event',
        'created load_balancer',
        'salt/cloud/loadbalancer/created',
        args=event_data,
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return _expand_balancer(lb)
def _expand_balancer(lb):
'''
Convert the libcloud load-balancer object into something more serializable.
'''
ret = {}
ret.update(lb.__dict__)
return ret
def preferred_ip(vm_, ips):
    '''
    Return the first address in ``ips`` that parses in the configured
    protocol family. The family comes from the ``protocol`` cloud config
    option: 'ipv4' (default) or 'ipv6'.

    :param vm_: The VM configuration dict (used to look up ``protocol``).
    :param ips: Iterable of candidate IP address strings.
    :return: The first matching address, or ``False`` if none parse.
    '''
    proto = config.get_cloud_config_value(
        'protocol', vm_, __opts__, default='ipv4', search_global=False
    )
    family = socket.AF_INET
    if proto == 'ipv6':
        family = socket.AF_INET6
    for ip in ips:
        try:
            socket.inet_pton(family, ip)
            return ip
        except socket.error:
            # Not a valid address for this family; try the next candidate.
            # (Narrowed from a bare ``except Exception`` so unrelated bugs
            # such as TypeErrors are no longer silently swallowed.)
            continue
    return False
def ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface',
        vm_,
        __opts__,
        default='public_ips',
        search_global=False,
    )
    return interface
def stop(name, call=None):
    '''
    Stop a VM in DimensionData.

    name:
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    conn = get_conn()
    vm_node = get_node(conn, name)  # pylint: disable=not-callable
    log.debug('Node of Cloud VM: %s', vm_node)
    # Graceful shutdown asks the guest OS to power down rather than
    # pulling the (virtual) plug.
    shutdown_status = conn.ex_shutdown_graceful(vm_node)
    log.debug('Status of Cloud VM: %s', shutdown_status)
    return shutdown_status
def start(name, call=None):
    '''
    Start a VM in DimensionData.

    :param str name:
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    conn = get_conn()
    node = get_node(conn, name)  # pylint: disable=not-callable
    log.debug('Node of Cloud VM: %s', node)
    status = conn.ex_start_node(node)
    log.debug('Status of Cloud VM: %s', status)
    return status
def get_conn():
    '''
    Return a conn object for the passed VM data

    Builds a libcloud DimensionData compute driver from the configured
    provider's ``user_id``, ``key`` and ``region``.
    '''
    vm_ = get_configured_provider()
    driver = get_driver(Provider.DIMENSIONDATA)
    region = config.get_cloud_config_value('region', vm_, __opts__)
    user_id = config.get_cloud_config_value('user_id', vm_, __opts__)
    key = config.get_cloud_config_value('key', vm_, __opts__)
    if key is not None:
        log.debug('DimensionData authenticating using password')
    return driver(user_id, key, region=region)
def get_lb_conn(dd_driver=None):
    '''
    Return a load-balancer conn object

    :param dd_driver: The compute driver instance; required (only used as a
        sanity check that a driver was supplied).
    '''
    vm_ = get_configured_provider()
    region = config.get_cloud_config_value('region', vm_, __opts__)
    user_id = config.get_cloud_config_value('user_id', vm_, __opts__)
    key = config.get_cloud_config_value('key', vm_, __opts__)
    if not dd_driver:
        raise SaltCloudSystemExit(
            'Missing dimensiondata_driver for get_lb_conn method.'
        )
    lb_driver = get_driver_lb(Provider_lb.DIMENSIONDATA)
    return lb_driver(user_id, key, region=region)
def _to_event_data(obj):
'''
Convert the specified object into a form that can be serialised by msgpack as event data.
:param obj: The object to convert.
'''
if obj is None:
return None
if isinstance(obj, bool):
return obj
if isinstance(obj, int):
return obj
if isinstance(obj, float):
return obj
if isinstance(obj, str):
return obj
if isinstance(obj, bytes):
return obj
if isinstance(obj, dict):
return obj
if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references)
return obj.name
if isinstance(obj, list):
return [_to_event_data(item) for item in obj]
event_data = {}
for attribute_name in dir(obj):
if attribute_name.startswith('_'):
continue
attribute_value = getattr(obj, attribute_name)
if callable(attribute_value): # Strip out methods
continue
event_data[attribute_name] = _to_event_data(attribute_value)
return event_data
|
saltstack/salt
|
salt/cloud/clouds/dimensiondata.py
|
create_lb
|
python
|
def create_lb(kwargs=None, call=None):
r'''
Create a load-balancer configuration.
CLI Example:
.. code-block:: bash
salt-cloud -f create_lb dimensiondata \
name=dev-lb port=80 protocol=http \
members=w1,w2,w3 algorithm=ROUND_ROBIN
'''
conn = get_conn()
if call != 'function':
raise SaltCloudSystemExit(
'The create_lb function must be called with -f or --function.'
)
if not kwargs or 'name' not in kwargs:
log.error(
'A name must be specified when creating a health check.'
)
return False
if 'port' not in kwargs:
log.error(
'A port or port-range must be specified for the load-balancer.'
)
return False
if 'networkdomain' not in kwargs:
log.error(
'A network domain must be specified for the load-balancer.'
)
return False
if 'members' in kwargs:
members = []
ip = ""
membersList = kwargs.get('members').split(',')
log.debug('MemberList: %s', membersList)
for member in membersList:
try:
log.debug('Member: %s', member)
node = get_node(conn, member) # pylint: disable=not-callable
log.debug('Node: %s', node)
ip = node.private_ips[0]
except Exception as err:
log.error(
'Failed to get node ip: %s', err,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
members.append(Member(ip, ip, kwargs['port']))
else:
members = None
log.debug('Members: %s', members)
networkdomain = kwargs['networkdomain']
name = kwargs['name']
port = kwargs['port']
protocol = kwargs.get('protocol', None)
algorithm = kwargs.get('algorithm', None)
lb_conn = get_lb_conn(conn)
network_domains = conn.ex_list_network_domains()
network_domain = [y for y in network_domains if y.name == networkdomain][0]
log.debug('Network Domain: %s', network_domain.id)
lb_conn.ex_set_current_network_domain(network_domain.id)
event_data = _to_event_data(kwargs)
__utils__['cloud.fire_event'](
'event',
'create load_balancer',
'salt/cloud/loadbalancer/creating',
args=event_data,
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
lb = lb_conn.create_balancer(
name, port, protocol, algorithm, members
)
event_data = _to_event_data(kwargs)
__utils__['cloud.fire_event'](
'event',
'created load_balancer',
'salt/cloud/loadbalancer/created',
args=event_data,
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return _expand_balancer(lb)
|
r'''
Create a load-balancer configuration.
CLI Example:
.. code-block:: bash
salt-cloud -f create_lb dimensiondata \
name=dev-lb port=80 protocol=http \
members=w1,w2,w3 algorithm=ROUND_ROBIN
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/dimensiondata.py#L349-L441
|
[
"def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n driver = get_driver(Provider.DIMENSIONDATA)\n\n region = config.get_cloud_config_value(\n 'region', vm_, __opts__\n )\n\n user_id = config.get_cloud_config_value(\n 'user_id', vm_, __opts__\n )\n key = config.get_cloud_config_value(\n 'key', vm_, __opts__\n )\n\n if key is not None:\n log.debug('DimensionData authenticating using password')\n\n return driver(\n user_id,\n key,\n region=region\n )\n",
"def _to_event_data(obj):\n '''\n Convert the specified object into a form that can be serialised by msgpack as event data.\n\n :param obj: The object to convert.\n '''\n\n if obj is None:\n return None\n if isinstance(obj, bool):\n return obj\n if isinstance(obj, int):\n return obj\n if isinstance(obj, float):\n return obj\n if isinstance(obj, str):\n return obj\n if isinstance(obj, bytes):\n return obj\n if isinstance(obj, dict):\n return obj\n\n if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references)\n return obj.name\n\n if isinstance(obj, list):\n return [_to_event_data(item) for item in obj]\n\n event_data = {}\n for attribute_name in dir(obj):\n if attribute_name.startswith('_'):\n continue\n\n attribute_value = getattr(obj, attribute_name)\n\n if callable(attribute_value): # Strip out methods\n continue\n\n event_data[attribute_name] = _to_event_data(attribute_value)\n\n return event_data\n",
"def get_lb_conn(dd_driver=None):\n '''\n Return a load-balancer conn object\n '''\n vm_ = get_configured_provider()\n\n region = config.get_cloud_config_value(\n 'region', vm_, __opts__\n )\n\n user_id = config.get_cloud_config_value(\n 'user_id', vm_, __opts__\n )\n key = config.get_cloud_config_value(\n 'key', vm_, __opts__\n )\n if not dd_driver:\n raise SaltCloudSystemExit(\n 'Missing dimensiondata_driver for get_lb_conn method.'\n )\n return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region)\n",
"def _expand_balancer(lb):\n '''\n Convert the libcloud load-balancer object into something more serializable.\n '''\n ret = {}\n ret.update(lb.__dict__)\n return ret\n"
] |
# -*- coding: utf-8 -*-
'''
Dimension Data Cloud Module
===========================
This is a cloud module for the Dimension Data Cloud,
using the existing Libcloud driver for Dimension Data.
.. code-block:: yaml
# Note: This example is for /etc/salt/cloud.providers
# or any file in the
# /etc/salt/cloud.providers.d/ directory.
my-dimensiondata-config:
user_id: my_username
key: myPassword!
region: dd-na
driver: dimensiondata
:maintainer: Anthony Shaw <anthonyshaw@apache.org>
:depends: libcloud >= 1.2.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import pprint
from salt.utils.versions import LooseVersion as _LooseVersion
# Import libcloud
try:
import libcloud
from libcloud.compute.base import NodeDriver, NodeState
from libcloud.compute.base import NodeAuthPassword
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.loadbalancer.base import Member
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
# This work-around for Issue #32743 is no longer needed for libcloud >=
# 1.4.0. However, older versions of libcloud must still be supported with
# this work-around. This work-around can be removed when the required
# minimum version of libcloud is 2.0.0 (See PR #40837 - which is
# implemented in Salt 2018.3.0).
if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'):
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
# Import salt.cloud libs
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
import salt.utils.cloud
import salt.config as config
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
try:
from netaddr import all_matching_cidrs # pylint: disable=unused-import
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
# Some of the libcloud functions need to be in the same namespace as the
# functions defined in the module, so we create new function objects inside
# this module namespace
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
destroy = namespaced_function(destroy, globals())
reboot = namespaced_function(reboot, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
get_node = namespaced_function(get_node, globals())
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'dimensiondata'
def __virtual__():
'''
Set up the libcloud functions and check for dimensiondata configurations.
'''
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return False
for provider, details in six.iteritems(__opts__['providers']):
if 'dimensiondata' not in details:
continue
return __virtualname__
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or 'dimensiondata',
('user_id', 'key', 'region')
)
def get_dependencies():
'''
Warn if dependencies aren't met.
'''
deps = {
'libcloud': HAS_LIBCLOUD,
'netaddr': HAS_NETADDR
}
return config.check_driver_dependencies(
__virtualname__,
deps
)
def _query_node_data(vm_, data):
running = False
try:
node = show_instance(vm_['name'], 'action') # pylint: disable=not-callable
running = (node['state'] == NodeState.RUNNING)
log.debug('Loaded node data for %s:\nname: %s\nstate: %s',
vm_['name'], pprint.pformat(node['name']), node['state'])
except Exception as err:
log.error(
'Failed to get nodes list: %s', err,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
# Trigger a failure in the wait for IP function
return running
if not running:
# Still not running, trigger another iteration
return
private = node['private_ips']
public = node['public_ips']
if private and not public:
log.warning('Private IPs returned, but not public. Checking for misidentified IPs.')
for private_ip in private:
private_ip = preferred_ip(vm_, [private_ip])
if private_ip is False:
continue
if salt.utils.cloud.is_public_ip(private_ip):
log.warning('%s is a public IP', private_ip)
data.public_ips.append(private_ip)
else:
log.warning('%s is a private IP', private_ip)
if private_ip not in data.private_ips:
data.private_ips.append(private_ip)
if ssh_interface(vm_) == 'private_ips' and data.private_ips:
return data
if private:
data.private_ips = private
if ssh_interface(vm_) == 'private_ips':
return data
if public:
data.public_ips = public
if ssh_interface(vm_) != 'private_ips':
return data
log.debug('Contents of the node data:')
log.debug(data)
def create(vm_):
    '''
    Create a single VM from a data dict

    Fires lifecycle events, provisions the node (creating the network domain
    if it does not exist yet), waits for an IP address, bootstraps Salt onto
    the new machine and returns the bootstrap result merged with the node's
    attributes.

    :param vm_: The VM profile/configuration dict.
    :return: The deployment result dict, or ``False`` on failure.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
            __opts__,
            __active_provider_name__ or 'dimensiondata',
            vm_['profile']) is False:
            return False
    except AttributeError:
        pass
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', vm_['name'])
    conn = get_conn()
    location = conn.ex_get_location_by_id(vm_['location'])
    images = conn.list_images(location=location)
    # Raises IndexError if the requested image id is not available here.
    image = [x for x in images if x.id == vm_['image']][0]
    network_domains = conn.ex_list_network_domains(location=location)
    try:
        network_domain = [y for y in network_domains
                          if y.name == vm_['network_domain']][0]
    except IndexError:
        # Network domain doesn't exist yet: create it on the fly.
        network_domain = conn.ex_create_network_domain(
            location=location,
            name=vm_['network_domain'],
            plan='ADVANCED',
            description=''
        )
    try:
        vlan = [y for y in conn.ex_list_vlans(
            location=location,
            network_domain=network_domain)
            if y.name == vm_['vlan']][0]
    except (IndexError, KeyError):
        # Use the first VLAN in the network domain
        vlan = conn.ex_list_vlans(
            location=location,
            network_domain=network_domain)[0]
    kwargs = {
        'name': vm_['name'],
        'image': image,
        'ex_description': vm_['description'],
        'ex_network_domain': network_domain,
        'ex_vlan': vlan,
        'ex_is_started': vm_['is_started']
    }
    event_data = _to_event_data(kwargs)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('requesting', event_data, list(event_data)),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Initial password (excluded from event payload)
    initial_password = NodeAuthPassword(vm_['auth'])
    kwargs['auth'] = initial_password
    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating %s on DIMENSIONDATA\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            exc_info_on_loglevel=logging.DEBUG
        )
        return False
    try:
        # Poll _query_node_data until the node is running and has an address.
        data = __utils__['cloud.wait_for_ip'](
            _query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=25 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=30),
            max_failures=config.get_cloud_config_value(
                'wait_for_ip_max_failures', vm_, __opts__, default=60),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])  # pylint: disable=not-callable
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))
    log.debug('VM is now running')
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address %s', ip_address)
    # The salt interface (minion <-> master) may differ from the ssh one.
    if __utils__['cloud.get_salt_interface'](vm_, __opts__) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: %s', salt_ip_address)
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: %s', salt_ip_address)
    if not ip_address:
        raise SaltCloudSystemExit(
            'No IP addresses could be found.'
        )
    vm_['salt_host'] = salt_ip_address
    vm_['ssh_host'] = ip_address
    vm_['password'] = vm_['auth']
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data.__dict__)
    # Don't leak the node password into logs or the returned data.
    if 'password' in data.extra:
        del data.extra['password']
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data.__dict__)
    )
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _expand_balancer(lb):
'''
Convert the libcloud load-balancer object into something more serializable.
'''
ret = {}
ret.update(lb.__dict__)
return ret
def preferred_ip(vm_, ips):
'''
Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'.
'''
proto = config.get_cloud_config_value(
'protocol', vm_, __opts__, default='ipv4', search_global=False
)
family = socket.AF_INET
if proto == 'ipv6':
family = socket.AF_INET6
for ip in ips:
try:
socket.inet_pton(family, ip)
return ip
except Exception:
continue
return False
def ssh_interface(vm_):
'''
Return the ssh_interface type to connect to. Either 'public_ips' (default)
or 'private_ips'.
'''
return config.get_cloud_config_value(
'ssh_interface', vm_, __opts__, default='public_ips',
search_global=False
)
def stop(name, call=None):
'''
Stop a VM in DimensionData.
name:
The name of the VM to stop.
CLI Example:
.. code-block:: bash
salt-cloud -a stop vm_name
'''
conn = get_conn()
node = get_node(conn, name) # pylint: disable=not-callable
log.debug('Node of Cloud VM: %s', node)
status = conn.ex_shutdown_graceful(node)
log.debug('Status of Cloud VM: %s', status)
return status
def start(name, call=None):
'''
Stop a VM in DimensionData.
:param str name:
The name of the VM to stop.
CLI Example:
.. code-block:: bash
salt-cloud -a stop vm_name
'''
conn = get_conn()
node = get_node(conn, name) # pylint: disable=not-callable
log.debug('Node of Cloud VM: %s', node)
status = conn.ex_start_node(node)
log.debug('Status of Cloud VM: %s', status)
return status
def get_conn():
'''
Return a conn object for the passed VM data
'''
vm_ = get_configured_provider()
driver = get_driver(Provider.DIMENSIONDATA)
region = config.get_cloud_config_value(
'region', vm_, __opts__
)
user_id = config.get_cloud_config_value(
'user_id', vm_, __opts__
)
key = config.get_cloud_config_value(
'key', vm_, __opts__
)
if key is not None:
log.debug('DimensionData authenticating using password')
return driver(
user_id,
key,
region=region
)
def get_lb_conn(dd_driver=None):
'''
Return a load-balancer conn object
'''
vm_ = get_configured_provider()
region = config.get_cloud_config_value(
'region', vm_, __opts__
)
user_id = config.get_cloud_config_value(
'user_id', vm_, __opts__
)
key = config.get_cloud_config_value(
'key', vm_, __opts__
)
if not dd_driver:
raise SaltCloudSystemExit(
'Missing dimensiondata_driver for get_lb_conn method.'
)
return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region)
def _to_event_data(obj):
'''
Convert the specified object into a form that can be serialised by msgpack as event data.
:param obj: The object to convert.
'''
if obj is None:
return None
if isinstance(obj, bool):
return obj
if isinstance(obj, int):
return obj
if isinstance(obj, float):
return obj
if isinstance(obj, str):
return obj
if isinstance(obj, bytes):
return obj
if isinstance(obj, dict):
return obj
if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references)
return obj.name
if isinstance(obj, list):
return [_to_event_data(item) for item in obj]
event_data = {}
for attribute_name in dir(obj):
if attribute_name.startswith('_'):
continue
attribute_value = getattr(obj, attribute_name)
if callable(attribute_value): # Strip out methods
continue
event_data[attribute_name] = _to_event_data(attribute_value)
return event_data
|
saltstack/salt
|
salt/cloud/clouds/dimensiondata.py
|
stop
|
python
|
def stop(name, call=None):
'''
Stop a VM in DimensionData.
name:
The name of the VM to stop.
CLI Example:
.. code-block:: bash
salt-cloud -a stop vm_name
'''
conn = get_conn()
node = get_node(conn, name) # pylint: disable=not-callable
log.debug('Node of Cloud VM: %s', node)
status = conn.ex_shutdown_graceful(node)
log.debug('Status of Cloud VM: %s', status)
return status
|
Stop a VM in DimensionData.
name:
The name of the VM to stop.
CLI Example:
.. code-block:: bash
salt-cloud -a stop vm_name
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/dimensiondata.py#L483-L503
|
[
"def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n driver = get_driver(Provider.DIMENSIONDATA)\n\n region = config.get_cloud_config_value(\n 'region', vm_, __opts__\n )\n\n user_id = config.get_cloud_config_value(\n 'user_id', vm_, __opts__\n )\n key = config.get_cloud_config_value(\n 'key', vm_, __opts__\n )\n\n if key is not None:\n log.debug('DimensionData authenticating using password')\n\n return driver(\n user_id,\n key,\n region=region\n )\n"
] |
# -*- coding: utf-8 -*-
'''
Dimension Data Cloud Module
===========================
This is a cloud module for the Dimension Data Cloud,
using the existing Libcloud driver for Dimension Data.
.. code-block:: yaml
# Note: This example is for /etc/salt/cloud.providers
# or any file in the
# /etc/salt/cloud.providers.d/ directory.
my-dimensiondata-config:
user_id: my_username
key: myPassword!
region: dd-na
driver: dimensiondata
:maintainer: Anthony Shaw <anthonyshaw@apache.org>
:depends: libcloud >= 1.2.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import pprint
from salt.utils.versions import LooseVersion as _LooseVersion
# Import libcloud
try:
import libcloud
from libcloud.compute.base import NodeDriver, NodeState
from libcloud.compute.base import NodeAuthPassword
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.loadbalancer.base import Member
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
# This work-around for Issue #32743 is no longer needed for libcloud >=
# 1.4.0. However, older versions of libcloud must still be supported with
# this work-around. This work-around can be removed when the required
# minimum version of libcloud is 2.0.0 (See PR #40837 - which is
# implemented in Salt 2018.3.0).
if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'):
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
# Import salt.cloud libs
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
import salt.utils.cloud
import salt.config as config
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
try:
from netaddr import all_matching_cidrs # pylint: disable=unused-import
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
# Some of the libcloud functions need to be in the same namespace as the
# functions defined in the module, so we create new function objects inside
# this module namespace
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
destroy = namespaced_function(destroy, globals())
reboot = namespaced_function(reboot, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
get_node = namespaced_function(get_node, globals())
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'dimensiondata'
def __virtual__():
'''
Set up the libcloud functions and check for dimensiondata configurations.
'''
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return False
for provider, details in six.iteritems(__opts__['providers']):
if 'dimensiondata' not in details:
continue
return __virtualname__
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or 'dimensiondata',
('user_id', 'key', 'region')
)
def get_dependencies():
'''
Warn if dependencies aren't met.
'''
deps = {
'libcloud': HAS_LIBCLOUD,
'netaddr': HAS_NETADDR
}
return config.check_driver_dependencies(
__virtualname__,
deps
)
def _query_node_data(vm_, data):
    '''
    Poll the provider for the node's state and IP addresses.

    Callback used by ``cloud.wait_for_ip`` during :func:`create`:

    - returns ``False`` when the node lookup raised (counted as a failure);
    - returns ``None`` while the node is not yet running (poll again);
    - returns the updated ``data`` object once an address matching the
      configured ``ssh_interface`` is available.
    '''
    running = False
    try:
        node = show_instance(vm_['name'], 'action') # pylint: disable=not-callable
        running = (node['state'] == NodeState.RUNNING)
        log.debug('Loaded node data for %s:\nname: %s\nstate: %s',
                  vm_['name'], pprint.pformat(node['name']), node['state'])
    except Exception as err:
        log.error(
            'Failed to get nodes list: %s', err,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        # Trigger a failure in the wait for IP function
        return running

    if not running:
        # Still not running, trigger another iteration
        return

    private = node['private_ips']
    public = node['public_ips']

    if private and not public:
        # The provider sometimes reports public addresses in the private
        # list; reclassify any that are actually public.
        log.warning('Private IPs returned, but not public. Checking for misidentified IPs.')
        for private_ip in private:
            private_ip = preferred_ip(vm_, [private_ip])
            if private_ip is False:
                continue
            if salt.utils.cloud.is_public_ip(private_ip):
                log.warning('%s is a public IP', private_ip)
                data.public_ips.append(private_ip)
            else:
                log.warning('%s is a private IP', private_ip)
                if private_ip not in data.private_ips:
                    data.private_ips.append(private_ip)

        if ssh_interface(vm_) == 'private_ips' and data.private_ips:
            return data

    if private:
        data.private_ips = private
        if ssh_interface(vm_) == 'private_ips':
            return data

    if public:
        data.public_ips = public
        if ssh_interface(vm_) != 'private_ips':
            return data

    # Falls through (returns None) when no interface-matching address was
    # found yet — wait_for_ip will poll again.
    log.debug('Contents of the node data:')
    log.debug(data)
def create(vm_):
    '''
    Create a single VM from a data dict.

    Fires the salt-cloud lifecycle events (creating / requesting / created),
    creates the node via libcloud, waits for an IP address, then bootstraps
    Salt onto the new machine.

    :param dict vm_: The VM profile/configuration dictionary.
    :return: The bootstrap result merged with the node's details, or
        ``False`` on failure.
    :raises SaltCloudSystemExit: if waiting for an IP times out/fails, or no
        usable IP address is found.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'dimensiondata',
                vm_['profile']) is False:
            return False
    except AttributeError:
        pass

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', vm_['name'])
    conn = get_conn()

    # Resolve location, image, network domain and VLAN into libcloud objects.
    location = conn.ex_get_location_by_id(vm_['location'])
    images = conn.list_images(location=location)
    image = [x for x in images if x.id == vm_['image']][0]
    network_domains = conn.ex_list_network_domains(location=location)
    try:
        network_domain = [y for y in network_domains
                          if y.name == vm_['network_domain']][0]
    except IndexError:
        # No matching network domain exists: create one on the fly.
        network_domain = conn.ex_create_network_domain(
            location=location,
            name=vm_['network_domain'],
            plan='ADVANCED',
            description=''
        )

    try:
        vlan = [y for y in conn.ex_list_vlans(
            location=location,
            network_domain=network_domain)
                if y.name == vm_['vlan']][0]
    except (IndexError, KeyError):
        # Use the first VLAN in the network domain
        vlan = conn.ex_list_vlans(
            location=location,
            network_domain=network_domain)[0]

    kwargs = {
        'name': vm_['name'],
        'image': image,
        'ex_description': vm_['description'],
        'ex_network_domain': network_domain,
        'ex_vlan': vlan,
        'ex_is_started': vm_['is_started']
    }

    event_data = _to_event_data(kwargs)

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('requesting', event_data, list(event_data)),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # Initial password (excluded from event payload)
    initial_password = NodeAuthPassword(vm_['auth'])
    kwargs['auth'] = initial_password

    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating %s on DIMENSIONDATA\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            exc_info_on_loglevel=logging.DEBUG
        )
        return False

    try:
        # Block until _query_node_data reports a usable IP, or time out.
        data = __utils__['cloud.wait_for_ip'](
            _query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=25 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=30),
            max_failures=config.get_cloud_config_value(
                'wait_for_ip_max_failures', vm_, __opts__, default=60),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])  # pylint: disable=not-callable
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))

    log.debug('VM is now running')

    # Pick the address used for SSH, honouring the ssh_interface setting.
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address %s', ip_address)

    # The address the minion uses to reach the master may differ.
    if __utils__['cloud.get_salt_interface'](vm_, __opts__) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: %s', salt_ip_address)
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: %s', salt_ip_address)

    if not ip_address:
        raise SaltCloudSystemExit(
            'No IP addresses could be found.'
        )

    vm_['salt_host'] = salt_ip_address
    vm_['ssh_host'] = ip_address
    vm_['password'] = vm_['auth']

    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data.__dict__)

    # Don't leak the initial password into logs or the created event.
    if 'password' in data.extra:
        del data.extra['password']

    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data.__dict__)
    )

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_lb(kwargs=None, call=None):
    r'''
    Create a load-balancer configuration.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f create_lb dimensiondata \
            name=dev-lb port=80 protocol=http \
            members=w1,w2,w3 algorithm=ROUND_ROBIN
    '''
    conn = get_conn()
    if call != 'function':
        raise SaltCloudSystemExit(
            'The create_lb function must be called with -f or --function.'
        )

    # Validate required arguments before talking to the API.
    if not kwargs or 'name' not in kwargs:
        log.error(
            'A name must be specified when creating a health check.'
        )
        return False
    if 'port' not in kwargs:
        log.error(
            'A port or port-range must be specified for the load-balancer.'
        )
        return False
    if 'networkdomain' not in kwargs:
        log.error(
            'A network domain must be specified for the load-balancer.'
        )
        return False

    if 'members' in kwargs:
        # Resolve each comma-separated member name to its first private IP.
        members = []
        ip = ""
        membersList = kwargs.get('members').split(',')
        log.debug('MemberList: %s', membersList)
        for member in membersList:
            try:
                log.debug('Member: %s', member)
                node = get_node(conn, member) # pylint: disable=not-callable
                log.debug('Node: %s', node)
                ip = node.private_ips[0]
            except Exception as err:
                log.error(
                    'Failed to get node ip: %s', err,
                    # Show the traceback if the debug logging level is enabled
                    exc_info_on_loglevel=logging.DEBUG
                )
            # NOTE(review): if the lookup above fails, 'ip' still holds the
            # previous member's address (or "") and is appended anyway —
            # confirm this best-effort behaviour is intended.
            members.append(Member(ip, ip, kwargs['port']))
    else:
        members = None
    log.debug('Members: %s', members)

    networkdomain = kwargs['networkdomain']
    name = kwargs['name']
    port = kwargs['port']
    protocol = kwargs.get('protocol', None)
    algorithm = kwargs.get('algorithm', None)

    lb_conn = get_lb_conn(conn)
    network_domains = conn.ex_list_network_domains()
    network_domain = [y for y in network_domains if y.name == networkdomain][0]

    log.debug('Network Domain: %s', network_domain.id)
    # The LB API operates within a "current" network domain; select it first.
    lb_conn.ex_set_current_network_domain(network_domain.id)

    event_data = _to_event_data(kwargs)

    __utils__['cloud.fire_event'](
        'event',
        'create load_balancer',
        'salt/cloud/loadbalancer/creating',
        args=event_data,
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    lb = lb_conn.create_balancer(
        name, port, protocol, algorithm, members
    )

    event_data = _to_event_data(kwargs)

    __utils__['cloud.fire_event'](
        'event',
        'created load_balancer',
        'salt/cloud/loadbalancer/created',
        args=event_data,
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return _expand_balancer(lb)
def _expand_balancer(lb):
'''
Convert the libcloud load-balancer object into something more serializable.
'''
ret = {}
ret.update(lb.__dict__)
return ret
def preferred_ip(vm_, ips):
    '''
    Return the first address in ``ips`` that is valid for the configured
    Internet protocol family — 'ipv4' (default) or 'ipv6' — or ``False``
    if none match.

    :param dict vm_: The VM configuration dictionary (consulted for the
        ``protocol`` setting).
    :param list ips: Candidate IP address strings.
    '''
    proto = config.get_cloud_config_value(
        'protocol', vm_, __opts__, default='ipv4', search_global=False
    )

    family = socket.AF_INET6 if proto == 'ipv6' else socket.AF_INET
    for ip in ips:
        try:
            socket.inet_pton(family, ip)
        except (socket.error, ValueError):
            # Not a valid address for this family; try the next candidate.
            # (Previously caught bare Exception, which could hide real bugs.)
            continue
        return ip
    return False
def ssh_interface(vm_):
    '''
    Return which interface type to connect to over SSH: 'public_ips'
    (the default) or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface',
        vm_,
        __opts__,
        default='public_ips',
        search_global=False,
    )
    return interface
def start(name, call=None):
    '''
    Start a VM in DimensionData.

    :param str name:
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    # NOTE: the previous docstring said "Stop a VM" and showed `-a stop` —
    # that was copy-pasted from stop(); this function starts the node.
    conn = get_conn()
    node = get_node(conn, name) # pylint: disable=not-callable
    log.debug('Node of Cloud VM: %s', node)

    status = conn.ex_start_node(node)
    log.debug('Status of Cloud VM: %s', status)

    return status
def get_conn():
    '''
    Build and return a libcloud DimensionData connection using the
    configured provider credentials.
    '''
    provider = get_configured_provider()
    driver_cls = get_driver(Provider.DIMENSIONDATA)

    region = config.get_cloud_config_value('region', provider, __opts__)
    user_id = config.get_cloud_config_value('user_id', provider, __opts__)
    key = config.get_cloud_config_value('key', provider, __opts__)

    if key is not None:
        log.debug('DimensionData authenticating using password')

    return driver_cls(user_id, key, region=region)
def get_lb_conn(dd_driver=None):
    '''
    Return a load-balancer connection object.

    :param dd_driver: An existing compute driver connection; required —
        passing nothing (or a falsy value) is an error.
    :raises SaltCloudSystemExit: if ``dd_driver`` is not supplied.
    '''
    # Fail fast: validate the argument before doing any configuration
    # lookups (previously the guard ran after three config reads whose
    # results were discarded on the error path).
    if not dd_driver:
        raise SaltCloudSystemExit(
            'Missing dimensiondata_driver for get_lb_conn method.'
        )

    vm_ = get_configured_provider()
    region = config.get_cloud_config_value(
        'region', vm_, __opts__
    )
    user_id = config.get_cloud_config_value(
        'user_id', vm_, __opts__
    )
    key = config.get_cloud_config_value(
        'key', vm_, __opts__
    )
    return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region)
def _to_event_data(obj):
'''
Convert the specified object into a form that can be serialised by msgpack as event data.
:param obj: The object to convert.
'''
if obj is None:
return None
if isinstance(obj, bool):
return obj
if isinstance(obj, int):
return obj
if isinstance(obj, float):
return obj
if isinstance(obj, str):
return obj
if isinstance(obj, bytes):
return obj
if isinstance(obj, dict):
return obj
if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references)
return obj.name
if isinstance(obj, list):
return [_to_event_data(item) for item in obj]
event_data = {}
for attribute_name in dir(obj):
if attribute_name.startswith('_'):
continue
attribute_value = getattr(obj, attribute_name)
if callable(attribute_value): # Strip out methods
continue
event_data[attribute_name] = _to_event_data(attribute_value)
return event_data
|
saltstack/salt
|
salt/cloud/clouds/dimensiondata.py
|
start
|
python
|
def start(name, call=None):
'''
Stop a VM in DimensionData.
:param str name:
The name of the VM to stop.
CLI Example:
.. code-block:: bash
salt-cloud -a stop vm_name
'''
conn = get_conn()
node = get_node(conn, name) # pylint: disable=not-callable
log.debug('Node of Cloud VM: %s', node)
status = conn.ex_start_node(node)
log.debug('Status of Cloud VM: %s', status)
return status
|
Stop a VM in DimensionData.
:param str name:
The name of the VM to stop.
CLI Example:
.. code-block:: bash
salt-cloud -a stop vm_name
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/dimensiondata.py#L506-L527
|
[
"def get_conn():\n '''\n Return a conn object for the passed VM data\n '''\n vm_ = get_configured_provider()\n driver = get_driver(Provider.DIMENSIONDATA)\n\n region = config.get_cloud_config_value(\n 'region', vm_, __opts__\n )\n\n user_id = config.get_cloud_config_value(\n 'user_id', vm_, __opts__\n )\n key = config.get_cloud_config_value(\n 'key', vm_, __opts__\n )\n\n if key is not None:\n log.debug('DimensionData authenticating using password')\n\n return driver(\n user_id,\n key,\n region=region\n )\n"
] |
# -*- coding: utf-8 -*-
'''
Dimension Data Cloud Module
===========================
This is a cloud module for the Dimension Data Cloud,
using the existing Libcloud driver for Dimension Data.
.. code-block:: yaml
# Note: This example is for /etc/salt/cloud.providers
# or any file in the
# /etc/salt/cloud.providers.d/ directory.
my-dimensiondata-config:
user_id: my_username
key: myPassword!
region: dd-na
driver: dimensiondata
:maintainer: Anthony Shaw <anthonyshaw@apache.org>
:depends: libcloud >= 1.2.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import pprint
from salt.utils.versions import LooseVersion as _LooseVersion
# Import libcloud
try:
import libcloud
from libcloud.compute.base import NodeDriver, NodeState
from libcloud.compute.base import NodeAuthPassword
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.loadbalancer.base import Member
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
# This work-around for Issue #32743 is no longer needed for libcloud >=
# 1.4.0. However, older versions of libcloud must still be supported with
# this work-around. This work-around can be removed when the required
# minimum version of libcloud is 2.0.0 (See PR #40837 - which is
# implemented in Salt 2018.3.0).
if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'):
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
# Import salt.cloud libs
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
import salt.utils.cloud
import salt.config as config
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
try:
from netaddr import all_matching_cidrs # pylint: disable=unused-import
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
# Some of the libcloud functions need to be in the same namespace as the
# functions defined in the module, so we create new function objects inside
# this module namespace
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
destroy = namespaced_function(destroy, globals())
reboot = namespaced_function(reboot, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
get_node = namespaced_function(get_node, globals())
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'dimensiondata'
def __virtual__():
'''
Set up the libcloud functions and check for dimensiondata configurations.
'''
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return False
for provider, details in six.iteritems(__opts__['providers']):
if 'dimensiondata' not in details:
continue
return __virtualname__
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or 'dimensiondata',
('user_id', 'key', 'region')
)
def get_dependencies():
'''
Warn if dependencies aren't met.
'''
deps = {
'libcloud': HAS_LIBCLOUD,
'netaddr': HAS_NETADDR
}
return config.check_driver_dependencies(
__virtualname__,
deps
)
def _query_node_data(vm_, data):
running = False
try:
node = show_instance(vm_['name'], 'action') # pylint: disable=not-callable
running = (node['state'] == NodeState.RUNNING)
log.debug('Loaded node data for %s:\nname: %s\nstate: %s',
vm_['name'], pprint.pformat(node['name']), node['state'])
except Exception as err:
log.error(
'Failed to get nodes list: %s', err,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
# Trigger a failure in the wait for IP function
return running
if not running:
# Still not running, trigger another iteration
return
private = node['private_ips']
public = node['public_ips']
if private and not public:
log.warning('Private IPs returned, but not public. Checking for misidentified IPs.')
for private_ip in private:
private_ip = preferred_ip(vm_, [private_ip])
if private_ip is False:
continue
if salt.utils.cloud.is_public_ip(private_ip):
log.warning('%s is a public IP', private_ip)
data.public_ips.append(private_ip)
else:
log.warning('%s is a private IP', private_ip)
if private_ip not in data.private_ips:
data.private_ips.append(private_ip)
if ssh_interface(vm_) == 'private_ips' and data.private_ips:
return data
if private:
data.private_ips = private
if ssh_interface(vm_) == 'private_ips':
return data
if public:
data.public_ips = public
if ssh_interface(vm_) != 'private_ips':
return data
log.debug('Contents of the node data:')
log.debug(data)
def create(vm_):
'''
Create a single VM from a data dict
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(
__opts__,
__active_provider_name__ or 'dimensiondata',
vm_['profile']) is False:
return False
except AttributeError:
pass
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Creating Cloud VM %s', vm_['name'])
conn = get_conn()
location = conn.ex_get_location_by_id(vm_['location'])
images = conn.list_images(location=location)
image = [x for x in images if x.id == vm_['image']][0]
network_domains = conn.ex_list_network_domains(location=location)
try:
network_domain = [y for y in network_domains
if y.name == vm_['network_domain']][0]
except IndexError:
network_domain = conn.ex_create_network_domain(
location=location,
name=vm_['network_domain'],
plan='ADVANCED',
description=''
)
try:
vlan = [y for y in conn.ex_list_vlans(
location=location,
network_domain=network_domain)
if y.name == vm_['vlan']][0]
except (IndexError, KeyError):
# Use the first VLAN in the network domain
vlan = conn.ex_list_vlans(
location=location,
network_domain=network_domain)[0]
kwargs = {
'name': vm_['name'],
'image': image,
'ex_description': vm_['description'],
'ex_network_domain': network_domain,
'ex_vlan': vlan,
'ex_is_started': vm_['is_started']
}
event_data = _to_event_data(kwargs)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args=__utils__['cloud.filter_event']('requesting', event_data, list(event_data)),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# Initial password (excluded from event payload)
initial_password = NodeAuthPassword(vm_['auth'])
kwargs['auth'] = initial_password
try:
data = conn.create_node(**kwargs)
except Exception as exc:
log.error(
'Error creating %s on DIMENSIONDATA\n\n'
'The following exception was thrown by libcloud when trying to '
'run the initial deployment: \n%s',
vm_['name'], exc,
exc_info_on_loglevel=logging.DEBUG
)
return False
try:
data = __utils__['cloud.wait_for_ip'](
_query_node_data,
update_args=(vm_, data),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=25 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=30),
max_failures=config.get_cloud_config_value(
'wait_for_ip_max_failures', vm_, __opts__, default=60),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(vm_['name']) # pylint: disable=not-callable
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(six.text_type(exc))
log.debug('VM is now running')
if ssh_interface(vm_) == 'private_ips':
ip_address = preferred_ip(vm_, data.private_ips)
else:
ip_address = preferred_ip(vm_, data.public_ips)
log.debug('Using IP address %s', ip_address)
if __utils__['cloud.get_salt_interface'](vm_, __opts__) == 'private_ips':
salt_ip_address = preferred_ip(vm_, data.private_ips)
log.info('Salt interface set to: %s', salt_ip_address)
else:
salt_ip_address = preferred_ip(vm_, data.public_ips)
log.debug('Salt interface set to: %s', salt_ip_address)
if not ip_address:
raise SaltCloudSystemExit(
'No IP addresses could be found.'
)
vm_['salt_host'] = salt_ip_address
vm_['ssh_host'] = ip_address
vm_['password'] = vm_['auth']
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data.__dict__)
if 'password' in data.extra:
del data.extra['password']
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data.__dict__)
)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def create_lb(kwargs=None, call=None):
r'''
Create a load-balancer configuration.
CLI Example:
.. code-block:: bash
salt-cloud -f create_lb dimensiondata \
name=dev-lb port=80 protocol=http \
members=w1,w2,w3 algorithm=ROUND_ROBIN
'''
conn = get_conn()
if call != 'function':
raise SaltCloudSystemExit(
'The create_lb function must be called with -f or --function.'
)
if not kwargs or 'name' not in kwargs:
log.error(
'A name must be specified when creating a health check.'
)
return False
if 'port' not in kwargs:
log.error(
'A port or port-range must be specified for the load-balancer.'
)
return False
if 'networkdomain' not in kwargs:
log.error(
'A network domain must be specified for the load-balancer.'
)
return False
if 'members' in kwargs:
members = []
ip = ""
membersList = kwargs.get('members').split(',')
log.debug('MemberList: %s', membersList)
for member in membersList:
try:
log.debug('Member: %s', member)
node = get_node(conn, member) # pylint: disable=not-callable
log.debug('Node: %s', node)
ip = node.private_ips[0]
except Exception as err:
log.error(
'Failed to get node ip: %s', err,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
members.append(Member(ip, ip, kwargs['port']))
else:
members = None
log.debug('Members: %s', members)
networkdomain = kwargs['networkdomain']
name = kwargs['name']
port = kwargs['port']
protocol = kwargs.get('protocol', None)
algorithm = kwargs.get('algorithm', None)
lb_conn = get_lb_conn(conn)
network_domains = conn.ex_list_network_domains()
network_domain = [y for y in network_domains if y.name == networkdomain][0]
log.debug('Network Domain: %s', network_domain.id)
lb_conn.ex_set_current_network_domain(network_domain.id)
event_data = _to_event_data(kwargs)
__utils__['cloud.fire_event'](
'event',
'create load_balancer',
'salt/cloud/loadbalancer/creating',
args=event_data,
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
lb = lb_conn.create_balancer(
name, port, protocol, algorithm, members
)
event_data = _to_event_data(kwargs)
__utils__['cloud.fire_event'](
'event',
'created load_balancer',
'salt/cloud/loadbalancer/created',
args=event_data,
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return _expand_balancer(lb)
def _expand_balancer(lb):
'''
Convert the libcloud load-balancer object into something more serializable.
'''
ret = {}
ret.update(lb.__dict__)
return ret
def preferred_ip(vm_, ips):
'''
Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'.
'''
proto = config.get_cloud_config_value(
'protocol', vm_, __opts__, default='ipv4', search_global=False
)
family = socket.AF_INET
if proto == 'ipv6':
family = socket.AF_INET6
for ip in ips:
try:
socket.inet_pton(family, ip)
return ip
except Exception:
continue
return False
def ssh_interface(vm_):
'''
Return the ssh_interface type to connect to. Either 'public_ips' (default)
or 'private_ips'.
'''
return config.get_cloud_config_value(
'ssh_interface', vm_, __opts__, default='public_ips',
search_global=False
)
def stop(name, call=None):
    '''
    Stop a VM in DimensionData via a graceful shutdown.

    :param str name:
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    conn = get_conn()
    node = get_node(conn, name) # pylint: disable=not-callable
    log.debug('Node of Cloud VM: %s', node)

    # Graceful shutdown (guest OS shutdown), not a hard power-off.
    status = conn.ex_shutdown_graceful(node)
    log.debug('Status of Cloud VM: %s', status)

    return status
def get_conn():
'''
Return a conn object for the passed VM data
'''
vm_ = get_configured_provider()
driver = get_driver(Provider.DIMENSIONDATA)
region = config.get_cloud_config_value(
'region', vm_, __opts__
)
user_id = config.get_cloud_config_value(
'user_id', vm_, __opts__
)
key = config.get_cloud_config_value(
'key', vm_, __opts__
)
if key is not None:
log.debug('DimensionData authenticating using password')
return driver(
user_id,
key,
region=region
)
def get_lb_conn(dd_driver=None):
'''
Return a load-balancer conn object
'''
vm_ = get_configured_provider()
region = config.get_cloud_config_value(
'region', vm_, __opts__
)
user_id = config.get_cloud_config_value(
'user_id', vm_, __opts__
)
key = config.get_cloud_config_value(
'key', vm_, __opts__
)
if not dd_driver:
raise SaltCloudSystemExit(
'Missing dimensiondata_driver for get_lb_conn method.'
)
return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region)
def _to_event_data(obj):
'''
Convert the specified object into a form that can be serialised by msgpack as event data.
:param obj: The object to convert.
'''
if obj is None:
return None
if isinstance(obj, bool):
return obj
if isinstance(obj, int):
return obj
if isinstance(obj, float):
return obj
if isinstance(obj, str):
return obj
if isinstance(obj, bytes):
return obj
if isinstance(obj, dict):
return obj
if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references)
return obj.name
if isinstance(obj, list):
return [_to_event_data(item) for item in obj]
event_data = {}
for attribute_name in dir(obj):
if attribute_name.startswith('_'):
continue
attribute_value = getattr(obj, attribute_name)
if callable(attribute_value): # Strip out methods
continue
event_data[attribute_name] = _to_event_data(attribute_value)
return event_data
|
saltstack/salt
|
salt/cloud/clouds/dimensiondata.py
|
get_conn
|
python
|
def get_conn():
'''
Return a conn object for the passed VM data
'''
vm_ = get_configured_provider()
driver = get_driver(Provider.DIMENSIONDATA)
region = config.get_cloud_config_value(
'region', vm_, __opts__
)
user_id = config.get_cloud_config_value(
'user_id', vm_, __opts__
)
key = config.get_cloud_config_value(
'key', vm_, __opts__
)
if key is not None:
log.debug('DimensionData authenticating using password')
return driver(
user_id,
key,
region=region
)
|
Return a conn object for the passed VM data
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/dimensiondata.py#L530-L555
|
[
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n",
"def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or 'dimensiondata',\n ('user_id', 'key', 'region')\n )\n"
] |
# -*- coding: utf-8 -*-
'''
Dimension Data Cloud Module
===========================
This is a cloud module for the Dimension Data Cloud,
using the existing Libcloud driver for Dimension Data.
.. code-block:: yaml
# Note: This example is for /etc/salt/cloud.providers
# or any file in the
# /etc/salt/cloud.providers.d/ directory.
my-dimensiondata-config:
user_id: my_username
key: myPassword!
region: dd-na
driver: dimensiondata
:maintainer: Anthony Shaw <anthonyshaw@apache.org>
:depends: libcloud >= 1.2.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import pprint
from salt.utils.versions import LooseVersion as _LooseVersion
# Import libcloud
try:
import libcloud
from libcloud.compute.base import NodeDriver, NodeState
from libcloud.compute.base import NodeAuthPassword
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.loadbalancer.base import Member
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
# This work-around for Issue #32743 is no longer needed for libcloud >=
# 1.4.0. However, older versions of libcloud must still be supported with
# this work-around. This work-around can be removed when the required
# minimum version of libcloud is 2.0.0 (See PR #40837 - which is
# implemented in Salt 2018.3.0).
if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'):
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
# Import salt.cloud libs
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
import salt.utils.cloud
import salt.config as config
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
try:
from netaddr import all_matching_cidrs # pylint: disable=unused-import
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
# Some of the libcloud functions need to be in the same namespace as the
# functions defined in the module, so we create new function objects inside
# this module namespace
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
destroy = namespaced_function(destroy, globals())
reboot = namespaced_function(reboot, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
get_node = namespaced_function(get_node, globals())
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'dimensiondata'
def __virtual__():
    '''
    Set up the libcloud functions and check for dimensiondata configurations.

    Returns the virtual module name when both a provider configuration and
    the required dependencies (libcloud, netaddr) are present, otherwise
    ``False`` so the module is not loaded.
    '''
    if get_configured_provider() is False:
        return False
    if get_dependencies() is False:
        return False
    # The previous implementation iterated over __opts__['providers'] here,
    # but the loop had no effect: it only ever skipped entries and then the
    # function returned unconditionally.  The dead loop has been removed.
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    required_keys = ('user_id', 'key', 'region')
    provider_name = __active_provider_name__ or 'dimensiondata'
    return config.is_provider_configured(__opts__, provider_name, required_keys)
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    '''
    # Both libcloud and netaddr must import successfully for this driver.
    return config.check_driver_dependencies(
        __virtualname__,
        {'libcloud': HAS_LIBCLOUD, 'netaddr': HAS_NETADDR},
    )
def _query_node_data(vm_, data):
    '''
    Poll the provider for the node named in ``vm_`` and copy its addresses
    onto ``data`` once it is running.

    Intended as the callback for ``cloud.wait_for_ip``:

    - returns ``False`` if the node listing failed (counts as a failure in
      the wait loop)
    - returns ``None`` while the node is not yet RUNNING (poll again)
    - returns ``data`` once an address suitable for the configured
      ``ssh_interface`` is known
    '''
    running = False
    try:
        node = show_instance(vm_['name'], 'action')  # pylint: disable=not-callable
        running = (node['state'] == NodeState.RUNNING)
        log.debug('Loaded node data for %s:\nname: %s\nstate: %s',
                  vm_['name'], pprint.pformat(node['name']), node['state'])
    except Exception as err:
        log.error(
            'Failed to get nodes list: %s', err,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        # Trigger a failure in the wait for IP function
        return running
    if not running:
        # Still not running, trigger another iteration
        return
    private = node['private_ips']
    public = node['public_ips']
    # If only private IPs came back, some of them may actually be public
    # addresses that were misclassified; reclassify them onto ``data``.
    if private and not public:
        log.warning('Private IPs returned, but not public. Checking for misidentified IPs.')
        for private_ip in private:
            # preferred_ip() returns False when the address does not match
            # the configured protocol family.
            private_ip = preferred_ip(vm_, [private_ip])
            if private_ip is False:
                continue
            if salt.utils.cloud.is_public_ip(private_ip):
                log.warning('%s is a public IP', private_ip)
                data.public_ips.append(private_ip)
            else:
                log.warning('%s is a private IP', private_ip)
                if private_ip not in data.private_ips:
                    data.private_ips.append(private_ip)
        if ssh_interface(vm_) == 'private_ips' and data.private_ips:
            return data
    if private:
        data.private_ips = private
        if ssh_interface(vm_) == 'private_ips':
            return data
    if public:
        data.public_ips = public
        if ssh_interface(vm_) != 'private_ips':
            return data
    # No address usable for the configured interface yet; fall through
    # (implicit None) so the wait loop polls again.
    log.debug('Contents of the node data:')
    log.debug(data)
def create(vm_):
    '''
    Create a single VM from a data dict.

    Reads (at least) ``name``, ``location``, ``image``, ``network_domain``,
    ``description``, ``is_started`` and ``auth`` from ``vm_``; a ``vlan``
    entry is used when present.  Fires the standard salt-cloud
    creating/requesting/created events, waits for the node to obtain an IP
    address and then bootstraps it.  Returns the bootstrap result dict
    (merged with the node data), or ``False`` on failure.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'dimensiondata',
                vm_['profile']) is False:
            return False
    except AttributeError:
        pass
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', vm_['name'])
    conn = get_conn()
    location = conn.ex_get_location_by_id(vm_['location'])
    images = conn.list_images(location=location)
    # Raises IndexError if the requested image id is unknown at this location.
    image = [x for x in images if x.id == vm_['image']][0]
    network_domains = conn.ex_list_network_domains(location=location)
    # Reuse the named network domain if it exists, otherwise create it.
    try:
        network_domain = [y for y in network_domains
                          if y.name == vm_['network_domain']][0]
    except IndexError:
        network_domain = conn.ex_create_network_domain(
            location=location,
            name=vm_['network_domain'],
            plan='ADVANCED',
            description=''
        )
    try:
        vlan = [y for y in conn.ex_list_vlans(
            location=location,
            network_domain=network_domain)
            if y.name == vm_['vlan']][0]
    except (IndexError, KeyError):
        # Use the first VLAN in the network domain
        vlan = conn.ex_list_vlans(
            location=location,
            network_domain=network_domain)[0]
    kwargs = {
        'name': vm_['name'],
        'image': image,
        'ex_description': vm_['description'],
        'ex_network_domain': network_domain,
        'ex_vlan': vlan,
        'ex_is_started': vm_['is_started']
    }
    # Serialise the request args for the event bus (before auth is added,
    # so the password never appears in the event payload).
    event_data = _to_event_data(kwargs)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('requesting', event_data, list(event_data)),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Initial password (excluded from event payload)
    initial_password = NodeAuthPassword(vm_['auth'])
    kwargs['auth'] = initial_password
    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating %s on DIMENSIONDATA\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            exc_info_on_loglevel=logging.DEBUG
        )
        return False
    # Block until the node reports a usable IP (see _query_node_data).
    try:
        data = __utils__['cloud.wait_for_ip'](
            _query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=25 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=30),
            max_failures=config.get_cloud_config_value(
                'wait_for_ip_max_failures', vm_, __opts__, default=60),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])  # pylint: disable=not-callable
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))
    log.debug('VM is now running')
    # Pick the address to SSH to based on the configured interface.
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address %s', ip_address)
    # The salt master may talk to the minion on a different interface.
    if __utils__['cloud.get_salt_interface'](vm_, __opts__) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: %s', salt_ip_address)
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: %s', salt_ip_address)
    if not ip_address:
        raise SaltCloudSystemExit(
            'No IP addresses could be found.'
        )
    vm_['salt_host'] = salt_ip_address
    vm_['ssh_host'] = ip_address
    vm_['password'] = vm_['auth']
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data.__dict__)
    # Do not leak the node password into logs or the return payload.
    if 'password' in data.extra:
        del data.extra['password']
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data.__dict__)
    )
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_lb(kwargs=None, call=None):
    r'''
    Create a load-balancer configuration.

    Required kwargs: ``name``, ``port``, ``networkdomain``.  Optional:
    ``members`` (comma-separated node names), ``protocol``, ``algorithm``.
    Returns the created balancer as a plain dict, or ``False`` when a
    required kwarg is missing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f create_lb dimensiondata \
            name=dev-lb port=80 protocol=http \
            members=w1,w2,w3 algorithm=ROUND_ROBIN
    '''
    conn = get_conn()
    if call != 'function':
        raise SaltCloudSystemExit(
            'The create_lb function must be called with -f or --function.'
        )
    if not kwargs or 'name' not in kwargs:
        log.error(
            'A name must be specified when creating a health check.'
        )
        return False
    if 'port' not in kwargs:
        log.error(
            'A port or port-range must be specified for the load-balancer.'
        )
        return False
    if 'networkdomain' not in kwargs:
        log.error(
            'A network domain must be specified for the load-balancer.'
        )
        return False
    if 'members' in kwargs:
        members = []
        ip = ""
        membersList = kwargs.get('members').split(',')
        log.debug('MemberList: %s', membersList)
        for member in membersList:
            try:
                log.debug('Member: %s', member)
                node = get_node(conn, member)  # pylint: disable=not-callable
                log.debug('Node: %s', node)
                ip = node.private_ips[0]
            except Exception as err:
                log.error(
                    'Failed to get node ip: %s', err,
                    # Show the traceback if the debug logging level is enabled
                    exc_info_on_loglevel=logging.DEBUG
                )
            # NOTE(review): when the lookup above fails, ``ip`` still holds
            # the previous member's address (or "") and a Member is appended
            # anyway -- this looks like a bug; confirm intended behaviour.
            members.append(Member(ip, ip, kwargs['port']))
    else:
        members = None
    log.debug('Members: %s', members)
    networkdomain = kwargs['networkdomain']
    name = kwargs['name']
    port = kwargs['port']
    protocol = kwargs.get('protocol', None)
    algorithm = kwargs.get('algorithm', None)
    lb_conn = get_lb_conn(conn)
    network_domains = conn.ex_list_network_domains()
    # Assumes the named network domain exists; raises IndexError otherwise.
    network_domain = [y for y in network_domains if y.name == networkdomain][0]
    log.debug('Network Domain: %s', network_domain.id)
    lb_conn.ex_set_current_network_domain(network_domain.id)
    event_data = _to_event_data(kwargs)
    __utils__['cloud.fire_event'](
        'event',
        'create load_balancer',
        'salt/cloud/loadbalancer/creating',
        args=event_data,
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    lb = lb_conn.create_balancer(
        name, port, protocol, algorithm, members
    )
    event_data = _to_event_data(kwargs)
    __utils__['cloud.fire_event'](
        'event',
        'created load_balancer',
        'salt/cloud/loadbalancer/created',
        args=event_data,
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return _expand_balancer(lb)
def _expand_balancer(lb):
'''
Convert the libcloud load-balancer object into something more serializable.
'''
ret = {}
ret.update(lb.__dict__)
return ret
def preferred_ip(vm_, ips):
    '''
    Return the first address in *ips* matching the configured Internet
    protocol family -- 'ipv4' (default) or 'ipv6' -- or ``False`` when no
    address matches.
    '''
    proto = config.get_cloud_config_value(
        'protocol', vm_, __opts__, default='ipv4', search_global=False
    )
    family = socket.AF_INET6 if proto == 'ipv6' else socket.AF_INET
    for ip in ips:
        try:
            # inet_pton raises socket.error for an address that is not
            # valid in this family; only that failure should be skipped
            # (the old code swallowed every Exception).
            socket.inet_pton(family, ip)
        except socket.error:
            continue
        return ip
    return False
def ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface',
        vm_,
        __opts__,
        default='public_ips',
        search_global=False,
    )
    return interface
def stop(name, call=None):
    '''
    Stop a VM in DimensionData.

    name:
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    conn = get_conn()
    node = get_node(conn, name)  # pylint: disable=not-callable
    log.debug('Node of Cloud VM: %s', node)
    # Graceful shutdown of the guest OS rather than a hard power-off.
    status = conn.ex_shutdown_graceful(node)
    log.debug('Status of Cloud VM: %s', status)
    return status
def start(name, call=None):
    '''
    Start a VM in DimensionData.

    :param str name:
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    conn = get_conn()
    node = get_node(conn, name)  # pylint: disable=not-callable
    log.debug('Node of Cloud VM: %s', node)
    status = conn.ex_start_node(node)
    log.debug('Status of Cloud VM: %s', status)
    return status
def get_lb_conn(dd_driver=None):
    '''
    Return a load-balancer conn object.

    :param dd_driver: the compute driver connection; required (only used to
        assert that a compute connection was established first).
    :raises SaltCloudSystemExit: when ``dd_driver`` is not supplied.
    '''
    # Validate the argument up front instead of after the (wasted)
    # configuration lookups, as the old code did.
    if not dd_driver:
        raise SaltCloudSystemExit(
            'Missing dimensiondata_driver for get_lb_conn method.'
        )
    vm_ = get_configured_provider()
    region = config.get_cloud_config_value(
        'region', vm_, __opts__
    )
    user_id = config.get_cloud_config_value(
        'user_id', vm_, __opts__
    )
    key = config.get_cloud_config_value(
        'key', vm_, __opts__
    )
    return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region)
def _to_event_data(obj):
'''
Convert the specified object into a form that can be serialised by msgpack as event data.
:param obj: The object to convert.
'''
if obj is None:
return None
if isinstance(obj, bool):
return obj
if isinstance(obj, int):
return obj
if isinstance(obj, float):
return obj
if isinstance(obj, str):
return obj
if isinstance(obj, bytes):
return obj
if isinstance(obj, dict):
return obj
if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references)
return obj.name
if isinstance(obj, list):
return [_to_event_data(item) for item in obj]
event_data = {}
for attribute_name in dir(obj):
if attribute_name.startswith('_'):
continue
attribute_value = getattr(obj, attribute_name)
if callable(attribute_value): # Strip out methods
continue
event_data[attribute_name] = _to_event_data(attribute_value)
return event_data
|
saltstack/salt
|
salt/cloud/clouds/dimensiondata.py
|
get_lb_conn
|
python
|
def get_lb_conn(dd_driver=None):
'''
Return a load-balancer conn object
'''
vm_ = get_configured_provider()
region = config.get_cloud_config_value(
'region', vm_, __opts__
)
user_id = config.get_cloud_config_value(
'user_id', vm_, __opts__
)
key = config.get_cloud_config_value(
'key', vm_, __opts__
)
if not dd_driver:
raise SaltCloudSystemExit(
'Missing dimensiondata_driver for get_lb_conn method.'
)
return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region)
|
Return a load-balancer conn object
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/dimensiondata.py#L558-L578
|
[
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n",
"def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or 'dimensiondata',\n ('user_id', 'key', 'region')\n )\n"
] |
# -*- coding: utf-8 -*-
'''
Dimension Data Cloud Module
===========================
This is a cloud module for the Dimension Data Cloud,
using the existing Libcloud driver for Dimension Data.
.. code-block:: yaml
# Note: This example is for /etc/salt/cloud.providers
# or any file in the
# /etc/salt/cloud.providers.d/ directory.
my-dimensiondata-config:
user_id: my_username
key: myPassword!
region: dd-na
driver: dimensiondata
:maintainer: Anthony Shaw <anthonyshaw@apache.org>
:depends: libcloud >= 1.2.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import pprint
from salt.utils.versions import LooseVersion as _LooseVersion
# Import libcloud
try:
import libcloud
from libcloud.compute.base import NodeDriver, NodeState
from libcloud.compute.base import NodeAuthPassword
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.loadbalancer.base import Member
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
# This work-around for Issue #32743 is no longer needed for libcloud >=
# 1.4.0. However, older versions of libcloud must still be supported with
# this work-around. This work-around can be removed when the required
# minimum version of libcloud is 2.0.0 (See PR #40837 - which is
# implemented in Salt 2018.3.0).
if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'):
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
# Import salt.cloud libs
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
import salt.utils.cloud
import salt.config as config
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
try:
from netaddr import all_matching_cidrs # pylint: disable=unused-import
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
# Some of the libcloud functions need to be in the same namespace as the
# functions defined in the module, so we create new function objects inside
# this module namespace
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
destroy = namespaced_function(destroy, globals())
reboot = namespaced_function(reboot, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
get_node = namespaced_function(get_node, globals())
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'dimensiondata'
def __virtual__():
    '''
    Set up the libcloud functions and check for dimensiondata configurations.

    Returns the virtual module name when both a provider configuration and
    the required dependencies (libcloud, netaddr) are present, otherwise
    ``False`` so the module is not loaded.
    '''
    if get_configured_provider() is False:
        return False
    if get_dependencies() is False:
        return False
    # The previous implementation iterated over __opts__['providers'] here,
    # but the loop had no effect: it only ever skipped entries and then the
    # function returned unconditionally.  The dead loop has been removed.
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    required_keys = ('user_id', 'key', 'region')
    provider_name = __active_provider_name__ or 'dimensiondata'
    return config.is_provider_configured(__opts__, provider_name, required_keys)
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    '''
    # Both libcloud and netaddr must import successfully for this driver.
    return config.check_driver_dependencies(
        __virtualname__,
        {'libcloud': HAS_LIBCLOUD, 'netaddr': HAS_NETADDR},
    )
def _query_node_data(vm_, data):
    '''
    Poll the provider for the node named in ``vm_`` and copy its addresses
    onto ``data`` once it is running.

    Intended as the callback for ``cloud.wait_for_ip``:

    - returns ``False`` if the node listing failed (counts as a failure in
      the wait loop)
    - returns ``None`` while the node is not yet RUNNING (poll again)
    - returns ``data`` once an address suitable for the configured
      ``ssh_interface`` is known
    '''
    running = False
    try:
        node = show_instance(vm_['name'], 'action')  # pylint: disable=not-callable
        running = (node['state'] == NodeState.RUNNING)
        log.debug('Loaded node data for %s:\nname: %s\nstate: %s',
                  vm_['name'], pprint.pformat(node['name']), node['state'])
    except Exception as err:
        log.error(
            'Failed to get nodes list: %s', err,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        # Trigger a failure in the wait for IP function
        return running
    if not running:
        # Still not running, trigger another iteration
        return
    private = node['private_ips']
    public = node['public_ips']
    # If only private IPs came back, some of them may actually be public
    # addresses that were misclassified; reclassify them onto ``data``.
    if private and not public:
        log.warning('Private IPs returned, but not public. Checking for misidentified IPs.')
        for private_ip in private:
            # preferred_ip() returns False when the address does not match
            # the configured protocol family.
            private_ip = preferred_ip(vm_, [private_ip])
            if private_ip is False:
                continue
            if salt.utils.cloud.is_public_ip(private_ip):
                log.warning('%s is a public IP', private_ip)
                data.public_ips.append(private_ip)
            else:
                log.warning('%s is a private IP', private_ip)
                if private_ip not in data.private_ips:
                    data.private_ips.append(private_ip)
        if ssh_interface(vm_) == 'private_ips' and data.private_ips:
            return data
    if private:
        data.private_ips = private
        if ssh_interface(vm_) == 'private_ips':
            return data
    if public:
        data.public_ips = public
        if ssh_interface(vm_) != 'private_ips':
            return data
    # No address usable for the configured interface yet; fall through
    # (implicit None) so the wait loop polls again.
    log.debug('Contents of the node data:')
    log.debug(data)
def create(vm_):
    '''
    Create a single VM from a data dict.

    Reads (at least) ``name``, ``location``, ``image``, ``network_domain``,
    ``description``, ``is_started`` and ``auth`` from ``vm_``; a ``vlan``
    entry is used when present.  Fires the standard salt-cloud
    creating/requesting/created events, waits for the node to obtain an IP
    address and then bootstraps it.  Returns the bootstrap result dict
    (merged with the node data), or ``False`` on failure.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'dimensiondata',
                vm_['profile']) is False:
            return False
    except AttributeError:
        pass
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', vm_['name'])
    conn = get_conn()
    location = conn.ex_get_location_by_id(vm_['location'])
    images = conn.list_images(location=location)
    # Raises IndexError if the requested image id is unknown at this location.
    image = [x for x in images if x.id == vm_['image']][0]
    network_domains = conn.ex_list_network_domains(location=location)
    # Reuse the named network domain if it exists, otherwise create it.
    try:
        network_domain = [y for y in network_domains
                          if y.name == vm_['network_domain']][0]
    except IndexError:
        network_domain = conn.ex_create_network_domain(
            location=location,
            name=vm_['network_domain'],
            plan='ADVANCED',
            description=''
        )
    try:
        vlan = [y for y in conn.ex_list_vlans(
            location=location,
            network_domain=network_domain)
            if y.name == vm_['vlan']][0]
    except (IndexError, KeyError):
        # Use the first VLAN in the network domain
        vlan = conn.ex_list_vlans(
            location=location,
            network_domain=network_domain)[0]
    kwargs = {
        'name': vm_['name'],
        'image': image,
        'ex_description': vm_['description'],
        'ex_network_domain': network_domain,
        'ex_vlan': vlan,
        'ex_is_started': vm_['is_started']
    }
    # Serialise the request args for the event bus (before auth is added,
    # so the password never appears in the event payload).
    event_data = _to_event_data(kwargs)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('requesting', event_data, list(event_data)),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Initial password (excluded from event payload)
    initial_password = NodeAuthPassword(vm_['auth'])
    kwargs['auth'] = initial_password
    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating %s on DIMENSIONDATA\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            exc_info_on_loglevel=logging.DEBUG
        )
        return False
    # Block until the node reports a usable IP (see _query_node_data).
    try:
        data = __utils__['cloud.wait_for_ip'](
            _query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=25 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=30),
            max_failures=config.get_cloud_config_value(
                'wait_for_ip_max_failures', vm_, __opts__, default=60),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])  # pylint: disable=not-callable
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))
    log.debug('VM is now running')
    # Pick the address to SSH to based on the configured interface.
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address %s', ip_address)
    # The salt master may talk to the minion on a different interface.
    if __utils__['cloud.get_salt_interface'](vm_, __opts__) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: %s', salt_ip_address)
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: %s', salt_ip_address)
    if not ip_address:
        raise SaltCloudSystemExit(
            'No IP addresses could be found.'
        )
    vm_['salt_host'] = salt_ip_address
    vm_['ssh_host'] = ip_address
    vm_['password'] = vm_['auth']
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data.__dict__)
    # Do not leak the node password into logs or the return payload.
    if 'password' in data.extra:
        del data.extra['password']
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data.__dict__)
    )
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_lb(kwargs=None, call=None):
    r'''
    Create a load-balancer configuration.

    Required kwargs: ``name``, ``port``, ``networkdomain``.  Optional:
    ``members`` (comma-separated node names), ``protocol``, ``algorithm``.
    Returns the created balancer as a plain dict, or ``False`` when a
    required kwarg is missing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f create_lb dimensiondata \
            name=dev-lb port=80 protocol=http \
            members=w1,w2,w3 algorithm=ROUND_ROBIN
    '''
    conn = get_conn()
    if call != 'function':
        raise SaltCloudSystemExit(
            'The create_lb function must be called with -f or --function.'
        )
    if not kwargs or 'name' not in kwargs:
        log.error(
            'A name must be specified when creating a health check.'
        )
        return False
    if 'port' not in kwargs:
        log.error(
            'A port or port-range must be specified for the load-balancer.'
        )
        return False
    if 'networkdomain' not in kwargs:
        log.error(
            'A network domain must be specified for the load-balancer.'
        )
        return False
    if 'members' in kwargs:
        members = []
        members_list = kwargs.get('members').split(',')
        log.debug('MemberList: %s', members_list)
        for member in members_list:
            try:
                log.debug('Member: %s', member)
                node = get_node(conn, member)  # pylint: disable=not-callable
                log.debug('Node: %s', node)
                ip = node.private_ips[0]
            except Exception as err:
                log.error(
                    'Failed to get node ip: %s', err,
                    # Show the traceback if the debug logging level is enabled
                    exc_info_on_loglevel=logging.DEBUG
                )
                # Bug fix: skip members whose IP could not be resolved
                # instead of re-using the previous member's address (or an
                # empty string) as the old code did.
                continue
            members.append(Member(ip, ip, kwargs['port']))
    else:
        members = None
    log.debug('Members: %s', members)
    networkdomain = kwargs['networkdomain']
    name = kwargs['name']
    port = kwargs['port']
    protocol = kwargs.get('protocol', None)
    algorithm = kwargs.get('algorithm', None)
    lb_conn = get_lb_conn(conn)
    network_domains = conn.ex_list_network_domains()
    # Assumes the named network domain exists; raises IndexError otherwise.
    network_domain = [y for y in network_domains if y.name == networkdomain][0]
    log.debug('Network Domain: %s', network_domain.id)
    lb_conn.ex_set_current_network_domain(network_domain.id)
    event_data = _to_event_data(kwargs)
    __utils__['cloud.fire_event'](
        'event',
        'create load_balancer',
        'salt/cloud/loadbalancer/creating',
        args=event_data,
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    lb = lb_conn.create_balancer(
        name, port, protocol, algorithm, members
    )
    event_data = _to_event_data(kwargs)
    __utils__['cloud.fire_event'](
        'event',
        'created load_balancer',
        'salt/cloud/loadbalancer/created',
        args=event_data,
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return _expand_balancer(lb)
def _expand_balancer(lb):
'''
Convert the libcloud load-balancer object into something more serializable.
'''
ret = {}
ret.update(lb.__dict__)
return ret
def preferred_ip(vm_, ips):
    '''
    Return the first address in *ips* matching the configured Internet
    protocol family -- 'ipv4' (default) or 'ipv6' -- or ``False`` when no
    address matches.
    '''
    proto = config.get_cloud_config_value(
        'protocol', vm_, __opts__, default='ipv4', search_global=False
    )
    family = socket.AF_INET6 if proto == 'ipv6' else socket.AF_INET
    for ip in ips:
        try:
            # inet_pton raises socket.error for an address that is not
            # valid in this family; only that failure should be skipped
            # (the old code swallowed every Exception).
            socket.inet_pton(family, ip)
        except socket.error:
            continue
        return ip
    return False
def ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface',
        vm_,
        __opts__,
        default='public_ips',
        search_global=False,
    )
    return interface
def stop(name, call=None):
    '''
    Stop a VM in DimensionData.

    name:
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    conn = get_conn()
    node = get_node(conn, name)  # pylint: disable=not-callable
    log.debug('Node of Cloud VM: %s', node)
    # Graceful shutdown of the guest OS rather than a hard power-off.
    status = conn.ex_shutdown_graceful(node)
    log.debug('Status of Cloud VM: %s', status)
    return status
def start(name, call=None):
    '''
    Start a VM in DimensionData.

    :param str name:
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    conn = get_conn()
    node = get_node(conn, name)  # pylint: disable=not-callable
    log.debug('Node of Cloud VM: %s', node)
    status = conn.ex_start_node(node)
    log.debug('Status of Cloud VM: %s', status)
    return status
def get_conn():
    '''
    Return a conn object for the passed VM data.

    :raises SaltCloudSystemExit: when no ``key`` (password) is present in
        the provider configuration.  The old code fell through and
        implicitly returned ``None`` in that case, which produced obscure
        AttributeErrors in every caller.
    '''
    vm_ = get_configured_provider()
    driver = get_driver(Provider.DIMENSIONDATA)
    region = config.get_cloud_config_value(
        'region', vm_, __opts__
    )
    user_id = config.get_cloud_config_value(
        'user_id', vm_, __opts__
    )
    key = config.get_cloud_config_value(
        'key', vm_, __opts__
    )
    if key is None:
        raise SaltCloudSystemExit(
            'The DimensionData driver requires a \'key\' (password) in the '
            'provider configuration.'
        )
    log.debug('DimensionData authenticating using password')
    return driver(
        user_id,
        key,
        region=region
    )
def _to_event_data(obj):
'''
Convert the specified object into a form that can be serialised by msgpack as event data.
:param obj: The object to convert.
'''
if obj is None:
return None
if isinstance(obj, bool):
return obj
if isinstance(obj, int):
return obj
if isinstance(obj, float):
return obj
if isinstance(obj, str):
return obj
if isinstance(obj, bytes):
return obj
if isinstance(obj, dict):
return obj
if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references)
return obj.name
if isinstance(obj, list):
return [_to_event_data(item) for item in obj]
event_data = {}
for attribute_name in dir(obj):
if attribute_name.startswith('_'):
continue
attribute_value = getattr(obj, attribute_name)
if callable(attribute_value): # Strip out methods
continue
event_data[attribute_name] = _to_event_data(attribute_value)
return event_data
|
saltstack/salt
|
salt/cloud/clouds/dimensiondata.py
|
_to_event_data
|
python
|
def _to_event_data(obj):
'''
Convert the specified object into a form that can be serialised by msgpack as event data.
:param obj: The object to convert.
'''
if obj is None:
return None
if isinstance(obj, bool):
return obj
if isinstance(obj, int):
return obj
if isinstance(obj, float):
return obj
if isinstance(obj, str):
return obj
if isinstance(obj, bytes):
return obj
if isinstance(obj, dict):
return obj
if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references)
return obj.name
if isinstance(obj, list):
return [_to_event_data(item) for item in obj]
event_data = {}
for attribute_name in dir(obj):
if attribute_name.startswith('_'):
continue
attribute_value = getattr(obj, attribute_name)
if callable(attribute_value): # Strip out methods
continue
event_data[attribute_name] = _to_event_data(attribute_value)
return event_data
|
Convert the specified object into a form that can be serialised by msgpack as event data.
:param obj: The object to convert.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/dimensiondata.py#L581-L621
|
[
"def _to_event_data(obj):\n '''\n Convert the specified object into a form that can be serialised by msgpack as event data.\n\n :param obj: The object to convert.\n '''\n\n if obj is None:\n return None\n if isinstance(obj, bool):\n return obj\n if isinstance(obj, int):\n return obj\n if isinstance(obj, float):\n return obj\n if isinstance(obj, str):\n return obj\n if isinstance(obj, bytes):\n return obj\n if isinstance(obj, dict):\n return obj\n\n if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references)\n return obj.name\n\n if isinstance(obj, list):\n return [_to_event_data(item) for item in obj]\n\n event_data = {}\n for attribute_name in dir(obj):\n if attribute_name.startswith('_'):\n continue\n\n attribute_value = getattr(obj, attribute_name)\n\n if callable(attribute_value): # Strip out methods\n continue\n\n event_data[attribute_name] = _to_event_data(attribute_value)\n\n return event_data\n"
] |
# -*- coding: utf-8 -*-
'''
Dimension Data Cloud Module
===========================
This is a cloud module for the Dimension Data Cloud,
using the existing Libcloud driver for Dimension Data.
.. code-block:: yaml
# Note: This example is for /etc/salt/cloud.providers
# or any file in the
# /etc/salt/cloud.providers.d/ directory.
my-dimensiondata-config:
user_id: my_username
key: myPassword!
region: dd-na
driver: dimensiondata
:maintainer: Anthony Shaw <anthonyshaw@apache.org>
:depends: libcloud >= 1.2.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import pprint
from salt.utils.versions import LooseVersion as _LooseVersion
# Import libcloud
try:
import libcloud
from libcloud.compute.base import NodeDriver, NodeState
from libcloud.compute.base import NodeAuthPassword
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.loadbalancer.base import Member
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
# This work-around for Issue #32743 is no longer needed for libcloud >=
# 1.4.0. However, older versions of libcloud must still be supported with
# this work-around. This work-around can be removed when the required
# minimum version of libcloud is 2.0.0 (See PR #40837 - which is
# implemented in Salt 2018.3.0).
if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'):
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
# Import salt.cloud libs
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
import salt.utils.cloud
import salt.config as config
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
try:
from netaddr import all_matching_cidrs # pylint: disable=unused-import
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
# Some of the libcloud functions need to be in the same namespace as the
# functions defined in the module, so we create new function objects inside
# this module namespace
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
destroy = namespaced_function(destroy, globals())
reboot = namespaced_function(reboot, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
get_node = namespaced_function(get_node, globals())
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'dimensiondata'
def __virtual__():
    '''
    Set up the libcloud functions and check for dimensiondata configurations.
    '''
    # Refuse to load unless both the provider config and the external
    # dependencies (libcloud, netaddr) are available.
    if get_configured_provider() is False:
        return False
    if get_dependencies() is False:
        return False
    # Load as soon as any provider entry contains a 'dimensiondata'
    # section; implicitly returns None (module not loaded) otherwise.
    for provider, details in six.iteritems(__opts__['providers']):
        if 'dimensiondata' not in details:
            continue
        return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # A provider counts as configured only when all three required
    # settings (user_id, key, region) are present.
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or 'dimensiondata',
        ('user_id', 'key', 'region')
    )
def get_dependencies():
    '''
    Warn if dependencies aren't met.
    '''
    # Both libcloud and netaddr must have imported successfully at module
    # load time for this driver to function.
    deps = {
        'libcloud': HAS_LIBCLOUD,
        'netaddr': HAS_NETADDR
    }
    return config.check_driver_dependencies(
        __virtualname__,
        deps
    )
def _query_node_data(vm_, data):
    '''
    Poll callback for ``cloud.wait_for_ip``.

    Returns ``data`` (with IP lists filled in) once the node is running and
    a usable address exists, ``False`` to record a failure, and ``None``
    (implicitly) to request another polling iteration.
    '''
    running = False
    try:
        node = show_instance(vm_['name'], 'action')  # pylint: disable=not-callable
        running = (node['state'] == NodeState.RUNNING)
        log.debug('Loaded node data for %s:\nname: %s\nstate: %s',
                  vm_['name'], pprint.pformat(node['name']), node['state'])
    except Exception as err:
        log.error(
            'Failed to get nodes list: %s', err,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        # Trigger a failure in the wait for IP function
        return running
    if not running:
        # Still not running, trigger another iteration
        return
    private = node['private_ips']
    public = node['public_ips']
    if private and not public:
        log.warning('Private IPs returned, but not public. Checking for misidentified IPs.')
        for private_ip in private:
            private_ip = preferred_ip(vm_, [private_ip])
            if private_ip is False:
                continue
            # Reclassify addresses that are actually publicly routable.
            if salt.utils.cloud.is_public_ip(private_ip):
                log.warning('%s is a public IP', private_ip)
                data.public_ips.append(private_ip)
            else:
                log.warning('%s is a private IP', private_ip)
                if private_ip not in data.private_ips:
                    data.private_ips.append(private_ip)
        if ssh_interface(vm_) == 'private_ips' and data.private_ips:
            return data
    if private:
        data.private_ips = private
        if ssh_interface(vm_) == 'private_ips':
            return data
    if public:
        data.public_ips = public
        if ssh_interface(vm_) != 'private_ips':
            return data
    # No address satisfied the requested interface; fall through (None)
    # so wait_for_ip polls again.
    log.debug('Contents of the node data:')
    log.debug(data)
def create(vm_):
    '''
    Create a single VM from a data dict
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'dimensiondata',
                vm_['profile']) is False:
            return False
    except AttributeError:
        pass
    # Announce the creation attempt on the salt event bus.
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', vm_['name'])
    conn = get_conn()
    # Resolve location, image, network domain and VLAN from the profile.
    location = conn.ex_get_location_by_id(vm_['location'])
    images = conn.list_images(location=location)
    image = [x for x in images if x.id == vm_['image']][0]
    network_domains = conn.ex_list_network_domains(location=location)
    try:
        network_domain = [y for y in network_domains
                          if y.name == vm_['network_domain']][0]
    except IndexError:
        # Named network domain does not exist yet; create it.
        network_domain = conn.ex_create_network_domain(
            location=location,
            name=vm_['network_domain'],
            plan='ADVANCED',
            description=''
        )
    try:
        vlan = [y for y in conn.ex_list_vlans(
            location=location,
            network_domain=network_domain)
            if y.name == vm_['vlan']][0]
    except (IndexError, KeyError):
        # Use the first VLAN in the network domain
        vlan = conn.ex_list_vlans(
            location=location,
            network_domain=network_domain)[0]
    kwargs = {
        'name': vm_['name'],
        'image': image,
        'ex_description': vm_['description'],
        'ex_network_domain': network_domain,
        'ex_vlan': vlan,
        'ex_is_started': vm_['is_started']
    }
    # Serialise request args for the event bus (see _to_event_data).
    event_data = _to_event_data(kwargs)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('requesting', event_data, list(event_data)),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Initial password (excluded from event payload)
    initial_password = NodeAuthPassword(vm_['auth'])
    kwargs['auth'] = initial_password
    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating %s on DIMENSIONDATA\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            exc_info_on_loglevel=logging.DEBUG
        )
        return False
    # Block until the node reports a usable IP (see _query_node_data).
    try:
        data = __utils__['cloud.wait_for_ip'](
            _query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=25 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=30),
            max_failures=config.get_cloud_config_value(
                'wait_for_ip_max_failures', vm_, __opts__, default=60),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])  # pylint: disable=not-callable
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))
    log.debug('VM is now running')
    # Pick the SSH address according to the configured interface.
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address %s', ip_address)
    if __utils__['cloud.get_salt_interface'](vm_, __opts__) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: %s', salt_ip_address)
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: %s', salt_ip_address)
    if not ip_address:
        raise SaltCloudSystemExit(
            'No IP addresses could be found.'
        )
    vm_['salt_host'] = salt_ip_address
    vm_['ssh_host'] = ip_address
    vm_['password'] = vm_['auth']
    # Bootstrap the new node as a salt minion.
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data.__dict__)
    # Never log or emit the node password.
    if 'password' in data.extra:
        del data.extra['password']
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data.__dict__)
    )
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_lb(kwargs=None, call=None):
    r'''
    Create a load-balancer configuration.

    Required kwargs: ``name``, ``port``, ``networkdomain``.
    Optional kwargs: ``members`` (comma-separated node names),
    ``protocol``, ``algorithm``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f create_lb dimensiondata \
            name=dev-lb port=80 protocol=http \
            members=w1,w2,w3 algorithm=ROUND_ROBIN
    '''
    # Validate the invocation style and all required arguments BEFORE
    # opening any provider connection (previously get_conn() ran first,
    # doing remote-auth work even for invalid calls).
    if call != 'function':
        raise SaltCloudSystemExit(
            'The create_lb function must be called with -f or --function.'
        )
    if not kwargs or 'name' not in kwargs:
        log.error(
            'A name must be specified when creating a health check.'
        )
        return False
    if 'port' not in kwargs:
        log.error(
            'A port or port-range must be specified for the load-balancer.'
        )
        return False
    if 'networkdomain' not in kwargs:
        log.error(
            'A network domain must be specified for the load-balancer.'
        )
        return False
    conn = get_conn()
    if 'members' in kwargs:
        # Resolve each named node to its primary private IP.
        members = []
        ip = ""
        members_list = kwargs.get('members').split(',')
        log.debug('MemberList: %s', members_list)
        for member in members_list:
            try:
                log.debug('Member: %s', member)
                node = get_node(conn, member)  # pylint: disable=not-callable
                log.debug('Node: %s', node)
                ip = node.private_ips[0]
            except Exception as err:
                log.error(
                    'Failed to get node ip: %s', err,
                    # Show the traceback if the debug logging level is enabled
                    exc_info_on_loglevel=logging.DEBUG
                )
            # NOTE(review): if the lookup failed, this reuses the previous
            # member's IP (or "" for the first one) -- confirm whether
            # failed members should be skipped instead.
            members.append(Member(ip, ip, kwargs['port']))
    else:
        members = None
    log.debug('Members: %s', members)
    networkdomain = kwargs['networkdomain']
    name = kwargs['name']
    port = kwargs['port']
    protocol = kwargs.get('protocol', None)
    algorithm = kwargs.get('algorithm', None)
    # Scope the load-balancer connection to the requested network domain.
    lb_conn = get_lb_conn(conn)
    network_domains = conn.ex_list_network_domains()
    network_domain = [y for y in network_domains if y.name == networkdomain][0]
    log.debug('Network Domain: %s', network_domain.id)
    lb_conn.ex_set_current_network_domain(network_domain.id)
    event_data = _to_event_data(kwargs)
    __utils__['cloud.fire_event'](
        'event',
        'create load_balancer',
        'salt/cloud/loadbalancer/creating',
        args=event_data,
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    lb = lb_conn.create_balancer(
        name, port, protocol, algorithm, members
    )
    event_data = _to_event_data(kwargs)
    __utils__['cloud.fire_event'](
        'event',
        'created load_balancer',
        'salt/cloud/loadbalancer/created',
        args=event_data,
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return _expand_balancer(lb)
def _expand_balancer(lb):
'''
Convert the libcloud load-balancer object into something more serializable.
'''
ret = {}
ret.update(lb.__dict__)
return ret
def preferred_ip(vm_, ips):
    '''
    Return the first address in ``ips`` matching the configured protocol
    family ('ipv4' by default, or 'ipv6'), or False when none matches.
    '''
    proto = config.get_cloud_config_value(
        'protocol', vm_, __opts__, default='ipv4', search_global=False
    )
    family = socket.AF_INET
    if proto == 'ipv6':
        family = socket.AF_INET6
    for ip in ips:
        try:
            # inet_pton raises for addresses not valid in this family;
            # the first one that parses wins.
            socket.inet_pton(family, ip)
            return ip
        except Exception:
            continue
    return False
def ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    # Looked up per-profile only (search_global=False).
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
def stop(name, call=None):
    '''
    Stop a VM in DimensionData.

    name:
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    conn = get_conn()
    # Resolve the libcloud node object for this VM name.
    node = get_node(conn, name)  # pylint: disable=not-callable
    log.debug('Node of Cloud VM: %s', node)
    # Request a graceful shutdown through the DimensionData API.
    status = conn.ex_shutdown_graceful(node)
    log.debug('Status of Cloud VM: %s', status)
    return status
def start(name, call=None):
    '''
    Start a VM in DimensionData.

    :param str name:
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    conn = get_conn()
    # Resolve the libcloud node object for this VM name.
    node = get_node(conn, name)  # pylint: disable=not-callable
    log.debug('Node of Cloud VM: %s', node)
    # Power the node on via the DimensionData API and return its response.
    status = conn.ex_start_node(node)
    log.debug('Status of Cloud VM: %s', status)
    return status
def get_conn():
    '''
    Return a libcloud DimensionData compute driver connection built from
    the configured provider settings (``user_id``, ``key``, ``region``).

    NOTE(review): when ``key`` is None this function falls through and
    implicitly returns None; callers then fail later on attribute access.
    Confirm whether an explicit error would be preferable.
    '''
    vm_ = get_configured_provider()
    driver = get_driver(Provider.DIMENSIONDATA)
    region = config.get_cloud_config_value(
        'region', vm_, __opts__
    )
    user_id = config.get_cloud_config_value(
        'user_id', vm_, __opts__
    )
    key = config.get_cloud_config_value(
        'key', vm_, __opts__
    )
    if key is not None:
        log.debug('DimensionData authenticating using password')
        return driver(
            user_id,
            key,
            region=region
        )
def get_lb_conn(dd_driver=None):
    '''
    Return a load-balancer conn object.

    :param dd_driver: the active compute-driver connection; required.
    :raises SaltCloudSystemExit: when no driver is supplied.
    '''
    # Validate the argument before doing any configuration lookups
    # (previously the check ran only after three config reads).
    if not dd_driver:
        raise SaltCloudSystemExit(
            'Missing dimensiondata_driver for get_lb_conn method.'
        )
    vm_ = get_configured_provider()
    region = config.get_cloud_config_value(
        'region', vm_, __opts__
    )
    user_id = config.get_cloud_config_value(
        'user_id', vm_, __opts__
    )
    key = config.get_cloud_config_value(
        'key', vm_, __opts__
    )
    return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region)
|
saltstack/salt
|
salt/proxy/dummy.py
|
package_install
|
python
|
def package_install(name, **kwargs):
'''
Install a "package" on the REST server
'''
DETAILS = _load_state()
if kwargs.get('version', False):
version = kwargs['version']
else:
version = '1.0'
DETAILS['packages'][name] = version
_save_state(DETAILS)
return {name: version}
|
Install a "package" on the REST server
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/dummy.py#L164-L175
|
[
"def _save_state(details):\n with salt.utils.files.fopen(FILENAME, 'wb') as pck:\n pickle.dump(details, pck)\n",
"def _load_state():\n try:\n if six.PY3 is True:\n mode = 'rb'\n else:\n mode = 'r'\n\n with salt.utils.files.fopen(FILENAME, mode) as pck:\n DETAILS = pickle.load(pck)\n except EOFError:\n DETAILS = {}\n DETAILS['initialized'] = False\n _save_state(DETAILS)\n\n return DETAILS\n"
] |
# -*- coding: utf-8 -*-
'''
This is the a dummy proxy-minion designed for testing the proxy minion subsystem.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os
import pickle
import logging
# Import Salt libs
import salt.ext.six as six
import salt.utils.files
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['dummy']
# Variables are scoped to this module so we can have persistent data
# across calls to fns in here.
DETAILS = {}
DETAILS['services'] = {'apache': 'running', 'ntp': 'running', 'samba': 'stopped'}
DETAILS['packages'] = {'coreutils': '1.0', 'apache': '2.4', 'tinc': '1.4', 'redbull': '999.99'}
FILENAME = salt.utils.files.mkstemp()
# Want logging!
log = logging.getLogger(__file__)
# This does nothing, it's here just as an example and to provide a log
# entry when the module is loaded.
def __virtual__():
    '''
    Always load this dummy proxy module; it has no real dependencies.
    '''
    log.debug('dummy proxy __virtual__() called...')
    return True
def _save_state(details):
    '''Pickle the ``details`` state dict to the module's temp FILENAME.'''
    with salt.utils.files.fopen(FILENAME, 'wb') as pck:
        pickle.dump(details, pck)
def _load_state():
    '''
    Unpickle and return the persisted state dict from FILENAME.

    On an empty file (EOFError) the state is reset to
    ``{'initialized': False}`` and written back before returning.
    '''
    try:
        # Pickle data must be read in binary mode on Python 3.
        if six.PY3 is True:
            mode = 'rb'
        else:
            mode = 'r'
        with salt.utils.files.fopen(FILENAME, mode) as pck:
            DETAILS = pickle.load(pck)  # local; shadows the module-level DETAILS
    except EOFError:
        DETAILS = {}
        DETAILS['initialized'] = False
        _save_state(DETAILS)
    return DETAILS
# Every proxy module needs an 'init', though you can
# just put DETAILS['initialized'] = True here if nothing
# else needs to be done.
def init(opts):
    '''Mark the module-level state as initialized and persist it.'''
    log.debug('dummy proxy init() called...')
    DETAILS['initialized'] = True
    _save_state(DETAILS)
def initialized():
    '''
    Since grains are loaded in many different places and some of those
    places occur before the proxy can be initialized, return whether
    our init() function has been called
    '''
    DETAILS = _load_state()
    # Default False covers a freshly reset state file.
    return DETAILS.get('initialized', False)
def grains():
    '''
    Make up some grains
    '''
    DETAILS = _load_state()
    # Populate the grains cache lazily on first call and persist it.
    if 'grains_cache' not in DETAILS:
        DETAILS['grains_cache'] = {'dummy_grain_1': 'one', 'dummy_grain_2': 'two', 'dummy_grain_3': 'three', }
        _save_state(DETAILS)
    return DETAILS['grains_cache']
def grains_refresh():
    '''
    Refresh the grains
    '''
    # Invalidate the persisted cache, then rebuild it via grains().
    DETAILS = _load_state()
    DETAILS['grains_cache'] = None
    _save_state(DETAILS)
    return grains()
def fns():
    '''Return a static marker entry proving this proxymodule was reachable.'''
    message = (
        'This key is here because a function in '
        'grains/rest_sample.py called fns() here in the proxymodule.'
    )
    return {'details': message}
def service_start(name):
    '''
    Start a "service" on the dummy server
    '''
    # Persist the new state so later calls observe it.
    DETAILS = _load_state()
    DETAILS['services'][name] = 'running'
    _save_state(DETAILS)
    return 'running'
def service_stop(name):
    '''
    Stop a "service" on the dummy server
    '''
    # Persist the new state so later calls observe it.
    DETAILS = _load_state()
    DETAILS['services'][name] = 'stopped'
    _save_state(DETAILS)
    return 'stopped'
def service_restart(name):
    '''
    Restart a "service" on the REST server.

    The dummy proxy treats every restart as an immediate success and does
    not touch the persisted service state.
    '''
    return True
def service_list():
    '''
    List "services" on the REST server
    '''
    DETAILS = _load_state()
    # Returns the service names only, not their states.
    return list(DETAILS['services'])
def service_status(name):
    '''
    Check if a service is running on the REST server.

    Returns ``{'comment': 'running'}`` or ``{'comment': 'stopped'}``;
    raises KeyError for an unknown service name.
    '''
    state = _load_state()['services'][name]
    return {'comment': 'running' if state == 'running' else 'stopped'}
def package_list():
    '''
    List "packages" installed on the REST server
    '''
    DETAILS = _load_state()
    # Mapping of package name -> version string.
    return DETAILS['packages']
def upgrade():
    '''
    "Upgrade" packages
    '''
    DETAILS = _load_state()
    # uptodate() returns the bumped package map; persist it as the new state.
    pkgs = uptodate()
    DETAILS['packages'] = pkgs
    _save_state(DETAILS)
    return pkgs
def uptodate():
    '''
    Call the REST endpoint to see if the packages on the "server" are up to date.

    Bumps every package version by 1.0 in a freshly loaded state dict;
    the result is NOT persisted here (upgrade() saves it).
    '''
    DETAILS = _load_state()
    for p in DETAILS['packages']:
        version_float = float(DETAILS['packages'][p])
        version_float = version_float + 1.0
        DETAILS['packages'][p] = six.text_type(version_float)
    return DETAILS['packages']
def package_remove(name):
    '''
    Remove a "package" on the REST server
    '''
    DETAILS = _load_state()
    # KeyError propagates if the package is not installed.
    DETAILS['packages'].pop(name)
    _save_state(DETAILS)
    return DETAILS['packages']
def package_status(name):
    '''
    Check the installation status of a package on the REST server
    '''
    DETAILS = _load_state()
    # Implicitly returns None when the package is not installed.
    if name in DETAILS['packages']:
        return {name: DETAILS['packages'][name]}
def ping():
    '''
    Degenerate ping
    '''
    log.debug('dummy proxy returning ping')
    return True
def shutdown(opts):
    '''
    For this proxy shutdown is a no-op
    '''
    log.debug('dummy proxy shutdown() called...')
    # Clean up a state-recorded temp file if one was registered.
    DETAILS = _load_state()
    if 'filename' in DETAILS:
        os.unlink(DETAILS['filename'])
def test_from_state():
    '''
    Test function so we have something to call from a state
    :return: the fixed string 'testvalue'
    '''
    log.debug('test_from_state called')
    return 'testvalue'
|
saltstack/salt
|
salt/proxy/dummy.py
|
upgrade
|
python
|
def upgrade():
'''
"Upgrade" packages
'''
DETAILS = _load_state()
pkgs = uptodate()
DETAILS['packages'] = pkgs
_save_state(DETAILS)
return pkgs
|
"Upgrade" packages
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/dummy.py#L178-L186
|
[
"def _save_state(details):\n with salt.utils.files.fopen(FILENAME, 'wb') as pck:\n pickle.dump(details, pck)\n",
"def _load_state():\n try:\n if six.PY3 is True:\n mode = 'rb'\n else:\n mode = 'r'\n\n with salt.utils.files.fopen(FILENAME, mode) as pck:\n DETAILS = pickle.load(pck)\n except EOFError:\n DETAILS = {}\n DETAILS['initialized'] = False\n _save_state(DETAILS)\n\n return DETAILS\n",
"def uptodate():\n '''\n Call the REST endpoint to see if the packages on the \"server\" are up to date.\n '''\n DETAILS = _load_state()\n for p in DETAILS['packages']:\n version_float = float(DETAILS['packages'][p])\n version_float = version_float + 1.0\n DETAILS['packages'][p] = six.text_type(version_float)\n return DETAILS['packages']\n"
] |
# -*- coding: utf-8 -*-
'''
This is the a dummy proxy-minion designed for testing the proxy minion subsystem.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os
import pickle
import logging
# Import Salt libs
import salt.ext.six as six
import salt.utils.files
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['dummy']
# Variables are scoped to this module so we can have persistent data
# across calls to fns in here.
DETAILS = {}
DETAILS['services'] = {'apache': 'running', 'ntp': 'running', 'samba': 'stopped'}
DETAILS['packages'] = {'coreutils': '1.0', 'apache': '2.4', 'tinc': '1.4', 'redbull': '999.99'}
FILENAME = salt.utils.files.mkstemp()
# Want logging!
log = logging.getLogger(__file__)
# This does nothing, it's here just as an example and to provide a log
# entry when the module is loaded.
def __virtual__():
'''
Only return if all the modules are available
'''
log.debug('dummy proxy __virtual__() called...')
return True
def _save_state(details):
with salt.utils.files.fopen(FILENAME, 'wb') as pck:
pickle.dump(details, pck)
def _load_state():
try:
if six.PY3 is True:
mode = 'rb'
else:
mode = 'r'
with salt.utils.files.fopen(FILENAME, mode) as pck:
DETAILS = pickle.load(pck)
except EOFError:
DETAILS = {}
DETAILS['initialized'] = False
_save_state(DETAILS)
return DETAILS
# Every proxy module needs an 'init', though you can
# just put DETAILS['initialized'] = True here if nothing
# else needs to be done.
def init(opts):
log.debug('dummy proxy init() called...')
DETAILS['initialized'] = True
_save_state(DETAILS)
def initialized():
'''
Since grains are loaded in many different places and some of those
places occur before the proxy can be initialized, return whether
our init() function has been called
'''
DETAILS = _load_state()
return DETAILS.get('initialized', False)
def grains():
'''
Make up some grains
'''
DETAILS = _load_state()
if 'grains_cache' not in DETAILS:
DETAILS['grains_cache'] = {'dummy_grain_1': 'one', 'dummy_grain_2': 'two', 'dummy_grain_3': 'three', }
_save_state(DETAILS)
return DETAILS['grains_cache']
def grains_refresh():
'''
Refresh the grains
'''
DETAILS = _load_state()
DETAILS['grains_cache'] = None
_save_state(DETAILS)
return grains()
def fns():
return {'details': 'This key is here because a function in '
'grains/rest_sample.py called fns() here in the proxymodule.'}
def service_start(name):
'''
Start a "service" on the dummy server
'''
DETAILS = _load_state()
DETAILS['services'][name] = 'running'
_save_state(DETAILS)
return 'running'
def service_stop(name):
'''
Stop a "service" on the dummy server
'''
DETAILS = _load_state()
DETAILS['services'][name] = 'stopped'
_save_state(DETAILS)
return 'stopped'
def service_restart(name):
'''
Restart a "service" on the REST server
'''
return True
def service_list():
'''
List "services" on the REST server
'''
DETAILS = _load_state()
return list(DETAILS['services'])
def service_status(name):
'''
Check if a service is running on the REST server
'''
DETAILS = _load_state()
if DETAILS['services'][name] == 'running':
return {'comment': 'running'}
else:
return {'comment': 'stopped'}
def package_list():
'''
List "packages" installed on the REST server
'''
DETAILS = _load_state()
return DETAILS['packages']
def package_install(name, **kwargs):
'''
Install a "package" on the REST server
'''
DETAILS = _load_state()
if kwargs.get('version', False):
version = kwargs['version']
else:
version = '1.0'
DETAILS['packages'][name] = version
_save_state(DETAILS)
return {name: version}
def uptodate():
'''
Call the REST endpoint to see if the packages on the "server" are up to date.
'''
DETAILS = _load_state()
for p in DETAILS['packages']:
version_float = float(DETAILS['packages'][p])
version_float = version_float + 1.0
DETAILS['packages'][p] = six.text_type(version_float)
return DETAILS['packages']
def package_remove(name):
'''
Remove a "package" on the REST server
'''
DETAILS = _load_state()
DETAILS['packages'].pop(name)
_save_state(DETAILS)
return DETAILS['packages']
def package_status(name):
'''
Check the installation status of a package on the REST server
'''
DETAILS = _load_state()
if name in DETAILS['packages']:
return {name: DETAILS['packages'][name]}
def ping():
'''
Degenerate ping
'''
log.debug('dummy proxy returning ping')
return True
def shutdown(opts):
'''
For this proxy shutdown is a no-op
'''
log.debug('dummy proxy shutdown() called...')
DETAILS = _load_state()
if 'filename' in DETAILS:
os.unlink(DETAILS['filename'])
def test_from_state():
'''
Test function so we have something to call from a state
:return:
'''
log.debug('test_from_state called')
return 'testvalue'
|
saltstack/salt
|
salt/proxy/dummy.py
|
uptodate
|
python
|
def uptodate():
'''
Call the REST endpoint to see if the packages on the "server" are up to date.
'''
DETAILS = _load_state()
for p in DETAILS['packages']:
version_float = float(DETAILS['packages'][p])
version_float = version_float + 1.0
DETAILS['packages'][p] = six.text_type(version_float)
return DETAILS['packages']
|
Call the REST endpoint to see if the packages on the "server" are up to date.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/dummy.py#L189-L198
|
[
"def _load_state():\n try:\n if six.PY3 is True:\n mode = 'rb'\n else:\n mode = 'r'\n\n with salt.utils.files.fopen(FILENAME, mode) as pck:\n DETAILS = pickle.load(pck)\n except EOFError:\n DETAILS = {}\n DETAILS['initialized'] = False\n _save_state(DETAILS)\n\n return DETAILS\n"
] |
# -*- coding: utf-8 -*-
'''
This is the a dummy proxy-minion designed for testing the proxy minion subsystem.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os
import pickle
import logging
# Import Salt libs
import salt.ext.six as six
import salt.utils.files
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['dummy']
# Variables are scoped to this module so we can have persistent data
# across calls to fns in here.
DETAILS = {}
DETAILS['services'] = {'apache': 'running', 'ntp': 'running', 'samba': 'stopped'}
DETAILS['packages'] = {'coreutils': '1.0', 'apache': '2.4', 'tinc': '1.4', 'redbull': '999.99'}
FILENAME = salt.utils.files.mkstemp()
# Want logging!
log = logging.getLogger(__file__)
# This does nothing, it's here just as an example and to provide a log
# entry when the module is loaded.
def __virtual__():
'''
Only return if all the modules are available
'''
log.debug('dummy proxy __virtual__() called...')
return True
def _save_state(details):
with salt.utils.files.fopen(FILENAME, 'wb') as pck:
pickle.dump(details, pck)
def _load_state():
try:
if six.PY3 is True:
mode = 'rb'
else:
mode = 'r'
with salt.utils.files.fopen(FILENAME, mode) as pck:
DETAILS = pickle.load(pck)
except EOFError:
DETAILS = {}
DETAILS['initialized'] = False
_save_state(DETAILS)
return DETAILS
# Every proxy module needs an 'init', though you can
# just put DETAILS['initialized'] = True here if nothing
# else needs to be done.
def init(opts):
log.debug('dummy proxy init() called...')
DETAILS['initialized'] = True
_save_state(DETAILS)
def initialized():
'''
Since grains are loaded in many different places and some of those
places occur before the proxy can be initialized, return whether
our init() function has been called
'''
DETAILS = _load_state()
return DETAILS.get('initialized', False)
def grains():
'''
Make up some grains
'''
DETAILS = _load_state()
if 'grains_cache' not in DETAILS:
DETAILS['grains_cache'] = {'dummy_grain_1': 'one', 'dummy_grain_2': 'two', 'dummy_grain_3': 'three', }
_save_state(DETAILS)
return DETAILS['grains_cache']
def grains_refresh():
'''
Refresh the grains
'''
DETAILS = _load_state()
DETAILS['grains_cache'] = None
_save_state(DETAILS)
return grains()
def fns():
return {'details': 'This key is here because a function in '
'grains/rest_sample.py called fns() here in the proxymodule.'}
def service_start(name):
'''
Start a "service" on the dummy server
'''
DETAILS = _load_state()
DETAILS['services'][name] = 'running'
_save_state(DETAILS)
return 'running'
def service_stop(name):
'''
Stop a "service" on the dummy server
'''
DETAILS = _load_state()
DETAILS['services'][name] = 'stopped'
_save_state(DETAILS)
return 'stopped'
def service_restart(name):
'''
Restart a "service" on the REST server
'''
return True
def service_list():
'''
List "services" on the REST server
'''
DETAILS = _load_state()
return list(DETAILS['services'])
def service_status(name):
'''
Check if a service is running on the REST server
'''
DETAILS = _load_state()
if DETAILS['services'][name] == 'running':
return {'comment': 'running'}
else:
return {'comment': 'stopped'}
def package_list():
'''
List "packages" installed on the REST server
'''
DETAILS = _load_state()
return DETAILS['packages']
def package_install(name, **kwargs):
'''
Install a "package" on the REST server
'''
DETAILS = _load_state()
if kwargs.get('version', False):
version = kwargs['version']
else:
version = '1.0'
DETAILS['packages'][name] = version
_save_state(DETAILS)
return {name: version}
def upgrade():
'''
"Upgrade" packages
'''
DETAILS = _load_state()
pkgs = uptodate()
DETAILS['packages'] = pkgs
_save_state(DETAILS)
return pkgs
def package_remove(name):
'''
Remove a "package" on the REST server
'''
DETAILS = _load_state()
DETAILS['packages'].pop(name)
_save_state(DETAILS)
return DETAILS['packages']
def package_status(name):
'''
Check the installation status of a package on the REST server
'''
DETAILS = _load_state()
if name in DETAILS['packages']:
return {name: DETAILS['packages'][name]}
def ping():
'''
Degenerate ping
'''
log.debug('dummy proxy returning ping')
return True
def shutdown(opts):
'''
For this proxy shutdown is a no-op
'''
log.debug('dummy proxy shutdown() called...')
DETAILS = _load_state()
if 'filename' in DETAILS:
os.unlink(DETAILS['filename'])
def test_from_state():
'''
Test function so we have something to call from a state
:return:
'''
log.debug('test_from_state called')
return 'testvalue'
|
saltstack/salt
|
salt/proxy/dummy.py
|
package_remove
|
python
|
def package_remove(name):
'''
Remove a "package" on the REST server
'''
DETAILS = _load_state()
DETAILS['packages'].pop(name)
_save_state(DETAILS)
return DETAILS['packages']
|
Remove a "package" on the REST server
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/dummy.py#L201-L208
|
[
"def _save_state(details):\n with salt.utils.files.fopen(FILENAME, 'wb') as pck:\n pickle.dump(details, pck)\n",
"def _load_state():\n try:\n if six.PY3 is True:\n mode = 'rb'\n else:\n mode = 'r'\n\n with salt.utils.files.fopen(FILENAME, mode) as pck:\n DETAILS = pickle.load(pck)\n except EOFError:\n DETAILS = {}\n DETAILS['initialized'] = False\n _save_state(DETAILS)\n\n return DETAILS\n"
] |
# -*- coding: utf-8 -*-
'''
This is the a dummy proxy-minion designed for testing the proxy minion subsystem.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os
import pickle
import logging
# Import Salt libs
import salt.ext.six as six
import salt.utils.files
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['dummy']
# Variables are scoped to this module so we can have persistent data
# across calls to fns in here.
DETAILS = {}
DETAILS['services'] = {'apache': 'running', 'ntp': 'running', 'samba': 'stopped'}
DETAILS['packages'] = {'coreutils': '1.0', 'apache': '2.4', 'tinc': '1.4', 'redbull': '999.99'}
FILENAME = salt.utils.files.mkstemp()
# Want logging!
log = logging.getLogger(__file__)
# This does nothing, it's here just as an example and to provide a log
# entry when the module is loaded.
def __virtual__():
'''
Only return if all the modules are available
'''
log.debug('dummy proxy __virtual__() called...')
return True
def _save_state(details):
with salt.utils.files.fopen(FILENAME, 'wb') as pck:
pickle.dump(details, pck)
def _load_state():
try:
if six.PY3 is True:
mode = 'rb'
else:
mode = 'r'
with salt.utils.files.fopen(FILENAME, mode) as pck:
DETAILS = pickle.load(pck)
except EOFError:
DETAILS = {}
DETAILS['initialized'] = False
_save_state(DETAILS)
return DETAILS
# Every proxy module needs an 'init', though you can
# just put DETAILS['initialized'] = True here if nothing
# else needs to be done.
def init(opts):
log.debug('dummy proxy init() called...')
DETAILS['initialized'] = True
_save_state(DETAILS)
def initialized():
'''
Since grains are loaded in many different places and some of those
places occur before the proxy can be initialized, return whether
our init() function has been called
'''
DETAILS = _load_state()
return DETAILS.get('initialized', False)
def grains():
'''
Make up some grains
'''
DETAILS = _load_state()
if 'grains_cache' not in DETAILS:
DETAILS['grains_cache'] = {'dummy_grain_1': 'one', 'dummy_grain_2': 'two', 'dummy_grain_3': 'three', }
_save_state(DETAILS)
return DETAILS['grains_cache']
def grains_refresh():
'''
Refresh the grains
'''
DETAILS = _load_state()
DETAILS['grains_cache'] = None
_save_state(DETAILS)
return grains()
def fns():
return {'details': 'This key is here because a function in '
'grains/rest_sample.py called fns() here in the proxymodule.'}
def service_start(name):
'''
Start a "service" on the dummy server
'''
DETAILS = _load_state()
DETAILS['services'][name] = 'running'
_save_state(DETAILS)
return 'running'
def service_stop(name):
'''
Stop a "service" on the dummy server
'''
DETAILS = _load_state()
DETAILS['services'][name] = 'stopped'
_save_state(DETAILS)
return 'stopped'
def service_restart(name):
'''
Restart a "service" on the REST server
'''
return True
def service_list():
'''
List "services" on the REST server
'''
DETAILS = _load_state()
return list(DETAILS['services'])
def service_status(name):
'''
Check if a service is running on the REST server
'''
DETAILS = _load_state()
if DETAILS['services'][name] == 'running':
return {'comment': 'running'}
else:
return {'comment': 'stopped'}
def package_list():
'''
List "packages" installed on the REST server
'''
DETAILS = _load_state()
return DETAILS['packages']
def package_install(name, **kwargs):
'''
Install a "package" on the REST server
'''
DETAILS = _load_state()
if kwargs.get('version', False):
version = kwargs['version']
else:
version = '1.0'
DETAILS['packages'][name] = version
_save_state(DETAILS)
return {name: version}
def upgrade():
'''
"Upgrade" packages
'''
DETAILS = _load_state()
pkgs = uptodate()
DETAILS['packages'] = pkgs
_save_state(DETAILS)
return pkgs
def uptodate():
'''
Call the REST endpoint to see if the packages on the "server" are up to date.
'''
DETAILS = _load_state()
for p in DETAILS['packages']:
version_float = float(DETAILS['packages'][p])
version_float = version_float + 1.0
DETAILS['packages'][p] = six.text_type(version_float)
return DETAILS['packages']
def package_status(name):
'''
Check the installation status of a package on the REST server
'''
DETAILS = _load_state()
if name in DETAILS['packages']:
return {name: DETAILS['packages'][name]}
def ping():
'''
Degenerate ping
'''
log.debug('dummy proxy returning ping')
return True
def shutdown(opts):
'''
For this proxy shutdown is a no-op
'''
log.debug('dummy proxy shutdown() called...')
DETAILS = _load_state()
if 'filename' in DETAILS:
os.unlink(DETAILS['filename'])
def test_from_state():
'''
Test function so we have something to call from a state
:return:
'''
log.debug('test_from_state called')
return 'testvalue'
|
saltstack/salt
|
salt/proxy/dummy.py
|
shutdown
|
python
|
def shutdown(opts):
'''
For this proxy shutdown is a no-op
'''
log.debug('dummy proxy shutdown() called...')
DETAILS = _load_state()
if 'filename' in DETAILS:
os.unlink(DETAILS['filename'])
|
For this proxy shutdown is a no-op
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/dummy.py#L228-L235
|
[
"def _load_state():\n try:\n if six.PY3 is True:\n mode = 'rb'\n else:\n mode = 'r'\n\n with salt.utils.files.fopen(FILENAME, mode) as pck:\n DETAILS = pickle.load(pck)\n except EOFError:\n DETAILS = {}\n DETAILS['initialized'] = False\n _save_state(DETAILS)\n\n return DETAILS\n"
] |
# -*- coding: utf-8 -*-
'''
This is the a dummy proxy-minion designed for testing the proxy minion subsystem.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os
import pickle
import logging
# Import Salt libs
import salt.ext.six as six
import salt.utils.files
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['dummy']
# Variables are scoped to this module so we can have persistent data
# across calls to fns in here.
DETAILS = {}
DETAILS['services'] = {'apache': 'running', 'ntp': 'running', 'samba': 'stopped'}
DETAILS['packages'] = {'coreutils': '1.0', 'apache': '2.4', 'tinc': '1.4', 'redbull': '999.99'}
FILENAME = salt.utils.files.mkstemp()
# Want logging!
log = logging.getLogger(__file__)
# This does nothing, it's here just as an example and to provide a log
# entry when the module is loaded.
def __virtual__():
'''
Only return if all the modules are available
'''
log.debug('dummy proxy __virtual__() called...')
return True
def _save_state(details):
with salt.utils.files.fopen(FILENAME, 'wb') as pck:
pickle.dump(details, pck)
def _load_state():
try:
if six.PY3 is True:
mode = 'rb'
else:
mode = 'r'
with salt.utils.files.fopen(FILENAME, mode) as pck:
DETAILS = pickle.load(pck)
except EOFError:
DETAILS = {}
DETAILS['initialized'] = False
_save_state(DETAILS)
return DETAILS
# Every proxy module needs an 'init', though you can
# just put DETAILS['initialized'] = True here if nothing
# else needs to be done.
def init(opts):
log.debug('dummy proxy init() called...')
DETAILS['initialized'] = True
_save_state(DETAILS)
def initialized():
'''
Since grains are loaded in many different places and some of those
places occur before the proxy can be initialized, return whether
our init() function has been called
'''
DETAILS = _load_state()
return DETAILS.get('initialized', False)
def grains():
'''
Make up some grains
'''
DETAILS = _load_state()
if 'grains_cache' not in DETAILS:
DETAILS['grains_cache'] = {'dummy_grain_1': 'one', 'dummy_grain_2': 'two', 'dummy_grain_3': 'three', }
_save_state(DETAILS)
return DETAILS['grains_cache']
def grains_refresh():
'''
Refresh the grains
'''
DETAILS = _load_state()
DETAILS['grains_cache'] = None
_save_state(DETAILS)
return grains()
def fns():
return {'details': 'This key is here because a function in '
'grains/rest_sample.py called fns() here in the proxymodule.'}
def service_start(name):
'''
Start a "service" on the dummy server
'''
DETAILS = _load_state()
DETAILS['services'][name] = 'running'
_save_state(DETAILS)
return 'running'
def service_stop(name):
'''
Stop a "service" on the dummy server
'''
DETAILS = _load_state()
DETAILS['services'][name] = 'stopped'
_save_state(DETAILS)
return 'stopped'
def service_restart(name):
'''
Restart a "service" on the REST server
'''
return True
def service_list():
'''
List "services" on the REST server
'''
DETAILS = _load_state()
return list(DETAILS['services'])
def service_status(name):
'''
Check if a service is running on the REST server
'''
DETAILS = _load_state()
if DETAILS['services'][name] == 'running':
return {'comment': 'running'}
else:
return {'comment': 'stopped'}
def package_list():
'''
List "packages" installed on the REST server
'''
DETAILS = _load_state()
return DETAILS['packages']
def package_install(name, **kwargs):
'''
Install a "package" on the REST server
'''
DETAILS = _load_state()
if kwargs.get('version', False):
version = kwargs['version']
else:
version = '1.0'
DETAILS['packages'][name] = version
_save_state(DETAILS)
return {name: version}
def upgrade():
'''
"Upgrade" packages
'''
DETAILS = _load_state()
pkgs = uptodate()
DETAILS['packages'] = pkgs
_save_state(DETAILS)
return pkgs
def uptodate():
'''
Call the REST endpoint to see if the packages on the "server" are up to date.
'''
DETAILS = _load_state()
for p in DETAILS['packages']:
version_float = float(DETAILS['packages'][p])
version_float = version_float + 1.0
DETAILS['packages'][p] = six.text_type(version_float)
return DETAILS['packages']
def package_remove(name):
'''
Remove a "package" on the REST server
'''
DETAILS = _load_state()
DETAILS['packages'].pop(name)
_save_state(DETAILS)
return DETAILS['packages']
def package_status(name):
'''
Check the installation status of a package on the REST server
'''
DETAILS = _load_state()
if name in DETAILS['packages']:
return {name: DETAILS['packages'][name]}
def ping():
'''
Degenerate ping
'''
log.debug('dummy proxy returning ping')
return True
def test_from_state():
'''
Test function so we have something to call from a state
:return:
'''
log.debug('test_from_state called')
return 'testvalue'
|
saltstack/salt
|
salt/cli/support/collector.py
|
SupportDataCollector.open
|
python
|
def open(self):
'''
Opens archive.
:return:
'''
if self.__arch is not None:
raise salt.exceptions.SaltException('Archive already opened.')
self.__arch = tarfile.TarFile.bz2open(self.archive_path, 'w')
|
Opens archive.
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L63-L70
| null |
class SupportDataCollector(object):
'''
Data collector. It behaves just like another outputter,
except it grabs the data to the archive files.
'''
def __init__(self, name, output):
'''
constructor of the data collector
:param name:
:param path:
:param format:
'''
self.archive_path = name
self.__default_outputter = output
self.__format = format
self.__arch = None
self.__current_section = None
self.__current_section_name = None
self.__default_root = time.strftime('%Y.%m.%d-%H.%M.%S-snapshot')
self.out = salt.cli.support.console.MessagesOutput()
def close(self):
'''
Closes the archive.
:return:
'''
if self.__arch is None:
raise salt.exceptions.SaltException('Archive already closed')
self._flush_content()
self.__arch.close()
self.__arch = None
def _flush_content(self):
'''
Flush content to the archive
:return:
'''
if self.__current_section is not None:
buff = BytesIO()
buff._dirty = False
for action_return in self.__current_section:
for title, ret_data in action_return.items():
if isinstance(ret_data, file):
self.out.put(ret_data.name, indent=4)
self.__arch.add(ret_data.name, arcname=ret_data.name)
else:
buff.write(salt.utils.stringutils.to_bytes(title + '\n'))
buff.write(salt.utils.stringutils.to_bytes(('-' * len(title)) + '\n\n'))
buff.write(salt.utils.stringutils.to_bytes(ret_data))
buff.write(salt.utils.stringutils.to_bytes('\n\n\n'))
buff._dirty = True
if buff._dirty:
buff.seek(0)
tar_info = tarfile.TarInfo(name="{}/{}".format(self.__default_root, self.__current_section_name))
if not hasattr(buff, 'getbuffer'): # Py2's BytesIO is older
buff.getbuffer = buff.getvalue
tar_info.size = len(buff.getbuffer())
self.__arch.addfile(tarinfo=tar_info, fileobj=buff)
def add(self, name):
'''
Start a new section.
:param name:
:return:
'''
if self.__current_section:
self._flush_content()
self.discard_current(name)
def discard_current(self, name=None):
'''
Discard current section
:return:
'''
self.__current_section = []
self.__current_section_name = name
def _printout(self, data, output):
'''
Use salt outputter to printout content.
:return:
'''
opts = {'extension_modules': '', 'color': False}
try:
printout = salt.output.get_printout(output, opts)(data)
if printout is not None:
return printout.rstrip()
except (KeyError, AttributeError, TypeError) as err:
log.debug(err, exc_info=True)
try:
printout = salt.output.get_printout('nested', opts)(data)
if printout is not None:
return printout.rstrip()
except (KeyError, AttributeError, TypeError) as err:
log.debug(err, exc_info=True)
printout = salt.output.get_printout('raw', opts)(data)
if printout is not None:
return printout.rstrip()
return salt.output.try_printout(data, output, opts)
def write(self, title, data, output=None):
'''
Add a data to the current opened section.
:return:
'''
if not isinstance(data, (dict, list, tuple)):
data = {'raw-content': str(data)}
output = output or self.__default_outputter
if output != 'null':
try:
if isinstance(data, dict) and 'return' in data:
data = data['return']
content = self._printout(data, output)
except Exception: # Fall-back to just raw YAML
content = None
else:
content = None
if content is None:
data = json.loads(json.dumps(data))
if isinstance(data, dict) and data.get('return'):
data = data.get('return')
content = yaml.safe_dump(data, default_flow_style=False, indent=4)
self.__current_section.append({title: content})
def link(self, title, path):
'''
Add a static file on the file system.
:param title:
:param path:
:return:
'''
# The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
# pylint: disable=W8470
if not isinstance(path, file):
path = salt.utils.files.fopen(path)
self.__current_section.append({title: path})
|
saltstack/salt
|
salt/cli/support/collector.py
|
SupportDataCollector.close
|
python
|
def close(self):
'''
Closes the archive.
:return:
'''
if self.__arch is None:
raise salt.exceptions.SaltException('Archive already closed')
self._flush_content()
self.__arch.close()
self.__arch = None
|
Closes the archive.
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L72-L81
|
[
"def _flush_content(self):\n '''\n Flush content to the archive\n :return:\n '''\n if self.__current_section is not None:\n buff = BytesIO()\n buff._dirty = False\n for action_return in self.__current_section:\n for title, ret_data in action_return.items():\n if isinstance(ret_data, file):\n self.out.put(ret_data.name, indent=4)\n self.__arch.add(ret_data.name, arcname=ret_data.name)\n else:\n buff.write(salt.utils.stringutils.to_bytes(title + '\\n'))\n buff.write(salt.utils.stringutils.to_bytes(('-' * len(title)) + '\\n\\n'))\n buff.write(salt.utils.stringutils.to_bytes(ret_data))\n buff.write(salt.utils.stringutils.to_bytes('\\n\\n\\n'))\n buff._dirty = True\n if buff._dirty:\n buff.seek(0)\n tar_info = tarfile.TarInfo(name=\"{}/{}\".format(self.__default_root, self.__current_section_name))\n if not hasattr(buff, 'getbuffer'): # Py2's BytesIO is older\n buff.getbuffer = buff.getvalue\n tar_info.size = len(buff.getbuffer())\n self.__arch.addfile(tarinfo=tar_info, fileobj=buff)\n"
] |
class SupportDataCollector(object):
'''
Data collector. It behaves just like another outputter,
except it grabs the data to the archive files.
'''
def __init__(self, name, output):
'''
constructor of the data collector
:param name:
:param path:
:param format:
'''
self.archive_path = name
self.__default_outputter = output
self.__format = format
self.__arch = None
self.__current_section = None
self.__current_section_name = None
self.__default_root = time.strftime('%Y.%m.%d-%H.%M.%S-snapshot')
self.out = salt.cli.support.console.MessagesOutput()
def open(self):
'''
Opens archive.
:return:
'''
if self.__arch is not None:
raise salt.exceptions.SaltException('Archive already opened.')
self.__arch = tarfile.TarFile.bz2open(self.archive_path, 'w')
def _flush_content(self):
'''
Flush content to the archive
:return:
'''
if self.__current_section is not None:
buff = BytesIO()
buff._dirty = False
for action_return in self.__current_section:
for title, ret_data in action_return.items():
if isinstance(ret_data, file):
self.out.put(ret_data.name, indent=4)
self.__arch.add(ret_data.name, arcname=ret_data.name)
else:
buff.write(salt.utils.stringutils.to_bytes(title + '\n'))
buff.write(salt.utils.stringutils.to_bytes(('-' * len(title)) + '\n\n'))
buff.write(salt.utils.stringutils.to_bytes(ret_data))
buff.write(salt.utils.stringutils.to_bytes('\n\n\n'))
buff._dirty = True
if buff._dirty:
buff.seek(0)
tar_info = tarfile.TarInfo(name="{}/{}".format(self.__default_root, self.__current_section_name))
if not hasattr(buff, 'getbuffer'): # Py2's BytesIO is older
buff.getbuffer = buff.getvalue
tar_info.size = len(buff.getbuffer())
self.__arch.addfile(tarinfo=tar_info, fileobj=buff)
def add(self, name):
'''
Start a new section.
:param name:
:return:
'''
if self.__current_section:
self._flush_content()
self.discard_current(name)
def discard_current(self, name=None):
'''
Discard current section
:return:
'''
self.__current_section = []
self.__current_section_name = name
def _printout(self, data, output):
'''
Use salt outputter to printout content.
:return:
'''
opts = {'extension_modules': '', 'color': False}
try:
printout = salt.output.get_printout(output, opts)(data)
if printout is not None:
return printout.rstrip()
except (KeyError, AttributeError, TypeError) as err:
log.debug(err, exc_info=True)
try:
printout = salt.output.get_printout('nested', opts)(data)
if printout is not None:
return printout.rstrip()
except (KeyError, AttributeError, TypeError) as err:
log.debug(err, exc_info=True)
printout = salt.output.get_printout('raw', opts)(data)
if printout is not None:
return printout.rstrip()
return salt.output.try_printout(data, output, opts)
def write(self, title, data, output=None):
'''
Add a data to the current opened section.
:return:
'''
if not isinstance(data, (dict, list, tuple)):
data = {'raw-content': str(data)}
output = output or self.__default_outputter
if output != 'null':
try:
if isinstance(data, dict) and 'return' in data:
data = data['return']
content = self._printout(data, output)
except Exception: # Fall-back to just raw YAML
content = None
else:
content = None
if content is None:
data = json.loads(json.dumps(data))
if isinstance(data, dict) and data.get('return'):
data = data.get('return')
content = yaml.safe_dump(data, default_flow_style=False, indent=4)
self.__current_section.append({title: content})
def link(self, title, path):
'''
Add a static file on the file system.
:param title:
:param path:
:return:
'''
# The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
# pylint: disable=W8470
if not isinstance(path, file):
path = salt.utils.files.fopen(path)
self.__current_section.append({title: path})
|
saltstack/salt
|
salt/cli/support/collector.py
|
SupportDataCollector._flush_content
|
python
|
def _flush_content(self):
'''
Flush content to the archive
:return:
'''
if self.__current_section is not None:
buff = BytesIO()
buff._dirty = False
for action_return in self.__current_section:
for title, ret_data in action_return.items():
if isinstance(ret_data, file):
self.out.put(ret_data.name, indent=4)
self.__arch.add(ret_data.name, arcname=ret_data.name)
else:
buff.write(salt.utils.stringutils.to_bytes(title + '\n'))
buff.write(salt.utils.stringutils.to_bytes(('-' * len(title)) + '\n\n'))
buff.write(salt.utils.stringutils.to_bytes(ret_data))
buff.write(salt.utils.stringutils.to_bytes('\n\n\n'))
buff._dirty = True
if buff._dirty:
buff.seek(0)
tar_info = tarfile.TarInfo(name="{}/{}".format(self.__default_root, self.__current_section_name))
if not hasattr(buff, 'getbuffer'): # Py2's BytesIO is older
buff.getbuffer = buff.getvalue
tar_info.size = len(buff.getbuffer())
self.__arch.addfile(tarinfo=tar_info, fileobj=buff)
|
Flush content to the archive
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L83-L108
| null |
class SupportDataCollector(object):
'''
Data collector. It behaves just like another outputter,
except it grabs the data to the archive files.
'''
def __init__(self, name, output):
'''
constructor of the data collector
:param name:
:param path:
:param format:
'''
self.archive_path = name
self.__default_outputter = output
self.__format = format
self.__arch = None
self.__current_section = None
self.__current_section_name = None
self.__default_root = time.strftime('%Y.%m.%d-%H.%M.%S-snapshot')
self.out = salt.cli.support.console.MessagesOutput()
def open(self):
'''
Opens archive.
:return:
'''
if self.__arch is not None:
raise salt.exceptions.SaltException('Archive already opened.')
self.__arch = tarfile.TarFile.bz2open(self.archive_path, 'w')
def close(self):
'''
Closes the archive.
:return:
'''
if self.__arch is None:
raise salt.exceptions.SaltException('Archive already closed')
self._flush_content()
self.__arch.close()
self.__arch = None
def add(self, name):
'''
Start a new section.
:param name:
:return:
'''
if self.__current_section:
self._flush_content()
self.discard_current(name)
def discard_current(self, name=None):
'''
Discard current section
:return:
'''
self.__current_section = []
self.__current_section_name = name
def _printout(self, data, output):
'''
Use salt outputter to printout content.
:return:
'''
opts = {'extension_modules': '', 'color': False}
try:
printout = salt.output.get_printout(output, opts)(data)
if printout is not None:
return printout.rstrip()
except (KeyError, AttributeError, TypeError) as err:
log.debug(err, exc_info=True)
try:
printout = salt.output.get_printout('nested', opts)(data)
if printout is not None:
return printout.rstrip()
except (KeyError, AttributeError, TypeError) as err:
log.debug(err, exc_info=True)
printout = salt.output.get_printout('raw', opts)(data)
if printout is not None:
return printout.rstrip()
return salt.output.try_printout(data, output, opts)
def write(self, title, data, output=None):
'''
Add a data to the current opened section.
:return:
'''
if not isinstance(data, (dict, list, tuple)):
data = {'raw-content': str(data)}
output = output or self.__default_outputter
if output != 'null':
try:
if isinstance(data, dict) and 'return' in data:
data = data['return']
content = self._printout(data, output)
except Exception: # Fall-back to just raw YAML
content = None
else:
content = None
if content is None:
data = json.loads(json.dumps(data))
if isinstance(data, dict) and data.get('return'):
data = data.get('return')
content = yaml.safe_dump(data, default_flow_style=False, indent=4)
self.__current_section.append({title: content})
def link(self, title, path):
'''
Add a static file on the file system.
:param title:
:param path:
:return:
'''
# The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
# pylint: disable=W8470
if not isinstance(path, file):
path = salt.utils.files.fopen(path)
self.__current_section.append({title: path})
|
saltstack/salt
|
salt/cli/support/collector.py
|
SupportDataCollector.add
|
python
|
def add(self, name):
'''
Start a new section.
:param name:
:return:
'''
if self.__current_section:
self._flush_content()
self.discard_current(name)
|
Start a new section.
:param name:
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L110-L118
|
[
"def _flush_content(self):\n '''\n Flush content to the archive\n :return:\n '''\n if self.__current_section is not None:\n buff = BytesIO()\n buff._dirty = False\n for action_return in self.__current_section:\n for title, ret_data in action_return.items():\n if isinstance(ret_data, file):\n self.out.put(ret_data.name, indent=4)\n self.__arch.add(ret_data.name, arcname=ret_data.name)\n else:\n buff.write(salt.utils.stringutils.to_bytes(title + '\\n'))\n buff.write(salt.utils.stringutils.to_bytes(('-' * len(title)) + '\\n\\n'))\n buff.write(salt.utils.stringutils.to_bytes(ret_data))\n buff.write(salt.utils.stringutils.to_bytes('\\n\\n\\n'))\n buff._dirty = True\n if buff._dirty:\n buff.seek(0)\n tar_info = tarfile.TarInfo(name=\"{}/{}\".format(self.__default_root, self.__current_section_name))\n if not hasattr(buff, 'getbuffer'): # Py2's BytesIO is older\n buff.getbuffer = buff.getvalue\n tar_info.size = len(buff.getbuffer())\n self.__arch.addfile(tarinfo=tar_info, fileobj=buff)\n",
"def discard_current(self, name=None):\n '''\n Discard current section\n :return:\n '''\n self.__current_section = []\n self.__current_section_name = name\n"
] |
class SupportDataCollector(object):
'''
Data collector. It behaves just like another outputter,
except it grabs the data to the archive files.
'''
def __init__(self, name, output):
'''
constructor of the data collector
:param name:
:param path:
:param format:
'''
self.archive_path = name
self.__default_outputter = output
self.__format = format
self.__arch = None
self.__current_section = None
self.__current_section_name = None
self.__default_root = time.strftime('%Y.%m.%d-%H.%M.%S-snapshot')
self.out = salt.cli.support.console.MessagesOutput()
def open(self):
'''
Opens archive.
:return:
'''
if self.__arch is not None:
raise salt.exceptions.SaltException('Archive already opened.')
self.__arch = tarfile.TarFile.bz2open(self.archive_path, 'w')
def close(self):
'''
Closes the archive.
:return:
'''
if self.__arch is None:
raise salt.exceptions.SaltException('Archive already closed')
self._flush_content()
self.__arch.close()
self.__arch = None
def _flush_content(self):
'''
Flush content to the archive
:return:
'''
if self.__current_section is not None:
buff = BytesIO()
buff._dirty = False
for action_return in self.__current_section:
for title, ret_data in action_return.items():
if isinstance(ret_data, file):
self.out.put(ret_data.name, indent=4)
self.__arch.add(ret_data.name, arcname=ret_data.name)
else:
buff.write(salt.utils.stringutils.to_bytes(title + '\n'))
buff.write(salt.utils.stringutils.to_bytes(('-' * len(title)) + '\n\n'))
buff.write(salt.utils.stringutils.to_bytes(ret_data))
buff.write(salt.utils.stringutils.to_bytes('\n\n\n'))
buff._dirty = True
if buff._dirty:
buff.seek(0)
tar_info = tarfile.TarInfo(name="{}/{}".format(self.__default_root, self.__current_section_name))
if not hasattr(buff, 'getbuffer'): # Py2's BytesIO is older
buff.getbuffer = buff.getvalue
tar_info.size = len(buff.getbuffer())
self.__arch.addfile(tarinfo=tar_info, fileobj=buff)
def discard_current(self, name=None):
'''
Discard current section
:return:
'''
self.__current_section = []
self.__current_section_name = name
def _printout(self, data, output):
'''
Use salt outputter to printout content.
:return:
'''
opts = {'extension_modules': '', 'color': False}
try:
printout = salt.output.get_printout(output, opts)(data)
if printout is not None:
return printout.rstrip()
except (KeyError, AttributeError, TypeError) as err:
log.debug(err, exc_info=True)
try:
printout = salt.output.get_printout('nested', opts)(data)
if printout is not None:
return printout.rstrip()
except (KeyError, AttributeError, TypeError) as err:
log.debug(err, exc_info=True)
printout = salt.output.get_printout('raw', opts)(data)
if printout is not None:
return printout.rstrip()
return salt.output.try_printout(data, output, opts)
def write(self, title, data, output=None):
'''
Add a data to the current opened section.
:return:
'''
if not isinstance(data, (dict, list, tuple)):
data = {'raw-content': str(data)}
output = output or self.__default_outputter
if output != 'null':
try:
if isinstance(data, dict) and 'return' in data:
data = data['return']
content = self._printout(data, output)
except Exception: # Fall-back to just raw YAML
content = None
else:
content = None
if content is None:
data = json.loads(json.dumps(data))
if isinstance(data, dict) and data.get('return'):
data = data.get('return')
content = yaml.safe_dump(data, default_flow_style=False, indent=4)
self.__current_section.append({title: content})
def link(self, title, path):
'''
Add a static file on the file system.
:param title:
:param path:
:return:
'''
# The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
# pylint: disable=W8470
if not isinstance(path, file):
path = salt.utils.files.fopen(path)
self.__current_section.append({title: path})
|
saltstack/salt
|
salt/cli/support/collector.py
|
SupportDataCollector._printout
|
python
|
def _printout(self, data, output):
'''
Use salt outputter to printout content.
:return:
'''
opts = {'extension_modules': '', 'color': False}
try:
printout = salt.output.get_printout(output, opts)(data)
if printout is not None:
return printout.rstrip()
except (KeyError, AttributeError, TypeError) as err:
log.debug(err, exc_info=True)
try:
printout = salt.output.get_printout('nested', opts)(data)
if printout is not None:
return printout.rstrip()
except (KeyError, AttributeError, TypeError) as err:
log.debug(err, exc_info=True)
printout = salt.output.get_printout('raw', opts)(data)
if printout is not None:
return printout.rstrip()
return salt.output.try_printout(data, output, opts)
|
Use salt outputter to printout content.
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L128-L151
| null |
class SupportDataCollector(object):
'''
Data collector. It behaves just like another outputter,
except it grabs the data to the archive files.
'''
def __init__(self, name, output):
'''
constructor of the data collector
:param name:
:param path:
:param format:
'''
self.archive_path = name
self.__default_outputter = output
self.__format = format
self.__arch = None
self.__current_section = None
self.__current_section_name = None
self.__default_root = time.strftime('%Y.%m.%d-%H.%M.%S-snapshot')
self.out = salt.cli.support.console.MessagesOutput()
def open(self):
'''
Opens archive.
:return:
'''
if self.__arch is not None:
raise salt.exceptions.SaltException('Archive already opened.')
self.__arch = tarfile.TarFile.bz2open(self.archive_path, 'w')
def close(self):
'''
Closes the archive.
:return:
'''
if self.__arch is None:
raise salt.exceptions.SaltException('Archive already closed')
self._flush_content()
self.__arch.close()
self.__arch = None
def _flush_content(self):
'''
Flush content to the archive
:return:
'''
if self.__current_section is not None:
buff = BytesIO()
buff._dirty = False
for action_return in self.__current_section:
for title, ret_data in action_return.items():
if isinstance(ret_data, file):
self.out.put(ret_data.name, indent=4)
self.__arch.add(ret_data.name, arcname=ret_data.name)
else:
buff.write(salt.utils.stringutils.to_bytes(title + '\n'))
buff.write(salt.utils.stringutils.to_bytes(('-' * len(title)) + '\n\n'))
buff.write(salt.utils.stringutils.to_bytes(ret_data))
buff.write(salt.utils.stringutils.to_bytes('\n\n\n'))
buff._dirty = True
if buff._dirty:
buff.seek(0)
tar_info = tarfile.TarInfo(name="{}/{}".format(self.__default_root, self.__current_section_name))
if not hasattr(buff, 'getbuffer'): # Py2's BytesIO is older
buff.getbuffer = buff.getvalue
tar_info.size = len(buff.getbuffer())
self.__arch.addfile(tarinfo=tar_info, fileobj=buff)
def add(self, name):
'''
Start a new section.
:param name:
:return:
'''
if self.__current_section:
self._flush_content()
self.discard_current(name)
def discard_current(self, name=None):
'''
Discard current section
:return:
'''
self.__current_section = []
self.__current_section_name = name
def write(self, title, data, output=None):
'''
Add a data to the current opened section.
:return:
'''
if not isinstance(data, (dict, list, tuple)):
data = {'raw-content': str(data)}
output = output or self.__default_outputter
if output != 'null':
try:
if isinstance(data, dict) and 'return' in data:
data = data['return']
content = self._printout(data, output)
except Exception: # Fall-back to just raw YAML
content = None
else:
content = None
if content is None:
data = json.loads(json.dumps(data))
if isinstance(data, dict) and data.get('return'):
data = data.get('return')
content = yaml.safe_dump(data, default_flow_style=False, indent=4)
self.__current_section.append({title: content})
def link(self, title, path):
'''
Add a static file on the file system.
:param title:
:param path:
:return:
'''
# The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
# pylint: disable=W8470
if not isinstance(path, file):
path = salt.utils.files.fopen(path)
self.__current_section.append({title: path})
|
saltstack/salt
|
salt/cli/support/collector.py
|
SupportDataCollector.write
|
python
|
def write(self, title, data, output=None):
'''
Add a data to the current opened section.
:return:
'''
if not isinstance(data, (dict, list, tuple)):
data = {'raw-content': str(data)}
output = output or self.__default_outputter
if output != 'null':
try:
if isinstance(data, dict) and 'return' in data:
data = data['return']
content = self._printout(data, output)
except Exception: # Fall-back to just raw YAML
content = None
else:
content = None
if content is None:
data = json.loads(json.dumps(data))
if isinstance(data, dict) and data.get('return'):
data = data.get('return')
content = yaml.safe_dump(data, default_flow_style=False, indent=4)
self.__current_section.append({title: content})
|
Add a data to the current opened section.
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L153-L178
| null |
class SupportDataCollector(object):
'''
Data collector. It behaves just like another outputter,
except it grabs the data to the archive files.
'''
def __init__(self, name, output):
'''
constructor of the data collector
:param name:
:param path:
:param format:
'''
self.archive_path = name
self.__default_outputter = output
self.__format = format
self.__arch = None
self.__current_section = None
self.__current_section_name = None
self.__default_root = time.strftime('%Y.%m.%d-%H.%M.%S-snapshot')
self.out = salt.cli.support.console.MessagesOutput()
def open(self):
'''
Opens archive.
:return:
'''
if self.__arch is not None:
raise salt.exceptions.SaltException('Archive already opened.')
self.__arch = tarfile.TarFile.bz2open(self.archive_path, 'w')
def close(self):
'''
Closes the archive.
:return:
'''
if self.__arch is None:
raise salt.exceptions.SaltException('Archive already closed')
self._flush_content()
self.__arch.close()
self.__arch = None
def _flush_content(self):
'''
Flush content to the archive
:return:
'''
if self.__current_section is not None:
buff = BytesIO()
buff._dirty = False
for action_return in self.__current_section:
for title, ret_data in action_return.items():
if isinstance(ret_data, file):
self.out.put(ret_data.name, indent=4)
self.__arch.add(ret_data.name, arcname=ret_data.name)
else:
buff.write(salt.utils.stringutils.to_bytes(title + '\n'))
buff.write(salt.utils.stringutils.to_bytes(('-' * len(title)) + '\n\n'))
buff.write(salt.utils.stringutils.to_bytes(ret_data))
buff.write(salt.utils.stringutils.to_bytes('\n\n\n'))
buff._dirty = True
if buff._dirty:
buff.seek(0)
tar_info = tarfile.TarInfo(name="{}/{}".format(self.__default_root, self.__current_section_name))
if not hasattr(buff, 'getbuffer'): # Py2's BytesIO is older
buff.getbuffer = buff.getvalue
tar_info.size = len(buff.getbuffer())
self.__arch.addfile(tarinfo=tar_info, fileobj=buff)
def add(self, name):
'''
Start a new section.
:param name:
:return:
'''
if self.__current_section:
self._flush_content()
self.discard_current(name)
def discard_current(self, name=None):
'''
Discard current section
:return:
'''
self.__current_section = []
self.__current_section_name = name
def _printout(self, data, output):
'''
Use salt outputter to printout content.
:return:
'''
opts = {'extension_modules': '', 'color': False}
try:
printout = salt.output.get_printout(output, opts)(data)
if printout is not None:
return printout.rstrip()
except (KeyError, AttributeError, TypeError) as err:
log.debug(err, exc_info=True)
try:
printout = salt.output.get_printout('nested', opts)(data)
if printout is not None:
return printout.rstrip()
except (KeyError, AttributeError, TypeError) as err:
log.debug(err, exc_info=True)
printout = salt.output.get_printout('raw', opts)(data)
if printout is not None:
return printout.rstrip()
return salt.output.try_printout(data, output, opts)
def link(self, title, path):
'''
Add a static file on the file system.
:param title:
:param path:
:return:
'''
# The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
# pylint: disable=W8470
if not isinstance(path, file):
path = salt.utils.files.fopen(path)
self.__current_section.append({title: path})
|
saltstack/salt
|
salt/cli/support/collector.py
|
SupportDataCollector.link
|
python
|
def link(self, title, path):
'''
Add a static file on the file system.
:param title:
:param path:
:return:
'''
# The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
# pylint: disable=W8470
if not isinstance(path, file):
path = salt.utils.files.fopen(path)
self.__current_section.append({title: path})
|
Add a static file on the file system.
:param title:
:param path:
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L180-L192
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n"
] |
class SupportDataCollector(object):
'''
Data collector. It behaves just like another outputter,
except it grabs the data to the archive files.
'''
def __init__(self, name, output):
'''
constructor of the data collector
:param name:
:param path:
:param format:
'''
self.archive_path = name
self.__default_outputter = output
self.__format = format
self.__arch = None
self.__current_section = None
self.__current_section_name = None
self.__default_root = time.strftime('%Y.%m.%d-%H.%M.%S-snapshot')
self.out = salt.cli.support.console.MessagesOutput()
def open(self):
'''
Opens archive.
:return:
'''
if self.__arch is not None:
raise salt.exceptions.SaltException('Archive already opened.')
self.__arch = tarfile.TarFile.bz2open(self.archive_path, 'w')
def close(self):
'''
Closes the archive.
:return:
'''
if self.__arch is None:
raise salt.exceptions.SaltException('Archive already closed')
self._flush_content()
self.__arch.close()
self.__arch = None
def _flush_content(self):
'''
Flush content to the archive
:return:
'''
if self.__current_section is not None:
buff = BytesIO()
buff._dirty = False
for action_return in self.__current_section:
for title, ret_data in action_return.items():
if isinstance(ret_data, file):
self.out.put(ret_data.name, indent=4)
self.__arch.add(ret_data.name, arcname=ret_data.name)
else:
buff.write(salt.utils.stringutils.to_bytes(title + '\n'))
buff.write(salt.utils.stringutils.to_bytes(('-' * len(title)) + '\n\n'))
buff.write(salt.utils.stringutils.to_bytes(ret_data))
buff.write(salt.utils.stringutils.to_bytes('\n\n\n'))
buff._dirty = True
if buff._dirty:
buff.seek(0)
tar_info = tarfile.TarInfo(name="{}/{}".format(self.__default_root, self.__current_section_name))
if not hasattr(buff, 'getbuffer'): # Py2's BytesIO is older
buff.getbuffer = buff.getvalue
tar_info.size = len(buff.getbuffer())
self.__arch.addfile(tarinfo=tar_info, fileobj=buff)
def add(self, name):
'''
Start a new section.
:param name:
:return:
'''
if self.__current_section:
self._flush_content()
self.discard_current(name)
def discard_current(self, name=None):
'''
Discard current section
:return:
'''
self.__current_section = []
self.__current_section_name = name
def _printout(self, data, output):
'''
Use salt outputter to printout content.
:return:
'''
opts = {'extension_modules': '', 'color': False}
try:
printout = salt.output.get_printout(output, opts)(data)
if printout is not None:
return printout.rstrip()
except (KeyError, AttributeError, TypeError) as err:
log.debug(err, exc_info=True)
try:
printout = salt.output.get_printout('nested', opts)(data)
if printout is not None:
return printout.rstrip()
except (KeyError, AttributeError, TypeError) as err:
log.debug(err, exc_info=True)
printout = salt.output.get_printout('raw', opts)(data)
if printout is not None:
return printout.rstrip()
return salt.output.try_printout(data, output, opts)
def write(self, title, data, output=None):
'''
Add a data to the current opened section.
:return:
'''
if not isinstance(data, (dict, list, tuple)):
data = {'raw-content': str(data)}
output = output or self.__default_outputter
if output != 'null':
try:
if isinstance(data, dict) and 'return' in data:
data = data['return']
content = self._printout(data, output)
except Exception: # Fall-back to just raw YAML
content = None
else:
content = None
if content is None:
data = json.loads(json.dumps(data))
if isinstance(data, dict) and data.get('return'):
data = data.get('return')
content = yaml.safe_dump(data, default_flow_style=False, indent=4)
self.__current_section.append({title: content})
|
saltstack/salt
|
salt/cli/support/collector.py
|
SaltSupport._setup_fun_config
|
python
|
def _setup_fun_config(self, fun_conf):
'''
Setup function configuration.
:param conf:
:return:
'''
conf = copy.deepcopy(self.config)
conf['file_client'] = 'local'
conf['fun'] = ''
conf['arg'] = []
conf['kwarg'] = {}
conf['cache_jobs'] = False
conf['print_metadata'] = False
conf.update(fun_conf)
conf['fun'] = conf['fun'].split(':')[-1] # Discard typing prefix
return conf
|
Setup function configuration.
:param conf:
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L203-L220
| null |
class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
'''
Class to run Salt Support subsystem.
'''
RUNNER_TYPE = 'run'
CALL_TYPE = 'call'
def _get_runner(self, conf):
'''
Get & setup runner.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_runner', None):
self._runner = salt.cli.support.localrunner.LocalRunner(conf)
else:
self._runner.opts = conf
return self._runner
def _get_caller(self, conf):
'''
Get & setup caller from the factory.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_caller', None):
self._caller = salt.cli.caller.Caller.factory(conf)
else:
self._caller.opts = conf
return self._caller
def _local_call(self, call_conf):
'''
Execute local call
'''
try:
ret = self._get_caller(call_conf).call()
except SystemExit:
ret = 'Data is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
self.out.error(ret)
return ret
def _local_run(self, run_conf):
'''
Execute local runner
:param run_conf:
:return:
'''
try:
ret = self._get_runner(run_conf).run()
except SystemExit:
ret = 'Runner is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
return ret
def _internal_function_call(self, call_conf):
'''
Call internal function.
:param call_conf:
:return:
'''
def stub(*args, **kwargs):
message = 'Function {} is not available'.format(call_conf['fun'])
self.out.error(message)
log.debug(
'Attempt to run "%s" with %s arguments and %s parameters.',
call_conf['fun'], call_conf['arg'], call_conf['kwargs']
)
return message
return getattr(salt.cli.support.intfunc,
call_conf['fun'], stub)(self.collector,
*call_conf['arg'],
**call_conf['kwargs'])
def _get_action(self, action_meta):
'''
Parse action and turn into a calling point.
:param action_meta:
:return:
'''
conf = {
'fun': list(action_meta.keys())[0],
'arg': [],
'kwargs': {},
}
if not len(conf['fun'].split('.')) - 1:
conf['salt.int.intfunc'] = True
action_meta = action_meta[conf['fun']]
info = action_meta.get('info', 'Action for {}'.format(conf['fun']))
for arg in action_meta.get('args') or []:
if not isinstance(arg, dict):
conf['arg'].append(arg)
else:
conf['kwargs'].update(arg)
return info, action_meta.get('output'), conf
def collect_internal_data(self):
'''
Dumps current running pillars, configuration etc.
:return:
'''
section = 'configuration'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving config', indent=2)
self.collector.write('General Configuration', self.config)
self.out.put('Saving pillars', indent=2)
self.collector.write('Active Pillars', self._local_call({'fun': 'pillar.items'}))
section = 'highstate'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving highstate', indent=2)
self.collector.write('Rendered highstate', self._local_call({'fun': 'state.show_highstate'}))
def _extract_return(self, data):
'''
Extracts return data from the results.
:param data:
:return:
'''
if isinstance(data, dict):
data = data.get('return', data)
return data
def collect_local_data(self, profile=None, profile_source=None):
'''
Collects master system data.
:return:
'''
def call(func, *args, **kwargs):
'''
Call wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_call({'fun': func, 'arg': args, 'kwarg': kwargs}))
def run(func, *args, **kwargs):
'''
Runner wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs}))
scenario = profile_source or salt.cli.support.get_profile(profile or self.config['support_profile'], call, run)
for category_name in scenario:
self.out.put(category_name)
self.collector.add(category_name)
for action in scenario[category_name]:
if not action:
continue
action_name = next(iter(action))
if not isinstance(action[action_name], six.string_types):
info, output, conf = self._get_action(action)
action_type = self._get_action_type(action) # run:<something> for runners
if action_type == self.RUNNER_TYPE:
self.out.put('Running {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_run(conf), output=output)
elif action_type == self.CALL_TYPE:
if not conf.get('salt.int.intfunc'):
self.out.put('Collecting {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_call(conf), output=output)
else:
self.collector.discard_current()
self._internal_function_call(conf)
else:
self.out.error('Unknown action type "{}" for action: {}'.format(action_type, action))
else:
# TODO: This needs to be moved then to the utils.
# But the code is not yet there (other PRs)
self.out.msg('\n'.join(salt.cli.support.console.wrap(action[action_name])), ident=2)
def _get_action_type(self, action):
'''
Get action type.
:param action:
:return:
'''
action_name = next(iter(action or {'': None}))
if ':' not in action_name:
action_name = '{}:{}'.format(self.CALL_TYPE, action_name)
return action_name.split(':')[0] or None
def _cleanup(self):
'''
Cleanup if crash/exception
:return:
'''
if (hasattr(self, 'config')
and self.config.get('support_archive')
and os.path.exists(self.config['support_archive'])):
self.out.warning('Terminated earlier, cleaning up')
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while cleaning up.'.format(err))
def _check_existing_archive(self):
'''
Check if archive exists or not. If exists and --force was not specified,
bail out. Otherwise remove it and move on.
:return:
'''
if os.path.exists(self.config['support_archive']):
if self.config['support_archive_force_overwrite']:
self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive']))
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while trying to overwrite existing archive.'.format(err))
ret = True
else:
self.out.warning('File {} already exists.'.format(self.config['support_archive']))
ret = False
else:
ret = True
return ret
def run(self):
exit_code = salt.defaults.exitcodes.EX_OK
self.out = salt.cli.support.console.MessagesOutput()
try:
self.parse_args()
except (Exception, SystemExit) as ex:
if not isinstance(ex, exceptions.SystemExit):
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
elif isinstance(ex, exceptions.SystemExit):
exit_code = ex.code
else:
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
else:
if self.config['log_level'] not in ('quiet', ):
self.setup_logfile_logger()
salt.utils.verify.verify_log(self.config)
salt.cli.support.log = log # Pass update logger so trace is available
if self.config['support_profile_list']:
self.out.put('List of available profiles:')
for idx, profile in enumerate(salt.cli.support.get_profiles(self.config)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, profile)
exit_code = salt.defaults.exitcodes.EX_OK
elif self.config['support_show_units']:
self.out.put('List of available units:')
for idx, unit in enumerate(self.find_existing_configs(None)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, unit)
exit_code = salt.defaults.exitcodes.EX_OK
else:
if not self.config['support_profile']:
self.print_help()
raise SystemExit()
if self._check_existing_archive():
try:
self.collector = SupportDataCollector(self.config['support_archive'],
output=self.config['support_output_format'])
except Exception as ex:
self.out.error(ex)
exit_code = salt.defaults.exitcodes.EX_GENERIC
log.debug(ex, exc_info=True)
else:
try:
self.collector.open()
self.collect_local_data()
self.collect_internal_data()
self.collector.close()
archive_path = self.collector.archive_path
self.out.highlight('\nSupport data has been written to "{}" file.\n',
archive_path, _main='YELLOW')
except Exception as ex:
self.out.error(ex)
log.debug(ex, exc_info=True)
exit_code = salt.defaults.exitcodes.EX_SOFTWARE
if exit_code:
self._cleanup()
sys.exit(exit_code)
|
saltstack/salt
|
salt/cli/support/collector.py
|
SaltSupport._get_runner
|
python
|
def _get_runner(self, conf):
'''
Get & setup runner.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_runner', None):
self._runner = salt.cli.support.localrunner.LocalRunner(conf)
else:
self._runner.opts = conf
return self._runner
|
Get & setup runner.
:param conf:
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L222-L234
| null |
class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
'''
Class to run Salt Support subsystem.
'''
RUNNER_TYPE = 'run'
CALL_TYPE = 'call'
def _setup_fun_config(self, fun_conf):
'''
Setup function configuration.
:param conf:
:return:
'''
conf = copy.deepcopy(self.config)
conf['file_client'] = 'local'
conf['fun'] = ''
conf['arg'] = []
conf['kwarg'] = {}
conf['cache_jobs'] = False
conf['print_metadata'] = False
conf.update(fun_conf)
conf['fun'] = conf['fun'].split(':')[-1] # Discard typing prefix
return conf
def _get_caller(self, conf):
'''
Get & setup caller from the factory.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_caller', None):
self._caller = salt.cli.caller.Caller.factory(conf)
else:
self._caller.opts = conf
return self._caller
def _local_call(self, call_conf):
'''
Execute local call
'''
try:
ret = self._get_caller(call_conf).call()
except SystemExit:
ret = 'Data is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
self.out.error(ret)
return ret
def _local_run(self, run_conf):
'''
Execute local runner
:param run_conf:
:return:
'''
try:
ret = self._get_runner(run_conf).run()
except SystemExit:
ret = 'Runner is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
return ret
def _internal_function_call(self, call_conf):
'''
Call internal function.
:param call_conf:
:return:
'''
def stub(*args, **kwargs):
message = 'Function {} is not available'.format(call_conf['fun'])
self.out.error(message)
log.debug(
'Attempt to run "%s" with %s arguments and %s parameters.',
call_conf['fun'], call_conf['arg'], call_conf['kwargs']
)
return message
return getattr(salt.cli.support.intfunc,
call_conf['fun'], stub)(self.collector,
*call_conf['arg'],
**call_conf['kwargs'])
def _get_action(self, action_meta):
'''
Parse action and turn into a calling point.
:param action_meta:
:return:
'''
conf = {
'fun': list(action_meta.keys())[0],
'arg': [],
'kwargs': {},
}
if not len(conf['fun'].split('.')) - 1:
conf['salt.int.intfunc'] = True
action_meta = action_meta[conf['fun']]
info = action_meta.get('info', 'Action for {}'.format(conf['fun']))
for arg in action_meta.get('args') or []:
if not isinstance(arg, dict):
conf['arg'].append(arg)
else:
conf['kwargs'].update(arg)
return info, action_meta.get('output'), conf
def collect_internal_data(self):
'''
Dumps current running pillars, configuration etc.
:return:
'''
section = 'configuration'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving config', indent=2)
self.collector.write('General Configuration', self.config)
self.out.put('Saving pillars', indent=2)
self.collector.write('Active Pillars', self._local_call({'fun': 'pillar.items'}))
section = 'highstate'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving highstate', indent=2)
self.collector.write('Rendered highstate', self._local_call({'fun': 'state.show_highstate'}))
def _extract_return(self, data):
'''
Extracts return data from the results.
:param data:
:return:
'''
if isinstance(data, dict):
data = data.get('return', data)
return data
def collect_local_data(self, profile=None, profile_source=None):
'''
Collects master system data.
:return:
'''
def call(func, *args, **kwargs):
'''
Call wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_call({'fun': func, 'arg': args, 'kwarg': kwargs}))
def run(func, *args, **kwargs):
'''
Runner wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs}))
scenario = profile_source or salt.cli.support.get_profile(profile or self.config['support_profile'], call, run)
for category_name in scenario:
self.out.put(category_name)
self.collector.add(category_name)
for action in scenario[category_name]:
if not action:
continue
action_name = next(iter(action))
if not isinstance(action[action_name], six.string_types):
info, output, conf = self._get_action(action)
action_type = self._get_action_type(action) # run:<something> for runners
if action_type == self.RUNNER_TYPE:
self.out.put('Running {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_run(conf), output=output)
elif action_type == self.CALL_TYPE:
if not conf.get('salt.int.intfunc'):
self.out.put('Collecting {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_call(conf), output=output)
else:
self.collector.discard_current()
self._internal_function_call(conf)
else:
self.out.error('Unknown action type "{}" for action: {}'.format(action_type, action))
else:
# TODO: This needs to be moved then to the utils.
# But the code is not yet there (other PRs)
self.out.msg('\n'.join(salt.cli.support.console.wrap(action[action_name])), ident=2)
def _get_action_type(self, action):
'''
Get action type.
:param action:
:return:
'''
action_name = next(iter(action or {'': None}))
if ':' not in action_name:
action_name = '{}:{}'.format(self.CALL_TYPE, action_name)
return action_name.split(':')[0] or None
def _cleanup(self):
'''
Cleanup if crash/exception
:return:
'''
if (hasattr(self, 'config')
and self.config.get('support_archive')
and os.path.exists(self.config['support_archive'])):
self.out.warning('Terminated earlier, cleaning up')
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while cleaning up.'.format(err))
def _check_existing_archive(self):
'''
Check if archive exists or not. If exists and --force was not specified,
bail out. Otherwise remove it and move on.
:return:
'''
if os.path.exists(self.config['support_archive']):
if self.config['support_archive_force_overwrite']:
self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive']))
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while trying to overwrite existing archive.'.format(err))
ret = True
else:
self.out.warning('File {} already exists.'.format(self.config['support_archive']))
ret = False
else:
ret = True
return ret
def run(self):
exit_code = salt.defaults.exitcodes.EX_OK
self.out = salt.cli.support.console.MessagesOutput()
try:
self.parse_args()
except (Exception, SystemExit) as ex:
if not isinstance(ex, exceptions.SystemExit):
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
elif isinstance(ex, exceptions.SystemExit):
exit_code = ex.code
else:
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
else:
if self.config['log_level'] not in ('quiet', ):
self.setup_logfile_logger()
salt.utils.verify.verify_log(self.config)
salt.cli.support.log = log # Pass update logger so trace is available
if self.config['support_profile_list']:
self.out.put('List of available profiles:')
for idx, profile in enumerate(salt.cli.support.get_profiles(self.config)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, profile)
exit_code = salt.defaults.exitcodes.EX_OK
elif self.config['support_show_units']:
self.out.put('List of available units:')
for idx, unit in enumerate(self.find_existing_configs(None)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, unit)
exit_code = salt.defaults.exitcodes.EX_OK
else:
if not self.config['support_profile']:
self.print_help()
raise SystemExit()
if self._check_existing_archive():
try:
self.collector = SupportDataCollector(self.config['support_archive'],
output=self.config['support_output_format'])
except Exception as ex:
self.out.error(ex)
exit_code = salt.defaults.exitcodes.EX_GENERIC
log.debug(ex, exc_info=True)
else:
try:
self.collector.open()
self.collect_local_data()
self.collect_internal_data()
self.collector.close()
archive_path = self.collector.archive_path
self.out.highlight('\nSupport data has been written to "{}" file.\n',
archive_path, _main='YELLOW')
except Exception as ex:
self.out.error(ex)
log.debug(ex, exc_info=True)
exit_code = salt.defaults.exitcodes.EX_SOFTWARE
if exit_code:
self._cleanup()
sys.exit(exit_code)
|
saltstack/salt
|
salt/cli/support/collector.py
|
SaltSupport._get_caller
|
python
|
def _get_caller(self, conf):
'''
Get & setup caller from the factory.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_caller', None):
self._caller = salt.cli.caller.Caller.factory(conf)
else:
self._caller.opts = conf
return self._caller
|
Get & setup caller from the factory.
:param conf:
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L236-L248
| null |
class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
'''
Class to run Salt Support subsystem.
'''
RUNNER_TYPE = 'run'
CALL_TYPE = 'call'
def _setup_fun_config(self, fun_conf):
'''
Setup function configuration.
:param conf:
:return:
'''
conf = copy.deepcopy(self.config)
conf['file_client'] = 'local'
conf['fun'] = ''
conf['arg'] = []
conf['kwarg'] = {}
conf['cache_jobs'] = False
conf['print_metadata'] = False
conf.update(fun_conf)
conf['fun'] = conf['fun'].split(':')[-1] # Discard typing prefix
return conf
def _get_runner(self, conf):
'''
Get & setup runner.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_runner', None):
self._runner = salt.cli.support.localrunner.LocalRunner(conf)
else:
self._runner.opts = conf
return self._runner
def _local_call(self, call_conf):
'''
Execute local call
'''
try:
ret = self._get_caller(call_conf).call()
except SystemExit:
ret = 'Data is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
self.out.error(ret)
return ret
def _local_run(self, run_conf):
'''
Execute local runner
:param run_conf:
:return:
'''
try:
ret = self._get_runner(run_conf).run()
except SystemExit:
ret = 'Runner is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
return ret
def _internal_function_call(self, call_conf):
'''
Call internal function.
:param call_conf:
:return:
'''
def stub(*args, **kwargs):
message = 'Function {} is not available'.format(call_conf['fun'])
self.out.error(message)
log.debug(
'Attempt to run "%s" with %s arguments and %s parameters.',
call_conf['fun'], call_conf['arg'], call_conf['kwargs']
)
return message
return getattr(salt.cli.support.intfunc,
call_conf['fun'], stub)(self.collector,
*call_conf['arg'],
**call_conf['kwargs'])
def _get_action(self, action_meta):
'''
Parse action and turn into a calling point.
:param action_meta:
:return:
'''
conf = {
'fun': list(action_meta.keys())[0],
'arg': [],
'kwargs': {},
}
if not len(conf['fun'].split('.')) - 1:
conf['salt.int.intfunc'] = True
action_meta = action_meta[conf['fun']]
info = action_meta.get('info', 'Action for {}'.format(conf['fun']))
for arg in action_meta.get('args') or []:
if not isinstance(arg, dict):
conf['arg'].append(arg)
else:
conf['kwargs'].update(arg)
return info, action_meta.get('output'), conf
def collect_internal_data(self):
'''
Dumps current running pillars, configuration etc.
:return:
'''
section = 'configuration'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving config', indent=2)
self.collector.write('General Configuration', self.config)
self.out.put('Saving pillars', indent=2)
self.collector.write('Active Pillars', self._local_call({'fun': 'pillar.items'}))
section = 'highstate'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving highstate', indent=2)
self.collector.write('Rendered highstate', self._local_call({'fun': 'state.show_highstate'}))
def _extract_return(self, data):
'''
Extracts return data from the results.
:param data:
:return:
'''
if isinstance(data, dict):
data = data.get('return', data)
return data
def collect_local_data(self, profile=None, profile_source=None):
'''
Collects master system data.
:return:
'''
def call(func, *args, **kwargs):
'''
Call wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_call({'fun': func, 'arg': args, 'kwarg': kwargs}))
def run(func, *args, **kwargs):
'''
Runner wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs}))
scenario = profile_source or salt.cli.support.get_profile(profile or self.config['support_profile'], call, run)
for category_name in scenario:
self.out.put(category_name)
self.collector.add(category_name)
for action in scenario[category_name]:
if not action:
continue
action_name = next(iter(action))
if not isinstance(action[action_name], six.string_types):
info, output, conf = self._get_action(action)
action_type = self._get_action_type(action) # run:<something> for runners
if action_type == self.RUNNER_TYPE:
self.out.put('Running {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_run(conf), output=output)
elif action_type == self.CALL_TYPE:
if not conf.get('salt.int.intfunc'):
self.out.put('Collecting {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_call(conf), output=output)
else:
self.collector.discard_current()
self._internal_function_call(conf)
else:
self.out.error('Unknown action type "{}" for action: {}'.format(action_type, action))
else:
# TODO: This needs to be moved then to the utils.
# But the code is not yet there (other PRs)
self.out.msg('\n'.join(salt.cli.support.console.wrap(action[action_name])), ident=2)
def _get_action_type(self, action):
'''
Get action type.
:param action:
:return:
'''
action_name = next(iter(action or {'': None}))
if ':' not in action_name:
action_name = '{}:{}'.format(self.CALL_TYPE, action_name)
return action_name.split(':')[0] or None
def _cleanup(self):
'''
Cleanup if crash/exception
:return:
'''
if (hasattr(self, 'config')
and self.config.get('support_archive')
and os.path.exists(self.config['support_archive'])):
self.out.warning('Terminated earlier, cleaning up')
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while cleaning up.'.format(err))
def _check_existing_archive(self):
'''
Check if archive exists or not. If exists and --force was not specified,
bail out. Otherwise remove it and move on.
:return:
'''
if os.path.exists(self.config['support_archive']):
if self.config['support_archive_force_overwrite']:
self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive']))
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while trying to overwrite existing archive.'.format(err))
ret = True
else:
self.out.warning('File {} already exists.'.format(self.config['support_archive']))
ret = False
else:
ret = True
return ret
def run(self):
exit_code = salt.defaults.exitcodes.EX_OK
self.out = salt.cli.support.console.MessagesOutput()
try:
self.parse_args()
except (Exception, SystemExit) as ex:
if not isinstance(ex, exceptions.SystemExit):
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
elif isinstance(ex, exceptions.SystemExit):
exit_code = ex.code
else:
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
else:
if self.config['log_level'] not in ('quiet', ):
self.setup_logfile_logger()
salt.utils.verify.verify_log(self.config)
salt.cli.support.log = log # Pass update logger so trace is available
if self.config['support_profile_list']:
self.out.put('List of available profiles:')
for idx, profile in enumerate(salt.cli.support.get_profiles(self.config)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, profile)
exit_code = salt.defaults.exitcodes.EX_OK
elif self.config['support_show_units']:
self.out.put('List of available units:')
for idx, unit in enumerate(self.find_existing_configs(None)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, unit)
exit_code = salt.defaults.exitcodes.EX_OK
else:
if not self.config['support_profile']:
self.print_help()
raise SystemExit()
if self._check_existing_archive():
try:
self.collector = SupportDataCollector(self.config['support_archive'],
output=self.config['support_output_format'])
except Exception as ex:
self.out.error(ex)
exit_code = salt.defaults.exitcodes.EX_GENERIC
log.debug(ex, exc_info=True)
else:
try:
self.collector.open()
self.collect_local_data()
self.collect_internal_data()
self.collector.close()
archive_path = self.collector.archive_path
self.out.highlight('\nSupport data has been written to "{}" file.\n',
archive_path, _main='YELLOW')
except Exception as ex:
self.out.error(ex)
log.debug(ex, exc_info=True)
exit_code = salt.defaults.exitcodes.EX_SOFTWARE
if exit_code:
self._cleanup()
sys.exit(exit_code)
|
saltstack/salt
|
salt/cli/support/collector.py
|
SaltSupport._local_call
|
python
|
def _local_call(self, call_conf):
'''
Execute local call
'''
try:
ret = self._get_caller(call_conf).call()
except SystemExit:
ret = 'Data is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
self.out.error(ret)
return ret
|
Execute local call
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L250-L264
|
[
"def _get_caller(self, conf):\n '''\n Get & setup caller from the factory.\n\n :param conf:\n :return:\n '''\n conf = self._setup_fun_config(copy.deepcopy(conf))\n if not getattr(self, '_caller', None):\n self._caller = salt.cli.caller.Caller.factory(conf)\n else:\n self._caller.opts = conf\n return self._caller\n"
] |
class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
'''
Class to run Salt Support subsystem.
'''
RUNNER_TYPE = 'run'
CALL_TYPE = 'call'
def _setup_fun_config(self, fun_conf):
'''
Setup function configuration.
:param conf:
:return:
'''
conf = copy.deepcopy(self.config)
conf['file_client'] = 'local'
conf['fun'] = ''
conf['arg'] = []
conf['kwarg'] = {}
conf['cache_jobs'] = False
conf['print_metadata'] = False
conf.update(fun_conf)
conf['fun'] = conf['fun'].split(':')[-1] # Discard typing prefix
return conf
def _get_runner(self, conf):
'''
Get & setup runner.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_runner', None):
self._runner = salt.cli.support.localrunner.LocalRunner(conf)
else:
self._runner.opts = conf
return self._runner
def _get_caller(self, conf):
'''
Get & setup caller from the factory.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_caller', None):
self._caller = salt.cli.caller.Caller.factory(conf)
else:
self._caller.opts = conf
return self._caller
def _local_run(self, run_conf):
'''
Execute local runner
:param run_conf:
:return:
'''
try:
ret = self._get_runner(run_conf).run()
except SystemExit:
ret = 'Runner is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
return ret
def _internal_function_call(self, call_conf):
'''
Call internal function.
:param call_conf:
:return:
'''
def stub(*args, **kwargs):
message = 'Function {} is not available'.format(call_conf['fun'])
self.out.error(message)
log.debug(
'Attempt to run "%s" with %s arguments and %s parameters.',
call_conf['fun'], call_conf['arg'], call_conf['kwargs']
)
return message
return getattr(salt.cli.support.intfunc,
call_conf['fun'], stub)(self.collector,
*call_conf['arg'],
**call_conf['kwargs'])
def _get_action(self, action_meta):
'''
Parse action and turn into a calling point.
:param action_meta:
:return:
'''
conf = {
'fun': list(action_meta.keys())[0],
'arg': [],
'kwargs': {},
}
if not len(conf['fun'].split('.')) - 1:
conf['salt.int.intfunc'] = True
action_meta = action_meta[conf['fun']]
info = action_meta.get('info', 'Action for {}'.format(conf['fun']))
for arg in action_meta.get('args') or []:
if not isinstance(arg, dict):
conf['arg'].append(arg)
else:
conf['kwargs'].update(arg)
return info, action_meta.get('output'), conf
def collect_internal_data(self):
'''
Dumps current running pillars, configuration etc.
:return:
'''
section = 'configuration'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving config', indent=2)
self.collector.write('General Configuration', self.config)
self.out.put('Saving pillars', indent=2)
self.collector.write('Active Pillars', self._local_call({'fun': 'pillar.items'}))
section = 'highstate'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving highstate', indent=2)
self.collector.write('Rendered highstate', self._local_call({'fun': 'state.show_highstate'}))
def _extract_return(self, data):
'''
Extracts return data from the results.
:param data:
:return:
'''
if isinstance(data, dict):
data = data.get('return', data)
return data
def collect_local_data(self, profile=None, profile_source=None):
'''
Collects master system data.
:return:
'''
def call(func, *args, **kwargs):
'''
Call wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_call({'fun': func, 'arg': args, 'kwarg': kwargs}))
def run(func, *args, **kwargs):
'''
Runner wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs}))
scenario = profile_source or salt.cli.support.get_profile(profile or self.config['support_profile'], call, run)
for category_name in scenario:
self.out.put(category_name)
self.collector.add(category_name)
for action in scenario[category_name]:
if not action:
continue
action_name = next(iter(action))
if not isinstance(action[action_name], six.string_types):
info, output, conf = self._get_action(action)
action_type = self._get_action_type(action) # run:<something> for runners
if action_type == self.RUNNER_TYPE:
self.out.put('Running {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_run(conf), output=output)
elif action_type == self.CALL_TYPE:
if not conf.get('salt.int.intfunc'):
self.out.put('Collecting {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_call(conf), output=output)
else:
self.collector.discard_current()
self._internal_function_call(conf)
else:
self.out.error('Unknown action type "{}" for action: {}'.format(action_type, action))
else:
# TODO: This needs to be moved then to the utils.
# But the code is not yet there (other PRs)
self.out.msg('\n'.join(salt.cli.support.console.wrap(action[action_name])), ident=2)
def _get_action_type(self, action):
'''
Get action type.
:param action:
:return:
'''
action_name = next(iter(action or {'': None}))
if ':' not in action_name:
action_name = '{}:{}'.format(self.CALL_TYPE, action_name)
return action_name.split(':')[0] or None
def _cleanup(self):
'''
Cleanup if crash/exception
:return:
'''
if (hasattr(self, 'config')
and self.config.get('support_archive')
and os.path.exists(self.config['support_archive'])):
self.out.warning('Terminated earlier, cleaning up')
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while cleaning up.'.format(err))
def _check_existing_archive(self):
'''
Check if archive exists or not. If exists and --force was not specified,
bail out. Otherwise remove it and move on.
:return:
'''
if os.path.exists(self.config['support_archive']):
if self.config['support_archive_force_overwrite']:
self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive']))
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while trying to overwrite existing archive.'.format(err))
ret = True
else:
self.out.warning('File {} already exists.'.format(self.config['support_archive']))
ret = False
else:
ret = True
return ret
def run(self):
exit_code = salt.defaults.exitcodes.EX_OK
self.out = salt.cli.support.console.MessagesOutput()
try:
self.parse_args()
except (Exception, SystemExit) as ex:
if not isinstance(ex, exceptions.SystemExit):
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
elif isinstance(ex, exceptions.SystemExit):
exit_code = ex.code
else:
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
else:
if self.config['log_level'] not in ('quiet', ):
self.setup_logfile_logger()
salt.utils.verify.verify_log(self.config)
salt.cli.support.log = log # Pass update logger so trace is available
if self.config['support_profile_list']:
self.out.put('List of available profiles:')
for idx, profile in enumerate(salt.cli.support.get_profiles(self.config)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, profile)
exit_code = salt.defaults.exitcodes.EX_OK
elif self.config['support_show_units']:
self.out.put('List of available units:')
for idx, unit in enumerate(self.find_existing_configs(None)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, unit)
exit_code = salt.defaults.exitcodes.EX_OK
else:
if not self.config['support_profile']:
self.print_help()
raise SystemExit()
if self._check_existing_archive():
try:
self.collector = SupportDataCollector(self.config['support_archive'],
output=self.config['support_output_format'])
except Exception as ex:
self.out.error(ex)
exit_code = salt.defaults.exitcodes.EX_GENERIC
log.debug(ex, exc_info=True)
else:
try:
self.collector.open()
self.collect_local_data()
self.collect_internal_data()
self.collector.close()
archive_path = self.collector.archive_path
self.out.highlight('\nSupport data has been written to "{}" file.\n',
archive_path, _main='YELLOW')
except Exception as ex:
self.out.error(ex)
log.debug(ex, exc_info=True)
exit_code = salt.defaults.exitcodes.EX_SOFTWARE
if exit_code:
self._cleanup()
sys.exit(exit_code)
|
saltstack/salt
|
salt/cli/support/collector.py
|
SaltSupport._local_run
|
python
|
def _local_run(self, run_conf):
'''
Execute local runner
:param run_conf:
:return:
'''
try:
ret = self._get_runner(run_conf).run()
except SystemExit:
ret = 'Runner is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
return ret
|
Execute local runner
:param run_conf:
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L266-L282
| null |
class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
'''
Class to run Salt Support subsystem.
'''
RUNNER_TYPE = 'run'
CALL_TYPE = 'call'
def _setup_fun_config(self, fun_conf):
'''
Setup function configuration.
:param conf:
:return:
'''
conf = copy.deepcopy(self.config)
conf['file_client'] = 'local'
conf['fun'] = ''
conf['arg'] = []
conf['kwarg'] = {}
conf['cache_jobs'] = False
conf['print_metadata'] = False
conf.update(fun_conf)
conf['fun'] = conf['fun'].split(':')[-1] # Discard typing prefix
return conf
def _get_runner(self, conf):
'''
Get & setup runner.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_runner', None):
self._runner = salt.cli.support.localrunner.LocalRunner(conf)
else:
self._runner.opts = conf
return self._runner
def _get_caller(self, conf):
'''
Get & setup caller from the factory.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_caller', None):
self._caller = salt.cli.caller.Caller.factory(conf)
else:
self._caller.opts = conf
return self._caller
def _local_call(self, call_conf):
'''
Execute local call
'''
try:
ret = self._get_caller(call_conf).call()
except SystemExit:
ret = 'Data is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
self.out.error(ret)
return ret
def _internal_function_call(self, call_conf):
'''
Call internal function.
:param call_conf:
:return:
'''
def stub(*args, **kwargs):
message = 'Function {} is not available'.format(call_conf['fun'])
self.out.error(message)
log.debug(
'Attempt to run "%s" with %s arguments and %s parameters.',
call_conf['fun'], call_conf['arg'], call_conf['kwargs']
)
return message
return getattr(salt.cli.support.intfunc,
call_conf['fun'], stub)(self.collector,
*call_conf['arg'],
**call_conf['kwargs'])
def _get_action(self, action_meta):
'''
Parse action and turn into a calling point.
:param action_meta:
:return:
'''
conf = {
'fun': list(action_meta.keys())[0],
'arg': [],
'kwargs': {},
}
if not len(conf['fun'].split('.')) - 1:
conf['salt.int.intfunc'] = True
action_meta = action_meta[conf['fun']]
info = action_meta.get('info', 'Action for {}'.format(conf['fun']))
for arg in action_meta.get('args') or []:
if not isinstance(arg, dict):
conf['arg'].append(arg)
else:
conf['kwargs'].update(arg)
return info, action_meta.get('output'), conf
def collect_internal_data(self):
'''
Dumps current running pillars, configuration etc.
:return:
'''
section = 'configuration'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving config', indent=2)
self.collector.write('General Configuration', self.config)
self.out.put('Saving pillars', indent=2)
self.collector.write('Active Pillars', self._local_call({'fun': 'pillar.items'}))
section = 'highstate'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving highstate', indent=2)
self.collector.write('Rendered highstate', self._local_call({'fun': 'state.show_highstate'}))
def _extract_return(self, data):
'''
Extracts return data from the results.
:param data:
:return:
'''
if isinstance(data, dict):
data = data.get('return', data)
return data
def collect_local_data(self, profile=None, profile_source=None):
'''
Collects master system data.
:return:
'''
def call(func, *args, **kwargs):
'''
Call wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_call({'fun': func, 'arg': args, 'kwarg': kwargs}))
def run(func, *args, **kwargs):
'''
Runner wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs}))
scenario = profile_source or salt.cli.support.get_profile(profile or self.config['support_profile'], call, run)
for category_name in scenario:
self.out.put(category_name)
self.collector.add(category_name)
for action in scenario[category_name]:
if not action:
continue
action_name = next(iter(action))
if not isinstance(action[action_name], six.string_types):
info, output, conf = self._get_action(action)
action_type = self._get_action_type(action) # run:<something> for runners
if action_type == self.RUNNER_TYPE:
self.out.put('Running {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_run(conf), output=output)
elif action_type == self.CALL_TYPE:
if not conf.get('salt.int.intfunc'):
self.out.put('Collecting {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_call(conf), output=output)
else:
self.collector.discard_current()
self._internal_function_call(conf)
else:
self.out.error('Unknown action type "{}" for action: {}'.format(action_type, action))
else:
# TODO: This needs to be moved then to the utils.
# But the code is not yet there (other PRs)
self.out.msg('\n'.join(salt.cli.support.console.wrap(action[action_name])), ident=2)
def _get_action_type(self, action):
'''
Get action type.
:param action:
:return:
'''
action_name = next(iter(action or {'': None}))
if ':' not in action_name:
action_name = '{}:{}'.format(self.CALL_TYPE, action_name)
return action_name.split(':')[0] or None
def _cleanup(self):
'''
Cleanup if crash/exception
:return:
'''
if (hasattr(self, 'config')
and self.config.get('support_archive')
and os.path.exists(self.config['support_archive'])):
self.out.warning('Terminated earlier, cleaning up')
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while cleaning up.'.format(err))
def _check_existing_archive(self):
'''
Check if archive exists or not. If exists and --force was not specified,
bail out. Otherwise remove it and move on.
:return:
'''
if os.path.exists(self.config['support_archive']):
if self.config['support_archive_force_overwrite']:
self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive']))
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while trying to overwrite existing archive.'.format(err))
ret = True
else:
self.out.warning('File {} already exists.'.format(self.config['support_archive']))
ret = False
else:
ret = True
return ret
def run(self):
exit_code = salt.defaults.exitcodes.EX_OK
self.out = salt.cli.support.console.MessagesOutput()
try:
self.parse_args()
except (Exception, SystemExit) as ex:
if not isinstance(ex, exceptions.SystemExit):
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
elif isinstance(ex, exceptions.SystemExit):
exit_code = ex.code
else:
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
else:
if self.config['log_level'] not in ('quiet', ):
self.setup_logfile_logger()
salt.utils.verify.verify_log(self.config)
salt.cli.support.log = log # Pass update logger so trace is available
if self.config['support_profile_list']:
self.out.put('List of available profiles:')
for idx, profile in enumerate(salt.cli.support.get_profiles(self.config)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, profile)
exit_code = salt.defaults.exitcodes.EX_OK
elif self.config['support_show_units']:
self.out.put('List of available units:')
for idx, unit in enumerate(self.find_existing_configs(None)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, unit)
exit_code = salt.defaults.exitcodes.EX_OK
else:
if not self.config['support_profile']:
self.print_help()
raise SystemExit()
if self._check_existing_archive():
try:
self.collector = SupportDataCollector(self.config['support_archive'],
output=self.config['support_output_format'])
except Exception as ex:
self.out.error(ex)
exit_code = salt.defaults.exitcodes.EX_GENERIC
log.debug(ex, exc_info=True)
else:
try:
self.collector.open()
self.collect_local_data()
self.collect_internal_data()
self.collector.close()
archive_path = self.collector.archive_path
self.out.highlight('\nSupport data has been written to "{}" file.\n',
archive_path, _main='YELLOW')
except Exception as ex:
self.out.error(ex)
log.debug(ex, exc_info=True)
exit_code = salt.defaults.exitcodes.EX_SOFTWARE
if exit_code:
self._cleanup()
sys.exit(exit_code)
|
saltstack/salt
|
salt/cli/support/collector.py
|
SaltSupport._internal_function_call
|
python
|
def _internal_function_call(self, call_conf):
'''
Call internal function.
:param call_conf:
:return:
'''
def stub(*args, **kwargs):
message = 'Function {} is not available'.format(call_conf['fun'])
self.out.error(message)
log.debug(
'Attempt to run "%s" with %s arguments and %s parameters.',
call_conf['fun'], call_conf['arg'], call_conf['kwargs']
)
return message
return getattr(salt.cli.support.intfunc,
call_conf['fun'], stub)(self.collector,
*call_conf['arg'],
**call_conf['kwargs'])
|
Call internal function.
:param call_conf:
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L284-L303
| null |
class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
'''
Class to run Salt Support subsystem.
'''
RUNNER_TYPE = 'run'
CALL_TYPE = 'call'
def _setup_fun_config(self, fun_conf):
'''
Setup function configuration.
:param conf:
:return:
'''
conf = copy.deepcopy(self.config)
conf['file_client'] = 'local'
conf['fun'] = ''
conf['arg'] = []
conf['kwarg'] = {}
conf['cache_jobs'] = False
conf['print_metadata'] = False
conf.update(fun_conf)
conf['fun'] = conf['fun'].split(':')[-1] # Discard typing prefix
return conf
def _get_runner(self, conf):
'''
Get & setup runner.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_runner', None):
self._runner = salt.cli.support.localrunner.LocalRunner(conf)
else:
self._runner.opts = conf
return self._runner
def _get_caller(self, conf):
'''
Get & setup caller from the factory.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_caller', None):
self._caller = salt.cli.caller.Caller.factory(conf)
else:
self._caller.opts = conf
return self._caller
def _local_call(self, call_conf):
'''
Execute local call
'''
try:
ret = self._get_caller(call_conf).call()
except SystemExit:
ret = 'Data is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
self.out.error(ret)
return ret
def _local_run(self, run_conf):
'''
Execute local runner
:param run_conf:
:return:
'''
try:
ret = self._get_runner(run_conf).run()
except SystemExit:
ret = 'Runner is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
return ret
def _get_action(self, action_meta):
'''
Parse action and turn into a calling point.
:param action_meta:
:return:
'''
conf = {
'fun': list(action_meta.keys())[0],
'arg': [],
'kwargs': {},
}
if not len(conf['fun'].split('.')) - 1:
conf['salt.int.intfunc'] = True
action_meta = action_meta[conf['fun']]
info = action_meta.get('info', 'Action for {}'.format(conf['fun']))
for arg in action_meta.get('args') or []:
if not isinstance(arg, dict):
conf['arg'].append(arg)
else:
conf['kwargs'].update(arg)
return info, action_meta.get('output'), conf
def collect_internal_data(self):
'''
Dumps current running pillars, configuration etc.
:return:
'''
section = 'configuration'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving config', indent=2)
self.collector.write('General Configuration', self.config)
self.out.put('Saving pillars', indent=2)
self.collector.write('Active Pillars', self._local_call({'fun': 'pillar.items'}))
section = 'highstate'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving highstate', indent=2)
self.collector.write('Rendered highstate', self._local_call({'fun': 'state.show_highstate'}))
def _extract_return(self, data):
'''
Extracts return data from the results.
:param data:
:return:
'''
if isinstance(data, dict):
data = data.get('return', data)
return data
def collect_local_data(self, profile=None, profile_source=None):
'''
Collects master system data.
:return:
'''
def call(func, *args, **kwargs):
'''
Call wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_call({'fun': func, 'arg': args, 'kwarg': kwargs}))
def run(func, *args, **kwargs):
'''
Runner wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs}))
scenario = profile_source or salt.cli.support.get_profile(profile or self.config['support_profile'], call, run)
for category_name in scenario:
self.out.put(category_name)
self.collector.add(category_name)
for action in scenario[category_name]:
if not action:
continue
action_name = next(iter(action))
if not isinstance(action[action_name], six.string_types):
info, output, conf = self._get_action(action)
action_type = self._get_action_type(action) # run:<something> for runners
if action_type == self.RUNNER_TYPE:
self.out.put('Running {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_run(conf), output=output)
elif action_type == self.CALL_TYPE:
if not conf.get('salt.int.intfunc'):
self.out.put('Collecting {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_call(conf), output=output)
else:
self.collector.discard_current()
self._internal_function_call(conf)
else:
self.out.error('Unknown action type "{}" for action: {}'.format(action_type, action))
else:
# TODO: This needs to be moved then to the utils.
# But the code is not yet there (other PRs)
self.out.msg('\n'.join(salt.cli.support.console.wrap(action[action_name])), ident=2)
def _get_action_type(self, action):
'''
Get action type.
:param action:
:return:
'''
action_name = next(iter(action or {'': None}))
if ':' not in action_name:
action_name = '{}:{}'.format(self.CALL_TYPE, action_name)
return action_name.split(':')[0] or None
def _cleanup(self):
'''
Cleanup if crash/exception
:return:
'''
if (hasattr(self, 'config')
and self.config.get('support_archive')
and os.path.exists(self.config['support_archive'])):
self.out.warning('Terminated earlier, cleaning up')
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while cleaning up.'.format(err))
def _check_existing_archive(self):
'''
Check if archive exists or not. If exists and --force was not specified,
bail out. Otherwise remove it and move on.
:return:
'''
if os.path.exists(self.config['support_archive']):
if self.config['support_archive_force_overwrite']:
self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive']))
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while trying to overwrite existing archive.'.format(err))
ret = True
else:
self.out.warning('File {} already exists.'.format(self.config['support_archive']))
ret = False
else:
ret = True
return ret
def run(self):
exit_code = salt.defaults.exitcodes.EX_OK
self.out = salt.cli.support.console.MessagesOutput()
try:
self.parse_args()
except (Exception, SystemExit) as ex:
if not isinstance(ex, exceptions.SystemExit):
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
elif isinstance(ex, exceptions.SystemExit):
exit_code = ex.code
else:
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
else:
if self.config['log_level'] not in ('quiet', ):
self.setup_logfile_logger()
salt.utils.verify.verify_log(self.config)
salt.cli.support.log = log # Pass update logger so trace is available
if self.config['support_profile_list']:
self.out.put('List of available profiles:')
for idx, profile in enumerate(salt.cli.support.get_profiles(self.config)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, profile)
exit_code = salt.defaults.exitcodes.EX_OK
elif self.config['support_show_units']:
self.out.put('List of available units:')
for idx, unit in enumerate(self.find_existing_configs(None)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, unit)
exit_code = salt.defaults.exitcodes.EX_OK
else:
if not self.config['support_profile']:
self.print_help()
raise SystemExit()
if self._check_existing_archive():
try:
self.collector = SupportDataCollector(self.config['support_archive'],
output=self.config['support_output_format'])
except Exception as ex:
self.out.error(ex)
exit_code = salt.defaults.exitcodes.EX_GENERIC
log.debug(ex, exc_info=True)
else:
try:
self.collector.open()
self.collect_local_data()
self.collect_internal_data()
self.collector.close()
archive_path = self.collector.archive_path
self.out.highlight('\nSupport data has been written to "{}" file.\n',
archive_path, _main='YELLOW')
except Exception as ex:
self.out.error(ex)
log.debug(ex, exc_info=True)
exit_code = salt.defaults.exitcodes.EX_SOFTWARE
if exit_code:
self._cleanup()
sys.exit(exit_code)
|
saltstack/salt
|
salt/cli/support/collector.py
|
SaltSupport._get_action
|
python
|
def _get_action(self, action_meta):
'''
Parse action and turn into a calling point.
:param action_meta:
:return:
'''
conf = {
'fun': list(action_meta.keys())[0],
'arg': [],
'kwargs': {},
}
if not len(conf['fun'].split('.')) - 1:
conf['salt.int.intfunc'] = True
action_meta = action_meta[conf['fun']]
info = action_meta.get('info', 'Action for {}'.format(conf['fun']))
for arg in action_meta.get('args') or []:
if not isinstance(arg, dict):
conf['arg'].append(arg)
else:
conf['kwargs'].update(arg)
return info, action_meta.get('output'), conf
|
Parse action and turn into a calling point.
:param action_meta:
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L305-L327
| null |
class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
'''
Class to run Salt Support subsystem.
'''
RUNNER_TYPE = 'run'
CALL_TYPE = 'call'
def _setup_fun_config(self, fun_conf):
'''
Setup function configuration.
:param conf:
:return:
'''
conf = copy.deepcopy(self.config)
conf['file_client'] = 'local'
conf['fun'] = ''
conf['arg'] = []
conf['kwarg'] = {}
conf['cache_jobs'] = False
conf['print_metadata'] = False
conf.update(fun_conf)
conf['fun'] = conf['fun'].split(':')[-1] # Discard typing prefix
return conf
def _get_runner(self, conf):
'''
Get & setup runner.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_runner', None):
self._runner = salt.cli.support.localrunner.LocalRunner(conf)
else:
self._runner.opts = conf
return self._runner
def _get_caller(self, conf):
'''
Get & setup caller from the factory.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_caller', None):
self._caller = salt.cli.caller.Caller.factory(conf)
else:
self._caller.opts = conf
return self._caller
def _local_call(self, call_conf):
'''
Execute local call
'''
try:
ret = self._get_caller(call_conf).call()
except SystemExit:
ret = 'Data is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
self.out.error(ret)
return ret
def _local_run(self, run_conf):
'''
Execute local runner
:param run_conf:
:return:
'''
try:
ret = self._get_runner(run_conf).run()
except SystemExit:
ret = 'Runner is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
return ret
def _internal_function_call(self, call_conf):
'''
Call internal function.
:param call_conf:
:return:
'''
def stub(*args, **kwargs):
message = 'Function {} is not available'.format(call_conf['fun'])
self.out.error(message)
log.debug(
'Attempt to run "%s" with %s arguments and %s parameters.',
call_conf['fun'], call_conf['arg'], call_conf['kwargs']
)
return message
return getattr(salt.cli.support.intfunc,
call_conf['fun'], stub)(self.collector,
*call_conf['arg'],
**call_conf['kwargs'])
def collect_internal_data(self):
'''
Dumps current running pillars, configuration etc.
:return:
'''
section = 'configuration'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving config', indent=2)
self.collector.write('General Configuration', self.config)
self.out.put('Saving pillars', indent=2)
self.collector.write('Active Pillars', self._local_call({'fun': 'pillar.items'}))
section = 'highstate'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving highstate', indent=2)
self.collector.write('Rendered highstate', self._local_call({'fun': 'state.show_highstate'}))
def _extract_return(self, data):
'''
Extracts return data from the results.
:param data:
:return:
'''
if isinstance(data, dict):
data = data.get('return', data)
return data
def collect_local_data(self, profile=None, profile_source=None):
'''
Collects master system data.
:return:
'''
def call(func, *args, **kwargs):
'''
Call wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_call({'fun': func, 'arg': args, 'kwarg': kwargs}))
def run(func, *args, **kwargs):
'''
Runner wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs}))
scenario = profile_source or salt.cli.support.get_profile(profile or self.config['support_profile'], call, run)
for category_name in scenario:
self.out.put(category_name)
self.collector.add(category_name)
for action in scenario[category_name]:
if not action:
continue
action_name = next(iter(action))
if not isinstance(action[action_name], six.string_types):
info, output, conf = self._get_action(action)
action_type = self._get_action_type(action) # run:<something> for runners
if action_type == self.RUNNER_TYPE:
self.out.put('Running {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_run(conf), output=output)
elif action_type == self.CALL_TYPE:
if not conf.get('salt.int.intfunc'):
self.out.put('Collecting {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_call(conf), output=output)
else:
self.collector.discard_current()
self._internal_function_call(conf)
else:
self.out.error('Unknown action type "{}" for action: {}'.format(action_type, action))
else:
# TODO: This needs to be moved then to the utils.
# But the code is not yet there (other PRs)
self.out.msg('\n'.join(salt.cli.support.console.wrap(action[action_name])), ident=2)
def _get_action_type(self, action):
'''
Get action type.
:param action:
:return:
'''
action_name = next(iter(action or {'': None}))
if ':' not in action_name:
action_name = '{}:{}'.format(self.CALL_TYPE, action_name)
return action_name.split(':')[0] or None
def _cleanup(self):
'''
Cleanup if crash/exception
:return:
'''
if (hasattr(self, 'config')
and self.config.get('support_archive')
and os.path.exists(self.config['support_archive'])):
self.out.warning('Terminated earlier, cleaning up')
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while cleaning up.'.format(err))
def _check_existing_archive(self):
'''
Check if archive exists or not. If exists and --force was not specified,
bail out. Otherwise remove it and move on.
:return:
'''
if os.path.exists(self.config['support_archive']):
if self.config['support_archive_force_overwrite']:
self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive']))
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while trying to overwrite existing archive.'.format(err))
ret = True
else:
self.out.warning('File {} already exists.'.format(self.config['support_archive']))
ret = False
else:
ret = True
return ret
def run(self):
exit_code = salt.defaults.exitcodes.EX_OK
self.out = salt.cli.support.console.MessagesOutput()
try:
self.parse_args()
except (Exception, SystemExit) as ex:
if not isinstance(ex, exceptions.SystemExit):
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
elif isinstance(ex, exceptions.SystemExit):
exit_code = ex.code
else:
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
else:
if self.config['log_level'] not in ('quiet', ):
self.setup_logfile_logger()
salt.utils.verify.verify_log(self.config)
salt.cli.support.log = log # Pass update logger so trace is available
if self.config['support_profile_list']:
self.out.put('List of available profiles:')
for idx, profile in enumerate(salt.cli.support.get_profiles(self.config)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, profile)
exit_code = salt.defaults.exitcodes.EX_OK
elif self.config['support_show_units']:
self.out.put('List of available units:')
for idx, unit in enumerate(self.find_existing_configs(None)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, unit)
exit_code = salt.defaults.exitcodes.EX_OK
else:
if not self.config['support_profile']:
self.print_help()
raise SystemExit()
if self._check_existing_archive():
try:
self.collector = SupportDataCollector(self.config['support_archive'],
output=self.config['support_output_format'])
except Exception as ex:
self.out.error(ex)
exit_code = salt.defaults.exitcodes.EX_GENERIC
log.debug(ex, exc_info=True)
else:
try:
self.collector.open()
self.collect_local_data()
self.collect_internal_data()
self.collector.close()
archive_path = self.collector.archive_path
self.out.highlight('\nSupport data has been written to "{}" file.\n',
archive_path, _main='YELLOW')
except Exception as ex:
self.out.error(ex)
log.debug(ex, exc_info=True)
exit_code = salt.defaults.exitcodes.EX_SOFTWARE
if exit_code:
self._cleanup()
sys.exit(exit_code)
|
saltstack/salt
|
salt/cli/support/collector.py
|
SaltSupport.collect_internal_data
|
python
|
def collect_internal_data(self):
'''
Dumps current running pillars, configuration etc.
:return:
'''
section = 'configuration'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving config', indent=2)
self.collector.write('General Configuration', self.config)
self.out.put('Saving pillars', indent=2)
self.collector.write('Active Pillars', self._local_call({'fun': 'pillar.items'}))
section = 'highstate'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving highstate', indent=2)
self.collector.write('Rendered highstate', self._local_call({'fun': 'state.show_highstate'}))
|
Dumps current running pillars, configuration etc.
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L329-L346
|
[
"def _local_call(self, call_conf):\n '''\n Execute local call\n '''\n try:\n ret = self._get_caller(call_conf).call()\n except SystemExit:\n ret = 'Data is not available at this moment'\n self.out.error(ret)\n except Exception as ex:\n ret = 'Unhandled exception occurred: {}'.format(ex)\n log.debug(ex, exc_info=True)\n self.out.error(ret)\n\n return ret\n"
] |
class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
'''
Class to run Salt Support subsystem.
'''
RUNNER_TYPE = 'run'
CALL_TYPE = 'call'
def _setup_fun_config(self, fun_conf):
'''
Setup function configuration.
:param conf:
:return:
'''
conf = copy.deepcopy(self.config)
conf['file_client'] = 'local'
conf['fun'] = ''
conf['arg'] = []
conf['kwarg'] = {}
conf['cache_jobs'] = False
conf['print_metadata'] = False
conf.update(fun_conf)
conf['fun'] = conf['fun'].split(':')[-1] # Discard typing prefix
return conf
def _get_runner(self, conf):
'''
Get & setup runner.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_runner', None):
self._runner = salt.cli.support.localrunner.LocalRunner(conf)
else:
self._runner.opts = conf
return self._runner
def _get_caller(self, conf):
'''
Get & setup caller from the factory.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_caller', None):
self._caller = salt.cli.caller.Caller.factory(conf)
else:
self._caller.opts = conf
return self._caller
def _local_call(self, call_conf):
'''
Execute local call
'''
try:
ret = self._get_caller(call_conf).call()
except SystemExit:
ret = 'Data is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
self.out.error(ret)
return ret
def _local_run(self, run_conf):
'''
Execute local runner
:param run_conf:
:return:
'''
try:
ret = self._get_runner(run_conf).run()
except SystemExit:
ret = 'Runner is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
return ret
def _internal_function_call(self, call_conf):
'''
Call internal function.
:param call_conf:
:return:
'''
def stub(*args, **kwargs):
message = 'Function {} is not available'.format(call_conf['fun'])
self.out.error(message)
log.debug(
'Attempt to run "%s" with %s arguments and %s parameters.',
call_conf['fun'], call_conf['arg'], call_conf['kwargs']
)
return message
return getattr(salt.cli.support.intfunc,
call_conf['fun'], stub)(self.collector,
*call_conf['arg'],
**call_conf['kwargs'])
def _get_action(self, action_meta):
'''
Parse action and turn into a calling point.
:param action_meta:
:return:
'''
conf = {
'fun': list(action_meta.keys())[0],
'arg': [],
'kwargs': {},
}
if not len(conf['fun'].split('.')) - 1:
conf['salt.int.intfunc'] = True
action_meta = action_meta[conf['fun']]
info = action_meta.get('info', 'Action for {}'.format(conf['fun']))
for arg in action_meta.get('args') or []:
if not isinstance(arg, dict):
conf['arg'].append(arg)
else:
conf['kwargs'].update(arg)
return info, action_meta.get('output'), conf
def _extract_return(self, data):
'''
Extracts return data from the results.
:param data:
:return:
'''
if isinstance(data, dict):
data = data.get('return', data)
return data
def collect_local_data(self, profile=None, profile_source=None):
'''
Collects master system data.
:return:
'''
def call(func, *args, **kwargs):
'''
Call wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_call({'fun': func, 'arg': args, 'kwarg': kwargs}))
def run(func, *args, **kwargs):
'''
Runner wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs}))
scenario = profile_source or salt.cli.support.get_profile(profile or self.config['support_profile'], call, run)
for category_name in scenario:
self.out.put(category_name)
self.collector.add(category_name)
for action in scenario[category_name]:
if not action:
continue
action_name = next(iter(action))
if not isinstance(action[action_name], six.string_types):
info, output, conf = self._get_action(action)
action_type = self._get_action_type(action) # run:<something> for runners
if action_type == self.RUNNER_TYPE:
self.out.put('Running {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_run(conf), output=output)
elif action_type == self.CALL_TYPE:
if not conf.get('salt.int.intfunc'):
self.out.put('Collecting {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_call(conf), output=output)
else:
self.collector.discard_current()
self._internal_function_call(conf)
else:
self.out.error('Unknown action type "{}" for action: {}'.format(action_type, action))
else:
# TODO: This needs to be moved then to the utils.
# But the code is not yet there (other PRs)
self.out.msg('\n'.join(salt.cli.support.console.wrap(action[action_name])), ident=2)
def _get_action_type(self, action):
'''
Get action type.
:param action:
:return:
'''
action_name = next(iter(action or {'': None}))
if ':' not in action_name:
action_name = '{}:{}'.format(self.CALL_TYPE, action_name)
return action_name.split(':')[0] or None
def _cleanup(self):
'''
Cleanup if crash/exception
:return:
'''
if (hasattr(self, 'config')
and self.config.get('support_archive')
and os.path.exists(self.config['support_archive'])):
self.out.warning('Terminated earlier, cleaning up')
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while cleaning up.'.format(err))
def _check_existing_archive(self):
'''
Check if archive exists or not. If exists and --force was not specified,
bail out. Otherwise remove it and move on.
:return:
'''
if os.path.exists(self.config['support_archive']):
if self.config['support_archive_force_overwrite']:
self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive']))
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while trying to overwrite existing archive.'.format(err))
ret = True
else:
self.out.warning('File {} already exists.'.format(self.config['support_archive']))
ret = False
else:
ret = True
return ret
def run(self):
exit_code = salt.defaults.exitcodes.EX_OK
self.out = salt.cli.support.console.MessagesOutput()
try:
self.parse_args()
except (Exception, SystemExit) as ex:
if not isinstance(ex, exceptions.SystemExit):
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
elif isinstance(ex, exceptions.SystemExit):
exit_code = ex.code
else:
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
else:
if self.config['log_level'] not in ('quiet', ):
self.setup_logfile_logger()
salt.utils.verify.verify_log(self.config)
salt.cli.support.log = log # Pass update logger so trace is available
if self.config['support_profile_list']:
self.out.put('List of available profiles:')
for idx, profile in enumerate(salt.cli.support.get_profiles(self.config)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, profile)
exit_code = salt.defaults.exitcodes.EX_OK
elif self.config['support_show_units']:
self.out.put('List of available units:')
for idx, unit in enumerate(self.find_existing_configs(None)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, unit)
exit_code = salt.defaults.exitcodes.EX_OK
else:
if not self.config['support_profile']:
self.print_help()
raise SystemExit()
if self._check_existing_archive():
try:
self.collector = SupportDataCollector(self.config['support_archive'],
output=self.config['support_output_format'])
except Exception as ex:
self.out.error(ex)
exit_code = salt.defaults.exitcodes.EX_GENERIC
log.debug(ex, exc_info=True)
else:
try:
self.collector.open()
self.collect_local_data()
self.collect_internal_data()
self.collector.close()
archive_path = self.collector.archive_path
self.out.highlight('\nSupport data has been written to "{}" file.\n',
archive_path, _main='YELLOW')
except Exception as ex:
self.out.error(ex)
log.debug(ex, exc_info=True)
exit_code = salt.defaults.exitcodes.EX_SOFTWARE
if exit_code:
self._cleanup()
sys.exit(exit_code)
|
saltstack/salt
|
salt/cli/support/collector.py
|
SaltSupport._extract_return
|
python
|
def _extract_return(self, data):
'''
Extracts return data from the results.
:param data:
:return:
'''
if isinstance(data, dict):
data = data.get('return', data)
return data
|
Extracts return data from the results.
:param data:
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L348-L358
| null |
class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
'''
Class to run Salt Support subsystem.
'''
RUNNER_TYPE = 'run'
CALL_TYPE = 'call'
def _setup_fun_config(self, fun_conf):
'''
Setup function configuration.
:param conf:
:return:
'''
conf = copy.deepcopy(self.config)
conf['file_client'] = 'local'
conf['fun'] = ''
conf['arg'] = []
conf['kwarg'] = {}
conf['cache_jobs'] = False
conf['print_metadata'] = False
conf.update(fun_conf)
conf['fun'] = conf['fun'].split(':')[-1] # Discard typing prefix
return conf
def _get_runner(self, conf):
'''
Get & setup runner.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_runner', None):
self._runner = salt.cli.support.localrunner.LocalRunner(conf)
else:
self._runner.opts = conf
return self._runner
def _get_caller(self, conf):
'''
Get & setup caller from the factory.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_caller', None):
self._caller = salt.cli.caller.Caller.factory(conf)
else:
self._caller.opts = conf
return self._caller
def _local_call(self, call_conf):
'''
Execute local call
'''
try:
ret = self._get_caller(call_conf).call()
except SystemExit:
ret = 'Data is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
self.out.error(ret)
return ret
def _local_run(self, run_conf):
'''
Execute local runner
:param run_conf:
:return:
'''
try:
ret = self._get_runner(run_conf).run()
except SystemExit:
ret = 'Runner is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
return ret
def _internal_function_call(self, call_conf):
'''
Call internal function.
:param call_conf:
:return:
'''
def stub(*args, **kwargs):
message = 'Function {} is not available'.format(call_conf['fun'])
self.out.error(message)
log.debug(
'Attempt to run "%s" with %s arguments and %s parameters.',
call_conf['fun'], call_conf['arg'], call_conf['kwargs']
)
return message
return getattr(salt.cli.support.intfunc,
call_conf['fun'], stub)(self.collector,
*call_conf['arg'],
**call_conf['kwargs'])
def _get_action(self, action_meta):
'''
Parse action and turn into a calling point.
:param action_meta:
:return:
'''
conf = {
'fun': list(action_meta.keys())[0],
'arg': [],
'kwargs': {},
}
if not len(conf['fun'].split('.')) - 1:
conf['salt.int.intfunc'] = True
action_meta = action_meta[conf['fun']]
info = action_meta.get('info', 'Action for {}'.format(conf['fun']))
for arg in action_meta.get('args') or []:
if not isinstance(arg, dict):
conf['arg'].append(arg)
else:
conf['kwargs'].update(arg)
return info, action_meta.get('output'), conf
def collect_internal_data(self):
'''
Dumps current running pillars, configuration etc.
:return:
'''
section = 'configuration'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving config', indent=2)
self.collector.write('General Configuration', self.config)
self.out.put('Saving pillars', indent=2)
self.collector.write('Active Pillars', self._local_call({'fun': 'pillar.items'}))
section = 'highstate'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving highstate', indent=2)
self.collector.write('Rendered highstate', self._local_call({'fun': 'state.show_highstate'}))
def collect_local_data(self, profile=None, profile_source=None):
'''
Collects master system data.
:return:
'''
def call(func, *args, **kwargs):
'''
Call wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_call({'fun': func, 'arg': args, 'kwarg': kwargs}))
def run(func, *args, **kwargs):
'''
Runner wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs}))
scenario = profile_source or salt.cli.support.get_profile(profile or self.config['support_profile'], call, run)
for category_name in scenario:
self.out.put(category_name)
self.collector.add(category_name)
for action in scenario[category_name]:
if not action:
continue
action_name = next(iter(action))
if not isinstance(action[action_name], six.string_types):
info, output, conf = self._get_action(action)
action_type = self._get_action_type(action) # run:<something> for runners
if action_type == self.RUNNER_TYPE:
self.out.put('Running {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_run(conf), output=output)
elif action_type == self.CALL_TYPE:
if not conf.get('salt.int.intfunc'):
self.out.put('Collecting {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_call(conf), output=output)
else:
self.collector.discard_current()
self._internal_function_call(conf)
else:
self.out.error('Unknown action type "{}" for action: {}'.format(action_type, action))
else:
# TODO: This needs to be moved then to the utils.
# But the code is not yet there (other PRs)
self.out.msg('\n'.join(salt.cli.support.console.wrap(action[action_name])), ident=2)
def _get_action_type(self, action):
'''
Get action type.
:param action:
:return:
'''
action_name = next(iter(action or {'': None}))
if ':' not in action_name:
action_name = '{}:{}'.format(self.CALL_TYPE, action_name)
return action_name.split(':')[0] or None
def _cleanup(self):
'''
Cleanup if crash/exception
:return:
'''
if (hasattr(self, 'config')
and self.config.get('support_archive')
and os.path.exists(self.config['support_archive'])):
self.out.warning('Terminated earlier, cleaning up')
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while cleaning up.'.format(err))
def _check_existing_archive(self):
'''
Check if archive exists or not. If exists and --force was not specified,
bail out. Otherwise remove it and move on.
:return:
'''
if os.path.exists(self.config['support_archive']):
if self.config['support_archive_force_overwrite']:
self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive']))
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while trying to overwrite existing archive.'.format(err))
ret = True
else:
self.out.warning('File {} already exists.'.format(self.config['support_archive']))
ret = False
else:
ret = True
return ret
def run(self):
exit_code = salt.defaults.exitcodes.EX_OK
self.out = salt.cli.support.console.MessagesOutput()
try:
self.parse_args()
except (Exception, SystemExit) as ex:
if not isinstance(ex, exceptions.SystemExit):
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
elif isinstance(ex, exceptions.SystemExit):
exit_code = ex.code
else:
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
else:
if self.config['log_level'] not in ('quiet', ):
self.setup_logfile_logger()
salt.utils.verify.verify_log(self.config)
salt.cli.support.log = log # Pass update logger so trace is available
if self.config['support_profile_list']:
self.out.put('List of available profiles:')
for idx, profile in enumerate(salt.cli.support.get_profiles(self.config)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, profile)
exit_code = salt.defaults.exitcodes.EX_OK
elif self.config['support_show_units']:
self.out.put('List of available units:')
for idx, unit in enumerate(self.find_existing_configs(None)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, unit)
exit_code = salt.defaults.exitcodes.EX_OK
else:
if not self.config['support_profile']:
self.print_help()
raise SystemExit()
if self._check_existing_archive():
try:
self.collector = SupportDataCollector(self.config['support_archive'],
output=self.config['support_output_format'])
except Exception as ex:
self.out.error(ex)
exit_code = salt.defaults.exitcodes.EX_GENERIC
log.debug(ex, exc_info=True)
else:
try:
self.collector.open()
self.collect_local_data()
self.collect_internal_data()
self.collector.close()
archive_path = self.collector.archive_path
self.out.highlight('\nSupport data has been written to "{}" file.\n',
archive_path, _main='YELLOW')
except Exception as ex:
self.out.error(ex)
log.debug(ex, exc_info=True)
exit_code = salt.defaults.exitcodes.EX_SOFTWARE
if exit_code:
self._cleanup()
sys.exit(exit_code)
|
saltstack/salt
|
salt/cli/support/collector.py
|
SaltSupport.collect_local_data
|
python
|
def collect_local_data(self, profile=None, profile_source=None):
'''
Collects master system data.
:return:
'''
def call(func, *args, **kwargs):
'''
Call wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_call({'fun': func, 'arg': args, 'kwarg': kwargs}))
def run(func, *args, **kwargs):
'''
Runner wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs}))
scenario = profile_source or salt.cli.support.get_profile(profile or self.config['support_profile'], call, run)
for category_name in scenario:
self.out.put(category_name)
self.collector.add(category_name)
for action in scenario[category_name]:
if not action:
continue
action_name = next(iter(action))
if not isinstance(action[action_name], six.string_types):
info, output, conf = self._get_action(action)
action_type = self._get_action_type(action) # run:<something> for runners
if action_type == self.RUNNER_TYPE:
self.out.put('Running {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_run(conf), output=output)
elif action_type == self.CALL_TYPE:
if not conf.get('salt.int.intfunc'):
self.out.put('Collecting {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_call(conf), output=output)
else:
self.collector.discard_current()
self._internal_function_call(conf)
else:
self.out.error('Unknown action type "{}" for action: {}'.format(action_type, action))
else:
# TODO: This needs to be moved then to the utils.
# But the code is not yet there (other PRs)
self.out.msg('\n'.join(salt.cli.support.console.wrap(action[action_name])), ident=2)
|
Collects master system data.
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L360-L407
|
[
"def get_profile(profile, caller, runner):\n '''\n Get profile.\n\n :param profile:\n :return:\n '''\n profiles = profile.split(',')\n data = {}\n for profile in profiles:\n if os.path.basename(profile) == profile:\n profile = profile.split('.')[0] # Trim extension if someone added it\n profile_path = os.path.join(os.path.dirname(__file__), 'profiles', profile + '.yml')\n else:\n profile_path = profile\n if os.path.exists(profile_path):\n try:\n rendered_template = _render_profile(profile_path, caller, runner)\n line = '-' * 80\n log.debug('\\n%s\\n%s\\n%s\\n', line, rendered_template, line)\n data.update(yaml.load(rendered_template))\n except Exception as ex:\n log.debug(ex, exc_info=True)\n raise salt.exceptions.SaltException('Rendering profile failed: {}'.format(ex))\n else:\n raise salt.exceptions.SaltException('Profile \"{}\" is not found.'.format(profile))\n\n return data\n"
] |
class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
'''
Class to run Salt Support subsystem.
'''
RUNNER_TYPE = 'run'
CALL_TYPE = 'call'
def _setup_fun_config(self, fun_conf):
'''
Setup function configuration.
:param conf:
:return:
'''
conf = copy.deepcopy(self.config)
conf['file_client'] = 'local'
conf['fun'] = ''
conf['arg'] = []
conf['kwarg'] = {}
conf['cache_jobs'] = False
conf['print_metadata'] = False
conf.update(fun_conf)
conf['fun'] = conf['fun'].split(':')[-1] # Discard typing prefix
return conf
def _get_runner(self, conf):
'''
Get & setup runner.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_runner', None):
self._runner = salt.cli.support.localrunner.LocalRunner(conf)
else:
self._runner.opts = conf
return self._runner
def _get_caller(self, conf):
'''
Get & setup caller from the factory.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_caller', None):
self._caller = salt.cli.caller.Caller.factory(conf)
else:
self._caller.opts = conf
return self._caller
def _local_call(self, call_conf):
'''
Execute local call
'''
try:
ret = self._get_caller(call_conf).call()
except SystemExit:
ret = 'Data is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
self.out.error(ret)
return ret
def _local_run(self, run_conf):
'''
Execute local runner
:param run_conf:
:return:
'''
try:
ret = self._get_runner(run_conf).run()
except SystemExit:
ret = 'Runner is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
return ret
def _internal_function_call(self, call_conf):
'''
Call internal function.
:param call_conf:
:return:
'''
def stub(*args, **kwargs):
message = 'Function {} is not available'.format(call_conf['fun'])
self.out.error(message)
log.debug(
'Attempt to run "%s" with %s arguments and %s parameters.',
call_conf['fun'], call_conf['arg'], call_conf['kwargs']
)
return message
return getattr(salt.cli.support.intfunc,
call_conf['fun'], stub)(self.collector,
*call_conf['arg'],
**call_conf['kwargs'])
def _get_action(self, action_meta):
'''
Parse action and turn into a calling point.
:param action_meta:
:return:
'''
conf = {
'fun': list(action_meta.keys())[0],
'arg': [],
'kwargs': {},
}
if not len(conf['fun'].split('.')) - 1:
conf['salt.int.intfunc'] = True
action_meta = action_meta[conf['fun']]
info = action_meta.get('info', 'Action for {}'.format(conf['fun']))
for arg in action_meta.get('args') or []:
if not isinstance(arg, dict):
conf['arg'].append(arg)
else:
conf['kwargs'].update(arg)
return info, action_meta.get('output'), conf
def collect_internal_data(self):
'''
Dumps current running pillars, configuration etc.
:return:
'''
section = 'configuration'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving config', indent=2)
self.collector.write('General Configuration', self.config)
self.out.put('Saving pillars', indent=2)
self.collector.write('Active Pillars', self._local_call({'fun': 'pillar.items'}))
section = 'highstate'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving highstate', indent=2)
self.collector.write('Rendered highstate', self._local_call({'fun': 'state.show_highstate'}))
def _extract_return(self, data):
'''
Extracts return data from the results.
:param data:
:return:
'''
if isinstance(data, dict):
data = data.get('return', data)
return data
def _get_action_type(self, action):
'''
Get action type.
:param action:
:return:
'''
action_name = next(iter(action or {'': None}))
if ':' not in action_name:
action_name = '{}:{}'.format(self.CALL_TYPE, action_name)
return action_name.split(':')[0] or None
def _cleanup(self):
'''
Cleanup if crash/exception
:return:
'''
if (hasattr(self, 'config')
and self.config.get('support_archive')
and os.path.exists(self.config['support_archive'])):
self.out.warning('Terminated earlier, cleaning up')
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while cleaning up.'.format(err))
def _check_existing_archive(self):
'''
Check if archive exists or not. If exists and --force was not specified,
bail out. Otherwise remove it and move on.
:return:
'''
if os.path.exists(self.config['support_archive']):
if self.config['support_archive_force_overwrite']:
self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive']))
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while trying to overwrite existing archive.'.format(err))
ret = True
else:
self.out.warning('File {} already exists.'.format(self.config['support_archive']))
ret = False
else:
ret = True
return ret
def run(self):
exit_code = salt.defaults.exitcodes.EX_OK
self.out = salt.cli.support.console.MessagesOutput()
try:
self.parse_args()
except (Exception, SystemExit) as ex:
if not isinstance(ex, exceptions.SystemExit):
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
elif isinstance(ex, exceptions.SystemExit):
exit_code = ex.code
else:
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
else:
if self.config['log_level'] not in ('quiet', ):
self.setup_logfile_logger()
salt.utils.verify.verify_log(self.config)
salt.cli.support.log = log # Pass update logger so trace is available
if self.config['support_profile_list']:
self.out.put('List of available profiles:')
for idx, profile in enumerate(salt.cli.support.get_profiles(self.config)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, profile)
exit_code = salt.defaults.exitcodes.EX_OK
elif self.config['support_show_units']:
self.out.put('List of available units:')
for idx, unit in enumerate(self.find_existing_configs(None)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, unit)
exit_code = salt.defaults.exitcodes.EX_OK
else:
if not self.config['support_profile']:
self.print_help()
raise SystemExit()
if self._check_existing_archive():
try:
self.collector = SupportDataCollector(self.config['support_archive'],
output=self.config['support_output_format'])
except Exception as ex:
self.out.error(ex)
exit_code = salt.defaults.exitcodes.EX_GENERIC
log.debug(ex, exc_info=True)
else:
try:
self.collector.open()
self.collect_local_data()
self.collect_internal_data()
self.collector.close()
archive_path = self.collector.archive_path
self.out.highlight('\nSupport data has been written to "{}" file.\n',
archive_path, _main='YELLOW')
except Exception as ex:
self.out.error(ex)
log.debug(ex, exc_info=True)
exit_code = salt.defaults.exitcodes.EX_SOFTWARE
if exit_code:
self._cleanup()
sys.exit(exit_code)
|
saltstack/salt
|
salt/cli/support/collector.py
|
SaltSupport._get_action_type
|
python
|
def _get_action_type(self, action):
'''
Get action type.
:param action:
:return:
'''
action_name = next(iter(action or {'': None}))
if ':' not in action_name:
action_name = '{}:{}'.format(self.CALL_TYPE, action_name)
return action_name.split(':')[0] or None
|
Get action type.
:param action:
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L409-L419
| null |
class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
'''
Class to run Salt Support subsystem.
'''
RUNNER_TYPE = 'run'
CALL_TYPE = 'call'
def _setup_fun_config(self, fun_conf):
'''
Setup function configuration.
:param conf:
:return:
'''
conf = copy.deepcopy(self.config)
conf['file_client'] = 'local'
conf['fun'] = ''
conf['arg'] = []
conf['kwarg'] = {}
conf['cache_jobs'] = False
conf['print_metadata'] = False
conf.update(fun_conf)
conf['fun'] = conf['fun'].split(':')[-1] # Discard typing prefix
return conf
def _get_runner(self, conf):
'''
Get & setup runner.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_runner', None):
self._runner = salt.cli.support.localrunner.LocalRunner(conf)
else:
self._runner.opts = conf
return self._runner
def _get_caller(self, conf):
'''
Get & setup caller from the factory.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_caller', None):
self._caller = salt.cli.caller.Caller.factory(conf)
else:
self._caller.opts = conf
return self._caller
def _local_call(self, call_conf):
'''
Execute local call
'''
try:
ret = self._get_caller(call_conf).call()
except SystemExit:
ret = 'Data is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
self.out.error(ret)
return ret
def _local_run(self, run_conf):
'''
Execute local runner
:param run_conf:
:return:
'''
try:
ret = self._get_runner(run_conf).run()
except SystemExit:
ret = 'Runner is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
return ret
def _internal_function_call(self, call_conf):
'''
Call internal function.
:param call_conf:
:return:
'''
def stub(*args, **kwargs):
message = 'Function {} is not available'.format(call_conf['fun'])
self.out.error(message)
log.debug(
'Attempt to run "%s" with %s arguments and %s parameters.',
call_conf['fun'], call_conf['arg'], call_conf['kwargs']
)
return message
return getattr(salt.cli.support.intfunc,
call_conf['fun'], stub)(self.collector,
*call_conf['arg'],
**call_conf['kwargs'])
def _get_action(self, action_meta):
'''
Parse action and turn into a calling point.
:param action_meta:
:return:
'''
conf = {
'fun': list(action_meta.keys())[0],
'arg': [],
'kwargs': {},
}
if not len(conf['fun'].split('.')) - 1:
conf['salt.int.intfunc'] = True
action_meta = action_meta[conf['fun']]
info = action_meta.get('info', 'Action for {}'.format(conf['fun']))
for arg in action_meta.get('args') or []:
if not isinstance(arg, dict):
conf['arg'].append(arg)
else:
conf['kwargs'].update(arg)
return info, action_meta.get('output'), conf
def collect_internal_data(self):
'''
Dumps current running pillars, configuration etc.
:return:
'''
section = 'configuration'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving config', indent=2)
self.collector.write('General Configuration', self.config)
self.out.put('Saving pillars', indent=2)
self.collector.write('Active Pillars', self._local_call({'fun': 'pillar.items'}))
section = 'highstate'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving highstate', indent=2)
self.collector.write('Rendered highstate', self._local_call({'fun': 'state.show_highstate'}))
def _extract_return(self, data):
'''
Extracts return data from the results.
:param data:
:return:
'''
if isinstance(data, dict):
data = data.get('return', data)
return data
def collect_local_data(self, profile=None, profile_source=None):
'''
Collects master system data.
:return:
'''
def call(func, *args, **kwargs):
'''
Call wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_call({'fun': func, 'arg': args, 'kwarg': kwargs}))
def run(func, *args, **kwargs):
'''
Runner wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs}))
scenario = profile_source or salt.cli.support.get_profile(profile or self.config['support_profile'], call, run)
for category_name in scenario:
self.out.put(category_name)
self.collector.add(category_name)
for action in scenario[category_name]:
if not action:
continue
action_name = next(iter(action))
if not isinstance(action[action_name], six.string_types):
info, output, conf = self._get_action(action)
action_type = self._get_action_type(action) # run:<something> for runners
if action_type == self.RUNNER_TYPE:
self.out.put('Running {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_run(conf), output=output)
elif action_type == self.CALL_TYPE:
if not conf.get('salt.int.intfunc'):
self.out.put('Collecting {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_call(conf), output=output)
else:
self.collector.discard_current()
self._internal_function_call(conf)
else:
self.out.error('Unknown action type "{}" for action: {}'.format(action_type, action))
else:
# TODO: This needs to be moved then to the utils.
# But the code is not yet there (other PRs)
self.out.msg('\n'.join(salt.cli.support.console.wrap(action[action_name])), ident=2)
def _cleanup(self):
'''
Cleanup if crash/exception
:return:
'''
if (hasattr(self, 'config')
and self.config.get('support_archive')
and os.path.exists(self.config['support_archive'])):
self.out.warning('Terminated earlier, cleaning up')
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while cleaning up.'.format(err))
def _check_existing_archive(self):
'''
Check if archive exists or not. If exists and --force was not specified,
bail out. Otherwise remove it and move on.
:return:
'''
if os.path.exists(self.config['support_archive']):
if self.config['support_archive_force_overwrite']:
self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive']))
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while trying to overwrite existing archive.'.format(err))
ret = True
else:
self.out.warning('File {} already exists.'.format(self.config['support_archive']))
ret = False
else:
ret = True
return ret
def run(self):
exit_code = salt.defaults.exitcodes.EX_OK
self.out = salt.cli.support.console.MessagesOutput()
try:
self.parse_args()
except (Exception, SystemExit) as ex:
if not isinstance(ex, exceptions.SystemExit):
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
elif isinstance(ex, exceptions.SystemExit):
exit_code = ex.code
else:
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
else:
if self.config['log_level'] not in ('quiet', ):
self.setup_logfile_logger()
salt.utils.verify.verify_log(self.config)
salt.cli.support.log = log # Pass update logger so trace is available
if self.config['support_profile_list']:
self.out.put('List of available profiles:')
for idx, profile in enumerate(salt.cli.support.get_profiles(self.config)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, profile)
exit_code = salt.defaults.exitcodes.EX_OK
elif self.config['support_show_units']:
self.out.put('List of available units:')
for idx, unit in enumerate(self.find_existing_configs(None)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, unit)
exit_code = salt.defaults.exitcodes.EX_OK
else:
if not self.config['support_profile']:
self.print_help()
raise SystemExit()
if self._check_existing_archive():
try:
self.collector = SupportDataCollector(self.config['support_archive'],
output=self.config['support_output_format'])
except Exception as ex:
self.out.error(ex)
exit_code = salt.defaults.exitcodes.EX_GENERIC
log.debug(ex, exc_info=True)
else:
try:
self.collector.open()
self.collect_local_data()
self.collect_internal_data()
self.collector.close()
archive_path = self.collector.archive_path
self.out.highlight('\nSupport data has been written to "{}" file.\n',
archive_path, _main='YELLOW')
except Exception as ex:
self.out.error(ex)
log.debug(ex, exc_info=True)
exit_code = salt.defaults.exitcodes.EX_SOFTWARE
if exit_code:
self._cleanup()
sys.exit(exit_code)
|
saltstack/salt
|
salt/cli/support/collector.py
|
SaltSupport._cleanup
|
python
|
def _cleanup(self):
'''
Cleanup if crash/exception
:return:
'''
if (hasattr(self, 'config')
and self.config.get('support_archive')
and os.path.exists(self.config['support_archive'])):
self.out.warning('Terminated earlier, cleaning up')
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while cleaning up.'.format(err))
|
Cleanup if crash/exception
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L421-L434
| null |
class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
'''
Class to run Salt Support subsystem.
'''
RUNNER_TYPE = 'run'
CALL_TYPE = 'call'
def _setup_fun_config(self, fun_conf):
'''
Setup function configuration.
:param conf:
:return:
'''
conf = copy.deepcopy(self.config)
conf['file_client'] = 'local'
conf['fun'] = ''
conf['arg'] = []
conf['kwarg'] = {}
conf['cache_jobs'] = False
conf['print_metadata'] = False
conf.update(fun_conf)
conf['fun'] = conf['fun'].split(':')[-1] # Discard typing prefix
return conf
def _get_runner(self, conf):
'''
Get & setup runner.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_runner', None):
self._runner = salt.cli.support.localrunner.LocalRunner(conf)
else:
self._runner.opts = conf
return self._runner
def _get_caller(self, conf):
'''
Get & setup caller from the factory.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_caller', None):
self._caller = salt.cli.caller.Caller.factory(conf)
else:
self._caller.opts = conf
return self._caller
def _local_call(self, call_conf):
'''
Execute local call
'''
try:
ret = self._get_caller(call_conf).call()
except SystemExit:
ret = 'Data is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
self.out.error(ret)
return ret
def _local_run(self, run_conf):
'''
Execute local runner
:param run_conf:
:return:
'''
try:
ret = self._get_runner(run_conf).run()
except SystemExit:
ret = 'Runner is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
return ret
def _internal_function_call(self, call_conf):
'''
Call internal function.
:param call_conf:
:return:
'''
def stub(*args, **kwargs):
message = 'Function {} is not available'.format(call_conf['fun'])
self.out.error(message)
log.debug(
'Attempt to run "%s" with %s arguments and %s parameters.',
call_conf['fun'], call_conf['arg'], call_conf['kwargs']
)
return message
return getattr(salt.cli.support.intfunc,
call_conf['fun'], stub)(self.collector,
*call_conf['arg'],
**call_conf['kwargs'])
def _get_action(self, action_meta):
'''
Parse action and turn into a calling point.
:param action_meta:
:return:
'''
conf = {
'fun': list(action_meta.keys())[0],
'arg': [],
'kwargs': {},
}
if not len(conf['fun'].split('.')) - 1:
conf['salt.int.intfunc'] = True
action_meta = action_meta[conf['fun']]
info = action_meta.get('info', 'Action for {}'.format(conf['fun']))
for arg in action_meta.get('args') or []:
if not isinstance(arg, dict):
conf['arg'].append(arg)
else:
conf['kwargs'].update(arg)
return info, action_meta.get('output'), conf
def collect_internal_data(self):
'''
Dumps current running pillars, configuration etc.
:return:
'''
section = 'configuration'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving config', indent=2)
self.collector.write('General Configuration', self.config)
self.out.put('Saving pillars', indent=2)
self.collector.write('Active Pillars', self._local_call({'fun': 'pillar.items'}))
section = 'highstate'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving highstate', indent=2)
self.collector.write('Rendered highstate', self._local_call({'fun': 'state.show_highstate'}))
def _extract_return(self, data):
'''
Extracts return data from the results.
:param data:
:return:
'''
if isinstance(data, dict):
data = data.get('return', data)
return data
def collect_local_data(self, profile=None, profile_source=None):
'''
Collects master system data.
:return:
'''
def call(func, *args, **kwargs):
'''
Call wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_call({'fun': func, 'arg': args, 'kwarg': kwargs}))
def run(func, *args, **kwargs):
'''
Runner wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs}))
scenario = profile_source or salt.cli.support.get_profile(profile or self.config['support_profile'], call, run)
for category_name in scenario:
self.out.put(category_name)
self.collector.add(category_name)
for action in scenario[category_name]:
if not action:
continue
action_name = next(iter(action))
if not isinstance(action[action_name], six.string_types):
info, output, conf = self._get_action(action)
action_type = self._get_action_type(action) # run:<something> for runners
if action_type == self.RUNNER_TYPE:
self.out.put('Running {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_run(conf), output=output)
elif action_type == self.CALL_TYPE:
if not conf.get('salt.int.intfunc'):
self.out.put('Collecting {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_call(conf), output=output)
else:
self.collector.discard_current()
self._internal_function_call(conf)
else:
self.out.error('Unknown action type "{}" for action: {}'.format(action_type, action))
else:
# TODO: This needs to be moved then to the utils.
# But the code is not yet there (other PRs)
self.out.msg('\n'.join(salt.cli.support.console.wrap(action[action_name])), ident=2)
def _get_action_type(self, action):
'''
Get action type.
:param action:
:return:
'''
action_name = next(iter(action or {'': None}))
if ':' not in action_name:
action_name = '{}:{}'.format(self.CALL_TYPE, action_name)
return action_name.split(':')[0] or None
def _check_existing_archive(self):
'''
Check if archive exists or not. If exists and --force was not specified,
bail out. Otherwise remove it and move on.
:return:
'''
if os.path.exists(self.config['support_archive']):
if self.config['support_archive_force_overwrite']:
self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive']))
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while trying to overwrite existing archive.'.format(err))
ret = True
else:
self.out.warning('File {} already exists.'.format(self.config['support_archive']))
ret = False
else:
ret = True
return ret
def run(self):
exit_code = salt.defaults.exitcodes.EX_OK
self.out = salt.cli.support.console.MessagesOutput()
try:
self.parse_args()
except (Exception, SystemExit) as ex:
if not isinstance(ex, exceptions.SystemExit):
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
elif isinstance(ex, exceptions.SystemExit):
exit_code = ex.code
else:
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
else:
if self.config['log_level'] not in ('quiet', ):
self.setup_logfile_logger()
salt.utils.verify.verify_log(self.config)
salt.cli.support.log = log # Pass update logger so trace is available
if self.config['support_profile_list']:
self.out.put('List of available profiles:')
for idx, profile in enumerate(salt.cli.support.get_profiles(self.config)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, profile)
exit_code = salt.defaults.exitcodes.EX_OK
elif self.config['support_show_units']:
self.out.put('List of available units:')
for idx, unit in enumerate(self.find_existing_configs(None)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, unit)
exit_code = salt.defaults.exitcodes.EX_OK
else:
if not self.config['support_profile']:
self.print_help()
raise SystemExit()
if self._check_existing_archive():
try:
self.collector = SupportDataCollector(self.config['support_archive'],
output=self.config['support_output_format'])
except Exception as ex:
self.out.error(ex)
exit_code = salt.defaults.exitcodes.EX_GENERIC
log.debug(ex, exc_info=True)
else:
try:
self.collector.open()
self.collect_local_data()
self.collect_internal_data()
self.collector.close()
archive_path = self.collector.archive_path
self.out.highlight('\nSupport data has been written to "{}" file.\n',
archive_path, _main='YELLOW')
except Exception as ex:
self.out.error(ex)
log.debug(ex, exc_info=True)
exit_code = salt.defaults.exitcodes.EX_SOFTWARE
if exit_code:
self._cleanup()
sys.exit(exit_code)
|
saltstack/salt
|
salt/cli/support/collector.py
|
SaltSupport._check_existing_archive
|
python
|
def _check_existing_archive(self):
'''
Check if archive exists or not. If exists and --force was not specified,
bail out. Otherwise remove it and move on.
:return:
'''
if os.path.exists(self.config['support_archive']):
if self.config['support_archive_force_overwrite']:
self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive']))
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while trying to overwrite existing archive.'.format(err))
ret = True
else:
self.out.warning('File {} already exists.'.format(self.config['support_archive']))
ret = False
else:
ret = True
return ret
|
Check if archive exists or not. If exists and --force was not specified,
bail out. Otherwise remove it and move on.
:return:
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L436-L458
| null |
class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
'''
Class to run Salt Support subsystem.
'''
RUNNER_TYPE = 'run'
CALL_TYPE = 'call'
def _setup_fun_config(self, fun_conf):
'''
Setup function configuration.
:param conf:
:return:
'''
conf = copy.deepcopy(self.config)
conf['file_client'] = 'local'
conf['fun'] = ''
conf['arg'] = []
conf['kwarg'] = {}
conf['cache_jobs'] = False
conf['print_metadata'] = False
conf.update(fun_conf)
conf['fun'] = conf['fun'].split(':')[-1] # Discard typing prefix
return conf
def _get_runner(self, conf):
'''
Get & setup runner.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_runner', None):
self._runner = salt.cli.support.localrunner.LocalRunner(conf)
else:
self._runner.opts = conf
return self._runner
def _get_caller(self, conf):
'''
Get & setup caller from the factory.
:param conf:
:return:
'''
conf = self._setup_fun_config(copy.deepcopy(conf))
if not getattr(self, '_caller', None):
self._caller = salt.cli.caller.Caller.factory(conf)
else:
self._caller.opts = conf
return self._caller
def _local_call(self, call_conf):
'''
Execute local call
'''
try:
ret = self._get_caller(call_conf).call()
except SystemExit:
ret = 'Data is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
self.out.error(ret)
return ret
def _local_run(self, run_conf):
'''
Execute local runner
:param run_conf:
:return:
'''
try:
ret = self._get_runner(run_conf).run()
except SystemExit:
ret = 'Runner is not available at this moment'
self.out.error(ret)
except Exception as ex:
ret = 'Unhandled exception occurred: {}'.format(ex)
log.debug(ex, exc_info=True)
return ret
def _internal_function_call(self, call_conf):
'''
Call internal function.
:param call_conf:
:return:
'''
def stub(*args, **kwargs):
message = 'Function {} is not available'.format(call_conf['fun'])
self.out.error(message)
log.debug(
'Attempt to run "%s" with %s arguments and %s parameters.',
call_conf['fun'], call_conf['arg'], call_conf['kwargs']
)
return message
return getattr(salt.cli.support.intfunc,
call_conf['fun'], stub)(self.collector,
*call_conf['arg'],
**call_conf['kwargs'])
def _get_action(self, action_meta):
'''
Parse action and turn into a calling point.
:param action_meta:
:return:
'''
conf = {
'fun': list(action_meta.keys())[0],
'arg': [],
'kwargs': {},
}
if not len(conf['fun'].split('.')) - 1:
conf['salt.int.intfunc'] = True
action_meta = action_meta[conf['fun']]
info = action_meta.get('info', 'Action for {}'.format(conf['fun']))
for arg in action_meta.get('args') or []:
if not isinstance(arg, dict):
conf['arg'].append(arg)
else:
conf['kwargs'].update(arg)
return info, action_meta.get('output'), conf
def collect_internal_data(self):
'''
Dumps current running pillars, configuration etc.
:return:
'''
section = 'configuration'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving config', indent=2)
self.collector.write('General Configuration', self.config)
self.out.put('Saving pillars', indent=2)
self.collector.write('Active Pillars', self._local_call({'fun': 'pillar.items'}))
section = 'highstate'
self.out.put(section)
self.collector.add(section)
self.out.put('Saving highstate', indent=2)
self.collector.write('Rendered highstate', self._local_call({'fun': 'state.show_highstate'}))
def _extract_return(self, data):
'''
Extracts return data from the results.
:param data:
:return:
'''
if isinstance(data, dict):
data = data.get('return', data)
return data
def collect_local_data(self, profile=None, profile_source=None):
'''
Collects master system data.
:return:
'''
def call(func, *args, **kwargs):
'''
Call wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_call({'fun': func, 'arg': args, 'kwarg': kwargs}))
def run(func, *args, **kwargs):
'''
Runner wrapper for templates
:param func:
:return:
'''
return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs}))
scenario = profile_source or salt.cli.support.get_profile(profile or self.config['support_profile'], call, run)
for category_name in scenario:
self.out.put(category_name)
self.collector.add(category_name)
for action in scenario[category_name]:
if not action:
continue
action_name = next(iter(action))
if not isinstance(action[action_name], six.string_types):
info, output, conf = self._get_action(action)
action_type = self._get_action_type(action) # run:<something> for runners
if action_type == self.RUNNER_TYPE:
self.out.put('Running {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_run(conf), output=output)
elif action_type == self.CALL_TYPE:
if not conf.get('salt.int.intfunc'):
self.out.put('Collecting {}'.format(info.lower()), indent=2)
self.collector.write(info, self._local_call(conf), output=output)
else:
self.collector.discard_current()
self._internal_function_call(conf)
else:
self.out.error('Unknown action type "{}" for action: {}'.format(action_type, action))
else:
# TODO: This needs to be moved then to the utils.
# But the code is not yet there (other PRs)
self.out.msg('\n'.join(salt.cli.support.console.wrap(action[action_name])), ident=2)
def _get_action_type(self, action):
'''
Get action type.
:param action:
:return:
'''
action_name = next(iter(action or {'': None}))
if ':' not in action_name:
action_name = '{}:{}'.format(self.CALL_TYPE, action_name)
return action_name.split(':')[0] or None
def _cleanup(self):
'''
Cleanup if crash/exception
:return:
'''
if (hasattr(self, 'config')
and self.config.get('support_archive')
and os.path.exists(self.config['support_archive'])):
self.out.warning('Terminated earlier, cleaning up')
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while cleaning up.'.format(err))
def run(self):
exit_code = salt.defaults.exitcodes.EX_OK
self.out = salt.cli.support.console.MessagesOutput()
try:
self.parse_args()
except (Exception, SystemExit) as ex:
if not isinstance(ex, exceptions.SystemExit):
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
elif isinstance(ex, exceptions.SystemExit):
exit_code = ex.code
else:
exit_code = salt.defaults.exitcodes.EX_GENERIC
self.out.error(ex)
else:
if self.config['log_level'] not in ('quiet', ):
self.setup_logfile_logger()
salt.utils.verify.verify_log(self.config)
salt.cli.support.log = log # Pass update logger so trace is available
if self.config['support_profile_list']:
self.out.put('List of available profiles:')
for idx, profile in enumerate(salt.cli.support.get_profiles(self.config)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, profile)
exit_code = salt.defaults.exitcodes.EX_OK
elif self.config['support_show_units']:
self.out.put('List of available units:')
for idx, unit in enumerate(self.find_existing_configs(None)):
msg_template = ' {}. '.format(idx + 1) + '{}'
self.out.highlight(msg_template, unit)
exit_code = salt.defaults.exitcodes.EX_OK
else:
if not self.config['support_profile']:
self.print_help()
raise SystemExit()
if self._check_existing_archive():
try:
self.collector = SupportDataCollector(self.config['support_archive'],
output=self.config['support_output_format'])
except Exception as ex:
self.out.error(ex)
exit_code = salt.defaults.exitcodes.EX_GENERIC
log.debug(ex, exc_info=True)
else:
try:
self.collector.open()
self.collect_local_data()
self.collect_internal_data()
self.collector.close()
archive_path = self.collector.archive_path
self.out.highlight('\nSupport data has been written to "{}" file.\n',
archive_path, _main='YELLOW')
except Exception as ex:
self.out.error(ex)
log.debug(ex, exc_info=True)
exit_code = salt.defaults.exitcodes.EX_SOFTWARE
if exit_code:
self._cleanup()
sys.exit(exit_code)
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient.run
|
python
|
def run(self, args):
'''
Run the SPM command
'''
command = args[0]
try:
if command == 'install':
self._install(args)
elif command == 'local':
self._local(args)
elif command == 'repo':
self._repo(args)
elif command == 'remove':
self._remove(args)
elif command == 'build':
self._build(args)
elif command == 'update_repo':
self._download_repo_metadata(args)
elif command == 'create_repo':
self._create_repo(args)
elif command == 'files':
self._list_files(args)
elif command == 'info':
self._info(args)
elif command == 'list':
self._list(args)
elif command == 'close':
self._close()
else:
raise SPMInvocationError('Invalid command \'{0}\''.format(command))
except SPMException as exc:
self.ui.error(six.text_type(exc))
|
Run the SPM command
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L118-L149
|
[
"def _close(self):\n if self.db_conn:\n self.db_conn.close()\n",
"def _list(self, args):\n '''\n Process local commands\n '''\n args.pop(0)\n command = args[0]\n if command == 'packages':\n self._list_packages(args)\n elif command == 'files':\n self._list_files(args)\n elif command == 'repos':\n self._repo_list(args)\n else:\n raise SPMInvocationError('Invalid list command \\'{0}\\''.format(command))\n",
"def _local(self, args):\n '''\n Process local commands\n '''\n args.pop(0)\n command = args[0]\n if command == 'install':\n self._local_install(args)\n elif command == 'files':\n self._local_list_files(args)\n elif command == 'info':\n self._local_info(args)\n else:\n raise SPMInvocationError('Invalid local command \\'{0}\\''.format(command))\n",
"def _repo(self, args):\n '''\n Process repo commands\n '''\n args.pop(0)\n command = args[0]\n if command == 'list':\n self._repo_list(args)\n elif command == 'packages':\n self._repo_packages(args)\n elif command == 'search':\n self._repo_packages(args, search=True)\n elif command == 'update':\n self._download_repo_metadata(args)\n elif command == 'create':\n self._create_repo(args)\n else:\n raise SPMInvocationError('Invalid repo command \\'{0}\\''.format(command))\n",
"def _install(self, args):\n '''\n Install a package from a repo\n '''\n if len(args) < 2:\n raise SPMInvocationError('A package must be specified')\n\n caller_opts = self.opts.copy()\n caller_opts['file_client'] = 'local'\n self.caller = salt.client.Caller(mopts=caller_opts)\n self.client = salt.client.get_local_client(self.opts['conf_file'])\n cache = salt.cache.Cache(self.opts)\n\n packages = args[1:]\n file_map = {}\n optional = []\n recommended = []\n to_install = []\n for pkg in packages:\n if pkg.endswith('.spm'):\n if self._pkgfiles_fun('path_exists', pkg):\n comps = pkg.split('-')\n comps = os.path.split('-'.join(comps[:-2]))\n pkg_name = comps[-1]\n\n formula_tar = tarfile.open(pkg, 'r:bz2')\n formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))\n formula_def = salt.utils.yaml.safe_load(formula_ref)\n\n file_map[pkg_name] = pkg\n to_, op_, re_ = self._check_all_deps(\n pkg_name=pkg_name,\n pkg_file=pkg,\n formula_def=formula_def\n )\n to_install.extend(to_)\n optional.extend(op_)\n recommended.extend(re_)\n formula_tar.close()\n else:\n raise SPMInvocationError('Package file {0} not found'.format(pkg))\n else:\n to_, op_, re_ = self._check_all_deps(pkg_name=pkg)\n to_install.extend(to_)\n optional.extend(op_)\n recommended.extend(re_)\n\n optional = set(filter(len, optional))\n if optional:\n self.ui.status('The following dependencies are optional:\\n\\t{0}\\n'.format(\n '\\n\\t'.join(optional)\n ))\n recommended = set(filter(len, recommended))\n if recommended:\n self.ui.status('The following dependencies are recommended:\\n\\t{0}\\n'.format(\n '\\n\\t'.join(recommended)\n ))\n\n to_install = set(filter(len, to_install))\n msg = 'Installing packages:\\n\\t{0}\\n'.format('\\n\\t'.join(to_install))\n if not self.opts['assume_yes']:\n self.ui.confirm(msg)\n\n repo_metadata = self._get_repo_metadata()\n\n dl_list = {}\n for package in to_install:\n if package in file_map:\n self._install_indv_pkg(package, file_map[package])\n else:\n for 
repo in repo_metadata:\n repo_info = repo_metadata[repo]\n if package in repo_info['packages']:\n dl_package = False\n repo_ver = repo_info['packages'][package]['info']['version']\n repo_rel = repo_info['packages'][package]['info']['release']\n repo_url = repo_info['info']['url']\n if package in dl_list:\n # Check package version, replace if newer version\n if repo_ver == dl_list[package]['version']:\n # Version is the same, check release\n if repo_rel > dl_list[package]['release']:\n dl_package = True\n elif repo_rel == dl_list[package]['release']:\n # Version and release are the same, give\n # preference to local (file://) repos\n if dl_list[package]['source'].startswith('file://'):\n if not repo_url.startswith('file://'):\n dl_package = True\n elif repo_ver > dl_list[package]['version']:\n dl_package = True\n else:\n dl_package = True\n\n if dl_package is True:\n # Put together download directory\n cache_path = os.path.join(\n self.opts['spm_cache_dir'],\n repo\n )\n\n # Put together download paths\n dl_url = '{0}/{1}'.format(\n repo_info['info']['url'],\n repo_info['packages'][package]['filename']\n )\n out_file = os.path.join(\n cache_path,\n repo_info['packages'][package]['filename']\n )\n dl_list[package] = {\n 'version': repo_ver,\n 'release': repo_rel,\n 'source': dl_url,\n 'dest_dir': cache_path,\n 'dest_file': out_file,\n }\n\n for package in dl_list:\n dl_url = dl_list[package]['source']\n cache_path = dl_list[package]['dest_dir']\n out_file = dl_list[package]['dest_file']\n\n # Make sure download directory exists\n if not os.path.exists(cache_path):\n os.makedirs(cache_path)\n\n # Download the package\n if dl_url.startswith('file://'):\n dl_url = dl_url.replace('file://', '')\n shutil.copyfile(dl_url, out_file)\n else:\n with salt.utils.files.fopen(out_file, 'w') as outf:\n outf.write(self._query_http(dl_url, repo_info['info']))\n\n # First we download everything, then we install\n for package in dl_list:\n out_file = dl_list[package]['dest_file']\n # 
Kick off the install\n self._install_indv_pkg(package, out_file)\n return\n",
"def _download_repo_metadata(self, args):\n '''\n Connect to all repos and download metadata\n '''\n cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])\n\n def _update_metadata(repo, repo_info):\n dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])\n if dl_path.startswith('file://'):\n dl_path = dl_path.replace('file://', '')\n with salt.utils.files.fopen(dl_path, 'r') as rpm:\n metadata = salt.utils.yaml.safe_load(rpm)\n else:\n metadata = self._query_http(dl_path, repo_info)\n\n cache.store('.', repo, metadata)\n\n repo_name = args[1] if len(args) > 1 else None\n self._traverse_repos(_update_metadata, repo_name)\n",
"def _create_repo(self, args):\n '''\n Scan a directory and create an SPM-METADATA file which describes\n all of the SPM files in that directory.\n '''\n if len(args) < 2:\n raise SPMInvocationError('A path to a directory must be specified')\n\n if args[1] == '.':\n repo_path = os.getcwdu()\n else:\n repo_path = args[1]\n\n old_files = []\n repo_metadata = {}\n for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):\n for spm_file in filenames:\n if not spm_file.endswith('.spm'):\n continue\n spm_path = '{0}/{1}'.format(repo_path, spm_file)\n if not tarfile.is_tarfile(spm_path):\n continue\n comps = spm_file.split('-')\n spm_name = '-'.join(comps[:-2])\n spm_fh = tarfile.open(spm_path, 'r:bz2')\n formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))\n formula_conf = salt.utils.yaml.safe_load(formula_handle.read())\n\n use_formula = True\n if spm_name in repo_metadata:\n # This package is already in the repo; use the latest\n cur_info = repo_metadata[spm_name]['info']\n new_info = formula_conf\n if int(new_info['version']) == int(cur_info['version']):\n # Version is the same, check release\n if int(new_info['release']) < int(cur_info['release']):\n # This is an old release; don't use it\n use_formula = False\n elif int(new_info['version']) < int(cur_info['version']):\n # This is an old version; don't use it\n use_formula = False\n\n if use_formula is True:\n # Ignore/archive/delete the old version\n log.debug(\n '%s %s-%s had been added, but %s-%s will replace it',\n spm_name, cur_info['version'], cur_info['release'],\n new_info['version'], new_info['release']\n )\n old_files.append(repo_metadata[spm_name]['filename'])\n else:\n # Ignore/archive/delete the new version\n log.debug(\n '%s %s-%s has been found, but is older than %s-%s',\n spm_name, new_info['version'], new_info['release'],\n cur_info['version'], cur_info['release']\n )\n old_files.append(spm_file)\n\n if use_formula is True:\n log.debug(\n 'adding %s-%s-%s to the 
repo',\n formula_conf['name'], formula_conf['version'],\n formula_conf['release']\n )\n repo_metadata[spm_name] = {\n 'info': formula_conf.copy(),\n }\n repo_metadata[spm_name]['filename'] = spm_file\n\n metadata_filename = '{0}/SPM-METADATA'.format(repo_path)\n with salt.utils.files.fopen(metadata_filename, 'w') as mfh:\n salt.utils.yaml.safe_dump(\n repo_metadata,\n mfh,\n indent=4,\n canonical=False,\n default_flow_style=False,\n )\n\n log.debug('Wrote %s', metadata_filename)\n\n for file_ in old_files:\n if self.opts['spm_repo_dups'] == 'ignore':\n # ignore old packages, but still only add the latest\n log.debug('%s will be left in the directory', file_)\n elif self.opts['spm_repo_dups'] == 'archive':\n # spm_repo_archive_path is where old packages are moved\n if not os.path.exists('./archive'):\n try:\n os.makedirs('./archive')\n log.debug('%s has been archived', file_)\n except IOError:\n log.error('Unable to create archive directory')\n try:\n shutil.move(file_, './archive')\n except (IOError, OSError):\n log.error('Unable to archive %s', file_)\n elif self.opts['spm_repo_dups'] == 'delete':\n # delete old packages from the repo\n try:\n os.remove(file_)\n log.debug('%s has been deleted', file_)\n except IOError:\n log.error('Unable to delete %s', file_)\n except OSError:\n # The file has already been deleted\n pass\n",
"def _remove(self, args):\n '''\n Remove a package\n '''\n if len(args) < 2:\n raise SPMInvocationError('A package must be specified')\n\n packages = args[1:]\n msg = 'Removing packages:\\n\\t{0}'.format('\\n\\t'.join(packages))\n\n if not self.opts['assume_yes']:\n self.ui.confirm(msg)\n\n for package in packages:\n self.ui.status('... removing {0}'.format(package))\n\n if not self._pkgdb_fun('db_exists', self.opts['spm_db']):\n raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))\n\n # Look at local repo index\n pkg_info = self._pkgdb_fun('info', package, self.db_conn)\n if pkg_info is None:\n raise SPMInvocationError('Package {0} not installed'.format(package))\n\n # Find files that have not changed and remove them\n files = self._pkgdb_fun('list_files', package, self.db_conn)\n dirs = []\n for filerow in files:\n if self._pkgfiles_fun('path_isdir', filerow[0]):\n dirs.append(filerow[0])\n continue\n file_hash = hashlib.sha1()\n digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)\n if filerow[1] == digest:\n self._verbose('Removing file {0}'.format(filerow[0]), log.trace)\n self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)\n else:\n self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)\n self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)\n\n # Clean up directories\n for dir_ in sorted(dirs, reverse=True):\n self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)\n try:\n self._verbose('Removing directory {0}'.format(dir_), log.trace)\n os.rmdir(dir_)\n except OSError:\n # Leave directories in place that still have files in them\n self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)\n\n self._pkgdb_fun('unregister_pkg', package, self.db_conn)\n",
"def _info(self, args):\n '''\n List info for a package\n '''\n if len(args) < 2:\n raise SPMInvocationError('A package must be specified')\n\n package = args[1]\n\n pkg_info = self._pkgdb_fun('info', package, self.db_conn)\n if pkg_info is None:\n raise SPMPackageError('package {0} not installed'.format(package))\n self.ui.status(self._get_info(pkg_info))\n",
"def _list_files(self, args):\n '''\n List files for an installed package\n '''\n if len(args) < 2:\n raise SPMInvocationError('A package name must be specified')\n\n package = args[-1]\n\n files = self._pkgdb_fun('list_files', package, self.db_conn)\n if files is None:\n raise SPMPackageError('package {0} not installed'.format(package))\n else:\n for file_ in files:\n if self.opts['verbose']:\n status_msg = ','.join(file_)\n else:\n status_msg = file_[0]\n self.ui.status(status_msg)\n",
"def _build(self, args):\n '''\n Build a package\n '''\n if len(args) < 2:\n raise SPMInvocationError('A path to a formula must be specified')\n\n self.abspath = args[1].rstrip('/')\n comps = self.abspath.split('/')\n self.relpath = comps[-1]\n\n formula_path = '{0}/FORMULA'.format(self.abspath)\n if not os.path.exists(formula_path):\n raise SPMPackageError('Formula file {0} not found'.format(formula_path))\n with salt.utils.files.fopen(formula_path) as fp_:\n formula_conf = salt.utils.yaml.safe_load(fp_)\n\n for field in ('name', 'version', 'release', 'summary', 'description'):\n if field not in formula_conf:\n raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))\n\n out_path = '{0}/{1}-{2}-{3}.spm'.format(\n self.opts['spm_build_dir'],\n formula_conf['name'],\n formula_conf['version'],\n formula_conf['release'],\n )\n\n if not os.path.exists(self.opts['spm_build_dir']):\n os.mkdir(self.opts['spm_build_dir'])\n\n self.formula_conf = formula_conf\n\n formula_tar = tarfile.open(out_path, 'w:bz2')\n\n if 'files' in formula_conf:\n # This allows files to be added to the SPM file in a specific order.\n # It also allows for files to be tagged as a certain type, as with\n # RPM files. 
This tag is ignored here, but is used when installing\n # the SPM file.\n if isinstance(formula_conf['files'], list):\n formula_dir = tarfile.TarInfo(formula_conf['name'])\n formula_dir.type = tarfile.DIRTYPE\n formula_tar.addfile(formula_dir)\n for file_ in formula_conf['files']:\n for ftype in FILE_TYPES:\n if file_.startswith('{0}|'.format(ftype)):\n file_ = file_.lstrip('{0}|'.format(ftype))\n formula_tar.add(\n os.path.join(os.getcwd(), file_),\n os.path.join(formula_conf['name'], file_),\n )\n else:\n # If no files are specified, then the whole directory will be added.\n try:\n formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)\n formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)\n except TypeError:\n formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)\n formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)\n formula_tar.close()\n\n self.ui.status('Built package {0}'.format(out_path))\n"
] |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None): # pylint: disable=W0231
self.ui = ui
if not opts:
opts = salt.config.spm_config(
os.path.join(syspaths.CONFIG_DIR, 'spm')
)
self.opts = opts
self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
self.files_prov = self.opts.get('spm_files_provider', 'local')
self._prep_pkgdb()
self._prep_pkgfiles()
self.db_conn = None
self.files_conn = None
self._init()
    def _prep_pkgdb(self):
        '''
        Load the package-database (pkgdb) provider modules via the salt loader.
        '''
        self.pkgdb = salt.loader.pkgdb(self.opts)
    def _prep_pkgfiles(self):
        '''
        Load the package-files (pkgfiles) provider modules via the salt loader.
        '''
        self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
if not self.db_conn:
self.db_conn = self._pkgdb_fun('init')
if not self.files_conn:
self.files_conn = self._pkgfiles_fun('init')
def _close(self):
if self.db_conn:
self.db_conn.close()
def _pkgdb_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args)
elif command == 'files':
self._list_files(args)
elif command == 'repos':
self._repo_list(args)
else:
raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'install':
self._local_install(args)
elif command == 'files':
self._local_list_files(args)
elif command == 'info':
self._local_info(args)
else:
raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
def _repo(self, args):
'''
Process repo commands
'''
args.pop(0)
command = args[0]
if command == 'list':
self._repo_list(args)
elif command == 'packages':
self._repo_packages(args)
elif command == 'search':
self._repo_packages(args, search=True)
elif command == 'update':
self._download_repo_metadata(args)
elif command == 'create':
self._create_repo(args)
else:
raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages
def _repo_list(self, args):
'''
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
'''
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
self.ui.status(repo)
    def _install(self, args):
        '''
        Install a package from a repo.

        ``args`` is the CLI argument list: args[0] is the command name and
        args[1:] are package names and/or paths to local ``.spm`` files.

        Raises:
            SPMInvocationError: if no package was given, or a named .spm
                file does not exist
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package must be specified')
        # Set up a local caller/client so pre/post state scripts can run
        # during _install_indv_pkg
        caller_opts = self.opts.copy()
        caller_opts['file_client'] = 'local'
        self.caller = salt.client.Caller(mopts=caller_opts)
        self.client = salt.client.get_local_client(self.opts['conf_file'])
        # NOTE(review): 'cache' appears unused in this method -- confirm
        # before removing
        cache = salt.cache.Cache(self.opts)
        packages = args[1:]
        file_map = {}       # pkg_name -> local .spm path, for local installs
        optional = []
        recommended = []
        to_install = []
        # First pass: resolve dependencies for every requested package,
        # reading FORMULA data straight from local .spm files when given
        for pkg in packages:
            if pkg.endswith('.spm'):
                if self._pkgfiles_fun('path_exists', pkg):
                    # Derive the package name by stripping the trailing
                    # '<version>-<release>' components from the filename
                    comps = pkg.split('-')
                    comps = os.path.split('-'.join(comps[:-2]))
                    pkg_name = comps[-1]
                    formula_tar = tarfile.open(pkg, 'r:bz2')
                    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                    formula_def = salt.utils.yaml.safe_load(formula_ref)
                    file_map[pkg_name] = pkg
                    to_, op_, re_ = self._check_all_deps(
                        pkg_name=pkg_name,
                        pkg_file=pkg,
                        formula_def=formula_def
                    )
                    to_install.extend(to_)
                    optional.extend(op_)
                    recommended.extend(re_)
                    formula_tar.close()
                else:
                    raise SPMInvocationError('Package file {0} not found'.format(pkg))
            else:
                to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
        # filter(len, ...) drops the empty strings produced by dependency
        # parsing before reporting
        optional = set(filter(len, optional))
        if optional:
            self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
                '\n\t'.join(optional)
            ))
        recommended = set(filter(len, recommended))
        if recommended:
            self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
                '\n\t'.join(recommended)
            ))
        to_install = set(filter(len, to_install))
        msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
        if not self.opts['assume_yes']:
            self.ui.confirm(msg)
        repo_metadata = self._get_repo_metadata()
        # Second pass: for repo packages, pick the best (newest
        # version/release) candidate available across all repos
        dl_list = {}
        for package in to_install:
            if package in file_map:
                # Local file: install immediately, no download needed
                self._install_indv_pkg(package, file_map[package])
            else:
                for repo in repo_metadata:
                    repo_info = repo_metadata[repo]
                    if package in repo_info['packages']:
                        dl_package = False
                        repo_ver = repo_info['packages'][package]['info']['version']
                        repo_rel = repo_info['packages'][package]['info']['release']
                        repo_url = repo_info['info']['url']
                        if package in dl_list:
                            # Check package version, replace if newer version
                            if repo_ver == dl_list[package]['version']:
                                # Version is the same, check release
                                if repo_rel > dl_list[package]['release']:
                                    dl_package = True
                                elif repo_rel == dl_list[package]['release']:
                                    # Version and release are the same, give
                                    # preference to local (file://) repos
                                    if dl_list[package]['source'].startswith('file://'):
                                        if not repo_url.startswith('file://'):
                                            dl_package = True
                            elif repo_ver > dl_list[package]['version']:
                                dl_package = True
                        else:
                            dl_package = True
                        if dl_package is True:
                            # Put together download directory
                            cache_path = os.path.join(
                                self.opts['spm_cache_dir'],
                                repo
                            )
                            # Put together download paths
                            dl_url = '{0}/{1}'.format(
                                repo_info['info']['url'],
                                repo_info['packages'][package]['filename']
                            )
                            out_file = os.path.join(
                                cache_path,
                                repo_info['packages'][package]['filename']
                            )
                            dl_list[package] = {
                                'version': repo_ver,
                                'release': repo_rel,
                                'source': dl_url,
                                'dest_dir': cache_path,
                                'dest_file': out_file,
                            }
        # Download every selected package into the SPM cache
        for package in dl_list:
            dl_url = dl_list[package]['source']
            cache_path = dl_list[package]['dest_dir']
            out_file = dl_list[package]['dest_file']
            # Make sure download directory exists
            if not os.path.exists(cache_path):
                os.makedirs(cache_path)
            # Download the package
            if dl_url.startswith('file://'):
                dl_url = dl_url.replace('file://', '')
                shutil.copyfile(dl_url, out_file)
            else:
                # NOTE(review): 'repo_info' here is whatever the selection
                # loop left behind, not necessarily this package's repo --
                # verify. Also, text-mode 'w' for a bzip2 .spm payload looks
                # suspect on Python 3 -- confirm _query_http returns str.
                with salt.utils.files.fopen(out_file, 'w') as outf:
                    outf.write(self._query_http(dl_url, repo_info['info']))
        # First we download everything, then we install
        for package in dl_list:
            out_file = dl_list[package]['dest_file']
            # Kick off the install
            self._install_indv_pkg(package, out_file)
        return
def _local_install(self, args, pkg_name=None):
'''
Install a package from a file
'''
if len(args) < 2:
raise SPMInvocationError('A package file must be specified')
self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
'''
Starting with one package, check all packages for dependencies
'''
if pkg_file and not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
self.repo_metadata = self._get_repo_metadata()
if not formula_def:
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
if pkg_name in self.repo_metadata[repo]['packages']:
formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
if not formula_def:
raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
# Check to see if the package is already installed
pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
pkgs_to_install = []
if pkg_info is None or self.opts['force']:
pkgs_to_install.append(pkg_name)
elif pkg_info is not None and not self.opts['force']:
raise SPMPackageError(
'Package {0} already installed, not installing again'.format(formula_def['name'])
)
optional_install = []
recommended_install = []
if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
self.avail_pkgs = {}
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
for pkg in self.repo_metadata[repo]['packages']:
self.avail_pkgs[pkg] = repo
needs, unavail, optional, recommended = self._resolve_deps(formula_def)
if unavail:
raise SPMPackageError(
'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
formula_def['name'], '\n'.join(unavail))
)
if optional:
optional_install.extend(optional)
for dep_pkg in optional:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
optional_install.append(msg)
if recommended:
recommended_install.extend(recommended)
for dep_pkg in recommended:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
recommended_install.append(msg)
if needs:
pkgs_to_install.extend(needs)
for dep_pkg in needs:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
'''
Install one individual package
'''
self.ui.status('... installing {0}'.format(pkg_name))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
for field in ('version', 'release', 'summary', 'description'):
if field not in formula_def:
raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
pkg_files = formula_tar.getmembers()
# First pass: check for files that already exist
existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
if existing_files and not self.opts['force']:
raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
pkg_name, '\n'.join(existing_files))
)
# We've decided to install
self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
# Run the pre_local_state script, if present
if 'pre_local_state' in formula_def:
high_data = self._render(formula_def['pre_local_state'], formula_def)
ret = self.caller.cmd('state.high', data=high_data)
if 'pre_tgt_state' in formula_def:
log.debug('Executing pre_tgt_state script')
high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
tgt = formula_def['pre_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['pre_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
# No defaults for this in config.py; default to the current running
# user and group
if salt.utils.platform.is_windows():
uname = gname = salt.utils.win_functions.get_current_user()
uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
uid = self.opts.get('spm_uid', uname_sid)
gid = self.opts.get('spm_gid', uname_sid)
else:
uid = self.opts.get('spm_uid', os.getuid())
gid = self.opts.get('spm_gid', os.getgid())
uname = pwd.getpwuid(uid)[0]
gname = grp.getgrgid(gid)[0]
# Second pass: install the files
for member in pkg_files:
member.uid = uid
member.gid = gid
member.uname = uname
member.gname = gname
out_path = self._pkgfiles_fun('install_file',
pkg_name,
formula_tar,
member,
formula_def,
self.files_conn)
if out_path is not False:
if member.isdir():
digest = ''
else:
self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
file_hash = hashlib.sha1()
digest = self._pkgfiles_fun('hash_file',
os.path.join(out_path, member.name),
file_hash,
self.files_conn)
self._pkgdb_fun('register_file',
pkg_name,
member,
out_path,
digest,
self.db_conn)
# Run the post_local_state script, if present
if 'post_local_state' in formula_def:
log.debug('Executing post_local_state script')
high_data = self._render(formula_def['post_local_state'], formula_def)
self.caller.cmd('state.high', data=high_data)
if 'post_tgt_state' in formula_def:
log.debug('Executing post_tgt_state script')
high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
tgt = formula_def['post_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['post_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
formula_tar.close()
def _resolve_deps(self, formula_def):
'''
Return a list of packages which need to be installed, to resolve all
dependencies
'''
pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
if not isinstance(pkg_info, dict):
pkg_info = {}
can_has = {}
cant_has = []
if 'dependencies' in formula_def and formula_def['dependencies'] is None:
formula_def['dependencies'] = ''
for dep in formula_def.get('dependencies', '').split(','):
dep = dep.strip()
if not dep:
continue
if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
continue
if dep in self.avail_pkgs:
can_has[dep] = self.avail_pkgs[dep]
else:
cant_has.append(dep)
optional = formula_def.get('optional', '').split(',')
recommended = formula_def.get('recommended', '').split(',')
inspected = []
to_inspect = can_has.copy()
while to_inspect:
dep = next(six.iterkeys(to_inspect))
del to_inspect[dep]
# Don't try to resolve the same package more than once
if dep in inspected:
continue
inspected.append(dep)
repo_contents = self.repo_metadata.get(can_has[dep], {})
repo_packages = repo_contents.get('packages', {})
dep_formula = repo_packages.get(dep, {}).get('info', {})
also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
can_has.update(also_can)
cant_has = sorted(set(cant_has + also_cant))
optional = sorted(set(optional + opt_dep))
recommended = sorted(set(recommended + rec_dep))
return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
'''
Traverse through all repo files and apply the functionality provided in
the callback to them
'''
repo_files = []
if os.path.exists(self.opts['spm_repos_config']):
repo_files.append(self.opts['spm_repos_config'])
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
for repo_file in filenames:
if not repo_file.endswith('.repo'):
continue
repo_files.append(repo_file)
for repo_file in repo_files:
repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
with salt.utils.files.fopen(repo_path) as rph:
repo_data = salt.utils.yaml.safe_load(rph)
for repo in repo_data:
if repo_data[repo].get('enabled', True) is False:
continue
if repo_name is not None and repo != repo_name:
continue
callback(repo, repo_data[repo])
    def _query_http(self, dl_path, repo_info):
        '''
        Download files via http.

        Returns the response body as text, or parsed YAML when ``dl_path``
        points at an SPM-METADATA file. Returns ``None`` when the query
        failed -- errors are reported through the UI rather than raised.
        '''
        query = None
        response = None
        try:
            if 'username' in repo_info:
                try:
                    if 'password' in repo_info:
                        query = http.query(
                            dl_path, text=True,
                            username=repo_info['username'],
                            password=repo_info['password']
                        )
                    else:
                        # A username without a password is a config error;
                        # report it and fall through with query = None
                        raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                           .format(repo_info['username']))
                except SPMException as exc:
                    self.ui.error(six.text_type(exc))
            else:
                query = http.query(dl_path, text=True)
        except SPMException as exc:
            self.ui.error(six.text_type(exc))
        try:
            if query:
                if 'SPM-METADATA' in dl_path:
                    # Repo metadata is YAML; everything else is returned raw
                    response = salt.utils.yaml.safe_load(query.get('text', '{}'))
                else:
                    response = query.get('text')
            else:
                raise SPMException('Response is empty, please check for Errors above.')
        except SPMException as exc:
            self.ui.error(six.text_type(exc))
        return response
def _download_repo_metadata(self, args):
'''
Connect to all repos and download metadata
'''
cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
def _update_metadata(repo, repo_info):
dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
if dl_path.startswith('file://'):
dl_path = dl_path.replace('file://', '')
with salt.utils.files.fopen(dl_path, 'r') as rpm:
metadata = salt.utils.yaml.safe_load(rpm)
else:
metadata = self._query_http(dl_path, repo_info)
cache.store('.', repo, metadata)
repo_name = args[1] if len(args) > 1 else None
self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
'''
Return cached repo metadata
'''
cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
metadata = {}
def _read_metadata(repo, repo_info):
if cache.updated('.', repo) is None:
log.warning('Updating repo metadata')
self._download_repo_metadata({})
metadata[repo] = {
'info': repo_info,
'packages': cache.fetch('.', repo),
}
self._traverse_repos(_read_metadata)
return metadata
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    args
        CLI argument list; ``args[1]`` is the directory to scan, with
        ``.`` meaning the current working directory.

    Duplicate packages are resolved in favor of the newest version and
    release; what happens to the older files is controlled by the
    ``spm_repo_dups`` option (``ignore``/``archive``/``delete``).
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')

    if args[1] == '.':
        # os.getcwdu() exists only on Python 2 and raised AttributeError
        # on Python 3; os.getcwd() works on both
        repo_path = os.getcwd()
    else:
        repo_path = args[1]

    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            # NOTE(review): files found in nested directories are still
            # joined against repo_path -- confirm repos are expected flat
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # Package name is the filename minus "-<version>-<release>.spm"
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
            # Close the tarball once the FORMULA is read (was leaked before)
            spm_fh.close()

            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False
                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)
            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file

    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )

    log.debug('Wrote %s', metadata_filename)

    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove one or more installed packages.

    args
        CLI argument list; everything after ``args[0]`` is a package name.

    Only files whose on-disk hash still matches the hash recorded at
    install time are deleted; modified files are left in place but are
    unregistered from the database either way.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))

    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    for package in packages:
        self.ui.status('... removing {0}'.format(package))

        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))

        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))

        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                # Directories are removed after all files, deepest first
                dirs.append(filerow[0])
                continue
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                # Unmodified since install; safe to delete
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                # Locally modified; keep the file on disk
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)

        # Clean up directories
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)

        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
    '''
    Log ``msg`` through ``level`` and, when the ``verbose`` option is
    enabled, echo it to the UI as well.
    '''
    verbose_enabled = self.opts.get('verbose', False)
    if verbose_enabled is True:
        self.ui.status(msg)
    level(msg)
def _local_info(self, args):
    '''
    Display the FORMULA metadata contained in a local package file.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')

    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    # Derive the package name: strip "-<version>-<release>" and any
    # leading directory components from the filename
    trimmed = '-'.join(pkg_file.split('-')[:-2])
    name = trimmed.split('/')[-1]

    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)

    self.ui.status(self._get_info(formula_def))
    formula_tar.close()
def _info(self, args):
    '''
    Display metadata for an installed package.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    pkg_name = args[1]
    installed_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    if installed_info is None:
        raise SPMPackageError('package {0} not installed'.format(pkg_name))
    self.ui.status(self._get_info(installed_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
    '''
    Print every installed package, one per line; in verbose mode print
    the full database row as a comma-separated list.
    '''
    verbose = self.opts['verbose']
    for pkg_row in self._pkgdb_fun('list_packages', self.db_conn):
        line = ','.join(pkg_row) if verbose else pkg_row[0]
        self.ui.status(line)
def _list_files(self, args):
    '''
    Print the files belonging to an installed package; in verbose mode
    print the full database row as a comma-separated list.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package name must be specified')

    package = args[-1]
    files = self._pkgdb_fun('list_files', package, self.db_conn)
    if files is None:
        raise SPMPackageError('package {0} not installed'.format(package))

    verbose = self.opts['verbose']
    for file_row in files:
        self.ui.status(','.join(file_row) if verbose else file_row[0])
def _build(self, args):
    '''
    Build an SPM package from a formula directory.

    args
        CLI argument list; ``args[1]`` is the path to the formula
        directory, which must contain a FORMULA file defining at least
        name, version, release, summary and description.

    The resulting ``<name>-<version>-<release>.spm`` tarball is written
    to ``spm_build_dir``.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')

    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]

    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)

    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))

    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )

    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])

    self.formula_conf = formula_conf

    formula_tar = tarfile.open(out_path, 'w:bz2')

    if 'files' in formula_conf:
        # This allows files to be added to the SPM file in a specific order.
        # It also allows for files to be tagged as a certain type, as with
        # RPM files. This tag is ignored here, but is used when installing
        # the SPM file.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    tag = '{0}|'.format(ftype)
                    if file_.startswith(tag):
                        # str.lstrip() was wrong here: it strips a character
                        # *set*, not a prefix, and could also eat leading
                        # characters of the filename itself. Slice off the
                        # exact "<type>|" prefix instead.
                        file_ = file_[len(tag):]
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # If no files are specified, then the whole directory will be added.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            # Very old tarfile versions only support the exclude= callback
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()

    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
    '''
    Tar filter callback: return ``None`` to drop any member whose name
    matches a configured ``spm_build_exclude`` pattern, otherwise return
    the member unchanged.
    '''
    if isinstance(member, string_types):
        return None

    for pattern in self.opts['spm_build_exclude']:
        excluded = (
            member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], pattern)) or
            member.name.startswith('{0}/{1}'.format(self.abspath, pattern))
        )
        if excluded:
            return None
    return member
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script

    data
        Raw template text to render.
    formula_def
        Formula definition; provides the optional ``renderer`` setting
        and is also exposed (along with ``opts``) as template variables.
    '''
    # FORMULA can contain a renderer option
    renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
    rend = salt.loader.render(self.opts, {})
    blacklist = self.opts.get('renderer_blacklist')
    whitelist = self.opts.get('renderer_whitelist')

    # Expose the formula fields (plus a copy of the SPM opts) to the template
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()

    return compile_template(
        ':string:',
        rend,
        renderer,
        blacklist,
        whitelist,
        input_data=data,
        **template_vars
    )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._list
|
python
|
def _list(self, args):
    '''
    Dispatch ``spm list`` subcommands to their handlers.
    '''
    args.pop(0)
    command = args[0]
    handlers = {
        'packages': self._list_packages,
        'files': self._list_files,
        'repos': self._repo_list,
    }
    if command not in handlers:
        raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
    handlers[command](args)
|
Process local commands
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L163-L176
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None):  # pylint: disable=W0231
    '''
    ui
        UI object used for status/error/confirm output.
    opts
        Optional pre-built SPM configuration; loaded from the default
        config path when not supplied.
    '''
    self.ui = ui
    if not opts:
        opts = salt.config.spm_config(
            os.path.join(syspaths.CONFIG_DIR, 'spm')
        )
    self.opts = opts
    # Pluggable backends for the package database and the files store
    self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
    self.files_prov = self.opts.get('spm_files_provider', 'local')
    self._prep_pkgdb()
    self._prep_pkgfiles()
    # Connections are opened lazily by _init()
    self.db_conn = None
    self.files_conn = None
    self._init()
def _prep_pkgdb(self):
    # Load the package-database provider modules via the salt loader
    self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
    # Load the package-files provider modules via the salt loader
    self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
    # Lazily open the backend connections used by the client
    if not self.db_conn:
        # package metadata database connection
        self.db_conn = self._pkgdb_fun('init')
    if not self.files_conn:
        # installed-files store connection
        self.files_conn = self._pkgfiles_fun('init')
def _close(self):
    '''
    Close the package database connection, if one is open.
    '''
    conn = self.db_conn
    if conn:
        conn.close()
def run(self, args):
    '''
    Execute a single SPM command line; SPM errors are reported through
    the UI instead of being raised to the caller.
    '''
    command = args[0]
    dispatch = {
        'install': self._install,
        'local': self._local,
        'repo': self._repo,
        'remove': self._remove,
        'build': self._build,
        'update_repo': self._download_repo_metadata,
        'create_repo': self._create_repo,
        'files': self._list_files,
        'info': self._info,
        'list': self._list,
    }
    try:
        if command in dispatch:
            dispatch[command](args)
        elif command == 'close':
            # _close takes no arguments
            self._close()
        else:
            raise SPMInvocationError('Invalid command \'{0}\''.format(command))
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
    '''
    Call ``func`` on the configured package-database provider, whether
    it is exposed as a module attribute or only through the loader's
    ``'<provider>.<func>'`` dict interface.
    '''
    provider = self.db_prov
    try:
        provider_mod = getattr(self.pkgdb, provider)
        return getattr(provider_mod, func)(*args, **kwargs)
    except AttributeError:
        return self.pkgdb['{0}.{1}'.format(provider, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
    '''
    Call ``func`` on the configured package-files provider, whether it
    is exposed as a module attribute or only through the loader's
    ``'<provider>.<func>'`` dict interface.
    '''
    provider = self.files_prov
    try:
        provider_mod = getattr(self.pkgfiles, provider)
        return getattr(provider_mod, func)(*args, **kwargs)
    except AttributeError:
        return self.pkgfiles['{0}.{1}'.format(provider, func)](*args, **kwargs)
def _local(self, args):
    '''
    Dispatch ``spm local`` subcommands to their handlers.
    '''
    args.pop(0)
    command = args[0]
    handlers = {
        'install': self._local_install,
        'files': self._local_list_files,
        'info': self._local_info,
    }
    if command not in handlers:
        raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
    handlers[command](args)
def _repo(self, args):
    '''
    Dispatch ``spm repo`` subcommands to their handlers.
    '''
    args.pop(0)
    command = args[0]
    handlers = {
        'list': self._repo_list,
        'packages': self._repo_packages,
        'update': self._download_repo_metadata,
        'create': self._create_repo,
    }
    if command == 'search':
        # search is packages with substring matching enabled
        self._repo_packages(args, search=True)
    elif command in handlers:
        handlers[command](args)
    else:
        raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
    '''
    Print (and return) every package from the configured repos whose
    name contains the term in ``args[1]``.
    '''
    term = args[1]
    matches = []
    repo_metadata = self._get_repo_metadata()
    for repo, repo_data in repo_metadata.items():
        for pkg, pkg_data in repo_data['packages'].items():
            if term in pkg:
                pkg_info = pkg_data['info']
                matches.append((pkg, pkg_info['version'], pkg_info['release'], repo))
    for name, version, release, repo in sorted(matches):
        self.ui.status(
            '{0}\t{1}-{2}\t{3}'.format(name, version, release, repo)
        )
    return matches
def _repo_list(self, args):
    '''
    Print the name of every configured repo.

    This can be called either as a ``repo`` command or a ``list`` command.
    '''
    for repo_name in self._get_repo_metadata():
        self.ui.status(repo_name)
def _install(self, args):
    '''
    Install one or more packages, either from local ``.spm`` files or
    from configured repos.

    args
        CLI argument list; everything after ``args[0]`` is either a path
        ending in ``.spm`` or a package name to resolve from the repos.

    Dependencies are resolved first, the user is asked to confirm
    (unless ``assume_yes`` is set), then all repo packages are
    downloaded before any installation starts.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    # Local caller/client used later by _install_indv_pkg for the
    # pre/post state scripts
    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    cache = salt.cache.Cache(self.opts)

    packages = args[1:]
    file_map = {}       # package name -> local .spm path
    optional = []
    recommended = []
    to_install = []
    for pkg in packages:
        if pkg.endswith('.spm'):
            if self._pkgfiles_fun('path_exists', pkg):
                # Package name is the basename minus "-<version>-<release>"
                comps = pkg.split('-')
                comps = os.path.split('-'.join(comps[:-2]))
                pkg_name = comps[-1]

                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = salt.utils.yaml.safe_load(formula_ref)

                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(
                    pkg_name=pkg_name,
                    pkg_file=pkg,
                    formula_def=formula_def
                )
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
                formula_tar.close()
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)

    # De-duplicate and drop empty entries before reporting
    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))

    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    repo_metadata = self._get_repo_metadata()

    dl_list = {}
    for package in to_install:
        if package in file_map:
            # Local .spm file; install immediately, no download needed
            self._install_indv_pkg(package, file_map[package])
        else:
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        # Check package version, replace if newer version
                        if repo_ver == dl_list[package]['version']:
                            # Version is the same, check release
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                # Version and release are the same, give
                                # preference to local (file://) repos
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        dl_package = True

                    if dl_package is True:
                        # Put together download directory
                        cache_path = os.path.join(
                            self.opts['spm_cache_dir'],
                            repo
                        )

                        # Put together download paths
                        dl_url = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = os.path.join(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )

                        dl_list[package] = {
                            'version': repo_ver,
                            'release': repo_rel,
                            'source': dl_url,
                            'dest_dir': cache_path,
                            'dest_file': out_file,
                        }

    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']

        # Make sure download directory exists
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)

        # Download the package
        if dl_url.startswith('file://'):
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            # NOTE(review): repo_info here is whatever the previous loop
            # left behind, not necessarily the repo this package came
            # from -- verify auth credentials are picked from the right repo
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, repo_info['info']))

    # First we download everything, then we install
    for package in dl_list:
        out_file = dl_list[package]['dest_file']

        # Kick off the install
        self._install_indv_pkg(package, out_file)
    return
def _local_install(self, args, pkg_name=None):
    '''
    Install a package from a file

    args
        CLI argument list; ``args[1]`` is the path to a ``.spm`` file.
    pkg_name
        Unused; kept for interface compatibility with callers.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package file must be specified')

    # Delegate to the common install path, which handles .spm arguments
    self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
    '''
    Starting with one package, check all packages for dependencies

    pkg_name
        Name of the package being installed.
    pkg_file
        Optional path to a local ``.spm`` file; existence is validated.
    formula_def
        Optional pre-loaded formula definition; looked up in the repo
        metadata when not supplied.

    Returns a (to_install, optional, recommended) tuple of package-name
    lists. Raises when the package is already installed (and ``force``
    is not set) or when hard dependencies cannot be satisfied.
    '''
    if pkg_file and not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    self.repo_metadata = self._get_repo_metadata()
    if not formula_def:
        # Find the formula in the first repo that carries this package
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            if pkg_name in self.repo_metadata[repo]['packages']:
                formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']

    if not formula_def:
        raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))

    # Check to see if the package is already installed
    pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    pkgs_to_install = []
    if pkg_info is None or self.opts['force']:
        pkgs_to_install.append(pkg_name)
    elif pkg_info is not None and not self.opts['force']:
        raise SPMPackageError(
            'Package {0} already installed, not installing again'.format(formula_def['name'])
        )

    optional_install = []
    recommended_install = []
    if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
        # Index of every available package -> the repo that provides it
        self.avail_pkgs = {}
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            for pkg in self.repo_metadata[repo]['packages']:
                self.avail_pkgs[pkg] = repo

        needs, unavail, optional, recommended = self._resolve_deps(formula_def)

        if unavail:
            raise SPMPackageError(
                'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                    formula_def['name'], '\n'.join(unavail))
            )

        if optional:
            # NOTE(review): each dep is appended twice (raw name plus the
            # possibly-annotated msg), and the installed check queries
            # formula_def['name'] rather than dep_pkg -- confirm intent
            optional_install.extend(optional)
            for dep_pkg in optional:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                optional_install.append(msg)

        if recommended:
            # NOTE(review): same double-append / wrong-lookup pattern as
            # the optional branch above
            recommended_install.extend(recommended)
            for dep_pkg in recommended:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                recommended_install.append(msg)

        if needs:
            pkgs_to_install.extend(needs)
            # NOTE(review): this loop computes msg but never uses it --
            # dead code, presumably a leftover
            for dep_pkg in needs:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)

    return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package from a local ``.spm`` file.

    pkg_name
        Name of the package (matches the top-level directory inside the
        tarball).
    pkg_file
        Path to the ``.spm`` tarball.

    Registers the package and each installed file in the package
    database, and runs any pre/post local and targeted state scripts
    declared in the FORMULA.
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)

    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))

    pkg_files = formula_tar.getmembers()

    # First pass: check for files that already exist
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)

    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )

    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)

    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        # log line added for consistency with the pre_tgt/post branches
        log.debug('Executing pre_local_state script')
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        # BUG FIX: the keyword was misspelled "timout", so the configured
        # timeout was silently dropped into kwargs and never applied
        self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            timeout=self.opts['timeout'],
            data=high_data,
        )

    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]

    # Second pass: install the files
    for member in pkg_files:
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname

        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                # Directories carry no content hash
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)

    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        # BUG FIX: same "timout" misspelling as the pre_tgt_state branch
        self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            timeout=self.opts['timeout'],
            data=high_data,
        )

    formula_tar.close()
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies

    formula_def
        Formula definition whose ``dependencies``, ``optional`` and
        ``recommended`` fields (comma-separated strings) are resolved
        recursively against the repo metadata.

    Returns (can_has, cant_has, optional, recommended) where can_has
    maps resolvable dependency names to the repo providing them and
    cant_has lists unresolvable hard dependencies.
    '''
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}

    can_has = {}
    cant_has = []
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        # Normalize an explicit null to an empty string for splitting
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Already-installed dependencies need no action
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue

        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)

    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')

    # Recursively resolve the dependencies of each resolvable dependency
    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]

        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)

        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})

        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))

    return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
    '''
    Call ``callback(repo, repo_config)`` for every enabled repo defined
    in the main ``spm_repos_config`` file or in any ``*.repo`` file
    under the corresponding ``.d`` directory.

    repo_name
        When given, only the repo with this name is passed to the
        callback.
    '''
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])

    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            # BUG FIX: store the full path. The old code stored the bare
            # filename and later re-joined every entry (including the main
            # config's absolute path) under the .d directory, producing
            # broken paths
            repo_files.append(os.path.join(dirpath, repo_file))

    for repo_path in repo_files:
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
            for repo in repo_data:
                if repo_data[repo].get('enabled', True) is False:
                    continue
                if repo_name is not None and repo != repo_name:
                    continue
                callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
'''
Download files via http
'''
query = None
response = None
try:
if 'username' in repo_info:
try:
if 'password' in repo_info:
query = http.query(
dl_path, text=True,
username=repo_info['username'],
password=repo_info['password']
)
else:
raise SPMException('Auth defined, but password is not set for username: \'{0}\''
.format(repo_info['username']))
except SPMException as exc:
self.ui.error(six.text_type(exc))
else:
query = http.query(dl_path, text=True)
except SPMException as exc:
self.ui.error(six.text_type(exc))
try:
if query:
if 'SPM-METADATA' in dl_path:
response = salt.utils.yaml.safe_load(query.get('text', '{}'))
else:
response = query.get('text')
else:
raise SPMException('Response is empty, please check for Errors above.')
except SPMException as exc:
self.ui.error(six.text_type(exc))
return response
def _download_repo_metadata(self, args):
'''
Connect to all repos and download metadata
'''
cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
def _update_metadata(repo, repo_info):
dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
if dl_path.startswith('file://'):
dl_path = dl_path.replace('file://', '')
with salt.utils.files.fopen(dl_path, 'r') as rpm:
metadata = salt.utils.yaml.safe_load(rpm)
else:
metadata = self._query_http(dl_path, repo_info)
cache.store('.', repo, metadata)
repo_name = args[1] if len(args) > 1 else None
self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
'''
Return cached repo metadata
'''
cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
metadata = {}
def _read_metadata(repo, repo_info):
if cache.updated('.', repo) is None:
log.warning('Updating repo metadata')
self._download_repo_metadata({})
metadata[repo] = {
'info': repo_info,
'packages': cache.fetch('.', repo),
}
self._traverse_repos(_read_metadata)
return metadata
def _create_repo(self, args):
'''
Scan a directory and create an SPM-METADATA file which describes
all of the SPM files in that directory.
'''
if len(args) < 2:
raise SPMInvocationError('A path to a directory must be specified')
if args[1] == '.':
repo_path = os.getcwdu()
else:
repo_path = args[1]
old_files = []
repo_metadata = {}
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
for spm_file in filenames:
if not spm_file.endswith('.spm'):
continue
spm_path = '{0}/{1}'.format(repo_path, spm_file)
if not tarfile.is_tarfile(spm_path):
continue
comps = spm_file.split('-')
spm_name = '-'.join(comps[:-2])
spm_fh = tarfile.open(spm_path, 'r:bz2')
formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
use_formula = True
if spm_name in repo_metadata:
# This package is already in the repo; use the latest
cur_info = repo_metadata[spm_name]['info']
new_info = formula_conf
if int(new_info['version']) == int(cur_info['version']):
# Version is the same, check release
if int(new_info['release']) < int(cur_info['release']):
# This is an old release; don't use it
use_formula = False
elif int(new_info['version']) < int(cur_info['version']):
# This is an old version; don't use it
use_formula = False
if use_formula is True:
# Ignore/archive/delete the old version
log.debug(
'%s %s-%s had been added, but %s-%s will replace it',
spm_name, cur_info['version'], cur_info['release'],
new_info['version'], new_info['release']
)
old_files.append(repo_metadata[spm_name]['filename'])
else:
# Ignore/archive/delete the new version
log.debug(
'%s %s-%s has been found, but is older than %s-%s',
spm_name, new_info['version'], new_info['release'],
cur_info['version'], cur_info['release']
)
old_files.append(spm_file)
if use_formula is True:
log.debug(
'adding %s-%s-%s to the repo',
formula_conf['name'], formula_conf['version'],
formula_conf['release']
)
repo_metadata[spm_name] = {
'info': formula_conf.copy(),
}
repo_metadata[spm_name]['filename'] = spm_file
metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
salt.utils.yaml.safe_dump(
repo_metadata,
mfh,
indent=4,
canonical=False,
default_flow_style=False,
)
log.debug('Wrote %s', metadata_filename)
for file_ in old_files:
if self.opts['spm_repo_dups'] == 'ignore':
# ignore old packages, but still only add the latest
log.debug('%s will be left in the directory', file_)
elif self.opts['spm_repo_dups'] == 'archive':
# spm_repo_archive_path is where old packages are moved
if not os.path.exists('./archive'):
try:
os.makedirs('./archive')
log.debug('%s has been archived', file_)
except IOError:
log.error('Unable to create archive directory')
try:
shutil.move(file_, './archive')
except (IOError, OSError):
log.error('Unable to archive %s', file_)
elif self.opts['spm_repo_dups'] == 'delete':
# delete old packages from the repo
try:
os.remove(file_)
log.debug('%s has been deleted', file_)
except IOError:
log.error('Unable to delete %s', file_)
except OSError:
# The file has already been deleted
pass
def _remove(self, args):
    '''
    Remove one or more installed packages.

    ``args[0]`` is the ``remove`` command itself; ``args[1:]`` are package
    names.  Files whose on-disk hash still matches the hash recorded in the
    package database at install time are deleted; modified files are left
    on disk but unregistered.  Directories are removed deepest-first and
    only when empty.

    Raises SPMInvocationError when no package is given or a package is not
    installed, and SPMDatabaseError when the package DB does not exist.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))

    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    for package in packages:
        self.ui.status('... removing {0}'.format(package))

        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))

        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))

        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            # filerow is a DB row: filerow[0] = path, filerow[1] = stored hash
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                dirs.append(filerow[0])
                continue
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                # Unmodified since install; safe to delete
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                # Changed after install; keep the file, forget the record
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)

        # Clean up directories
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)

        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
    '''
    Emit *msg* on the UI when verbose mode is enabled; always pass it to
    the given logging callable (``log.debug`` by default).
    '''
    verbose = self.opts.get('verbose', False)
    if verbose is True:
        self.ui.status(msg)
    level(msg)
def _local_info(self, args):
    '''
    Display metadata for a local package file (``spm local info <file>``).

    The package name is derived from the filename by stripping the trailing
    ``-<version>-<release>`` components; the FORMULA file is then read out
    of the tarball and rendered via ``_get_info``.

    Raises SPMInvocationError when no filename is given or the file does
    not exist.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')

    pkg_file = args[1]

    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    comps = pkg_file.split('-')
    comps = '-'.join(comps[:-2]).split('/')
    name = comps[-1]

    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    try:
        # BUGFIX: close the tarball even when the FORMULA member is
        # missing or fails to parse; the original leaked the handle here.
        formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
        formula_def = salt.utils.yaml.safe_load(formula_ref)
    finally:
        formula_tar.close()

    self.ui.status(self._get_info(formula_def))
def _info(self, args):
    '''
    Print metadata for an installed package (``spm info <package>``).
    '''
    try:
        package = args[1]
    except IndexError:
        raise SPMInvocationError('A package must be specified')

    pkg_info = self._pkgdb_fun('info', package, self.db_conn)
    if pkg_info is None:
        raise SPMPackageError('package {0} not installed'.format(package))
    self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
    '''
    List the member names contained in a local package file
    (``spm local files <file>``).

    Raises SPMInvocationError when no filename is given and
    SPMPackageError when the file does not exist.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')

    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMPackageError('Package file {0} not found'.format(pkg_file))

    # BUGFIX: the original never closed the tarball; ensure the file
    # handle is always released.
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    try:
        for member in formula_tar.getmembers():
            self.ui.status(member.name)
    finally:
        formula_tar.close()
def _list_packages(self, args):
    '''
    Print every installed package.  In verbose mode the whole database
    row is printed comma-separated instead of just the package name.
    '''
    verbose = self.opts['verbose']
    for row in self._pkgdb_fun('list_packages', self.db_conn):
        self.ui.status(','.join(row) if verbose else row[0])
def _list_files(self, args):
    '''
    Print the files registered for an installed package.  In verbose mode
    each database row is printed comma-separated instead of just the path.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package name must be specified')

    package = args[-1]
    files = self._pkgdb_fun('list_files', package, self.db_conn)
    if files is None:
        raise SPMPackageError('package {0} not installed'.format(package))

    verbose = self.opts['verbose']
    for row in files:
        self.ui.status(','.join(row) if verbose else row[0])
def _build(self, args):
    '''
    Build an SPM package from a formula directory (``spm build <path>``).

    The directory must contain a FORMULA file defining at least ``name``,
    ``version``, ``release``, ``summary`` and ``description``.  The
    resulting ``.spm`` tarball is written into ``spm_build_dir``.

    Raises SPMInvocationError when no path is given and SPMPackageError
    when the FORMULA file is missing or incomplete.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')

    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]

    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)

    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))

    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )

    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])

    self.formula_conf = formula_conf

    formula_tar = tarfile.open(out_path, 'w:bz2')

    if 'files' in formula_conf:
        # This allows files to be added to the SPM file in a specific order.
        # It also allows for files to be tagged as a certain type, as with
        # RPM files. This tag is ignored here, but is used when installing
        # the SPM file.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    if file_.startswith('{0}|'.format(ftype)):
                        # BUGFIX: str.lstrip() strips a *character set*,
                        # not a literal prefix, so the original could eat
                        # leading characters of the real path (e.g.
                        # 'c|conf/x' -> 'onf/x').  Slice the tag off
                        # instead.
                        file_ = file_[len(ftype) + 1:]
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # If no files are specified, then the whole directory will be added.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            # Pre-2.7 tarfile took ``exclude`` instead of ``filter``
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()

    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
    '''
    tarfile filter callback used by ``_build``: return ``None`` to drop
    *member* when its path matches one of the configured
    ``spm_build_exclude`` entries, otherwise return the member unchanged.
    '''
    if isinstance(member, string_types):
        return None

    pkg_name = self.formula_conf['name']
    for excluded in self.opts['spm_build_exclude']:
        # An entry may match relative to the package name or to the
        # absolute formula path
        prefixes = (
            '{0}/{1}'.format(pkg_name, excluded),
            '{0}/{1}'.format(self.abspath, excluded),
        )
        if member.name.startswith(prefixes):
            return None
    return member
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script

    The formula's fields (plus a copy of the SPM opts under ``opts``) are
    exposed as template variables.  The renderer pipeline defaults to the
    FORMULA's ``renderer`` option, then the configured ``renderer``, then
    ``jinja|yaml``.  Returns the compiled highstate data structure.
    '''
    # FORMULA can contain a renderer option
    renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
    rend = salt.loader.render(self.opts, {})
    blacklist = self.opts.get('renderer_blacklist')
    whitelist = self.opts.get('renderer_whitelist')
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()
    # ':string:' tells compile_template to render input_data directly
    # instead of reading a file
    return compile_template(
        ':string:',
        rend,
        renderer,
        blacklist,
        whitelist,
        input_data=data,
        **template_vars
    )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._local
|
python
|
def _local(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'install':
self._local_install(args)
elif command == 'files':
self._local_list_files(args)
elif command == 'info':
self._local_info(args)
else:
raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
|
Process local commands
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L178-L191
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None):  # pylint: disable=W0231
    '''
    ui: user-interface object providing status/error/confirm output.
    opts: optional pre-built opts dict; when omitted, SPM configuration
    is loaded from the default config directory.
    '''
    self.ui = ui
    if not opts:
        opts = salt.config.spm_config(
            os.path.join(syspaths.CONFIG_DIR, 'spm')
        )
    self.opts = opts
    # Pluggable providers for the package database and the file store
    self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
    self.files_prov = self.opts.get('spm_files_provider', 'local')
    self._prep_pkgdb()
    self._prep_pkgfiles()
    # Connections are opened lazily by _init()
    self.db_conn = None
    self.files_conn = None
    self._init()
def _prep_pkgdb(self):
    # Load the package-database provider modules through the salt loader
    self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
    # Load the package file-store provider modules through the salt loader
    self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
    '''
    Lazily open the package-database and file-store connections; no-op
    when they already exist.
    '''
    self.db_conn = self.db_conn or self._pkgdb_fun('init')
    self.files_conn = self.files_conn or self._pkgfiles_fun('init')
def _close(self):
    '''
    Close the package database connection, if one is open.
    '''
    conn = self.db_conn
    if conn:
        conn.close()
def run(self, args):
    '''
    Run the SPM command

    ``args[0]`` selects the subcommand.  Unknown commands raise
    SPMInvocationError; all SPMException errors are reported through the
    UI rather than propagated.
    '''
    command = args[0]
    # Dispatch table for subcommands that take the args list
    dispatch = {
        'install': self._install,
        'local': self._local,
        'repo': self._repo,
        'remove': self._remove,
        'build': self._build,
        'update_repo': self._download_repo_metadata,
        'create_repo': self._create_repo,
        'files': self._list_files,
        'info': self._info,
        'list': self._list,
    }
    try:
        if command == 'close':
            # close takes no arguments
            self._close()
        elif command in dispatch:
            dispatch[command](args)
        else:
            raise SPMInvocationError('Invalid command \'{0}\''.format(command))
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
    '''
    Call *func* on the configured package-database provider, preferring
    attribute access on the loaded provider and falling back to the
    loader's dict-style '<provider>.<func>' lookup.
    '''
    try:
        provider = getattr(self.pkgdb, self.db_prov)
        return getattr(provider, func)(*args, **kwargs)
    except AttributeError:
        return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
    '''
    Call *func* on the configured file-store provider, preferring
    attribute access on the loaded provider and falling back to the
    loader's dict-style '<provider>.<func>' lookup.
    '''
    try:
        provider = getattr(self.pkgfiles, self.files_prov)
        return getattr(provider, func)(*args, **kwargs)
    except AttributeError:
        return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
    '''
    Process ``spm list`` subcommands (packages / files / repos).
    '''
    args.pop(0)
    command = args[0]
    if command == 'packages':
        handler = self._list_packages
    elif command == 'files':
        handler = self._list_files
    elif command == 'repos':
        handler = self._repo_list
    else:
        raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
    handler(args)
def _repo(self, args):
    '''
    Process ``spm repo`` subcommands
    (list / packages / search / update / create).
    '''
    args.pop(0)
    command = args[0]
    handlers = {
        'list': self._repo_list,
        'packages': self._repo_packages,
        # search is a substring-match package listing
        'search': lambda a: self._repo_packages(a, search=True),
        'update': self._download_repo_metadata,
        'create': self._create_repo,
    }
    try:
        handler = handlers[command]
    except KeyError:
        raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
    handler(args)
def _repo_packages(self, args, search=False):
    '''
    List packages across all configured repos whose name contains
    ``args[1]`` as a substring, printing each one and returning the
    matches as ``(name, version, release, repo)`` tuples.

    ``search`` is accepted for interface compatibility with the
    ``repo search`` command but is not consulted here.
    '''
    matches = []
    needle = args[1]
    repo_metadata = self._get_repo_metadata()
    for repo_name in repo_metadata:
        repo = repo_metadata[repo_name]
        for pkg_name in repo['packages']:
            if needle in pkg_name:
                info = repo['packages'][pkg_name]['info']
                matches.append((pkg_name, info['version'], info['release'], repo_name))
    for name, version, release, repo_name in sorted(matches):
        self.ui.status(
            '{0}\t{1}-{2}\t{3}'.format(name, version, release, repo_name)
        )
    return matches
def _repo_list(self, args):
    '''
    List configured repos
    This can be called either as a ``repo`` command or a ``list`` command
    '''
    for repo_name in self._get_repo_metadata():
        self.ui.status(repo_name)
def _install(self, args):
    '''
    Install a package from a repo

    ``args[1:]`` are package names or paths to local ``.spm`` files.
    Local files are tracked in ``file_map`` and installed directly; repo
    packages are resolved against the cached repo metadata, downloaded
    into ``spm_cache_dir``, and installed afterwards (download-all first,
    then install-all).
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    # NOTE(review): 'cache' is created here but never used in this method
    cache = salt.cache.Cache(self.opts)

    packages = args[1:]
    file_map = {}   # pkg_name -> local .spm file path
    optional = []
    recommended = []
    to_install = []
    for pkg in packages:
        if pkg.endswith('.spm'):
            if self._pkgfiles_fun('path_exists', pkg):
                # Derive the package name by stripping the trailing
                # -<version>-<release> filename components
                comps = pkg.split('-')
                comps = os.path.split('-'.join(comps[:-2]))
                pkg_name = comps[-1]

                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = salt.utils.yaml.safe_load(formula_ref)

                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(
                    pkg_name=pkg_name,
                    pkg_file=pkg,
                    formula_def=formula_def
                )
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
                formula_tar.close()
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)

    # filter(len, ...) drops empty-string entries before display
    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))

    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    repo_metadata = self._get_repo_metadata()

    dl_list = {}
    for package in to_install:
        if package in file_map:
            # Local .spm file: install immediately, no download needed
            self._install_indv_pkg(package, file_map[package])
        else:
            # Choose the best candidate across all repos
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        # Check package version, replace if newer version
                        if repo_ver == dl_list[package]['version']:
                            # Version is the same, check release
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                # Version and release are the same, give
                                # preference to local (file://) repos
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        dl_package = True
                    if dl_package is True:
                        # Put together download directory
                        cache_path = os.path.join(
                            self.opts['spm_cache_dir'],
                            repo
                        )

                        # Put together download paths
                        dl_url = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = os.path.join(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )

                        dl_list[package] = {
                            'version': repo_ver,
                            'release': repo_rel,
                            'source': dl_url,
                            'dest_dir': cache_path,
                            'dest_file': out_file,
                        }

    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']

        # Make sure download directory exists
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)

        # Download the package
        if dl_url.startswith('file://'):
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            # NOTE(review): 'repo_info' here is a leftover from the
            # selection loop above and may not correspond to this
            # package's repo -- verify before relying on its auth info
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, repo_info['info']))

    # First we download everything, then we install
    for package in dl_list:
        out_file = dl_list[package]['dest_file']

        # Kick off the install
        self._install_indv_pkg(package, out_file)
    return
def _local_install(self, args, pkg_name=None):
    '''
    Install a package from a local ``.spm`` file.  Thin wrapper around
    ``_install``, which handles file paths natively; ``pkg_name`` is
    accepted for interface compatibility but unused.
    '''
    try:
        args[1]
    except IndexError:
        raise SPMInvocationError('A package file must be specified')
    self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
    '''
    Starting with one package, check all packages for dependencies

    Returns a 3-tuple of lists: (packages to install, optional-dependency
    display strings, recommended-dependency display strings).
    '''
    if pkg_file and not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    self.repo_metadata = self._get_repo_metadata()
    if not formula_def:
        # No formula supplied; look the package up in the repo metadata
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            if pkg_name in self.repo_metadata[repo]['packages']:
                formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']

    if not formula_def:
        raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))

    # Check to see if the package is already installed
    pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    pkgs_to_install = []
    if pkg_info is None or self.opts['force']:
        pkgs_to_install.append(pkg_name)
    elif pkg_info is not None and not self.opts['force']:
        raise SPMPackageError(
            'Package {0} already installed, not installing again'.format(formula_def['name'])
        )

    optional_install = []
    recommended_install = []
    if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
        # Map every available package name to the repo providing it
        self.avail_pkgs = {}
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            for pkg in self.repo_metadata[repo]['packages']:
                self.avail_pkgs[pkg] = repo

        needs, unavail, optional, recommended = self._resolve_deps(formula_def)

        if unavail:
            raise SPMPackageError(
                'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                    formula_def['name'], '\n'.join(unavail))
            )

        if optional:
            optional_install.extend(optional)
            # NOTE(review): the loop below appends each entry a second
            # time (annotated with [Installed] when applicable), so names
            # can appear twice in the returned list -- confirm intent
            for dep_pkg in optional:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                optional_install.append(msg)

        if recommended:
            recommended_install.extend(recommended)
            for dep_pkg in recommended:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                recommended_install.append(msg)

        if needs:
            pkgs_to_install.extend(needs)
            # NOTE(review): 'msg' computed below is never used -- this
            # looks like dead code copied from the loops above
            for dep_pkg in needs:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)

    return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package

    Extracts *pkg_file* (a bz2 SPM tarball), validates the FORMULA,
    registers the package, runs any pre/post local/target state scripts,
    and installs each member through the file-store provider while
    registering it (and its hash) in the package database.
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)

    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))

    pkg_files = formula_tar.getmembers()

    # First pass: check for files that already exist
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)

    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )

    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)

    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        ret = self.caller.cmd('state.high', data=high_data)
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        tgt = formula_def['pre_tgt_state']['tgt']
        # NOTE(review): 'timout' below looks like a typo for 'timeout';
        # if run_job accepts arbitrary kwargs, the configured timeout is
        # silently ignored -- confirm against run_job's signature
        ret = self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            timout=self.opts['timeout'],
            data=high_data,
        )

    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]

    # Second pass: install the files
    for member in pkg_files:
        # Force ownership of every extracted member
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname

        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                # Directories are registered with an empty hash
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)

    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        tgt = formula_def['post_tgt_state']['tgt']
        # NOTE(review): same suspected 'timout' -> 'timeout' typo as above
        ret = self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            timout=self.opts['timeout'],
            data=high_data,
        )

    formula_tar.close()
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies

    Returns a 4-tuple: (resolvable deps as {name: repo}, unresolvable dep
    names, optional dep names, recommended dep names).  Recurses through
    each resolvable dependency's own formula.
    '''
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}

    can_has = {}
    cant_has = []
    # Normalize a YAML null dependencies field to an empty string
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Skip dependencies that are already installed
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue

        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)

    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')

    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]

        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)

        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})

        # Recurse into the dependency's own dependency lists
        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))

    return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided in
    the callback to them

    Reads the main ``spm_repos_config`` file plus every ``*.repo`` file in
    its ``.d`` directory.  Repos with ``enabled: False`` are skipped, and
    *repo_name* (when given) restricts the traversal to that single repo.
    '''
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])

    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            # BUGFIX: store the full path here.  The original stored the
            # bare filename and later rebuilt every path (including the
            # main config file's) as '<config>.d/<entry>', which mangled
            # the main config path and ignored nested directories.
            repo_files.append(os.path.join(dirpath, repo_file))

    for repo_path in repo_files:
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
            for repo in repo_data:
                if repo_data[repo].get('enabled', True) is False:
                    continue
                if repo_name is not None and repo != repo_name:
                    continue
                callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http

    Uses basic auth when *repo_info* contains ``username`` and
    ``password``; a username without a password is reported as an error.
    SPM-METADATA responses are parsed as YAML, everything else is
    returned as raw text.  Errors are reported via the UI and result in a
    ``None`` return rather than an exception.
    '''
    query = None
    response = None

    try:
        if 'username' in repo_info:
            try:
                if 'password' in repo_info:
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    # Misconfigured repo: auth user but no password
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    try:
        if query:
            if 'SPM-METADATA' in dl_path:
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    return response
def _download_repo_metadata(self, args):
    '''
    Connect to all repos and download metadata

    ``args[1]`` (when present) restricts the update to a single repo.
    Each repo's SPM-METADATA file is fetched (from disk for ``file://``
    URLs, over HTTP otherwise) and stored in the SPM metadata cache.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

    def _update_metadata(repo, repo_info):
        # Per-repo callback invoked by _traverse_repos
        dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
        if dl_path.startswith('file://'):
            dl_path = dl_path.replace('file://', '')
            with salt.utils.files.fopen(dl_path, 'r') as rpm:
                metadata = salt.utils.yaml.safe_load(rpm)
        else:
            metadata = self._query_http(dl_path, repo_info)

        cache.store('.', repo, metadata)

    repo_name = args[1] if len(args) > 1 else None
    self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata

    Any repo whose cache entry has never been stored is refreshed first.
    Returns ``{repo_name: {'info': <repo config>, 'packages': <cached
    package metadata>}}``.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    metadata = {}

    def _read_metadata(repo, repo_info):
        # Per-repo callback invoked by _traverse_repos
        if cache.updated('.', repo) is None:
            # Nothing cached for this repo yet; fetch it now
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})

        metadata[repo] = {
            'info': repo_info,
            'packages': cache.fetch('.', repo),
        }

    self._traverse_repos(_read_metadata)
    return metadata
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    When the same package name appears more than once, only the highest
    version/release is kept; older files are ignored, archived, or
    deleted according to the ``spm_repo_dups`` option.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')

    if args[1] == '.':
        # BUGFIX: os.getcwdu() does not exist on Python 3; it is only
        # needed on Python 2 to obtain a unicode path.
        repo_path = os.getcwdu() if hasattr(os, 'getcwdu') else os.getcwd()
    else:
        repo_path = args[1]

    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            # BUGFIX: join against the directory actually being walked;
            # the original joined against the top-level repo_path, which
            # broke for packages in subdirectories.
            spm_path = os.path.join(dirpath, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            try:
                formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
                formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
            finally:
                # BUGFIX: the original never closed the tarball
                spm_fh.close()

            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                # NOTE(review): int() comparison assumes purely numeric
                # version/release strings; dotted versions would raise
                # ValueError here -- confirm upstream expectations
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False
                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)
            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file

    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )

    log.debug('Wrote %s', metadata_filename)

    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove one or more installed packages.

    ``args[0]`` is the ``remove`` command itself; ``args[1:]`` are package
    names.  Files whose on-disk hash still matches the hash recorded in the
    package database at install time are deleted; modified files are left
    on disk but unregistered.  Directories are removed deepest-first and
    only when empty.

    Raises SPMInvocationError when no package is given or a package is not
    installed, and SPMDatabaseError when the package DB does not exist.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))

    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    for package in packages:
        self.ui.status('... removing {0}'.format(package))

        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))

        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))

        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            # filerow is a DB row: filerow[0] = path, filerow[1] = stored hash
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                dirs.append(filerow[0])
                continue
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                # Unmodified since install; safe to delete
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                # Changed after install; keep the file, forget the record
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)

        # Clean up directories
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)

        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
    '''
    Emit *msg* on the UI when verbose mode is enabled; always pass it to
    the given logging callable (``log.debug`` by default).
    '''
    verbose = self.opts.get('verbose', False)
    if verbose is True:
        self.ui.status(msg)
    level(msg)
def _local_info(self, args):
    '''
    Display metadata for a local package file (``spm local info <file>``).

    The package name is derived from the filename by stripping the trailing
    ``-<version>-<release>`` components; the FORMULA file is then read out
    of the tarball and rendered via ``_get_info``.

    Raises SPMInvocationError when no filename is given or the file does
    not exist.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')

    pkg_file = args[1]

    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    comps = pkg_file.split('-')
    comps = '-'.join(comps[:-2]).split('/')
    name = comps[-1]

    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    try:
        # BUGFIX: close the tarball even when the FORMULA member is
        # missing or fails to parse; the original leaked the handle here.
        formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
        formula_def = salt.utils.yaml.safe_load(formula_ref)
    finally:
        formula_tar.close()

    self.ui.status(self._get_info(formula_def))
def _info(self, args):
    '''
    Print metadata for an installed package (``spm info <package>``).
    '''
    try:
        package = args[1]
    except IndexError:
        raise SPMInvocationError('A package must be specified')

    pkg_info = self._pkgdb_fun('info', package, self.db_conn)
    if pkg_info is None:
        raise SPMPackageError('package {0} not installed'.format(package))
    self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
    '''
    Build an .spm package from a formula directory.

    args[1] is the path to a directory containing a FORMULA file; the
    resulting bz2 tarball is written to opts['spm_build_dir'].  Raises
    SPMInvocationError / SPMPackageError on bad input.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')

    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]

    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)

    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))

    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )

    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])

    self.formula_conf = formula_conf

    formula_tar = tarfile.open(out_path, 'w:bz2')

    if 'files' in formula_conf:
        # Files can be listed explicitly, optionally tagged 'type|path'
        # (RPM-style); the tag is ignored at build time but used when
        # installing.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    tag = '{0}|'.format(ftype)
                    if file_.startswith(tag):
                        # BUGFIX: str.lstrip() strips any of those
                        # CHARACTERS, mangling filenames starting with
                        # one of them; slice off the literal prefix.
                        file_ = file_[len(tag):]
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # No explicit file list: add the whole formula directory.
        # Newer tarfile takes filter=; very old versions took exclude=.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()
    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
    '''
    tarfile filter callback: return None (skip) for members matching
    opts['spm_build_exclude'], otherwise return the member unchanged.
    '''
    if isinstance(member, string_types):
        return None

    prefixes = tuple(
        pfx
        for item in self.opts['spm_build_exclude']
        for pfx in ('{0}/{1}'.format(self.formula_conf['name'], item),
                    '{0}/{1}'.format(self.abspath, item))
    )
    if prefixes and member.name.startswith(prefixes):
        return None
    return member
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script.

    The renderer named in the FORMULA wins, then the configured
    renderer, then 'jinja|yaml'.  The formula metadata plus ``opts``
    are exposed as template variables.
    '''
    renderer = formula_def.get('renderer',
                               self.opts.get('renderer', 'jinja|yaml'))
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()
    return compile_template(
        ':string:',
        salt.loader.render(self.opts, {}),
        renderer,
        self.opts.get('renderer_blacklist'),
        self.opts.get('renderer_whitelist'),
        input_data=data,
        **template_vars
    )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._repo
|
python
|
def _repo(self, args):
'''
Process repo commands
'''
args.pop(0)
command = args[0]
if command == 'list':
self._repo_list(args)
elif command == 'packages':
self._repo_packages(args)
elif command == 'search':
self._repo_packages(args, search=True)
elif command == 'update':
self._download_repo_metadata(args)
elif command == 'create':
self._create_repo(args)
else:
raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
|
Process repo commands
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L193-L210
| null |
class SPMClient(object):
    '''
    Provide an SPM Client
    '''
    def __init__(self, ui, opts=None):  # pylint: disable=W0231
        # ui: SPMUserInterface used for all status/error/confirm output.
        # opts: pre-parsed SPM configuration; loaded from the default
        # spm config file when not supplied.
        self.ui = ui
        if not opts:
            opts = salt.config.spm_config(
                os.path.join(syspaths.CONFIG_DIR, 'spm')
            )
        self.opts = opts
        # Pluggable backends for the package database and file storage
        self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
        self.files_prov = self.opts.get('spm_files_provider', 'local')
        self._prep_pkgdb()
        self._prep_pkgfiles()
        # Backend connections are opened lazily by _init()
        self.db_conn = None
        self.files_conn = None
        self._init()
def _prep_pkgdb(self):
    # Load the package-database backend modules via the salt loader
    self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
    # Load the package file-storage backend modules via the salt loader
    self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
    # Open the DB and file-store connections once, on first use
    if not self.db_conn:
        self.db_conn = self._pkgdb_fun('init')
    if not self.files_conn:
        self.files_conn = self._pkgfiles_fun('init')
def _close(self):
    # Close the package DB connection if one was opened.
    # NOTE(review): files_conn is never closed here -- confirm whether
    # the files provider needs explicit cleanup.
    if self.db_conn:
        self.db_conn.close()
def run(self, args):
    '''
    Execute one SPM command line.

    args[0] is the command name; the full args list is handed to the
    command handler.  SPMException errors are reported through the UI
    instead of propagating.
    '''
    handlers = {
        'install': '_install',
        'local': '_local',
        'repo': '_repo',
        'remove': '_remove',
        'build': '_build',
        'update_repo': '_download_repo_metadata',
        'create_repo': '_create_repo',
        'files': '_list_files',
        'info': '_info',
        'list': '_list',
    }
    command = args[0]
    try:
        if command == 'close':
            # 'close' takes no arguments
            self._close()
        elif command in handlers:
            # Deferred attribute lookup keeps the original semantics of
            # only touching the handler actually requested
            getattr(self, handlers[command])(args)
        else:
            raise SPMInvocationError('Invalid command \'{0}\''.format(command))
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
    '''
    Call *func* on the configured package-DB provider, supporting both
    module-object loaders (attribute access) and LazyLoader dict-style
    lookup ('provider.func').
    '''
    try:
        return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
    except AttributeError:
        # Fall back to the lazy-dict form
        return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
    '''
    Call *func* on the configured package-files provider; same dual
    lookup strategy as _pkgdb_fun().
    '''
    try:
        return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
    except AttributeError:
        # Fall back to the lazy-dict form
        return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args)
elif command == 'files':
self._list_files(args)
elif command == 'repos':
self._repo_list(args)
else:
raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'install':
self._local_install(args)
elif command == 'files':
self._local_list_files(args)
elif command == 'info':
self._local_info(args)
else:
raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages
def _repo_list(self, args):
'''
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
'''
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
self.ui.status(repo)
def _install(self, args):
    '''
    Install one or more packages (args[1:]): local .spm file paths are
    installed directly; bare names are resolved against the configured
    repos, downloaded to the cache dir, then installed.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    # Local caller/client are used later to run pre/post state scripts
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    # NOTE(review): 'cache' appears unused in this method -- confirm
    cache = salt.cache.Cache(self.opts)

    packages = args[1:]
    file_map = {}      # package name -> local .spm path for file installs
    optional = []
    recommended = []
    to_install = []
    for pkg in packages:
        if pkg.endswith('.spm'):
            # Install from a local package file
            if self._pkgfiles_fun('path_exists', pkg):
                # Package name = basename minus '-<version>-<release>'
                comps = pkg.split('-')
                comps = os.path.split('-'.join(comps[:-2]))
                pkg_name = comps[-1]

                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = salt.utils.yaml.safe_load(formula_ref)

                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(
                    pkg_name=pkg_name,
                    pkg_file=pkg,
                    formula_def=formula_def
                )
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
                formula_tar.close()
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            # Install from a configured repo
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)

    # Report optional/recommended dependencies (empty strings dropped)
    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))

    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    repo_metadata = self._get_repo_metadata()

    # Pick the best download candidate per package across all repos
    dl_list = {}
    for package in to_install:
        if package in file_map:
            # Local file: install immediately, nothing to download
            self._install_indv_pkg(package, file_map[package])
        else:
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    # NOTE(review): versions/releases are compared as
                    # strings here ('10' < '9') -- confirm repo version
                    # format before relying on this ordering.
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        # Check package version, replace if newer version
                        if repo_ver == dl_list[package]['version']:
                            # Version is the same, check release
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                # Version and release are the same, give
                                # preference to local (file://) repos
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        dl_package = True

                    if dl_package is True:
                        # Put together download directory
                        cache_path = os.path.join(
                            self.opts['spm_cache_dir'],
                            repo
                        )

                        # Put together download paths
                        dl_url = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = os.path.join(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )

                        dl_list[package] = {
                            'version': repo_ver,
                            'release': repo_rel,
                            'source': dl_url,
                            'dest_dir': cache_path,
                            'dest_file': out_file,
                        }

    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']

        # Make sure download directory exists
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)

        # Download the package
        if dl_url.startswith('file://'):
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            # NOTE(review): 'repo_info' here is left over from the last
            # iteration of the repo loop above, which may not be the
            # repo this package was chosen from -- confirm.
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, repo_info['info']))

    # First we download everything, then we install
    for package in dl_list:
        out_file = dl_list[package]['dest_file']

        # Kick off the install
        self._install_indv_pkg(package, out_file)
    return
def _local_install(self, args, pkg_name=None):
    '''
    Install a package from a local .spm file; thin wrapper that
    delegates to _install(), which handles .spm paths itself.
    pkg_name is accepted for interface compatibility but unused here.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package file must be specified')

    self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
    '''
    Starting with one package, check all packages for dependencies.

    Returns (pkgs_to_install, optional_install, recommended_install).
    Raises SPMInvocationError when no formula can be found and
    SPMPackageError when the package is already installed (unless
    opts['force']) or a hard dependency is unavailable.
    '''
    if pkg_file and not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    self.repo_metadata = self._get_repo_metadata()
    if not formula_def:
        # No formula supplied: look the package up in the repo metadata
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            if pkg_name in self.repo_metadata[repo]['packages']:
                formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']

    if not formula_def:
        raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))

    # Check to see if the package is already installed
    pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    pkgs_to_install = []
    if pkg_info is None or self.opts['force']:
        pkgs_to_install.append(pkg_name)
    elif pkg_info is not None and not self.opts['force']:
        raise SPMPackageError(
            'Package {0} already installed, not installing again'.format(formula_def['name'])
        )

    optional_install = []
    recommended_install = []
    if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
        # Build a name -> repo map of every available package
        self.avail_pkgs = {}
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            for pkg in self.repo_metadata[repo]['packages']:
                self.avail_pkgs[pkg] = repo

        needs, unavail, optional, recommended = self._resolve_deps(formula_def)

        if unavail:
            raise SPMPackageError(
                'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                    formula_def['name'], '\n'.join(unavail))
            )

        if optional:
            # NOTE(review): deps are appended twice -- once bare via
            # extend() and once possibly '[Installed]'-tagged below.
            # Confirm whether the extend() is intentional.
            optional_install.extend(optional)
            for dep_pkg in optional:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                optional_install.append(msg)

        if recommended:
            # NOTE(review): same double-append pattern as above
            recommended_install.extend(recommended)
            for dep_pkg in recommended:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                recommended_install.append(msg)

        if needs:
            pkgs_to_install.extend(needs)
            for dep_pkg in needs:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                # NOTE(review): 'msg' is computed but never used here --
                # dead code or a missing append; confirm intent.

    return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package from the .spm tarball at pkg_file:
    register it in the package DB, run its pre-state scripts, install
    the files, then run its post-state scripts.
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)

    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))

    pkg_files = formula_tar.getmembers()

    # First pass: check for files that already exist
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)

    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )

    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)

    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        ret = self.caller.cmd('state.high', data=high_data)
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        tgt = formula_def['pre_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            # NOTE(review): 'timout' looks like a typo for 'timeout';
            # if run_job swallows unknown kwargs, the timeout is ignored
            timout=self.opts['timeout'],
            data=high_data,
        )

    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]

    # Second pass: install the files
    for member in pkg_files:
        # Stamp ownership onto each member before extraction
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname

        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                # Directories get no content hash
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)

    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        tgt = formula_def['post_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            # NOTE(review): same 'timout' typo as above
            timout=self.opts['timeout'],
            data=high_data,
        )

    formula_tar.close()
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies.

    Recursively walks the comma-separated 'dependencies' of formula_def
    and returns (can_has, cant_has, optional, recommended): can_has maps
    each resolvable dep to the repo providing it, cant_has lists
    unresolvable deps.
    '''
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}

    can_has = {}
    cant_has = []
    # Normalize a YAML-null dependency list to an empty string
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Deps that are already installed need no action
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue

        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)

    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')

    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]

        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)

        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        # NOTE(review): dep_formula may be {} when metadata is missing;
        # the recursive call then hits formula_def['name'] -- confirm
        # the metadata always carries 'info' for listed packages.
        dep_formula = repo_packages.get(dep, {}).get('info', {})

        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))

    return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided
    in the callback to them.

    Reads <spm_repos_config> itself plus every *.repo file under
    <spm_repos_config>.d/.  Repos with ``enabled: False`` are skipped;
    when repo_name is given, only that repo is processed.
    '''
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])

    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            # BUGFIX: store the full path.  The original stored the bare
            # filename and later rebuilt '<config>.d/<entry>' for every
            # entry -- which also mangled the main config's full path
            # collected above into a nonexistent location.
            repo_files.append(os.path.join(dirpath, repo_file))

    for repo_path in repo_files:
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
            for repo in repo_data:
                if repo_data[repo].get('enabled', True) is False:
                    continue
                if repo_name is not None and repo != repo_name:
                    continue
                callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http(s).

    Returns the response body -- parsed as YAML when dl_path points at
    an SPM-METADATA file, raw text otherwise -- or None when the query
    failed (errors are reported through the UI).  HTTP basic auth is
    used when repo_info carries username and password.
    '''
    query = None
    response = None

    try:
        if 'username' in repo_info:
            try:
                if 'password' in repo_info:
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    # Username without password is a config error
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    try:
        if query:
            if 'SPM-METADATA' in dl_path:
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    return response
def _download_repo_metadata(self, args):
    '''
    Connect to all repos and download metadata.

    Each repo's SPM-METADATA is fetched (from disk for file:// repos,
    over HTTP otherwise) and stored in the local cache.  args[1], when
    given, limits the update to that single repo.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

    def _update_metadata(repo, repo_info):
        # Fetch and cache one repo's SPM-METADATA
        dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
        if dl_path.startswith('file://'):
            dl_path = dl_path.replace('file://', '')
            with salt.utils.files.fopen(dl_path, 'r') as rpm:
                metadata = salt.utils.yaml.safe_load(rpm)
        else:
            metadata = self._query_http(dl_path, repo_info)

        cache.store('.', repo, metadata)

    repo_name = args[1] if len(args) > 1 else None
    self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata as
    {repo: {'info': <repo config>, 'packages': <cached metadata>}},
    downloading the metadata first for repos never cached.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    metadata = {}

    def _read_metadata(repo, repo_info):
        # A None timestamp means this repo was never cached
        if cache.updated('.', repo) is None:
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})

        metadata[repo] = {
            'info': repo_info,
            'packages': cache.fetch('.', repo),
        }

    self._traverse_repos(_read_metadata)
    return metadata
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    When several versions of the same package are found, only the
    newest is indexed; older files are handled per opts['spm_repo_dups']
    (ignore / archive / delete).
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')

    if args[1] == '.':
        # NOTE(review): os.getcwdu() is Python-2-only; this branch
        # breaks on Python 3 -- confirm intended interpreter support.
        repo_path = os.getcwdu()
    else:
        repo_path = args[1]

    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            # NOTE(review): joined to repo_path rather than dirpath, so
            # .spm files in subdirectories resolve to wrong paths --
            # confirm whether subdirectories are expected here.
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # Package name = filename minus '-<version>-<release>.spm'
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())

            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False
                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)
            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file

    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )

    log.debug('Wrote %s', metadata_filename)

    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove one or more installed packages (args[1:]): delete their
    unmodified files and empty directories, then unregister everything
    from the package DB.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))

    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    for package in packages:
        self.ui.status('... removing {0}'.format(package))

        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))

        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))

        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                dirs.append(filerow[0])
                continue
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                # Only delete files whose on-disk hash still matches the
                # recorded one (i.e. unmodified since install)
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)

        # Clean up directories (deepest first)
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)

        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
'''
Display verbose information
'''
if self.opts.get('verbose', False) is True:
self.ui.status(msg)
level(msg)
def _local_info(self, args):
    '''
    Display the FORMULA metadata of a local .spm package file.

    args[1] is the path to the package file.  Raises SPMInvocationError
    when no filename is given or the file does not exist.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')

    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    # Derive the package name from the '<name>-<version>-<release>' filename
    comps = pkg_file.split('-')
    comps = '-'.join(comps[:-2]).split('/')
    name = comps[-1]

    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    try:
        formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
        formula_def = salt.utils.yaml.safe_load(formula_ref)
        self.ui.status(self._get_info(formula_def))
    finally:
        # Close the tarball even when FORMULA is missing or malformed
        # (the original leaked the handle on those paths)
        formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
    '''
    Build an .spm package from a formula directory.

    args[1] is the path to a directory containing a FORMULA file; the
    resulting bz2 tarball is written to opts['spm_build_dir'].  Raises
    SPMInvocationError / SPMPackageError on bad input.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')

    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]

    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)

    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))

    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )

    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])

    self.formula_conf = formula_conf

    formula_tar = tarfile.open(out_path, 'w:bz2')

    if 'files' in formula_conf:
        # Files can be listed explicitly, optionally tagged 'type|path'
        # (RPM-style); the tag is ignored at build time but used when
        # installing.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    tag = '{0}|'.format(ftype)
                    if file_.startswith(tag):
                        # BUGFIX: str.lstrip() strips any of those
                        # CHARACTERS, mangling filenames starting with
                        # one of them; slice off the literal prefix.
                        file_ = file_[len(tag):]
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # No explicit file list: add the whole formula directory.
        # Newer tarfile takes filter=; very old versions took exclude=.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()
    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
    '''
    tarfile filter callback: return None (skip) for members matching
    opts['spm_build_exclude'], otherwise return the member unchanged.
    '''
    if isinstance(member, string_types):
        return None

    prefixes = tuple(
        pfx
        for item in self.opts['spm_build_exclude']
        for pfx in ('{0}/{1}'.format(self.formula_conf['name'], item),
                    '{0}/{1}'.format(self.abspath, item))
    )
    if prefixes and member.name.startswith(prefixes):
        return None
    return member
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script
    through the Salt rendering pipeline and return the resulting
    data structure.
    '''
    # FORMULA can contain a renderer option; fall back to the configured
    # renderer, then to the jinja|yaml default.
    renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
    rend = salt.loader.render(self.opts, {})
    blacklist = self.opts.get('renderer_blacklist')
    whitelist = self.opts.get('renderer_whitelist')
    # Expose the FORMULA fields (plus a copy of opts) as template variables.
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()
    return compile_template(
        ':string:',
        rend,
        renderer,
        blacklist,
        whitelist,
        input_data=data,
        **template_vars
    )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._repo_packages
|
python
|
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages
|
List packages for one or more configured repos
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L212-L228
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None):  # pylint: disable=W0231
    # ui: interaction object providing status()/error()/confirm().
    # opts: pre-loaded SPM config dict; loaded from the default SPM
    # config file when not supplied.
    self.ui = ui
    if not opts:
        opts = salt.config.spm_config(
            os.path.join(syspaths.CONFIG_DIR, 'spm')
        )
    self.opts = opts
    # Pluggable provider names for the package database and file store.
    self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
    self.files_prov = self.opts.get('spm_files_provider', 'local')
    self._prep_pkgdb()
    self._prep_pkgfiles()
    # Provider connections are opened lazily by _init().
    self.db_conn = None
    self.files_conn = None
    self._init()
def _prep_pkgdb(self):
    # Load the package-database provider modules via the Salt loader.
    self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
    # Load the package-files provider modules via the Salt loader.
    self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
    # Lazily open the provider connections; safe to call repeatedly.
    if not self.db_conn:
        self.db_conn = self._pkgdb_fun('init')
    if not self.files_conn:
        self.files_conn = self._pkgfiles_fun('init')
def _close(self):
    '''
    Close the package-database connection, if one was opened.
    '''
    conn = self.db_conn
    if conn:
        conn.close()
def run(self, args):
    '''
    Run the SPM command.

    ``args[0]`` selects the subcommand; any SPMException raised while
    handling it is reported through the UI instead of propagating.
    '''
    command = args[0]
    handlers = {
        'install': self._install,
        'local': self._local,
        'repo': self._repo,
        'remove': self._remove,
        'build': self._build,
        'update_repo': self._download_repo_metadata,
        'create_repo': self._create_repo,
        'files': self._list_files,
        'info': self._info,
        'list': self._list,
    }
    try:
        if command == 'close':
            # 'close' takes no arguments, unlike every other handler.
            self._close()
        elif command in handlers:
            handlers[command](args)
        else:
            raise SPMInvocationError('Invalid command \'{0}\''.format(command))
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
    # Dispatch ``func`` to the configured database provider: prefer
    # attribute access on the loaded provider module and fall back to
    # the loader's dict-style '<provider>.<func>' lookup.
    # NOTE(review): this also swallows AttributeErrors raised *inside*
    # the provider function itself — confirm that is intended.
    try:
        return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
    except AttributeError:
        return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
    # Dispatch ``func`` to the configured files provider; same fallback
    # strategy (and same AttributeError caveat) as _pkgdb_fun.
    try:
        return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
    except AttributeError:
        return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
    '''
    Process ``spm list`` subcommands (packages / files / repos).
    '''
    handlers = {
        'packages': self._list_packages,
        'files': self._list_files,
        'repos': self._repo_list,
    }
    # Drop the leading 'list' token so handlers see their own args.
    args.pop(0)
    subcmd = args[0]
    if subcmd not in handlers:
        raise SPMInvocationError('Invalid list command \'{0}\''.format(subcmd))
    handlers[subcmd](args)
def _local(self, args):
    '''
    Process ``spm local`` subcommands (install / files / info),
    operating on .spm files rather than configured repos.
    '''
    handlers = {
        'install': self._local_install,
        'files': self._local_list_files,
        'info': self._local_info,
    }
    # Drop the leading 'local' token so handlers see their own args.
    args.pop(0)
    subcmd = args[0]
    if subcmd not in handlers:
        raise SPMInvocationError('Invalid local command \'{0}\''.format(subcmd))
    handlers[subcmd](args)
def _repo(self, args):
    '''
    Process ``spm repo`` subcommands (list / packages / search /
    update / create).
    '''
    # Drop the leading 'repo' token so handlers see their own args.
    args.pop(0)
    subcmd = args[0]
    if subcmd == 'search':
        # search shares the packages listing, with matching enabled
        self._repo_packages(args, search=True)
        return
    handlers = {
        'list': self._repo_list,
        'packages': self._repo_packages,
        'update': self._download_repo_metadata,
        'create': self._create_repo,
    }
    if subcmd not in handlers:
        raise SPMInvocationError('Invalid repo command \'{0}\''.format(subcmd))
    handlers[subcmd](args)
def _repo_list(self, args):
    '''
    Print the name of every configured repo.

    Reached via both ``spm repo list`` and ``spm list repos``.
    '''
    for repo_name in self._get_repo_metadata():
        self.ui.status(repo_name)
def _install(self, args):
    '''
    Install one or more packages named in ``args[1:]``.

    Arguments ending in ``.spm`` are treated as local package files;
    anything else is resolved against the configured repos. All
    dependencies are checked first, then everything is downloaded,
    then everything is installed.

    Raises:
        SPMInvocationError: no package given, or a named .spm file is missing.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    # Local clients used by _install_indv_pkg for pre/post state scripts.
    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    cache = salt.cache.Cache(self.opts)
    packages = args[1:]
    file_map = {}
    optional = []
    recommended = []
    to_install = []
    # Pass 1: resolve dependencies for every requested package.
    for pkg in packages:
        if pkg.endswith('.spm'):
            if self._pkgfiles_fun('path_exists', pkg):
                # Package name is "<path>/<name>" minus "-<version>-<release>.spm"
                comps = pkg.split('-')
                comps = os.path.split('-'.join(comps[:-2]))
                pkg_name = comps[-1]
                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = salt.utils.yaml.safe_load(formula_ref)
                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(
                    pkg_name=pkg_name,
                    pkg_file=pkg,
                    formula_def=formula_def
                )
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
                formula_tar.close()
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)
    # Deduplicate and drop empty names before reporting.
    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))
    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)
    repo_metadata = self._get_repo_metadata()
    # Pass 2: pick the best (newest, preferring the right repo) source
    # for each package that is not a local file.
    dl_list = {}
    for package in to_install:
        if package in file_map:
            self._install_indv_pkg(package, file_map[package])
        else:
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        # Check package version, replace if newer version
                        if repo_ver == dl_list[package]['version']:
                            # Version is the same, check release
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                # NOTE(review): the original comment says this
                                # gives preference to local (file://) repos, but
                                # the code replaces a file:// entry with a
                                # non-file:// one — confirm intended direction.
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        dl_package = True
                    if dl_package is True:
                        # Put together download directory
                        cache_path = os.path.join(
                            self.opts['spm_cache_dir'],
                            repo
                        )
                        # Put together download paths
                        dl_url = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = os.path.join(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )
                        dl_list[package] = {
                            'version': repo_ver,
                            'release': repo_rel,
                            'source': dl_url,
                            'dest_dir': cache_path,
                            'dest_file': out_file,
                            # BUG FIX: remember the owning repo's info here.
                            # The download loop below previously reused the
                            # stale ``repo_info`` loop variable, so HTTP auth
                            # credentials could come from the wrong repo.
                            'repo_info': repo_info['info'],
                        }
    # Pass 3: download everything.
    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']
        # Make sure download directory exists
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)
        # Download the package
        if dl_url.startswith('file://'):
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, dl_list[package]['repo_info']))
    # Pass 4: first we download everything, then we install
    for package in dl_list:
        out_file = dl_list[package]['dest_file']
        # Kick off the install
        self._install_indv_pkg(package, out_file)
    return
def _local_install(self, args, pkg_name=None):
    '''
    Install a package from a file
    '''
    # Thin wrapper: _install() detects .spm file paths itself. The
    # pkg_name parameter is accepted for API compatibility but unused.
    if len(args) < 2:
        raise SPMInvocationError('A package file must be specified')
    self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
    '''
    Starting with one package, check all packages for dependencies.

    Returns a 3-tuple ``(pkgs_to_install, optional_install,
    recommended_install)`` of package-name lists.
    '''
    if pkg_file and not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
    self.repo_metadata = self._get_repo_metadata()
    # If no FORMULA data was passed in, look the package up in the repos.
    if not formula_def:
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            if pkg_name in self.repo_metadata[repo]['packages']:
                formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
    if not formula_def:
        raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
    # Check to see if the package is already installed
    pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    pkgs_to_install = []
    if pkg_info is None or self.opts['force']:
        pkgs_to_install.append(pkg_name)
    elif pkg_info is not None and not self.opts['force']:
        raise SPMPackageError(
            'Package {0} already installed, not installing again'.format(formula_def['name'])
        )
    optional_install = []
    recommended_install = []
    if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
        # Map every package available in any repo to its repo name, for
        # use by _resolve_deps.
        self.avail_pkgs = {}
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            for pkg in self.repo_metadata[repo]['packages']:
                self.avail_pkgs[pkg] = repo
        needs, unavail, optional, recommended = self._resolve_deps(formula_def)
        if unavail:
            raise SPMPackageError(
                'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                    formula_def['name'], '\n'.join(unavail))
            )
        if optional:
            # NOTE(review): each optional dep is appended twice — once via
            # extend() and once (possibly "[Installed]"-tagged) via
            # append() — producing duplicate entries; confirm intended.
            optional_install.extend(optional)
            for dep_pkg in optional:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                optional_install.append(msg)
        if recommended:
            # NOTE(review): same double-append pattern as above.
            recommended_install.extend(recommended)
            for dep_pkg in recommended:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                recommended_install.append(msg)
        if needs:
            pkgs_to_install.extend(needs)
            # NOTE(review): this loop computes ``msg`` but never uses it —
            # dead code (the append that other branches have is missing).
            for dep_pkg in needs:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
    return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package from the .spm file at ``pkg_file``:
    validate the FORMULA, register the package, run any pre-state
    scripts, extract and register the files, then run post-state scripts.
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)
    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
    pkg_files = formula_tar.getmembers()
    # First pass: check for files that already exist
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )
    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        ret = self.caller.cmd('state.high', data=high_data)
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        tgt = formula_def['pre_tgt_state']['tgt']
        # NOTE(review): 'timout' looks like a typo for 'timeout' — confirm
        # against the run_job() signature before changing.
        ret = self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            timout=self.opts['timeout'],
            data=high_data,
        )
    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]
    # Second pass: install the files
    for member in pkg_files:
        # Force ownership metadata on every extracted member.
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname
        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                # Directories are registered without a content hash.
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)
    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        tgt = formula_def['post_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            timout=self.opts['timeout'],
            data=high_data,
        )
    formula_tar.close()
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies.

    Returns a 4-tuple: ``(can_has, cant_has, optional, recommended)``
    where ``can_has`` maps resolvable deps to the repo providing them
    and ``cant_has`` lists deps found in no repo.
    '''
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}
    can_has = {}
    cant_has = []
    # Normalize a YAML-null dependencies field to an empty string so the
    # comma-split below works.
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Already installed deps need no action.
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue
        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)
    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')
    # Recursively resolve the transitive dependencies of everything in
    # can_has, merging each sub-result into the accumulators.
    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]
        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)
        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})
        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))
    return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided
    in the callback to them.

    Repo definitions come from the main ``spm_repos_config`` file (when
    it exists) plus every ``*.repo`` file inside the matching ``.d``
    directory. Disabled repos are skipped; when ``repo_name`` is given,
    only that repo triggers the callback.
    '''
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            repo_files.append(repo_file)
    for repo_file in repo_files:
        if repo_file == self.opts['spm_repos_config']:
            # BUG FIX: the main config entry is already a full path; the
            # old code mangled it into '<conf>.d/<conf>' so repos defined
            # in the main config file could never be read.
            repo_path = repo_file
        else:
            repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
        for repo in repo_data:
            if repo_data[repo].get('enabled', True) is False:
                continue
            if repo_name is not None and repo != repo_name:
                continue
            callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http.

    Uses username/password from ``repo_info`` when present. SPM-METADATA
    responses are YAML-parsed; anything else is returned as text. All
    SPMExceptions are reported through the UI and ``None`` is returned
    on failure instead of raising.
    '''
    query = None
    response = None
    try:
        if 'username' in repo_info:
            try:
                if 'password' in repo_info:
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    # A username without a password is a config error.
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
    try:
        if query:
            if 'SPM-METADATA' in dl_path:
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
    return response
def _download_repo_metadata(self, args):
    '''
    Connect to all repos and download metadata
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

    def _update_metadata(repo, repo_info):
        # file:// URLs are read straight off disk; anything else goes
        # through the HTTP helper.
        dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
        if dl_path.startswith('file://'):
            dl_path = dl_path.replace('file://', '')
            with salt.utils.files.fopen(dl_path, 'r') as rpm:
                metadata = salt.utils.yaml.safe_load(rpm)
        else:
            metadata = self._query_http(dl_path, repo_info)
        # Store in cache bank '.', keyed by repo name.
        cache.store('.', repo, metadata)

    # Optional repo-name filter may be passed as args[1].
    repo_name = args[1] if len(args) > 1 else None
    self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata as ``{repo: {'info': ..., 'packages': ...}}``.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    metadata = {}

    def _read_metadata(repo, repo_info):
        # A None timestamp means this repo was never cached; refresh the
        # metadata for all repos first.
        if cache.updated('.', repo) is None:
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})
        metadata[repo] = {
            'info': repo_info,
            'packages': cache.fetch('.', repo),
        }

    self._traverse_repos(_read_metadata)
    return metadata
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    When two .spm files provide the same package, only the newest
    version/release is indexed; the loser is handled according to the
    ``spm_repo_dups`` option (ignore/archive/delete).

    Raises:
        SPMInvocationError: no directory argument was given.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')
    if args[1] == '.':
        # BUG FIX: os.getcwdu() exists only on Python 2 and raised
        # AttributeError under Python 3; os.getcwd() works on both.
        repo_path = os.getcwd()
    else:
        repo_path = args[1]
    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # Package name is the filename minus "-<version>-<release>.spm"
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
            # BUG FIX: close the tarfile handle; the original leaked one
            # open handle per .spm file scanned.
            spm_fh.close()
            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False
                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)
            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file
    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )
    log.debug('Wrote %s', metadata_filename)
    # NOTE(review): the cleanup below uses paths relative to the current
    # working directory ('./archive', bare file_ names), not repo_path —
    # confirm this only behaves as expected when run from inside the repo.
    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove one or more installed packages named in ``args[1:]``.

    Files whose on-disk hash still matches the registered hash are
    deleted; locally modified files are left in place but unregistered.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)
    for package in packages:
        self.ui.status('... removing {0}'.format(package))
        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))
        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                # Directories are removed after all files, deepest first.
                dirs.append(filerow[0])
                continue
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                # Hash mismatch: the file was modified locally; keep it.
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
        # Clean up directories
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
    '''
    Display verbose information
    '''
    # Echo to the UI only in verbose mode, but always emit to the
    # logger at the requested level (default: log.debug, bound at
    # class-creation time).
    if self.opts.get('verbose', False) is True:
        self.ui.status(msg)
    level(msg)
def _local_info(self, args):
    '''
    List info for a package file
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')
    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
    # Derive the package name from "<path>/<name>-<version>-<release>.spm"
    comps = pkg_file.split('-')
    comps = '-'.join(comps[:-2]).split('/')
    name = comps[-1]
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)
    self.ui.status(self._get_info(formula_def))
    formula_tar.close()
def _info(self, args):
    '''
    Print metadata for the installed package named in ``args[1]``.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    pkg_info = self._pkgdb_fun('info', args[1], self.db_conn)
    if pkg_info is None:
        raise SPMPackageError('package {0} not installed'.format(args[1]))
    self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
    '''
    Build a human-readable metadata report for a package.

    Missing fields are filled in-place with the string 'None' (and
    'Not installed' for the install date) before formatting.
    '''
    defaulted = (
        'name',
        'os',
        'os_family',
        'release',
        'version',
        'dependencies',
        'os_dependencies',
        'os_family_dependencies',
        'summary',
        'description',
    )
    for field in defaulted:
        formula_def.setdefault(field, 'None')
    formula_def.setdefault('installed', 'Not installed')
    template = (
        'Name: {name}\n'
        'Version: {version}\n'
        'Release: {release}\n'
        'Install Date: {installed}\n'
        'Supported OSes: {os}\n'
        'Supported OS families: {os_family}\n'
        'Dependencies: {dependencies}\n'
        'OS Dependencies: {os_dependencies}\n'
        'OS Family Dependencies: {os_family_dependencies}\n'
        'Summary: {summary}\n'
        'Description:\n'
        '{description}'
    )
    return template.format(**formula_def)
def _local_list_files(self, args):
    '''
    List the member names contained in a local .spm package file.

    Args:
        args: CLI argument list; ``args[1]`` is the path to the file.

    Raises:
        SPMInvocationError: no package filename was given.
        SPMPackageError: the named file does not exist.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')
    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMPackageError('Package file {0} not found'.format(pkg_file))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    try:
        for member in formula_tar.getmembers():
            self.ui.status(member.name)
    finally:
        # BUG FIX: the original never closed the tarfile, leaking the
        # file handle on every invocation.
        formula_tar.close()
def _list_packages(self, args):
    '''
    Print one line per installed package: the full database row
    comma-joined when verbose, otherwise just the package name.
    '''
    verbose = self.opts['verbose']
    for record in self._pkgdb_fun('list_packages', self.db_conn):
        line = ','.join(record) if verbose else record[0]
        self.ui.status(line)
def _list_files(self, args):
    '''
    Print the files registered for the installed package named by the
    last CLI argument.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package name must be specified')
    package = args[-1]
    files = self._pkgdb_fun('list_files', package, self.db_conn)
    if files is None:
        raise SPMPackageError('package {0} not installed'.format(package))
    verbose = self.opts['verbose']
    for file_info in files:
        self.ui.status(','.join(file_info) if verbose else file_info[0])
def _build(self, args):
    '''
    Build a .spm package from the formula directory given in ``args[1]``.

    The directory must contain a FORMULA file defining at least name,
    version, release, summary and description. When FORMULA lists a
    ``files`` sequence, only those files are packaged (an optional
    ``<type>|`` prefix tags each entry); otherwise the whole directory
    is added, filtered through _exclude().

    Raises:
        SPMInvocationError: no formula path was given.
        SPMPackageError: FORMULA is missing or lacks a required field.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')
    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]
    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)
    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )
    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])
    self.formula_conf = formula_conf
    formula_tar = tarfile.open(out_path, 'w:bz2')
    if 'files' in formula_conf:
        # This allows files to be added to the SPM file in a specific order.
        # It also allows for files to be tagged as a certain type, as with
        # RPM files. This tag is ignored here, but is used when installing
        # the SPM file.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    prefix = '{0}|'.format(ftype)
                    if file_.startswith(prefix):
                        # BUG FIX: str.lstrip() strips a *character set*,
                        # not a prefix — e.g. 'c|config'.lstrip('c|')
                        # yields 'onfig'. Slice the tag off instead.
                        file_ = file_[len(prefix):]
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # If no files are specified, then the whole directory will be added.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            # Very old tarfile versions use exclude= instead of filter=.
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()
    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
    '''
    Tar filter: return ``member`` unchanged when it should be packaged,
    or ``None`` to drop it, based on the ``spm_build_exclude`` option.
    '''
    # Plain path strings (the old tarfile ``exclude=`` API) are never kept.
    if isinstance(member, string_types):
        return None
    pkg_prefix = self.formula_conf['name']
    for excluded in self.opts['spm_build_exclude']:
        prefixes = (
            '{0}/{1}'.format(pkg_prefix, excluded),
            '{0}/{1}'.format(self.abspath, excluded),
        )
        if member.name.startswith(prefixes):
            return None
    return member
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script
    through the Salt rendering pipeline and return the resulting
    data structure.
    '''
    # FORMULA can contain a renderer option; fall back to the configured
    # renderer, then to the jinja|yaml default.
    renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
    rend = salt.loader.render(self.opts, {})
    blacklist = self.opts.get('renderer_blacklist')
    whitelist = self.opts.get('renderer_whitelist')
    # Expose the FORMULA fields (plus a copy of opts) as template variables.
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()
    return compile_template(
        ':string:',
        rend,
        renderer,
        blacklist,
        whitelist,
        input_data=data,
        **template_vars
    )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._repo_list
|
python
|
def _repo_list(self, args):
'''
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
'''
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
self.ui.status(repo)
|
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L230-L238
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None):  # pylint: disable=W0231
    # ui: interaction object providing status()/error()/confirm().
    # opts: pre-loaded SPM config dict; loaded from the default SPM
    # config file when not supplied.
    self.ui = ui
    if not opts:
        opts = salt.config.spm_config(
            os.path.join(syspaths.CONFIG_DIR, 'spm')
        )
    self.opts = opts
    # Pluggable provider names for the package database and file store.
    self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
    self.files_prov = self.opts.get('spm_files_provider', 'local')
    self._prep_pkgdb()
    self._prep_pkgfiles()
    # Provider connections are opened lazily by _init().
    self.db_conn = None
    self.files_conn = None
    self._init()
def _prep_pkgdb(self):
    # Load the package-database provider modules via the Salt loader.
    self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
    # Load the package-files provider modules via the Salt loader.
    self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
    # Lazily open the provider connections; safe to call repeatedly.
    if not self.db_conn:
        self.db_conn = self._pkgdb_fun('init')
    if not self.files_conn:
        self.files_conn = self._pkgfiles_fun('init')
def _close(self):
    '''
    Close the package-database connection, if one was opened.
    '''
    conn = self.db_conn
    if conn:
        conn.close()
def run(self, args):
    '''
    Run the SPM command.

    ``args[0]`` selects the subcommand; any SPMException raised while
    handling it is reported through the UI instead of propagating.
    '''
    command = args[0]
    handlers = {
        'install': self._install,
        'local': self._local,
        'repo': self._repo,
        'remove': self._remove,
        'build': self._build,
        'update_repo': self._download_repo_metadata,
        'create_repo': self._create_repo,
        'files': self._list_files,
        'info': self._info,
        'list': self._list,
    }
    try:
        if command == 'close':
            # 'close' takes no arguments, unlike every other handler.
            self._close()
        elif command in handlers:
            handlers[command](args)
        else:
            raise SPMInvocationError('Invalid command \'{0}\''.format(command))
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
    # Dispatch ``func`` to the configured database provider: prefer
    # attribute access on the loaded provider module and fall back to
    # the loader's dict-style '<provider>.<func>' lookup.
    # NOTE(review): this also swallows AttributeErrors raised *inside*
    # the provider function itself — confirm that is intended.
    try:
        return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
    except AttributeError:
        return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
    # Dispatch ``func`` to the configured files provider; same fallback
    # strategy (and same AttributeError caveat) as _pkgdb_fun.
    try:
        return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
    except AttributeError:
        return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
    '''
    Process ``spm list`` subcommands (packages / files / repos).
    '''
    handlers = {
        'packages': self._list_packages,
        'files': self._list_files,
        'repos': self._repo_list,
    }
    # Drop the leading 'list' token so handlers see their own args.
    args.pop(0)
    subcmd = args[0]
    if subcmd not in handlers:
        raise SPMInvocationError('Invalid list command \'{0}\''.format(subcmd))
    handlers[subcmd](args)
def _local(self, args):
    '''
    Process ``spm local`` subcommands (install / files / info),
    operating on .spm files rather than configured repos.
    '''
    handlers = {
        'install': self._local_install,
        'files': self._local_list_files,
        'info': self._local_info,
    }
    # Drop the leading 'local' token so handlers see their own args.
    args.pop(0)
    subcmd = args[0]
    if subcmd not in handlers:
        raise SPMInvocationError('Invalid local command \'{0}\''.format(subcmd))
    handlers[subcmd](args)
def _repo(self, args):
    '''
    Process ``repo`` subcommands (list / packages / search / update /
    create).
    '''
    args.pop(0)
    command = args[0]
    handlers = {
        'list': self._repo_list,
        'packages': self._repo_packages,
        'search': lambda repo_args: self._repo_packages(repo_args, search=True),
        'update': self._download_repo_metadata,
        'create': self._create_repo,
    }
    if command not in handlers:
        raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
    handlers[command](args)
def _repo_packages(self, args, search=False):
    '''
    List packages whose name contains args[1], across all configured
    repos. The ``search`` flag is accepted for interface compatibility;
    matching is identical either way.

    Returns a list of (package, version, release, repo) tuples.
    '''
    matches = []
    repo_metadata = self._get_repo_metadata()
    for repo in repo_metadata:
        repo_pkgs = repo_metadata[repo]['packages']
        for pkg in repo_pkgs:
            if args[1] in pkg:
                pkg_info = repo_pkgs[pkg]['info']
                matches.append((pkg, pkg_info['version'], pkg_info['release'], repo))
    for entry in sorted(matches):
        self.ui.status('{0}\t{1}-{2}\t{3}'.format(*entry))
    return matches
def _install(self, args):
    '''
    Install one or more packages from configured repos, or directly from
    local ``.spm`` files (arguments ending in ``.spm``).

    args -- CLI argument list; args[1:] are package names or .spm paths.

    Raises SPMInvocationError when no package is given or a named .spm
    file does not exist.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    # NOTE(review): this Cache instance is never used in this method.
    cache = salt.cache.Cache(self.opts)
    packages = args[1:]
    file_map = {}  # package name -> local .spm path (installed without download)
    optional = []
    recommended = []
    to_install = []
    for pkg in packages:
        if pkg.endswith('.spm'):
            if self._pkgfiles_fun('path_exists', pkg):
                # Derive the package name from <name>-<version>-<release>.spm
                comps = pkg.split('-')
                comps = os.path.split('-'.join(comps[:-2]))
                pkg_name = comps[-1]
                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = salt.utils.yaml.safe_load(formula_ref)
                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(
                    pkg_name=pkg_name,
                    pkg_file=pkg,
                    formula_def=formula_def
                )
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
                formula_tar.close()
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)
    # Report optional/recommended dependencies (empty strings filtered out)
    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))
    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)
    repo_metadata = self._get_repo_metadata()
    dl_list = {}
    for package in to_install:
        if package in file_map:
            # Local .spm file: install immediately, nothing to download
            self._install_indv_pkg(package, file_map[package])
        else:
            # Choose the best download candidate among all repos that
            # carry this package.
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    # NOTE(review): version/release comparisons below are
                    # plain string comparisons (e.g. '10' < '9') -- confirm.
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        # Check package version, replace if newer version
                        if repo_ver == dl_list[package]['version']:
                            # Version is the same, check release
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                # Version and release are the same, give
                                # preference to local (file://) repos
                                # NOTE(review): the checks below replace a
                                # file:// candidate with a non-file:// one,
                                # which looks inverted relative to this
                                # comment -- confirm intent.
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        dl_package = True
                    if dl_package is True:
                        # Put together download directory
                        cache_path = os.path.join(
                            self.opts['spm_cache_dir'],
                            repo
                        )
                        # Put together download paths
                        dl_url = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = os.path.join(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )
                        dl_list[package] = {
                            'version': repo_ver,
                            'release': repo_rel,
                            'source': dl_url,
                            'dest_dir': cache_path,
                            'dest_file': out_file,
                        }
    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']
        # Make sure download directory exists
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)
        # Download the package
        if dl_url.startswith('file://'):
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            # NOTE(review): repo_info here is whatever repo the loop above
            # visited last, not necessarily this package's repo -- confirm.
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, repo_info['info']))
    # First we download everything, then we install
    for package in dl_list:
        out_file = dl_list[package]['dest_file']
        # Kick off the install
        self._install_indv_pkg(package, out_file)
    return
def _local_install(self, args, pkg_name=None):
    '''
    Install a package from a file

    Thin wrapper around _install, which already handles arguments ending
    in ``.spm`` as local package files. ``pkg_name`` is accepted for
    interface compatibility but unused here.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package file must be specified')
    self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
    '''
    Starting with one package, check all packages for dependencies

    Returns a 3-tuple of lists:
    (packages to install, optional deps, recommended deps). Optional and
    recommended lists contain each dependency twice: once raw and once
    annotated (possibly with '[Installed]').

    Raises SPMInvocationError for missing files/formulas and
    SPMPackageError when already installed (unless ``force``) or when
    required dependencies are unavailable.
    '''
    if pkg_file and not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
    self.repo_metadata = self._get_repo_metadata()
    if not formula_def:
        # Look the formula up in repo metadata when not supplied directly
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            if pkg_name in self.repo_metadata[repo]['packages']:
                formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
    if not formula_def:
        raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
    # Check to see if the package is already installed
    pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    pkgs_to_install = []
    if pkg_info is None or self.opts['force']:
        pkgs_to_install.append(pkg_name)
    elif pkg_info is not None and not self.opts['force']:
        raise SPMPackageError(
            'Package {0} already installed, not installing again'.format(formula_def['name'])
        )
    optional_install = []
    recommended_install = []
    if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
        # Map every available package to the repo that provides it, used
        # by _resolve_deps for recursive resolution.
        self.avail_pkgs = {}
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            for pkg in self.repo_metadata[repo]['packages']:
                self.avail_pkgs[pkg] = repo
        needs, unavail, optional, recommended = self._resolve_deps(formula_def)
        if unavail:
            raise SPMPackageError(
                'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                    formula_def['name'], '\n'.join(unavail))
            )
        if optional:
            optional_install.extend(optional)
            for dep_pkg in optional:
                # NOTE(review): this looks up the *parent* package
                # (formula_def['name']), not dep_pkg, so the '[Installed]'
                # annotation reflects the wrong package -- confirm.
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                optional_install.append(msg)
        if recommended:
            recommended_install.extend(recommended)
            for dep_pkg in recommended:
                # NOTE(review): same wrong-package lookup as above -- confirm.
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                recommended_install.append(msg)
        if needs:
            pkgs_to_install.extend(needs)
            for dep_pkg in needs:
                # NOTE(review): msg computed in this loop is never used
                # (dead code) -- confirm before removing.
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
    return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package

    pkg_name -- package name (also the top-level directory inside the
    tarball, containing FORMULA)
    pkg_file -- path to the .spm (bzip2 tar) file

    Registers the package and each installed file in the package
    database, and runs any pre/post local/targeted state scripts the
    FORMULA declares.
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)
    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
    pkg_files = formula_tar.getmembers()
    # First pass: check for files that already exist
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )
    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        ret = self.caller.cmd('state.high', data=high_data)
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        tgt = formula_def['pre_tgt_state']['tgt']
        # NOTE(review): 'timout' looks like a typo for 'timeout' --
        # confirm against the client's run_job signature.
        ret = self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            timout=self.opts['timeout'],
            data=high_data,
        )
    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]
    # Second pass: install the files, forcing ownership to the values
    # computed above
    for member in pkg_files:
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname
        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                # Directories get no content hash
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)
    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        tgt = formula_def['post_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            timout=self.opts['timeout'],
            data=high_data,
        )
    formula_tar.close()
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies

    formula_def -- a formula info dict; 'dependencies', 'optional' and
    'recommended' are comma-separated strings.

    Returns a 4-tuple:
    (dict of resolvable dep -> providing repo, sorted list of
    unresolvable deps, sorted optional deps, sorted recommended deps).
    Recurses into each resolvable dependency's own formula.
    '''
    # NOTE(review): pkg_info is computed but never used below -- confirm.
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}
    can_has = {}
    cant_has = []
    # Normalize a null 'dependencies' entry to an empty string
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Already-installed dependencies are satisfied; skip them
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue
        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)
    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')
    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]
        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)
        # Recurse into the dependency's own formula (empty dict if the
        # repo metadata is missing pieces)
        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})
        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))
    return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided in
    the callback to them

    callback -- invoked as ``callback(repo, repo_data[repo])`` for each
    enabled repo definition found.
    repo_name -- if given, only the repo with this name is processed.

    Repo definitions come from the main spm_repos_config file (if it
    exists) plus every ``*.repo`` file in the matching ``.d`` directory.
    '''
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])

    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            repo_files.append(repo_file)

    for repo_file in repo_files:
        # BUGFIX: the old '{0}.d/{1}'.format(...) mangled the path of the
        # main config file appended above (producing
        # '<cfg>.d/<absolute cfg path>'). os.path.join lets an absolute
        # path pass through unchanged while still resolving the bare
        # *.repo filenames inside the .d directory.
        repo_path = os.path.join('{0}.d'.format(self.opts['spm_repos_config']), repo_file)
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
        for repo in repo_data:
            if repo_data[repo].get('enabled', True) is False:
                continue
            if repo_name is not None and repo != repo_name:
                continue
            callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http

    dl_path -- URL to fetch
    repo_info -- repo config dict; may carry 'username'/'password' for
    authenticated requests (a username without a password is an error).

    Returns parsed YAML when the URL points at SPM-METADATA, otherwise
    the raw response text. Errors are reported through the UI and result
    in a None return rather than an exception.
    '''
    query = None
    response = None
    try:
        if 'username' in repo_info:
            try:
                if 'password' in repo_info:
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
    try:
        if query:
            if 'SPM-METADATA' in dl_path:
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            # Any failure above leaves query as None; surface a message
            # (caught immediately below) and fall through to return None
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
    return response
def _download_repo_metadata(self, args):
    '''
    Connect to all repos and download metadata

    args[1], when present, restricts the update to that single repo.
    Fetched SPM-METADATA is stored in the SPM cache keyed by repo name.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    def _update_metadata(repo, repo_info):
        # Read SPM-METADATA from a file:// path directly, or via HTTP,
        # then persist it in the cache bank
        dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
        if dl_path.startswith('file://'):
            dl_path = dl_path.replace('file://', '')
            with salt.utils.files.fopen(dl_path, 'r') as rpm:
                metadata = salt.utils.yaml.safe_load(rpm)
        else:
            metadata = self._query_http(dl_path, repo_info)
        cache.store('.', repo, metadata)
    repo_name = args[1] if len(args) > 1 else None
    self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata

    Returns a dict mapping repo name -> {'info': repo config dict,
    'packages': cached package metadata}. Repos with no cache entry
    trigger a full metadata download first.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    metadata = {}
    def _read_metadata(repo, repo_info):
        if cache.updated('.', repo) is None:
            # Nothing cached for this repo yet; refresh all repo metadata
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})
        metadata[repo] = {
            'info': repo_info,
            'packages': cache.fetch('.', repo),
        }
    self._traverse_repos(_read_metadata)
    return metadata
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    args[1] -- the repo directory ('.' for the current directory).

    When several builds of the same package are found, only the newest
    version/release is indexed; older files are ignored, archived, or
    deleted according to the ``spm_repo_dups`` option.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')

    if args[1] == '.':
        # BUGFIX: os.getcwdu() only exists on Python 2 and raises
        # AttributeError on Python 3; fall back to os.getcwd() there.
        try:
            repo_path = os.getcwdu()
        except AttributeError:
            repo_path = os.getcwd()
    else:
        repo_path = args[1]

    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue

            # Package name is everything before -<version>-<release>.spm
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])

            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())

            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False

                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)

            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file

    # Write the aggregated index next to the packages
    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )

    log.debug('Wrote %s', metadata_filename)

    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove a package

    args[1:] are installed package names. Files whose current on-disk
    hash matches the hash recorded at install time are deleted; modified
    files are left in place. Package directories are removed afterwards
    if they are empty, and every file/dir is unregistered from the
    package database.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)
    for package in packages:
        self.ui.status('... removing {0}'.format(package))
        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))
        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            # filerow[0] is the path, filerow[1] the recorded hash
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                dirs.append(filerow[0])
                continue
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                # File changed since install; keep it on disk
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
        # Clean up directories, deepest first
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
    '''
    Display verbose information

    msg -- message text
    level -- logging callable that always receives the message
    (default: log.debug)
    '''
    if self.opts.get('verbose', False) is True:
        self.ui.status(msg)
    # Always log, regardless of the verbose option
    level(msg)
def _local_info(self, args):
    '''
    List info for a package file

    args[1] must be the path to a local .spm (bzip2 tar) file; its
    FORMULA is loaded and rendered through _get_info.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')
    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
    # Derive the package name from <name>-<version>-<release>.spm
    # NOTE(review): splitting on every '-' means directory components
    # containing dashes can corrupt the derived name -- confirm the
    # expected naming convention.
    comps = pkg_file.split('-')
    comps = '-'.join(comps[:-2]).split('/')
    name = comps[-1]
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)
    self.ui.status(self._get_info(formula_def))
    formula_tar.close()
def _info(self, args):
    '''
    Display metadata for an installed package (args[1]), looked up in
    the local package database.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    pkg_name = args[1]
    details = self._pkgdb_fun('info', pkg_name, self.db_conn)
    if details is None:
        raise SPMPackageError('package {0} not installed'.format(pkg_name))
    self.ui.status(self._get_info(details))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
    '''
    List files for a package file

    args[1] must be the path to a local .spm (bzip2 tar) file; each
    member name is printed through the UI.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')
    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMPackageError('Package file {0} not found'.format(pkg_file))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    try:
        for member in formula_tar.getmembers():
            self.ui.status(member.name)
    finally:
        # BUGFIX: the original never closed the tarfile, leaking the
        # file handle; always close it, even if listing fails.
        formula_tar.close()
def _list_packages(self, args):
    '''
    List installed packages registered in the package database. With the
    verbose option, the full database row is shown comma-separated;
    otherwise only the package name.
    '''
    for package in self._pkgdb_fun('list_packages', self.db_conn):
        if self.opts['verbose']:
            self.ui.status(','.join(package))
        else:
            self.ui.status(package[0])
def _list_files(self, args):
    '''
    List files for an installed package (the last argument). With the
    verbose option, the full file row is shown comma-separated;
    otherwise only the path.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package name must be specified')
    package = args[-1]
    files = self._pkgdb_fun('list_files', package, self.db_conn)
    if files is None:
        raise SPMPackageError('package {0} not installed'.format(package))
    for file_ in files:
        line = ','.join(file_) if self.opts['verbose'] else file_[0]
        self.ui.status(line)
def _build(self, args):
    '''
    Build a package

    args[1] is the path to a formula directory containing a FORMULA
    file. The resulting <name>-<version>-<release>.spm (bzip2 tarball)
    is written into opts['spm_build_dir'].
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')
    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]

    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)

    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))

    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )

    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])

    self.formula_conf = formula_conf

    formula_tar = tarfile.open(out_path, 'w:bz2')

    if 'files' in formula_conf:
        # This allows files to be added to the SPM file in a specific order.
        # It also allows for files to be tagged as a certain type, as with
        # RPM files. This tag is ignored here, but is used when installing
        # the SPM file.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    tag = '{0}|'.format(ftype)
                    if file_.startswith(tag):
                        # BUGFIX: str.lstrip() strips a *character set*,
                        # not a prefix, so it also ate leading filename
                        # characters that happened to appear in the tag
                        # (e.g. 'd|docs/x' became 'ocs/x'). Slice the tag
                        # off instead.
                        file_ = file_[len(tag):]
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # If no files are specified, then the whole directory will be added.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            # Fall back to the pre-2.7 tarfile API without ``filter``
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()

    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
    '''
    Exclude based on opts

    Used as the tarfile ``filter=`` (or legacy ``exclude=``) callback
    while building a package: returns None to skip a member, or the
    member unchanged to include it.

    member -- a tarfile.TarInfo under the ``filter=`` API, or a path
    string under the legacy ``exclude=`` API (strings always yield None).
    '''
    if isinstance(member, string_types):
        return None
    # Skip any member matching an spm_build_exclude entry, whether the
    # archive path is rooted at the package name or at the source path
    for item in self.opts['spm_build_exclude']:
        if member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], item)):
            return None
        elif member.name.startswith('{0}/{1}'.format(self.abspath, item)):
            return None
    return member
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script

    data -- raw state template text
    formula_def -- formula dict; its keys (plus a copy of ``opts``)
    become template variables, and it may override the renderer via a
    'renderer' key.

    Returns the compiled high data produced by compile_template.
    '''
    # FORMULA can contain a renderer option
    renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
    rend = salt.loader.render(self.opts, {})
    blacklist = self.opts.get('renderer_blacklist')
    whitelist = self.opts.get('renderer_whitelist')
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()
    return compile_template(
        ':string:',
        rend,
        renderer,
        blacklist,
        whitelist,
        input_data=data,
        **template_vars
    )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._install
|
python
|
def _install(self, args):
'''
Install a package from a repo
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
caller_opts = self.opts.copy()
caller_opts['file_client'] = 'local'
self.caller = salt.client.Caller(mopts=caller_opts)
self.client = salt.client.get_local_client(self.opts['conf_file'])
cache = salt.cache.Cache(self.opts)
packages = args[1:]
file_map = {}
optional = []
recommended = []
to_install = []
for pkg in packages:
if pkg.endswith('.spm'):
if self._pkgfiles_fun('path_exists', pkg):
comps = pkg.split('-')
comps = os.path.split('-'.join(comps[:-2]))
pkg_name = comps[-1]
formula_tar = tarfile.open(pkg, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
file_map[pkg_name] = pkg
to_, op_, re_ = self._check_all_deps(
pkg_name=pkg_name,
pkg_file=pkg,
formula_def=formula_def
)
to_install.extend(to_)
optional.extend(op_)
recommended.extend(re_)
formula_tar.close()
else:
raise SPMInvocationError('Package file {0} not found'.format(pkg))
else:
to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
to_install.extend(to_)
optional.extend(op_)
recommended.extend(re_)
optional = set(filter(len, optional))
if optional:
self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
'\n\t'.join(optional)
))
recommended = set(filter(len, recommended))
if recommended:
self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
'\n\t'.join(recommended)
))
to_install = set(filter(len, to_install))
msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
if not self.opts['assume_yes']:
self.ui.confirm(msg)
repo_metadata = self._get_repo_metadata()
dl_list = {}
for package in to_install:
if package in file_map:
self._install_indv_pkg(package, file_map[package])
else:
for repo in repo_metadata:
repo_info = repo_metadata[repo]
if package in repo_info['packages']:
dl_package = False
repo_ver = repo_info['packages'][package]['info']['version']
repo_rel = repo_info['packages'][package]['info']['release']
repo_url = repo_info['info']['url']
if package in dl_list:
# Check package version, replace if newer version
if repo_ver == dl_list[package]['version']:
# Version is the same, check release
if repo_rel > dl_list[package]['release']:
dl_package = True
elif repo_rel == dl_list[package]['release']:
# Version and release are the same, give
# preference to local (file://) repos
if dl_list[package]['source'].startswith('file://'):
if not repo_url.startswith('file://'):
dl_package = True
elif repo_ver > dl_list[package]['version']:
dl_package = True
else:
dl_package = True
if dl_package is True:
# Put together download directory
cache_path = os.path.join(
self.opts['spm_cache_dir'],
repo
)
# Put together download paths
dl_url = '{0}/{1}'.format(
repo_info['info']['url'],
repo_info['packages'][package]['filename']
)
out_file = os.path.join(
cache_path,
repo_info['packages'][package]['filename']
)
dl_list[package] = {
'version': repo_ver,
'release': repo_rel,
'source': dl_url,
'dest_dir': cache_path,
'dest_file': out_file,
}
for package in dl_list:
dl_url = dl_list[package]['source']
cache_path = dl_list[package]['dest_dir']
out_file = dl_list[package]['dest_file']
# Make sure download directory exists
if not os.path.exists(cache_path):
os.makedirs(cache_path)
# Download the package
if dl_url.startswith('file://'):
dl_url = dl_url.replace('file://', '')
shutil.copyfile(dl_url, out_file)
else:
with salt.utils.files.fopen(out_file, 'w') as outf:
outf.write(self._query_http(dl_url, repo_info['info']))
# First we download everything, then we install
for package in dl_list:
out_file = dl_list[package]['dest_file']
# Kick off the install
self._install_indv_pkg(package, out_file)
return
|
Install a package from a repo
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L240-L380
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None): # pylint: disable=W0231
self.ui = ui
if not opts:
opts = salt.config.spm_config(
os.path.join(syspaths.CONFIG_DIR, 'spm')
)
self.opts = opts
self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
self.files_prov = self.opts.get('spm_files_provider', 'local')
self._prep_pkgdb()
self._prep_pkgfiles()
self.db_conn = None
self.files_conn = None
self._init()
def _prep_pkgdb(self):
self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
if not self.db_conn:
self.db_conn = self._pkgdb_fun('init')
if not self.files_conn:
self.files_conn = self._pkgfiles_fun('init')
def _close(self):
if self.db_conn:
self.db_conn.close()
def run(self, args):
'''
Run the SPM command
'''
command = args[0]
try:
if command == 'install':
self._install(args)
elif command == 'local':
self._local(args)
elif command == 'repo':
self._repo(args)
elif command == 'remove':
self._remove(args)
elif command == 'build':
self._build(args)
elif command == 'update_repo':
self._download_repo_metadata(args)
elif command == 'create_repo':
self._create_repo(args)
elif command == 'files':
self._list_files(args)
elif command == 'info':
self._info(args)
elif command == 'list':
self._list(args)
elif command == 'close':
self._close()
else:
raise SPMInvocationError('Invalid command \'{0}\''.format(command))
except SPMException as exc:
self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args)
elif command == 'files':
self._list_files(args)
elif command == 'repos':
self._repo_list(args)
else:
raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'install':
self._local_install(args)
elif command == 'files':
self._local_list_files(args)
elif command == 'info':
self._local_info(args)
else:
raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
def _repo(self, args):
'''
Process repo commands
'''
args.pop(0)
command = args[0]
if command == 'list':
self._repo_list(args)
elif command == 'packages':
self._repo_packages(args)
elif command == 'search':
self._repo_packages(args, search=True)
elif command == 'update':
self._download_repo_metadata(args)
elif command == 'create':
self._create_repo(args)
else:
raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages
def _repo_list(self, args):
'''
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
'''
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
self.ui.status(repo)
def _local_install(self, args, pkg_name=None):
'''
Install a package from a file
'''
if len(args) < 2:
raise SPMInvocationError('A package file must be specified')
self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
'''
Starting with one package, check all packages for dependencies
'''
if pkg_file and not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
self.repo_metadata = self._get_repo_metadata()
if not formula_def:
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
if pkg_name in self.repo_metadata[repo]['packages']:
formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
if not formula_def:
raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
# Check to see if the package is already installed
pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
pkgs_to_install = []
if pkg_info is None or self.opts['force']:
pkgs_to_install.append(pkg_name)
elif pkg_info is not None and not self.opts['force']:
raise SPMPackageError(
'Package {0} already installed, not installing again'.format(formula_def['name'])
)
optional_install = []
recommended_install = []
if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
self.avail_pkgs = {}
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
for pkg in self.repo_metadata[repo]['packages']:
self.avail_pkgs[pkg] = repo
needs, unavail, optional, recommended = self._resolve_deps(formula_def)
if unavail:
raise SPMPackageError(
'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
formula_def['name'], '\n'.join(unavail))
)
if optional:
optional_install.extend(optional)
for dep_pkg in optional:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
optional_install.append(msg)
if recommended:
recommended_install.extend(recommended)
for dep_pkg in recommended:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
recommended_install.append(msg)
if needs:
pkgs_to_install.extend(needs)
for dep_pkg in needs:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package from a local .spm (bzip2 tar) file.

    Registers the package and each installed file in the package db, runs
    any pre/post local and targeted states declared in the FORMULA, and
    installs the payload with the configured (or current) uid/gid.

    Args:
        pkg_name: package name (top-level directory inside the tarball).
        pkg_file: path to the .spm file.

    Raises:
        SPMPackageError: malformed FORMULA, or conflicting existing files
            found while ``force`` is not set.
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)

    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))

    pkg_files = formula_tar.getmembers()

    # First pass: check for files that already exist
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)

    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )

    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)

    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        ret = self.caller.cmd('state.high', data=high_data)
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        tgt = formula_def['pre_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            # BUGFIX: was misspelled ``timout``, so run_job forwarded it as
            # an unknown kwarg instead of applying the configured timeout.
            timeout=self.opts['timeout'],
            data=high_data,
        )

    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]

    # Second pass: install the files
    for member in pkg_files:
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname

        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                # Directories carry no content hash.
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)

    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        tgt = formula_def['post_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            timeout=self.opts['timeout'],  # BUGFIX: was ``timout``
            data=high_data,
        )

    formula_tar.close()
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies

    Recursively walks the comma-separated ``dependencies`` field and
    returns ``(can_has, cant_has, optional, recommended)``: ``can_has``
    maps resolvable dependency names to the repo providing them,
    ``cant_has`` lists dependencies found in no repo, and
    ``optional``/``recommended`` accumulate those FORMULA fields.

    NOTE(review): relies on ``self.avail_pkgs`` and ``self.repo_metadata``
    having been populated by the caller (``_check_all_deps``) — confirm
    before reusing elsewhere.
    '''
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}

    can_has = {}
    cant_has = []
    # Normalize a YAML "dependencies:" with no value to an empty string.
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Already installed locally; nothing to resolve.
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue

        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)

    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')

    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]

        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)

        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})

        # Recurse into the dependency's own formula and merge the results.
        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))

    return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided in
    the callback to them

    Reads the main ``spm_repos_config`` file (if present) plus every
    ``*.repo`` file under ``<spm_repos_config>.d/``, skipping repos marked
    ``enabled: False``. When ``repo_name`` is given, only that repo is
    passed to ``callback(repo, repo_data)``.
    '''
    # BUGFIX: collect full paths up front. The old code mixed the main
    # config's absolute path with the .d files' bare names in one list and
    # then prefixed *every* entry with '<config>.d/', which produced a
    # bogus, unopenable path for the main config file.
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])

    repo_dir = '{0}.d'.format(self.opts['spm_repos_config'])
    for dirpath, dirnames, filenames in salt.utils.path.os_walk(repo_dir):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            repo_files.append(os.path.join(dirpath, repo_file))

    for repo_path in repo_files:
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
            for repo in repo_data:
                if repo_data[repo].get('enabled', True) is False:
                    continue
                if repo_name is not None and repo != repo_name:
                    continue
                callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http

    Uses HTTP basic auth when the repo config supplies ``username`` and
    ``password``. For SPM-METADATA URLs the response body is parsed as
    YAML; otherwise the raw text is returned. SPM errors are reported via
    the UI and ``None`` is returned instead of raising.
    '''
    query = None
    response = None

    try:
        if 'username' in repo_info:
            try:
                if 'password' in repo_info:
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    # A username without a password is a config error.
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    try:
        if query:
            if 'SPM-METADATA' in dl_path:
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            # The earlier error was already shown; surface a summary here.
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    return response
def _download_repo_metadata(self, args):
    '''
    Connect to all repos and download metadata

    When ``args[1]`` names a repo, only that repo is refreshed. ``file://``
    URLs are read directly from disk; anything else goes through
    ``_query_http``. Results are stored in the SPM cache, keyed by repo.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

    def _update_metadata(repo, repo_info):
        # Fetch this repo's SPM-METADATA and cache it under the repo name.
        dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
        if dl_path.startswith('file://'):
            dl_path = dl_path.replace('file://', '')
            with salt.utils.files.fopen(dl_path, 'r') as rpm:
                metadata = salt.utils.yaml.safe_load(rpm)
        else:
            metadata = self._query_http(dl_path, repo_info)
        cache.store('.', repo, metadata)

    repo_name = args[1] if len(args) > 1 else None
    self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata

    Returns a dict keyed by repo name:
    ``{'info': <repo config>, 'packages': <cached SPM-METADATA>}``.
    Any repo with no cached entry triggers a full metadata download first.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    metadata = {}

    def _read_metadata(repo, repo_info):
        # A None timestamp means nothing has been cached for this repo yet.
        if cache.updated('.', repo) is None:
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})

        metadata[repo] = {
            'info': repo_info,
            'packages': cache.fetch('.', repo),
        }

    self._traverse_repos(_read_metadata)
    return metadata
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    When several builds of one package are found, only the newest
    version/release is indexed; older files are ignored, archived or
    deleted according to the ``spm_repo_dups`` option.

    Raises:
        SPMInvocationError: no directory argument given.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')

    if args[1] == '.':
        # BUGFIX: os.getcwdu() only exists on Python 2; use the text-mode
        # equivalent on Python 3.
        repo_path = os.getcwdu() if six.PY2 else os.getcwd()
    else:
        repo_path = args[1]

    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # Package name is the filename minus '-<version>-<release>'.
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())

            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False

                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)

            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file

    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )

    log.debug('Wrote %s', metadata_filename)

    # Dispose of superseded package files per the spm_repo_dups policy.
    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove a package

    For each named package: files whose stored hash still matches are
    deleted, locally-modified files are left on disk, emptied directories
    are pruned, and the package is unregistered from the db. Prompts for
    confirmation unless ``assume_yes`` is set.

    Raises:
        SPMInvocationError: no package given, or package not installed.
        SPMDatabaseError: the package database does not exist.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))

    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    for package in packages:
        self.ui.status('... removing {0}'.format(package))

        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))

        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))

        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                dirs.append(filerow[0])
                continue
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                # Hash unchanged since install; safe to delete.
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                # File was modified locally; leave it in place.
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)

        # Clean up directories
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)

        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
    '''
    Emit ``msg`` through the UI when the ``verbose`` option is exactly
    True, and always log it at the given level (default: debug).
    '''
    verbose_on = self.opts.get('verbose', False)
    if verbose_on is True:
        self.ui.status(msg)
    level(msg)
def _local_info(self, args):
    '''
    Print FORMULA info for a local .spm package file (``args[1]``).
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')

    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    # Derive the package name: drop '-<version>-<release>' and any
    # leading directory components from the filename.
    name = '-'.join(pkg_file.split('-')[:-2]).split('/')[-1]

    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
    self.ui.status(self._get_info(salt.utils.yaml.safe_load(formula_ref)))
    formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
    '''
    Build an .spm package from a formula directory (``args[1]``).

    The directory must contain a FORMULA file defining at least name,
    version, release, summary and description. The resulting bzip2
    tarball is written to ``spm_build_dir``.

    Raises:
        SPMInvocationError: no formula path given.
        SPMPackageError: FORMULA missing or incomplete.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')

    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]

    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)

    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))

    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )

    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])

    self.formula_conf = formula_conf

    formula_tar = tarfile.open(out_path, 'w:bz2')

    if 'files' in formula_conf:
        # This allows files to be added to the SPM file in a specific order.
        # It also allows for files to be tagged as a certain type, as with
        # RPM files. This tag is ignored here, but is used when installing
        # the SPM file.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    if file_.startswith('{0}|'.format(ftype)):
                        # BUGFIX: str.lstrip() strips a *character set*, not
                        # a prefix — 'c|conf' would also lose the leading
                        # 'c' of 'conf'. Slice off the 'type|' prefix.
                        file_ = file_[len(ftype) + 1:]
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # If no files are specified, then the whole directory will be added.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            # Older tarfile (Python 2.6) has no ``filter``; use ``exclude``.
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()
    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
    '''
    Filter callback for tarfile.add(): return None to drop members whose
    name falls under any ``spm_build_exclude`` entry, otherwise return
    the member unchanged. Plain strings (old ``exclude=`` API) always
    yield None.
    '''
    if isinstance(member, string_types):
        return None

    for item in self.opts['spm_build_exclude']:
        by_name = '{0}/{1}'.format(self.formula_conf['name'], item)
        by_path = '{0}/{1}'.format(self.abspath, item)
        if member.name.startswith(by_name) or member.name.startswith(by_path):
            return None
    return member
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script

    ``data`` is rendered with the renderer named in the FORMULA (falling
    back to the configured default, then 'jinja|yaml'), with the formula
    fields plus a copy of ``opts`` exposed as template variables.
    '''
    # FORMULA can contain a renderer option
    renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
    rend = salt.loader.render(self.opts, {})
    blacklist = self.opts.get('renderer_blacklist')
    whitelist = self.opts.get('renderer_whitelist')
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()
    return compile_template(
        ':string:',
        rend,
        renderer,
        blacklist,
        whitelist,
        input_data=data,
        **template_vars
    )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._local_install
|
python
|
def _local_install(self, args, pkg_name=None):
'''
Install a package from a file
'''
if len(args) < 2:
raise SPMInvocationError('A package file must be specified')
self._install(args)
|
Install a package from a file
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L382-L389
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None):  # pylint: disable=W0231
    '''
    :param ui: user-interface object providing status/error/confirm.
    :param opts: salt opts dict; loaded from the spm config file when not
        supplied.
    '''
    self.ui = ui
    if not opts:
        opts = salt.config.spm_config(
            os.path.join(syspaths.CONFIG_DIR, 'spm')
        )
    self.opts = opts
    # Provider names for the package database and package files backends.
    self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
    self.files_prov = self.opts.get('spm_files_provider', 'local')
    self._prep_pkgdb()
    self._prep_pkgfiles()
    # Connections are opened lazily by _init().
    self.db_conn = None
    self.files_conn = None
    self._init()
def _prep_pkgdb(self):
    # Load the package-database provider modules via the salt loader.
    self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
    # Load the package-files provider modules via the salt loader.
    self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
    '''
    Open the package-db and package-files connections if not already open.
    '''
    if not self.db_conn:
        self.db_conn = self._pkgdb_fun('init')
    if not self.files_conn:
        self.files_conn = self._pkgfiles_fun('init')
def _close(self):
    '''
    Close the package-database connection, if one was opened.

    NOTE(review): ``files_conn`` is not closed here — presumably the
    files provider needs no teardown, but confirm.
    '''
    if self.db_conn:
        self.db_conn.close()
def run(self, args):
    '''
    Run the SPM command

    ``args[0]`` selects the top-level command; every SPM error is caught
    here and reported through the UI instead of propagating.
    '''
    command = args[0]
    try:
        if command == 'install':
            self._install(args)
        elif command == 'local':
            self._local(args)
        elif command == 'repo':
            self._repo(args)
        elif command == 'remove':
            self._remove(args)
        elif command == 'build':
            self._build(args)
        elif command == 'update_repo':
            self._download_repo_metadata(args)
        elif command == 'create_repo':
            self._create_repo(args)
        elif command == 'files':
            self._list_files(args)
        elif command == 'info':
            self._info(args)
        elif command == 'list':
            self._list(args)
        elif command == 'close':
            self._close()
        else:
            raise SPMInvocationError('Invalid command \'{0}\''.format(command))
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
    '''
    Call ``func`` on the configured package-db provider, preferring
    attribute access and falling back to the loader's dict-style
    ``'provider.func'`` lookup on AttributeError.
    '''
    try:
        return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
    except AttributeError:
        return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
    '''
    Call ``func`` on the configured package-files provider; mirrors
    ``_pkgdb_fun``.
    '''
    try:
        return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
    except AttributeError:
        return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
    '''
    Process ``spm list <packages|files|repos>`` subcommands.
    '''
    args.pop(0)
    command = args[0]
    if command == 'packages':
        self._list_packages(args)
    elif command == 'files':
        self._list_files(args)
    elif command == 'repos':
        self._repo_list(args)
    else:
        raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
    '''
    Process ``spm local <install|files|info>`` subcommands, which operate
    on local .spm files instead of configured repos.
    '''
    args.pop(0)
    command = args[0]
    if command == 'install':
        self._local_install(args)
    elif command == 'files':
        self._local_list_files(args)
    elif command == 'info':
        self._local_info(args)
    else:
        raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
def _repo(self, args):
    '''
    Process ``spm repo <list|packages|search|update|create>`` subcommands.
    '''
    args.pop(0)
    command = args[0]
    if command == 'list':
        self._repo_list(args)
    elif command == 'packages':
        self._repo_packages(args)
    elif command == 'search':
        self._repo_packages(args, search=True)
    elif command == 'update':
        self._download_repo_metadata(args)
    elif command == 'create':
        self._create_repo(args)
    else:
        raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
    '''
    List packages for one or more configured repos

    Prints and returns every package whose name contains the query term
    (``args[1]``) as ``(name, version, release, repo)`` tuples.

    Note: ``search`` is accepted for the ``repo search`` subcommand but
    does not change behaviour here.
    '''
    packages = []
    repo_metadata = self._get_repo_metadata()
    for repo in repo_metadata:
        for pkg in repo_metadata[repo]['packages']:
            if args[1] in pkg:
                version = repo_metadata[repo]['packages'][pkg]['info']['version']
                release = repo_metadata[repo]['packages'][pkg]['info']['release']
                packages.append((pkg, version, release, repo))
    for pkg in sorted(packages):
        self.ui.status(
            '{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
        )
    return packages
def _repo_list(self, args):
    '''
    List configured repos
    This can be called either as a ``repo`` command or a ``list`` command
    '''
    repo_metadata = self._get_repo_metadata()
    for repo in repo_metadata:
        self.ui.status(repo)
def _install(self, args):
    '''
    Install a package from a repo

    Each argument after the command is either a local ``.spm`` file or a
    package name to be resolved against the configured repos. After
    dependency resolution (``_check_all_deps``) and user confirmation,
    the newest matching build of each package is downloaded (preferring
    local ``file://`` repos on version/release ties) and installed via
    ``_install_indv_pkg``.

    Raises:
        SPMInvocationError: no package given, or a named .spm file is
            missing.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    # Local caller/client are used later by _install_indv_pkg to run
    # pre/post states declared in the FORMULA.
    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    cache = salt.cache.Cache(self.opts)

    packages = args[1:]
    file_map = {}      # package name -> local .spm path, for file installs
    optional = []
    recommended = []
    to_install = []
    for pkg in packages:
        if pkg.endswith('.spm'):
            if self._pkgfiles_fun('path_exists', pkg):
                # Package name = filename minus '-<version>-<release>'.
                comps = pkg.split('-')
                comps = os.path.split('-'.join(comps[:-2]))
                pkg_name = comps[-1]

                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = salt.utils.yaml.safe_load(formula_ref)

                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(
                    pkg_name=pkg_name,
                    pkg_file=pkg,
                    formula_def=formula_def
                )
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
                formula_tar.close()
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)

    # filter(len, ...) drops empty strings before deduplication.
    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))

    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    repo_metadata = self._get_repo_metadata()

    dl_list = {}
    for package in to_install:
        if package in file_map:
            # Supplied directly as a local file; install immediately.
            self._install_indv_pkg(package, file_map[package])
        else:
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        # Check package version, replace if newer version
                        if repo_ver == dl_list[package]['version']:
                            # Version is the same, check release
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                # Version and release are the same, give
                                # preference to local (file://) repos
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        dl_package = True

                    if dl_package is True:
                        # Put together download directory
                        cache_path = os.path.join(
                            self.opts['spm_cache_dir'],
                            repo
                        )

                        # Put together download paths
                        dl_url = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = os.path.join(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )

                        dl_list[package] = {
                            'version': repo_ver,
                            'release': repo_rel,
                            'source': dl_url,
                            'dest_dir': cache_path,
                            'dest_file': out_file,
                        }

    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']

        # Make sure download directory exists
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)

        # Download the package
        if dl_url.startswith('file://'):
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            # NOTE(review): ``repo_info`` here is whatever the last loop
            # iteration above left behind — it may not be the repo this
            # package is downloaded from; confirm.
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, repo_info['info']))

    # First we download everything, then we install
    for package in dl_list:
        out_file = dl_list[package]['dest_file']

        # Kick off the install
        self._install_indv_pkg(package, out_file)
    return
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
    '''
    Starting with one package, check all packages for dependencies

    Returns ``(pkgs_to_install, optional_install, recommended_install)``.

    Raises:
        SPMInvocationError: missing package file or unreadable formula.
        SPMPackageError: already installed (without ``force``) or
            unresolvable hard dependencies.
    '''
    if pkg_file and not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    self.repo_metadata = self._get_repo_metadata()
    if not formula_def:
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            if pkg_name in self.repo_metadata[repo]['packages']:
                formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']

    if not formula_def:
        raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))

    # Check to see if the package is already installed
    pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    pkgs_to_install = []
    if pkg_info is None or self.opts['force']:
        pkgs_to_install.append(pkg_name)
    elif pkg_info is not None and not self.opts['force']:
        raise SPMPackageError(
            'Package {0} already installed, not installing again'.format(formula_def['name'])
        )

    optional_install = []
    recommended_install = []
    if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
        self.avail_pkgs = {}
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            for pkg in self.repo_metadata[repo]['packages']:
                self.avail_pkgs[pkg] = repo

        needs, unavail, optional, recommended = self._resolve_deps(formula_def)

        if unavail:
            raise SPMPackageError(
                'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                    formula_def['name'], '\n'.join(unavail))
            )

        if optional:
            optional_install.extend(optional)
            for dep_pkg in optional:
                # NOTE(review): this queries the *parent* package's info,
                # not dep_pkg's — likely a bug; confirm intent.
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                # NOTE(review): deps were already added via extend() above,
                # so each optional dep ends up listed twice.
                optional_install.append(msg)

        if recommended:
            recommended_install.extend(recommended)
            for dep_pkg in recommended:
                # NOTE(review): same parent-vs-dep lookup and duplicate
                # listing concerns as the optional branch above.
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                recommended_install.append(msg)

        if needs:
            pkgs_to_install.extend(needs)
            for dep_pkg in needs:
                # NOTE(review): ``msg`` is computed here but never used.
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)

    return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package from its ``.spm`` (bzip2 tar) archive.

    Reads and validates the package FORMULA, registers the package in
    the database, runs any pre/post local or targeted state scripts,
    and extracts the archive members while recording each file (with a
    SHA1 digest) in the package database.
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)

    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))

    pkg_files = formula_tar.getmembers()

    # First pass: refuse to clobber existing files unless --force
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )

    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)

    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        ret = self.caller.cmd('state.high', data=high_data)
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        tgt = formula_def['pre_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            # BUG FIX: this kwarg was misspelled 'timout', so run_job
            # silently ignored it and the configured timeout never applied
            timeout=self.opts['timeout'],
            data=high_data,
        )

    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]

    # Second pass: install the files
    for member in pkg_files:
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname

        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                # Directories carry no content hash
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)

    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        tgt = formula_def['post_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            # BUG FIX: was misspelled 'timout' (see pre_tgt_state above)
            timeout=self.opts['timeout'],
            data=high_data,
        )

    formula_tar.close()
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies

    Returns a 4-tuple:
      - dict mapping each missing-but-available dependency to the repo
        that provides it
      - sorted list of dependencies no configured repo provides
      - list of optional package names declared by the formula tree
      - list of recommended package names declared by the formula tree
    '''
    # NOTE(review): this lookup result is never used below — it looks
    # like dead code left over from an earlier revision; verify before
    # removing.
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}
    can_has = {}    # dep name -> repo that provides it
    cant_has = []   # deps not available in any repo
    # A formula may declare 'dependencies:' with no value; normalize to ''
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Already installed: nothing to resolve for this dep
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue

        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)

    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')

    inspected = []
    to_inspect = can_has.copy()
    # Walk the dependency graph, recursing into each available
    # dependency's own formula to collect transitive requirements.
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]

        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)

        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})

        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))

    return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided in
    the callback to them

    Reads the main ``spm_repos_config`` file plus every ``*.repo``
    drop-in under ``<spm_repos_config>.d/``, and invokes
    ``callback(repo, repo_data[repo])`` for each enabled repo
    (optionally restricted to *repo_name*).
    '''
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])

    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            # BUG FIX: store the full path here. Previously bare file
            # names were stored and every entry was re-joined as
            # '{spm_repos_config}.d/{entry}', which produced a garbage
            # path for the main config file appended above (a full path)
            # and broke when os_walk descended into subdirectories.
            repo_files.append(os.path.join(dirpath, repo_file))

    for repo_path in repo_files:
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
        for repo in repo_data:
            # Repos default to enabled unless explicitly disabled
            if repo_data[repo].get('enabled', True) is False:
                continue
            if repo_name is not None and repo != repo_name:
                continue
            callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http

    Fetches *dl_path*, using the repo's ``username``/``password`` when
    configured. Paths containing ``SPM-METADATA`` are parsed as YAML;
    anything else is returned as raw text. All errors are reported
    through the UI and result in ``None`` being returned.
    '''
    query = None
    response = None

    try:
        if 'username' in repo_info:
            try:
                if 'password' in repo_info:
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    # A username without a password is a config error
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    try:
        if query:
            if 'SPM-METADATA' in dl_path:
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            # query is None/empty: the fetch above already reported why
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    return response
def _download_repo_metadata(self, args):
    '''
    Connect to all repos and download metadata

    ``args[1]``, when present, restricts the update to a single repo.
    Downloaded metadata is stored in the SPM cache keyed by repo name.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

    def _update_metadata(repo, repo_info):
        # Per-repo callback invoked by _traverse_repos()
        dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
        if dl_path.startswith('file://'):
            # Local repo: read the metadata file directly from disk
            dl_path = dl_path.replace('file://', '')
            with salt.utils.files.fopen(dl_path, 'r') as rpm:
                metadata = salt.utils.yaml.safe_load(rpm)
        else:
            metadata = self._query_http(dl_path, repo_info)
        cache.store('.', repo, metadata)

    repo_name = args[1] if len(args) > 1 else None
    self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata as a dict of
    ``repo name -> {'info': ..., 'packages': ...}``, refreshing any
    repo whose cache has never been populated.
    '''
    repo_cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    collected = {}

    def _collect(repo, repo_info):
        # A missing cache timestamp means this repo was never fetched
        if repo_cache.updated('.', repo) is None:
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})
        collected[repo] = {
            'info': repo_info,
            'packages': repo_cache.fetch('.', repo),
        }

    self._traverse_repos(_collect)
    return collected
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    When multiple builds of the same package are found, only the newest
    (by version, then release) is indexed; older files are handled per
    the ``spm_repo_dups`` option (``ignore``/``archive``/``delete``).
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')

    if args[1] == '.':
        # BUG FIX: os.getcwdu() only exists on Python 2 and raised
        # AttributeError on Python 3; fall back to os.getcwd().
        repo_path = os.getcwdu() if hasattr(os, 'getcwdu') else os.getcwd()
    else:
        repo_path = args[1]

    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # Package name is everything before '-<version>-<release>.spm'
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())

            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False
                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)
            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file

    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )

    log.debug('Wrote %s', metadata_filename)

    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove a package

    For each named package: delete only files whose on-disk hash still
    matches the hash recorded at install time (leaving locally modified
    files in place), remove now-empty directories, and unregister the
    package from the database.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))

    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    for package in packages:
        self.ui.status('... removing {0}'.format(package))

        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))

        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))

        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                # Directories are removed after all files, deepest first
                dirs.append(filerow[0])
                continue
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                # Unchanged since install: safe to delete
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            # Unregister regardless of whether the file was deleted
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)

        # Clean up directories
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)

        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
    '''
    Display verbose information

    *msg* is echoed to the UI only when the ``verbose`` option is True;
    it is always emitted through the *level* logging callable (default
    ``log.debug``).
    '''
    if self.opts.get('verbose', False) is True:
        self.ui.status(msg)
    level(msg)
def _local_info(self, args):
    '''
    Print the formula information for a local package file without
    installing it.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')

    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    # Package name: strip '-<version>-<release>.spm' then any leading
    # directory components.
    name = '-'.join(pkg_file.split('-')[:-2]).split('/')[-1]

    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)

    self.ui.status(self._get_info(formula_def))
    formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
    '''
    Print the member names contained in a local package file.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')

    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMPackageError('Package file {0} not found'.format(pkg_file))

    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    for member in formula_tar.getmembers():
        self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
    '''
    Build a package

    Reads ``<path>/FORMULA``, validates required fields, and writes
    ``<spm_build_dir>/<name>-<version>-<release>.spm`` as a bzip2
    tarball. If the formula lists explicit ``files``, only those are
    added (in order); otherwise the whole formula directory is added,
    filtered through ``_exclude()``.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')

    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]

    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)

    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))

    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )

    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])

    self.formula_conf = formula_conf

    formula_tar = tarfile.open(out_path, 'w:bz2')

    if 'files' in formula_conf:
        # This allows files to be added to the SPM file in a specific order.
        # It also allows for files to be tagged as a certain type, as with
        # RPM files. This tag is ignored here, but is used when installing
        # the SPM file.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    if file_.startswith('{0}|'.format(ftype)):
                        # BUG FIX: remove the '<type>|' tag by slicing.
                        # str.lstrip() strips a *set of characters*, not
                        # a prefix, so it could also eat leading
                        # characters of the file name itself.
                        file_ = file_[len(ftype) + 1:]
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # If no files are specified, then the whole directory will be added.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            # Older tarfile modules only support the deprecated
            # 'exclude' callback
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()

    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
    '''
    Tarfile filter callback: return None to skip members that match any
    entry in the ``spm_build_exclude`` option, otherwise return the
    member unchanged.
    '''
    # Old tarfile 'exclude' callbacks receive path strings; skip those
    if isinstance(member, string_types):
        return None

    for excluded in self.opts['spm_build_exclude']:
        prefixes = (
            '{0}/{1}'.format(self.formula_conf['name'], excluded),
            '{0}/{1}'.format(self.abspath, excluded),
        )
        if member.name.startswith(prefixes):
            return None
    return member
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script

    The formula may override the renderer via a ``renderer`` field;
    otherwise the configured ``renderer`` option (default
    ``jinja|yaml``) is used. The formula definition, plus a copy of the
    current opts under the ``opts`` key, is exposed to the template.
    '''
    # FORMULA can contain a renderer option
    renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
    rend = salt.loader.render(self.opts, {})
    blacklist = self.opts.get('renderer_blacklist')
    whitelist = self.opts.get('renderer_whitelist')
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()
    return compile_template(
        ':string:',
        rend,
        renderer,
        blacklist,
        whitelist,
        input_data=data,
        **template_vars
    )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._check_all_deps
|
python
|
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
    '''
    Starting with one package, check all packages for dependencies

    Returns a 3-tuple of (packages to install, optional-dependency
    display strings, recommended-dependency display strings).
    '''
    if pkg_file and not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    self.repo_metadata = self._get_repo_metadata()
    if not formula_def:
        # Look the formula up in whichever repo carries this package
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            if pkg_name in self.repo_metadata[repo]['packages']:
                formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']

    if not formula_def:
        raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))

    # Check to see if the package is already installed
    pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    pkgs_to_install = []
    if pkg_info is None or self.opts['force']:
        pkgs_to_install.append(pkg_name)
    elif pkg_info is not None and not self.opts['force']:
        raise SPMPackageError(
            'Package {0} already installed, not installing again'.format(formula_def['name'])
        )

    optional_install = []
    recommended_install = []
    if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
        # Build a name -> repo map of every available package
        self.avail_pkgs = {}
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            for pkg in self.repo_metadata[repo]['packages']:
                self.avail_pkgs[pkg] = repo

        needs, unavail, optional, recommended = self._resolve_deps(formula_def)

        if unavail:
            raise SPMPackageError(
                'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                    formula_def['name'], '\n'.join(unavail))
            )

        if optional:
            optional_install.extend(optional)
            for dep_pkg in optional:
                # NOTE(review): this queries the *parent* package rather
                # than dep_pkg, so the '[Installed]' marker may be wrong,
                # and each dep is listed twice (extend above + append
                # below). Verify intended behavior before changing.
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                optional_install.append(msg)

        if recommended:
            recommended_install.extend(recommended)
            for dep_pkg in recommended:
                # NOTE(review): same concerns as the optional loop above
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                recommended_install.append(msg)

        if needs:
            pkgs_to_install.extend(needs)
            for dep_pkg in needs:
                # NOTE(review): msg is computed here but never used —
                # apparently dead code
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)

    return pkgs_to_install, optional_install, recommended_install
|
Starting with one package, check all packages for dependencies
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L391-L463
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None):  # pylint: disable=W0231
    '''
    :param ui: user-interface object used for status/error output
    :param opts: optional pre-built SPM configuration; when omitted it
        is loaded from <CONFIG_DIR>/spm
    '''
    self.ui = ui
    if not opts:
        opts = salt.config.spm_config(
            os.path.join(syspaths.CONFIG_DIR, 'spm')
        )
    self.opts = opts
    # Pluggable backends for the package database and file storage
    self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
    self.files_prov = self.opts.get('spm_files_provider', 'local')
    self._prep_pkgdb()
    self._prep_pkgfiles()
    # Backend connections are opened lazily by _init()
    self.db_conn = None
    self.files_conn = None
    self._init()
def _prep_pkgdb(self):
    # Load the package-database provider modules via the salt loader
    self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
    # Load the package-files provider modules via the salt loader
    self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
if not self.db_conn:
self.db_conn = self._pkgdb_fun('init')
if not self.files_conn:
self.files_conn = self._pkgfiles_fun('init')
def _close(self):
if self.db_conn:
self.db_conn.close()
def run(self, args):
    '''
    Run the SPM command

    Dispatches ``args[0]`` to the matching handler; any SPMException
    raised by a handler is reported through the UI instead of
    propagating.
    '''
    command = args[0]
    handlers = {
        'install': self._install,
        'local': self._local,
        'repo': self._repo,
        'remove': self._remove,
        'build': self._build,
        'update_repo': self._download_repo_metadata,
        'create_repo': self._create_repo,
        'files': self._list_files,
        'info': self._info,
        'list': self._list,
    }
    try:
        if command == 'close':
            # 'close' is special: its handler takes no arguments
            self._close()
        elif command in handlers:
            handlers[command](args)
        else:
            raise SPMInvocationError('Invalid command \'{0}\''.format(command))
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
    '''
    Invoke *func* on the configured package-database provider,
    preferring attribute access and falling back to loader-dict lookup
    ('<provider>.<func>').
    '''
    try:
        return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
    except AttributeError:
        return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
    '''
    Invoke *func* on the configured package-files provider, preferring
    attribute access and falling back to loader-dict lookup
    ('<provider>.<func>').
    '''
    try:
        return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
    except AttributeError:
        return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
    '''
    Dispatch the ``spm list <subcommand>`` family of commands.
    '''
    args.pop(0)
    command = args[0]
    dispatch = {
        'packages': self._list_packages,
        'files': self._list_files,
        'repos': self._repo_list,
    }
    if command in dispatch:
        dispatch[command](args)
    else:
        raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
    '''
    Dispatch the ``spm local <subcommand>`` family of commands.
    '''
    args.pop(0)
    command = args[0]
    dispatch = {
        'install': self._local_install,
        'files': self._local_list_files,
        'info': self._local_info,
    }
    if command in dispatch:
        dispatch[command](args)
    else:
        raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
def _repo(self, args):
    '''
    Dispatch the ``spm repo <subcommand>`` family of commands.
    '''
    args.pop(0)
    command = args[0]
    dispatch = {
        'list': self._repo_list,
        'packages': self._repo_packages,
        'update': self._download_repo_metadata,
        'create': self._create_repo,
    }
    if command == 'search':
        # 'search' reuses the packages listing in search mode
        self._repo_packages(args, search=True)
    elif command in dispatch:
        dispatch[command](args)
    else:
        raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages
def _repo_list(self, args):
'''
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
'''
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
self.ui.status(repo)
def _install(self, args):
    '''
    Install a package from a repo

    Each argument after the command is either a local ``.spm`` file or
    a package name to resolve from the configured repos. Dependencies
    are resolved first, the user is asked to confirm (unless
    ``assume_yes``), then every remote package is downloaded into the
    SPM cache and installed.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    cache = salt.cache.Cache(self.opts)

    packages = args[1:]
    file_map = {}       # package name -> local .spm path (file installs)
    optional = []
    recommended = []
    to_install = []
    for pkg in packages:
        if pkg.endswith('.spm'):
            if self._pkgfiles_fun('path_exists', pkg):
                # Derive the package name by stripping
                # '-<version>-<release>.spm' and the directory part
                comps = pkg.split('-')
                comps = os.path.split('-'.join(comps[:-2]))
                pkg_name = comps[-1]

                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = salt.utils.yaml.safe_load(formula_ref)

                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(
                    pkg_name=pkg_name,
                    pkg_file=pkg,
                    formula_def=formula_def
                )
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
                formula_tar.close()
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)

    # De-duplicate and drop empty entries before displaying
    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))

    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    repo_metadata = self._get_repo_metadata()

    dl_list = {}   # package -> best download candidate across all repos
    for package in to_install:
        if package in file_map:
            # Local file installs bypass the download phase entirely
            self._install_indv_pkg(package, file_map[package])
        else:
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        # Check package version, replace if newer version
                        if repo_ver == dl_list[package]['version']:
                            # Version is the same, check release
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                # Version and release are the same, give
                                # preference to local (file://) repos
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        dl_package = True

                    if dl_package is True:
                        # Put together download directory
                        cache_path = os.path.join(
                            self.opts['spm_cache_dir'],
                            repo
                        )

                        # Put together download paths
                        dl_url = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = os.path.join(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )
                        dl_list[package] = {
                            'version': repo_ver,
                            'release': repo_rel,
                            'source': dl_url,
                            'dest_dir': cache_path,
                            'dest_file': out_file,
                        }

    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']

        # Make sure download directory exists
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)

        # Download the package
        if dl_url.startswith('file://'):
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            # NOTE(review): repo_info here is whatever the last
            # iteration of the resolution loop above left bound — it may
            # not be the repo this package is actually downloaded from.
            # Verify credentials handling against upstream.
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, repo_info['info']))

    # First we download everything, then we install
    for package in dl_list:
        out_file = dl_list[package]['dest_file']

        # Kick off the install
        self._install_indv_pkg(package, out_file)
    return
def _local_install(self, args, pkg_name=None):
    '''
    Install a package from a local ``.spm`` file. Thin wrapper around
    _install(); *pkg_name* is accepted for interface compatibility but
    unused.
    '''
    if len(args) >= 2:
        self._install(args)
    else:
        raise SPMInvocationError('A package file must be specified')
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package from its ``.spm`` (bzip2 tar) archive.

    Reads and validates the package FORMULA, registers the package in
    the database, runs any pre/post local or targeted state scripts,
    and extracts the archive members while recording each file (with a
    SHA1 digest) in the package database.
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)

    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))

    pkg_files = formula_tar.getmembers()

    # First pass: refuse to clobber existing files unless --force
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )

    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)

    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        ret = self.caller.cmd('state.high', data=high_data)
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        tgt = formula_def['pre_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            # BUG FIX: this kwarg was misspelled 'timout', so run_job
            # silently ignored it and the configured timeout never applied
            timeout=self.opts['timeout'],
            data=high_data,
        )

    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]

    # Second pass: install the files
    for member in pkg_files:
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname

        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                # Directories carry no content hash
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)

    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        tgt = formula_def['post_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            # BUG FIX: was misspelled 'timout' (see pre_tgt_state above)
            timeout=self.opts['timeout'],
            data=high_data,
        )

    formula_tar.close()
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies

    Returns a 4-tuple (can_has, cant_has, optional, recommended): can_has
    maps each satisfiable dependency name to the repo that provides it;
    cant_has lists dependencies no configured repo provides.
    '''
    # NOTE(review): pkg_info is computed here but never used below —
    # looks like dead code; confirm before removing.
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}
    can_has = {}
    cant_has = []
    # A FORMULA may declare 'dependencies:' with no value; normalize to ''
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Already-installed dependencies need no further action
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue
        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)
    # NOTE(review): unlike 'dependencies', these entries are not
    # stripped, so they may carry whitespace — confirm intended.
    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')
    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]
        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)
        # Recurse into the dependency's own formula and merge the results
        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})
        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))
    return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided in
    the callback to them

    ``callback(repo, repo_config)`` is invoked once for every enabled repo;
    when *repo_name* is given, only the matching repo is passed through.
    '''
    # Collect the FULL path of every repo config file: the main
    # spm_repos_config file plus every *.repo file under '<config>.d'.
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])

    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            # BUG FIX: keep the full path now. The old code stored bare
            # names and later rebuilt paths as '<config>.d/<entry>',
            # which produced a broken path for the main config entry
            # (already a full path) and for *.repo files in subdirs.
            repo_files.append(os.path.join(dirpath, repo_file))

    for repo_path in repo_files:
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
            for repo in repo_data:
                # Repos default to enabled unless explicitly disabled
                if repo_data[repo].get('enabled', True) is False:
                    continue
                if repo_name is not None and repo != repo_name:
                    continue
                callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http

    Returns the parsed YAML for SPM-METADATA URLs, the raw response body
    for anything else, or None when the request failed. Errors are
    reported through the UI instead of being raised to the caller.
    '''
    query = None
    response = None
    try:
        if 'username' in repo_info:
            try:
                if 'password' in repo_info:
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    # A username without a password is a configuration error
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
    try:
        if query:
            if 'SPM-METADATA' in dl_path:
                # Metadata downloads are parsed as YAML
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
    return response
def _download_repo_metadata(self, args):
    '''
    Connect to all repos and download metadata
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

    def _fetch_and_store(repo, repo_info):
        # SPM-METADATA lives at the root of every repo
        metadata_url = '{0}/SPM-METADATA'.format(repo_info['url'])
        if metadata_url.startswith('file://'):
            local_path = metadata_url.replace('file://', '')
            with salt.utils.files.fopen(local_path, 'r') as metadata_fh:
                repo_md = salt.utils.yaml.safe_load(metadata_fh)
        else:
            repo_md = self._query_http(metadata_url, repo_info)
        cache.store('.', repo, repo_md)

    # args[1], when present, restricts the update to a single repo
    if len(args) > 1:
        repo_name = args[1]
    else:
        repo_name = None
    self._traverse_repos(_fetch_and_store, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    metadata = {}

    def _collect(repo, repo_info):
        # A missing cache timestamp means this repo was never fetched
        if cache.updated('.', repo) is None:
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})
        metadata[repo] = {
            'info': repo_info,
            'packages': cache.fetch('.', repo),
        }

    self._traverse_repos(_collect)
    return metadata
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    args[1] is the directory to scan ('.' for the current directory).
    When several versions of the same package are present, only the
    newest is indexed; older files are handled per ``spm_repo_dups``
    (ignore, archive or delete).
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')

    if args[1] == '.':
        # BUG FIX: os.getcwdu() only exists on Python 2; fall back to
        # os.getcwd() on Python 3 (where it already returns text).
        repo_path = os.getcwdu() if six.PY2 else os.getcwd()
    else:
        repo_path = args[1]

    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # '<name>-<version>-<release>.spm' -> '<name>'
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            try:
                formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
                formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
            finally:
                # BUG FIX: the old code never closed this handle
                spm_fh.close()

            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False

                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)

            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file

    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )
    log.debug('Wrote %s', metadata_filename)

    # Dispose of superseded package files per the spm_repo_dups option
    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove a package

    args[1:] are package names. Files whose on-disk hash still matches
    the hash recorded at install time are deleted; locally modified
    files are left in place. Emptied directories are removed afterwards.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)
    for package in packages:
        self.ui.status('... removing {0}'.format(package))
        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))
        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            # Directories are removed at the end so files go first
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                dirs.append(filerow[0])
                continue
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                # Hash unchanged since install: safe to delete
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                # File was modified locally; keep it on disk
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
        # Clean up directories, deepest paths first
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
    '''
    Display verbose information

    *msg* is always sent to the logger via *level*; it is additionally
    echoed through the UI when the 'verbose' option is enabled.
    '''
    if self.opts.get('verbose', False) is True:
        self.ui.status(msg)
    level(msg)
def _local_info(self, args):
    '''
    List info for a package file
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')
    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    # '<path>/<name>-<version>-<release>.spm' -> '<name>'
    base = '-'.join(pkg_file.split('-')[:-2])
    name = base.split('/')[-1]

    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)

    self.ui.status(self._get_info(formula_def))
    formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
    '''
    Build a package

    args[1] is the path to a formula directory containing a FORMULA
    file. The resulting <name>-<version>-<release>.spm archive is
    written to ``spm_build_dir``.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')

    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]

    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)

    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))

    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )

    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])

    self.formula_conf = formula_conf

    formula_tar = tarfile.open(out_path, 'w:bz2')

    if 'files' in formula_conf:
        # This allows files to be added to the SPM file in a specific order.
        # It also allows for files to be tagged as a certain type, as with
        # RPM files. This tag is ignored here, but is used when installing
        # the SPM file.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    tag = '{0}|'.format(ftype)
                    if file_.startswith(tag):
                        # BUG FIX: the old code used str.lstrip(tag),
                        # which strips a *character set* rather than a
                        # prefix and could eat leading characters of the
                        # file name (e.g. 'c|conf/x' -> 'onf/x'). Remove
                        # the tag as an exact prefix instead.
                        file_ = file_[len(tag):]
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # If no files are specified, then the whole directory will be added.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            # Older tarfile versions have no 'filter' argument; the
            # deprecated 'exclude' callback receives a path string
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()
    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
    '''
    Exclude based on opts

    tarfile filter callback: returns None to skip *member* (anything
    matching an ``spm_build_exclude`` entry), or the member unchanged.
    '''
    # The legacy 'exclude=' tarfile callback passes a path string rather
    # than a TarInfo; in that case nothing is added.
    if isinstance(member, string_types):
        return None
    for item in self.opts['spm_build_exclude']:
        if member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], item)):
            return None
        elif member.name.startswith('{0}/{1}'.format(self.abspath, item)):
            return None
    return member
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script

    *data* is rendered as an in-memory template, with the formula
    definition (plus a copy of ``opts``) exposed as template variables.
    '''
    # FORMULA can contain a renderer option
    renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
    rend = salt.loader.render(self.opts, {})
    blacklist = self.opts.get('renderer_blacklist')
    whitelist = self.opts.get('renderer_whitelist')
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()
    # ':string:' tells compile_template to render input_data directly
    # instead of reading a file
    return compile_template(
        ':string:',
        rend,
        renderer,
        blacklist,
        whitelist,
        input_data=data,
        **template_vars
    )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._install_indv_pkg
|
python
|
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package

    pkg_name -- package name, used to locate <name>/FORMULA inside the
        archive
    pkg_file -- path to the .spm (bzip2 tarball) to install
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)

    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))

    pkg_files = formula_tar.getmembers()

    # First pass: check for files that already exist
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)

    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )

    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)

    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        ret = self.caller.cmd('state.high', data=high_data)
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        tgt = formula_def['pre_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            # BUG FIX: this keyword was misspelled 'timout', so run_job
            # silently swallowed it and used its default timeout instead
            timeout=self.opts['timeout'],
            data=high_data,
        )

    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]

    # Second pass: install the files
    for member in pkg_files:
        # Stamp ownership onto each member before extraction
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname

        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)

    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        tgt = formula_def['post_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            # BUG FIX: 'timout' -> 'timeout' (same as pre_tgt_state above)
            timeout=self.opts['timeout'],
            data=high_data,
        )

    formula_tar.close()
|
Install one individual package
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L465-L567
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None):  # pylint: disable=W0231
    # ui -- UI object providing status/error/confirm output
    # opts -- optional pre-built config dict; when omitted, the SPM
    # config is loaded from the default config location
    self.ui = ui
    if not opts:
        opts = salt.config.spm_config(
            os.path.join(syspaths.CONFIG_DIR, 'spm')
        )
    self.opts = opts
    # Provider names used for LazyLoader-style lookups later on
    self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
    self.files_prov = self.opts.get('spm_files_provider', 'local')
    self._prep_pkgdb()
    self._prep_pkgfiles()
    # Connections are opened by _init()
    self.db_conn = None
    self.files_conn = None
    self._init()
def _prep_pkgdb(self):
    # Load the package-database provider modules (e.g. sqlite3)
    self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
    # Load the package-files provider modules (e.g. local filesystem)
    self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
    # Open the provider connections once; repeated calls are no-ops
    if not self.db_conn:
        self.db_conn = self._pkgdb_fun('init')
    if not self.files_conn:
        self.files_conn = self._pkgfiles_fun('init')
def _close(self):
    # Close the package-database connection, if one was opened
    if self.db_conn:
        self.db_conn.close()
def run(self, args):
    '''
    Run the SPM command
    '''
    # Map each top-level command to its handler; 'close' is special
    # because its handler takes no arguments.
    dispatch = {
        'install': self._install,
        'local': self._local,
        'repo': self._repo,
        'remove': self._remove,
        'build': self._build,
        'update_repo': self._download_repo_metadata,
        'create_repo': self._create_repo,
        'files': self._list_files,
        'info': self._info,
        'list': self._list,
    }
    command = args[0]
    try:
        if command == 'close':
            self._close()
        elif command in dispatch:
            dispatch[command](args)
        else:
            raise SPMInvocationError('Invalid command \'{0}\''.format(command))
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
    '''
    Call *func* on the configured package-database provider, preferring
    attribute access and falling back to LazyLoader-style dict lookup.
    '''
    try:
        return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
    except AttributeError:
        return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
    '''
    Call *func* on the configured package-files provider, preferring
    attribute access and falling back to LazyLoader-style dict lookup.
    '''
    try:
        return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
    except AttributeError:
        return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args)
elif command == 'files':
self._list_files(args)
elif command == 'repos':
self._repo_list(args)
else:
raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'install':
self._local_install(args)
elif command == 'files':
self._local_list_files(args)
elif command == 'info':
self._local_info(args)
else:
raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
def _repo(self, args):
'''
Process repo commands
'''
args.pop(0)
command = args[0]
if command == 'list':
self._repo_list(args)
elif command == 'packages':
self._repo_packages(args)
elif command == 'search':
self._repo_packages(args, search=True)
elif command == 'update':
self._download_repo_metadata(args)
elif command == 'create':
self._create_repo(args)
else:
raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages
def _repo_list(self, args):
'''
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
'''
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
self.ui.status(repo)
def _install(self, args):
    '''
    Install a package from a repo

    args[1:] are either local .spm file paths or package names to be
    resolved against the configured repos. Dependencies are resolved
    first, everything is downloaded into spm_cache_dir, then each
    package is installed via _install_indv_pkg().
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    cache = salt.cache.Cache(self.opts)

    packages = args[1:]
    file_map = {}  # pkg_name -> local .spm path, for file-based installs
    optional = []
    recommended = []
    to_install = []
    for pkg in packages:
        if pkg.endswith('.spm'):
            # Install from a local .spm file
            if self._pkgfiles_fun('path_exists', pkg):
                # '<path>/<name>-<version>-<release>.spm' -> '<name>'
                comps = pkg.split('-')
                comps = os.path.split('-'.join(comps[:-2]))
                pkg_name = comps[-1]

                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = salt.utils.yaml.safe_load(formula_ref)

                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(
                    pkg_name=pkg_name,
                    pkg_file=pkg,
                    formula_def=formula_def
                )
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
                formula_tar.close()
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            # Install by name from a configured repo
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)

    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))

    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    repo_metadata = self._get_repo_metadata()

    # Work out, for every package, which repo provides the newest
    # version/release (local file:// repos win ties)
    dl_list = {}
    for package in to_install:
        if package in file_map:
            self._install_indv_pkg(package, file_map[package])
        else:
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        # Check package version, replace if newer version
                        if repo_ver == dl_list[package]['version']:
                            # Version is the same, check release
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                # Version and release are the same, give
                                # preference to local (file://) repos
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        dl_package = True

                    if dl_package is True:
                        # Put together download directory
                        cache_path = os.path.join(
                            self.opts['spm_cache_dir'],
                            repo
                        )

                        # Put together download paths
                        dl_url = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = os.path.join(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )

                        dl_list[package] = {
                            'version': repo_ver,
                            'release': repo_rel,
                            'source': dl_url,
                            'dest_dir': cache_path,
                            'dest_file': out_file,
                        }

    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']

        # Make sure download directory exists
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)

        # Download the package
        if dl_url.startswith('file://'):
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            # NOTE(review): repo_info here is whatever the previous loop
            # left it as, not necessarily this package's repo — confirm.
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, repo_info['info']))

    # First we download everything, then we install
    for package in dl_list:
        out_file = dl_list[package]['dest_file']

        # Kick off the install
        self._install_indv_pkg(package, out_file)
    return
def _local_install(self, args, pkg_name=None):
    '''
    Install a package from a file
    '''
    # NOTE(review): pkg_name is accepted but never used; _install()
    # derives the name from the file path itself — confirm intended.
    if len(args) < 2:
        raise SPMInvocationError('A package file must be specified')
    self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
    '''
    Starting with one package, check all packages for dependencies

    Returns (pkgs_to_install, optional_install, recommended_install).
    '''
    if pkg_file and not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    self.repo_metadata = self._get_repo_metadata()
    if not formula_def:
        # Find the formula in whatever repo provides the package
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            if pkg_name in self.repo_metadata[repo]['packages']:
                formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']

    if not formula_def:
        raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))

    # Check to see if the package is already installed
    pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    pkgs_to_install = []
    if pkg_info is None or self.opts['force']:
        pkgs_to_install.append(pkg_name)
    elif pkg_info is not None and not self.opts['force']:
        raise SPMPackageError(
            'Package {0} already installed, not installing again'.format(formula_def['name'])
        )

    optional_install = []
    recommended_install = []
    if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
        # Index every available package by the repo providing it, for
        # _resolve_deps() to consult
        self.avail_pkgs = {}
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            for pkg in self.repo_metadata[repo]['packages']:
                self.avail_pkgs[pkg] = repo

        needs, unavail, optional, recommended = self._resolve_deps(formula_def)

        if unavail:
            raise SPMPackageError(
                'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                    formula_def['name'], '\n'.join(unavail))
            )

        if optional:
            optional_install.extend(optional)
            for dep_pkg in optional:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                # NOTE(review): each optional dep lands in the list twice
                # (once via extend() above, once as msg) — confirm intended.
                optional_install.append(msg)

        if recommended:
            recommended_install.extend(recommended)
            for dep_pkg in recommended:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                # NOTE(review): duplicated entries, same as 'optional' above
                recommended_install.append(msg)

        if needs:
            pkgs_to_install.extend(needs)
            for dep_pkg in needs:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                # NOTE(review): msg is computed here but never used

    return pkgs_to_install, optional_install, recommended_install
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies

    Returns a 4-tuple (can_has, cant_has, optional, recommended): can_has
    maps each satisfiable dependency name to the repo that provides it;
    cant_has lists dependencies no configured repo provides.
    '''
    # NOTE(review): pkg_info is computed but never used below —
    # looks like dead code; confirm before removing.
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}
    can_has = {}
    cant_has = []
    # A FORMULA may declare 'dependencies:' with no value; normalize to ''
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Already-installed dependencies need no further action
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue
        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)
    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')
    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]
        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)
        # Recurse into the dependency's own formula and merge the results
        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})
        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))
    return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided in
    the callback to them
    '''
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        # NOTE(review): this appends the FULL config path, but the loop
        # below rebuilds every entry as '<config>.d/<entry>', which
        # yields a broken path for this first entry — verify against a
        # live configuration.
        repo_files.append(self.opts['spm_repos_config'])
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            repo_files.append(repo_file)
    for repo_file in repo_files:
        repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
            for repo in repo_data:
                # Repos default to enabled unless explicitly disabled
                if repo_data[repo].get('enabled', True) is False:
                    continue
                if repo_name is not None and repo != repo_name:
                    continue
                callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http

    Returns the parsed YAML for SPM-METADATA URLs, the raw response body
    for anything else, or None when the request failed. Errors are
    reported through the UI instead of being raised to the caller.
    '''
    query = None
    response = None
    try:
        if 'username' in repo_info:
            try:
                if 'password' in repo_info:
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    # A username without a password is a configuration error
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
    try:
        if query:
            if 'SPM-METADATA' in dl_path:
                # Metadata downloads are parsed as YAML
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
    return response
def _download_repo_metadata(self, args):
    '''
    Connect to all repos and download metadata
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

    def _update_metadata(repo, repo_info):
        # SPM-METADATA lives at the root of every repo
        dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
        if dl_path.startswith('file://'):
            dl_path = dl_path.replace('file://', '')
            with salt.utils.files.fopen(dl_path, 'r') as rpm:
                metadata = salt.utils.yaml.safe_load(rpm)
        else:
            metadata = self._query_http(dl_path, repo_info)
        cache.store('.', repo, metadata)

    # args[1], when present, restricts the update to a single repo
    repo_name = args[1] if len(args) > 1 else None
    self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata

    Returns a dict keyed by repo name, each value holding the repo's
    config ('info') and its cached package index ('packages').
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    metadata = {}

    def _read_metadata(repo, repo_info):
        # A missing cache entry means metadata was never downloaded;
        # fetch it (for all repos) before reading
        if cache.updated('.', repo) is None:
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})

        metadata[repo] = {
            'info': repo_info,
            'packages': cache.fetch('.', repo),
        }

    self._traverse_repos(_read_metadata)
    return metadata
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    args -- CLI argument list; args[1] is the directory to scan
        ('.' means the current working directory).

    When two builds of the same package are found, only the newest
    version-release is indexed; older files are handled per the
    ``spm_repo_dups`` option (ignore/archive/delete).
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')

    if args[1] == '.':
        # os.getcwdu() exists only on Python 2; os.getcwd() works everywhere
        repo_path = os.getcwd()
    else:
        repo_path = args[1]

    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # Filename convention is <name>-<version>-<release>.spm
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
            # Close the package as soon as the FORMULA is read so we do not
            # leak a file handle per package while walking a large repo
            spm_fh.close()

            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False
                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)

            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file

    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )

    log.debug('Wrote %s', metadata_filename)

    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
                # Only claim success after the move actually happened (the
                # old code logged 'archived' right after creating the dir)
                log.debug('%s has been archived', file_)
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove a package

    args -- CLI argument list; args[1:] are the package names to remove.

    Files whose on-disk hash no longer matches the hash recorded at
    install time are left in place (they were modified by the user);
    their DB registration is removed either way.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))

    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    for package in packages:
        self.ui.status('... removing {0}'.format(package))

        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))

        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))

        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                # Directories are cleaned up after all files are handled
                dirs.append(filerow[0])
                continue
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                # Hash unchanged since install: safe to delete
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                # File was modified after install: keep it on disk
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)

        # Clean up directories, deepest paths first (reverse sort)
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)

        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
    '''
    Emit *msg* to the UI when the ``verbose`` option is enabled, and
    always pass it to the given logger method (``log.debug`` by default).
    '''
    verbose_enabled = self.opts.get('verbose', False) is True
    if verbose_enabled:
        self.ui.status(msg)
    level(msg)
def _local_info(self, args):
    '''
    List info for a package file

    args -- CLI argument list; args[1] is the path to a .spm file.

    Raises SPMInvocationError when no filename is given or the file
    does not exist.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')

    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    # Derive the package name from the <name>-<version>-<release>.spm
    # filename convention
    comps = pkg_file.split('-')
    comps = '-'.join(comps[:-2]).split('/')
    name = comps[-1]

    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    try:
        formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
        formula_def = salt.utils.yaml.safe_load(formula_ref)
    finally:
        # Close the tarball even if FORMULA is missing or fails to parse;
        # the old code leaked the handle on error
        formula_tar.close()

    self.ui.status(self._get_info(formula_def))
def _info(self, args):
    '''
    Display the metadata report for an installed package.

    args -- CLI argument list; args[1] is the package name.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    target = args[1]
    installed_info = self._pkgdb_fun('info', target, self.db_conn)
    if installed_info is None:
        raise SPMPackageError('package {0} not installed'.format(target))
    self.ui.status(self._get_info(installed_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
    '''
    Build a package

    args -- CLI argument list; args[1] is the path to a formula
        directory containing a FORMULA file.

    Produces <spm_build_dir>/<name>-<version>-<release>.spm. When the
    FORMULA declares a 'files' list, only those files are packed (in
    order, honoring '<type>|' tags); otherwise the whole directory is
    added, filtered through self._exclude.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')

    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]

    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)

    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))

    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )

    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])

    self.formula_conf = formula_conf

    formula_tar = tarfile.open(out_path, 'w:bz2')

    if 'files' in formula_conf:
        # This allows files to be added to the SPM file in a specific order.
        # It also allows for files to be tagged as a certain type, as with
        # RPM files. This tag is ignored here, but is used when installing
        # the SPM file.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    if file_.startswith('{0}|'.format(ftype)):
                        # Strip the '<type>|' tag. str.lstrip() must NOT be
                        # used here: its argument is a *set of characters*,
                        # so lstrip('c|') would also eat leading filename
                        # characters (e.g. 'c|conf/x' -> 'onf/x').
                        file_ = file_[len(ftype) + 1:]
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # If no files are specified, then the whole directory will be added.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            # Older Python tarfile takes 'exclude' instead of 'filter'
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()

    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
    '''
    Tarfile filter used while building a package: return None to drop
    a member whose name matches an ``spm_build_exclude`` entry (under
    either the package name or the build path); otherwise return the
    member unchanged.
    '''
    if isinstance(member, string_types):
        return None

    prefixes = (self.formula_conf['name'], self.abspath)
    for item in self.opts['spm_build_exclude']:
        for prefix in prefixes:
            if member.name.startswith('{0}/{1}'.format(prefix, item)):
                return None
    return member
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script

    data -- the raw (templated) state snippet from the FORMULA
    formula_def -- formula metadata; its keys (plus a copy of opts)
        are exposed as template variables

    Returns the compiled highstate data structure.
    '''
    # FORMULA can contain a renderer option; fall back to the
    # configured renderer, then to jinja|yaml
    renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
    rend = salt.loader.render(self.opts, {})
    blacklist = self.opts.get('renderer_blacklist')
    whitelist = self.opts.get('renderer_whitelist')
    # Expose the formula metadata and a copy of opts to the template
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()
    return compile_template(
        ':string:',
        rend,
        renderer,
        blacklist,
        whitelist,
        input_data=data,
        **template_vars
    )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._resolve_deps
|
python
|
def _resolve_deps(self, formula_def):
'''
Return a list of packages which need to be installed, to resolve all
dependencies
'''
pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
if not isinstance(pkg_info, dict):
pkg_info = {}
can_has = {}
cant_has = []
if 'dependencies' in formula_def and formula_def['dependencies'] is None:
formula_def['dependencies'] = ''
for dep in formula_def.get('dependencies', '').split(','):
dep = dep.strip()
if not dep:
continue
if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
continue
if dep in self.avail_pkgs:
can_has[dep] = self.avail_pkgs[dep]
else:
cant_has.append(dep)
optional = formula_def.get('optional', '').split(',')
recommended = formula_def.get('recommended', '').split(',')
inspected = []
to_inspect = can_has.copy()
while to_inspect:
dep = next(six.iterkeys(to_inspect))
del to_inspect[dep]
# Don't try to resolve the same package more than once
if dep in inspected:
continue
inspected.append(dep)
repo_contents = self.repo_metadata.get(can_has[dep], {})
repo_packages = repo_contents.get('packages', {})
dep_formula = repo_packages.get(dep, {}).get('info', {})
also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
can_has.update(also_can)
cant_has = sorted(set(cant_has + also_cant))
optional = sorted(set(optional + opt_dep))
recommended = sorted(set(recommended + rec_dep))
return can_has, cant_has, optional, recommended
|
Return a list of packages which need to be installed, to resolve all
dependencies
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L569-L618
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None):  # pylint: disable=W0231
    # ui: user-interface object providing status/error/confirm output
    # opts: optional pre-built config dict; loaded from the 'spm'
    #       config file when not supplied
    self.ui = ui
    if not opts:
        opts = salt.config.spm_config(
            os.path.join(syspaths.CONFIG_DIR, 'spm')
        )
    self.opts = opts
    # Provider names used for lazy lookup of pkgdb/pkgfiles functions
    self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
    self.files_prov = self.opts.get('spm_files_provider', 'local')
    self._prep_pkgdb()
    self._prep_pkgfiles()
    # Provider connections are opened once in _init() and reused
    self.db_conn = None
    self.files_conn = None
    self._init()
def _prep_pkgdb(self):
    # Load the package-database provider modules via the salt loader
    self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
    # Load the package-files provider modules via the salt loader
    self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
    # Open the provider connections once; repeated calls are no-ops
    if not self.db_conn:
        self.db_conn = self._pkgdb_fun('init')
    if not self.files_conn:
        self.files_conn = self._pkgfiles_fun('init')
def _close(self):
    # Close the package-database connection if one was opened.
    # NOTE(review): files_conn is never closed here — presumably the
    # default 'local' files provider has nothing to close; confirm for
    # other providers.
    if self.db_conn:
        self.db_conn.close()
def run(self, args):
    '''
    Run the SPM command

    args[0] selects the subcommand; SPM errors are reported through the
    UI rather than propagated.
    '''
    command = args[0]
    dispatch = {
        'install': self._install,
        'local': self._local,
        'repo': self._repo,
        'remove': self._remove,
        'build': self._build,
        'update_repo': self._download_repo_metadata,
        'create_repo': self._create_repo,
        'files': self._list_files,
        'info': self._info,
        'list': self._list,
    }
    try:
        if command == 'close':
            # 'close' is the only subcommand that takes no arguments
            self._close()
        elif command in dispatch:
            dispatch[command](args)
        else:
            raise SPMInvocationError('Invalid command \'{0}\''.format(command))
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
    # Dispatch *func* to the configured package-database provider.
    # Try plain attribute access first; fall back to the LazyLoader
    # dict-style lookup ('<provider>.<func>').
    try:
        return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
    except AttributeError:
        return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
    # Dispatch *func* to the configured package-files provider, with the
    # same attribute-then-dict lookup strategy as _pkgdb_fun
    try:
        return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
    except AttributeError:
        return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
    '''
    Dispatch 'spm list <subcommand>' to the matching handler.
    '''
    args.pop(0)
    subcommand = args[0]
    handlers = {
        'packages': self._list_packages,
        'files': self._list_files,
        'repos': self._repo_list,
    }
    if subcommand not in handlers:
        raise SPMInvocationError('Invalid list command \'{0}\''.format(subcommand))
    handlers[subcommand](args)
def _local(self, args):
    '''
    Dispatch 'spm local <subcommand>' to the matching handler.
    '''
    args.pop(0)
    subcommand = args[0]
    handlers = {
        'install': self._local_install,
        'files': self._local_list_files,
        'info': self._local_info,
    }
    if subcommand not in handlers:
        raise SPMInvocationError('Invalid local command \'{0}\''.format(subcommand))
    handlers[subcommand](args)
def _repo(self, args):
    '''
    Dispatch 'spm repo <subcommand>' to the matching handler.
    '''
    args.pop(0)
    subcommand = args[0]
    handlers = {
        'list': self._repo_list,
        'packages': self._repo_packages,
        'update': self._download_repo_metadata,
        'create': self._create_repo,
    }
    if subcommand == 'search':
        # 'search' shares the packages listing with substring matching
        self._repo_packages(args, search=True)
    elif subcommand in handlers:
        handlers[subcommand](args)
    else:
        raise SPMInvocationError('Invalid repo command \'{0}\''.format(subcommand))
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages
def _repo_list(self, args):
'''
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
'''
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
self.ui.status(repo)
def _install(self, args):
    '''
    Install a package from a repo

    args -- CLI argument list; args[1:] are package names, or paths to
        local .spm files (detected by extension).

    Flow: resolve dependencies for every requested package, confirm,
    pick the best (newest, preferring file:// repos on ties) source for
    each package, download everything, then install.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    # NOTE(review): 'cache' is created but never used in this method —
    # looks like dead code; confirm before removing.
    cache = salt.cache.Cache(self.opts)

    packages = args[1:]
    file_map = {}          # pkg_name -> local .spm path (installed directly)
    optional = []
    recommended = []
    to_install = []
    for pkg in packages:
        if pkg.endswith('.spm'):
            if self._pkgfiles_fun('path_exists', pkg):
                # Derive pkg name from <name>-<version>-<release>.spm
                comps = pkg.split('-')
                comps = os.path.split('-'.join(comps[:-2]))
                pkg_name = comps[-1]

                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = salt.utils.yaml.safe_load(formula_ref)

                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(
                    pkg_name=pkg_name,
                    pkg_file=pkg,
                    formula_def=formula_def
                )
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
                formula_tar.close()
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)

    # filter(len, ...) drops the empty strings produced by splitting
    # empty dependency lists
    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))

    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    repo_metadata = self._get_repo_metadata()

    dl_list = {}
    for package in to_install:
        if package in file_map:
            # Local .spm files are installed directly, no download needed
            self._install_indv_pkg(package, file_map[package])
        else:
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        # Check package version, replace if newer version
                        if repo_ver == dl_list[package]['version']:
                            # Version is the same, check release
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                # Version and release are the same, give
                                # preference to local (file://) repos
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        dl_package = True

                    if dl_package is True:
                        # Put together download directory
                        cache_path = os.path.join(
                            self.opts['spm_cache_dir'],
                            repo
                        )

                        # Put together download paths
                        dl_url = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = os.path.join(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )
                        dl_list[package] = {
                            'version': repo_ver,
                            'release': repo_rel,
                            'source': dl_url,
                            'dest_dir': cache_path,
                            'dest_file': out_file,
                        }

    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']

        # Make sure download directory exists
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)

        # Download the package
        if dl_url.startswith('file://'):
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            # NOTE(review): 'repo_info' here is whatever repo the loop
            # above visited last, not necessarily the repo this package
            # was chosen from — auth credentials could be wrong; verify.
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, repo_info['info']))

    # First we download everything, then we install
    for package in dl_list:
        out_file = dl_list[package]['dest_file']

        # Kick off the install
        self._install_indv_pkg(package, out_file)
    return
def _local_install(self, args, pkg_name=None):
    '''
    Install a package from a file
    '''
    # NOTE(review): pkg_name is accepted but never used; _install()
    # re-derives the name from the .spm filename.
    if len(args) < 2:
        raise SPMInvocationError('A package file must be specified')

    self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
    '''
    Starting with one package, check all packages for dependencies

    pkg_name -- name of the package to resolve
    pkg_file -- optional local .spm path (existence is validated)
    formula_def -- formula metadata; looked up in the repo metadata
        when not supplied

    Returns (pkgs_to_install, optional_install, recommended_install).
    '''
    if pkg_file and not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    self.repo_metadata = self._get_repo_metadata()
    if not formula_def:
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            if pkg_name in self.repo_metadata[repo]['packages']:
                formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']

    if not formula_def:
        raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))

    # Check to see if the package is already installed
    pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    pkgs_to_install = []
    if pkg_info is None or self.opts['force']:
        pkgs_to_install.append(pkg_name)
    elif pkg_info is not None and not self.opts['force']:
        raise SPMPackageError(
            'Package {0} already installed, not installing again'.format(formula_def['name'])
        )

    optional_install = []
    recommended_install = []
    if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
        # Map every available package to the repo that provides it,
        # for _resolve_deps to consult
        self.avail_pkgs = {}
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            for pkg in self.repo_metadata[repo]['packages']:
                self.avail_pkgs[pkg] = repo

        needs, unavail, optional, recommended = self._resolve_deps(formula_def)

        if unavail:
            raise SPMPackageError(
                'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                    formula_def['name'], '\n'.join(unavail))
            )

        if optional:
            optional_install.extend(optional)
            for dep_pkg in optional:
                # Query the dependency itself; the old code queried the
                # parent package (formula_def['name']), so the
                # '[Installed]' tag reflected the wrong package
                pkg_info = self._pkgdb_fun('info', dep_pkg)
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                optional_install.append(msg)

        if recommended:
            recommended_install.extend(recommended)
            for dep_pkg in recommended:
                # Same fix as above: check the dependency, not the parent
                pkg_info = self._pkgdb_fun('info', dep_pkg)
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                recommended_install.append(msg)

        if needs:
            pkgs_to_install.extend(needs)
            # The old code also built a '[Installed]' message per needed
            # dep here but never used it; that dead loop is removed.

    return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package

    pkg_name -- the package name (used to locate <name>/FORMULA inside
        the tarball)
    pkg_file -- path to the .spm tarball to install

    Runs pre_local_state/pre_tgt_state before installing files and
    post_local_state/post_tgt_state afterwards, when defined.
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)

    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))

    pkg_files = formula_tar.getmembers()

    # First pass: check for files that already exist
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)

    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )

    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)

    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        ret = self.caller.cmd('state.high', data=high_data)
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        tgt = formula_def['pre_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            # Fixed typo: was 'timout', which run_job(**kwargs) silently
            # ignored, so the configured timeout was never applied
            timeout=self.opts['timeout'],
            data=high_data,
        )

    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]

    # Second pass: install the files
    for member in pkg_files:
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname

        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)

    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        tgt = formula_def['post_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            # Same typo fix as the pre_tgt_state call above
            timeout=self.opts['timeout'],
            data=high_data,
        )

    formula_tar.close()
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided in
    the callback to them.

    callback -- callable invoked as ``callback(repo, repo_config)`` for each
        enabled repo (skipped when its config sets ``enabled: False``)
    repo_name -- when given, only the repo with this name is processed
    '''
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])

    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            # Store the full path here. The old code stored only the bare
            # filename and later rebuilt the path as
            # '{spm_repos_config}.d/{entry}', which produced a bogus path
            # for the main config file entry appended above (it is already
            # an absolute path).
            repo_files.append(os.path.join(dirpath, repo_file))

    for repo_path in repo_files:
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
        for repo in repo_data:
            if repo_data[repo].get('enabled', True) is False:
                continue
            if repo_name is not None and repo != repo_name:
                continue
            callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http

    dl_path -- full URL to fetch. Responses for paths containing
        'SPM-METADATA' are parsed as YAML; anything else is returned
        as raw text.
    repo_info -- repo config dict. Optional 'username'/'password' keys
        enable authenticated requests; a username without a password is
        treated as an error.

    Returns the parsed/raw payload, or None when an error was reported
    (errors are sent to the UI rather than raised).
    '''
    query = None
    response = None

    try:
        if 'username' in repo_info:
            try:
                if 'password' in repo_info:
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    # Username with no password is a config error
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            # No auth configured; plain request
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    try:
        if query:
            if 'SPM-METADATA' in dl_path:
                # Metadata files are YAML documents
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    return response
def _download_repo_metadata(self, args):
    '''
    Connect to all repos and download metadata

    args -- CLI argument list; args[1], when present, restricts the
        update to a single repo name.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

    def _update_metadata(repo, repo_info):
        # file:// repos are read straight off the local disk; anything
        # else goes through HTTP
        dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
        if dl_path.startswith('file://'):
            dl_path = dl_path.replace('file://', '')
            with salt.utils.files.fopen(dl_path, 'r') as rpm:
                metadata = salt.utils.yaml.safe_load(rpm)
        else:
            metadata = self._query_http(dl_path, repo_info)

        # Cached under bank '.' keyed by repo name; read back by
        # _get_repo_metadata()
        cache.store('.', repo, metadata)

    repo_name = args[1] if len(args) > 1 else None
    self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata

    Returns a dict keyed by repo name, each value holding the repo's
    config ('info') and its cached package index ('packages').
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    metadata = {}

    def _read_metadata(repo, repo_info):
        # A missing cache entry means metadata was never downloaded;
        # fetch it (for all repos) before reading
        if cache.updated('.', repo) is None:
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})

        metadata[repo] = {
            'info': repo_info,
            'packages': cache.fetch('.', repo),
        }

    self._traverse_repos(_read_metadata)
    return metadata
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    args -- CLI argument list; args[1] is the directory to scan
        ('.' means the current working directory).

    When two builds of the same package are found, only the newest
    version-release is indexed; older files are handled per the
    ``spm_repo_dups`` option (ignore/archive/delete).
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')

    if args[1] == '.':
        # os.getcwdu() exists only on Python 2; os.getcwd() works everywhere
        repo_path = os.getcwd()
    else:
        repo_path = args[1]

    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # Filename convention is <name>-<version>-<release>.spm
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
            # Close the package as soon as the FORMULA is read so we do not
            # leak a file handle per package while walking a large repo
            spm_fh.close()

            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False
                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)

            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file

    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )

    log.debug('Wrote %s', metadata_filename)

    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
                # Only claim success after the move actually happened (the
                # old code logged 'archived' right after creating the dir)
                log.debug('%s has been archived', file_)
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove a package

    ``args[1:]`` are names of installed packages. For each package,
    files whose on-disk hash still matches the hash recorded at install
    time are deleted; modified files are left in place. Emptied
    directories are then removed and the package is unregistered from
    the package database.

    Raises:
        SPMInvocationError: if no package was given, or a package is
            not installed.
        SPMDatabaseError: if the package database does not exist.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)
    for package in packages:
        self.ui.status('... removing {0}'.format(package))
        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))
        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            # Directories are collected and handled after all files
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                dirs.append(filerow[0])
                continue
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                # Hash still matches the recorded value: safe to delete
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                # File changed since install; leave it on disk
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
        # Clean up directories; reverse sort removes deepest paths first
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
    '''
    Emit *msg* through the UI when the ``verbose`` option is enabled,
    and always pass it to the *level* logging callable.
    '''
    verbose_enabled = self.opts.get('verbose', False) is True
    if verbose_enabled:
        self.ui.status(msg)
    level(msg)
def _local_info(self, args):
    '''
    Display the FORMULA metadata contained in a local package file.

    ``args[1]`` must be the path to a ``.spm`` file on disk.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')
    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
    # Drop the trailing "<version>-<release>" components, then any
    # directory prefix, to recover the bare package name.
    name = '-'.join(pkg_file.split('-')[:-2]).split('/')[-1]
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_def = salt.utils.yaml.safe_load(
        formula_tar.extractfile('{0}/FORMULA'.format(name))
    )
    self.ui.status(self._get_info(formula_def))
    formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
    '''
    Build a package

    ``args[1]`` is the path to a formula directory containing a FORMULA
    file. The resulting ``<name>-<version>-<release>.spm`` tarball is
    written to ``spm_build_dir``.

    Raises:
        SPMInvocationError: if no path was given.
        SPMPackageError: if the FORMULA file is missing or lacks a
            required field.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')
    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]
    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)
    # These fields are mandatory for a valid package
    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )
    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])
    self.formula_conf = formula_conf
    formula_tar = tarfile.open(out_path, 'w:bz2')
    if 'files' in formula_conf:
        # This allows files to be added to the SPM file in a specific order.
        # It also allows for files to be tagged as a certain type, as with
        # RPM files. This tag is ignored here, but is used when installing
        # the SPM file.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    prefix = '{0}|'.format(ftype)
                    if file_.startswith(prefix):
                        # BUG FIX: str.lstrip() strips a *character set*,
                        # not a prefix, so it could also eat leading
                        # characters of the real path (e.g. 'd|docs' ->
                        # 'ocs'). Slice off the literal prefix instead.
                        file_ = file_[len(prefix):]
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # If no files are specified, then the whole directory will be added.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            # Very old tarfile versions take ``exclude`` instead of ``filter``
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()
    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
    '''
    tarfile add() filter: return ``None`` to drop a member whose name
    matches one of the ``spm_build_exclude`` patterns, otherwise return
    the member unchanged.
    '''
    if isinstance(member, string_types):
        return None
    pkg_prefix = self.formula_conf['name']
    for pattern in self.opts['spm_build_exclude']:
        excluded = (
            member.name.startswith('{0}/{1}'.format(pkg_prefix, pattern))
            or member.name.startswith('{0}/{1}'.format(self.abspath, pattern))
        )
        if excluded:
            return None
    return member
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script

    The script text in *data* is passed through Salt's renderer
    pipeline (``jinja|yaml`` unless overridden), with the formula's
    fields and a copy of the SPM opts exposed as template variables.
    '''
    # FORMULA can contain a renderer option
    renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
    rend = salt.loader.render(self.opts, {})
    blacklist = self.opts.get('renderer_blacklist')
    whitelist = self.opts.get('renderer_whitelist')
    # Expose the formula definition (plus opts) to the template
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()
    return compile_template(
        ':string:',
        rend,
        renderer,
        blacklist,
        whitelist,
        input_data=data,
        **template_vars
    )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._traverse_repos
|
python
|
def _traverse_repos(self, callback, repo_name=None):
'''
Traverse through all repo files and apply the functionality provided in
the callback to them
'''
repo_files = []
if os.path.exists(self.opts['spm_repos_config']):
repo_files.append(self.opts['spm_repos_config'])
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
for repo_file in filenames:
if not repo_file.endswith('.repo'):
continue
repo_files.append(repo_file)
for repo_file in repo_files:
repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
with salt.utils.files.fopen(repo_path) as rph:
repo_data = salt.utils.yaml.safe_load(rph)
for repo in repo_data:
if repo_data[repo].get('enabled', True) is False:
continue
if repo_name is not None and repo != repo_name:
continue
callback(repo, repo_data[repo])
|
Traverse through all repo files and apply the functionality provided in
the callback to them
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L620-L644
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None):  # pylint: disable=W0231
    # ``ui`` is the user-interface object used for status/error/confirm
    # output; ``opts`` is the SPM configuration, loaded from the default
    # config path when not supplied.
    self.ui = ui
    if not opts:
        opts = salt.config.spm_config(
            os.path.join(syspaths.CONFIG_DIR, 'spm')
        )
    self.opts = opts
    # Names of the pluggable package-database and file-store providers
    self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
    self.files_prov = self.opts.get('spm_files_provider', 'local')
    self._prep_pkgdb()
    self._prep_pkgfiles()
    # Connections are opened lazily by _init()
    self.db_conn = None
    self.files_conn = None
    self._init()
def _prep_pkgdb(self):
    # Load the package-database provider modules via the Salt loader
    self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
    # Load the package file-store provider modules via the Salt loader
    self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
    # Lazily open the package-database and file-store connections
    if not self.db_conn:
        self.db_conn = self._pkgdb_fun('init')
    if not self.files_conn:
        self.files_conn = self._pkgfiles_fun('init')
def _close(self):
    # Close the package database connection, if one was opened.
    # NOTE(review): files_conn is never closed here — confirm whether
    # the file-store provider needs explicit teardown.
    if self.db_conn:
        self.db_conn.close()
def run(self, args):
    '''
    Run the SPM command

    ``args[0]`` selects the subcommand; the full ``args`` list is
    forwarded to the handler. SPM errors are caught and reported
    through the UI rather than propagated.
    '''
    command = args[0]
    dispatch = {
        'install': self._install,
        'local': self._local,
        'repo': self._repo,
        'remove': self._remove,
        'build': self._build,
        'update_repo': self._download_repo_metadata,
        'create_repo': self._create_repo,
        'files': self._list_files,
        'info': self._info,
        'list': self._list,
    }
    try:
        if command == 'close':
            # 'close' is the only handler that takes no arguments
            self._close()
        elif command in dispatch:
            dispatch[command](args)
        else:
            raise SPMInvocationError('Invalid command \'{0}\''.format(command))
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
    # Dispatch *func* to the configured package-database provider.
    # Newer loaders expose providers as attributes; fall back to the
    # dict-style '<provider>.<func>' lookup otherwise.
    try:
        return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
    except AttributeError:
        return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
    # Dispatch *func* to the configured file-store provider, mirroring
    # the attribute-then-dict lookup used by _pkgdb_fun.
    try:
        return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
    except AttributeError:
        return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args)
elif command == 'files':
self._list_files(args)
elif command == 'repos':
self._repo_list(args)
else:
raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'install':
self._local_install(args)
elif command == 'files':
self._local_list_files(args)
elif command == 'info':
self._local_info(args)
else:
raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
def _repo(self, args):
'''
Process repo commands
'''
args.pop(0)
command = args[0]
if command == 'list':
self._repo_list(args)
elif command == 'packages':
self._repo_packages(args)
elif command == 'search':
self._repo_packages(args, search=True)
elif command == 'update':
self._download_repo_metadata(args)
elif command == 'create':
self._create_repo(args)
else:
raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages
def _repo_list(self, args):
    '''
    List configured repos

    This can be called either as a ``repo`` command or a ``list`` command
    '''
    repo_metadata = self._get_repo_metadata()
    # One line of output per configured repo name
    for repo in repo_metadata:
        self.ui.status(repo)
def _install(self, args):
    '''
    Install a package from a repo

    ``args[1:]`` may mix local ``.spm`` file paths and bare package
    names. Local files are installed directly; names are resolved
    (with dependencies) against the configured repos, downloaded into
    ``spm_cache_dir``, and then installed.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    # Salt clients used later by _install_indv_pkg for the pre/post
    # local and targeted state scripts
    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    # NOTE(review): ``cache`` is created here but never used in this
    # method — confirm whether it can be removed.
    cache = salt.cache.Cache(self.opts)
    packages = args[1:]
    file_map = {}  # package name -> local .spm path
    optional = []
    recommended = []
    to_install = []
    # Phase 1: resolve each requested package plus its dependencies
    for pkg in packages:
        if pkg.endswith('.spm'):
            if self._pkgfiles_fun('path_exists', pkg):
                # Derive the package name by stripping the trailing
                # "<version>-<release>" components and the directory
                comps = pkg.split('-')
                comps = os.path.split('-'.join(comps[:-2]))
                pkg_name = comps[-1]
                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = salt.utils.yaml.safe_load(formula_ref)
                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(
                    pkg_name=pkg_name,
                    pkg_file=pkg,
                    formula_def=formula_def
                )
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
                formula_tar.close()
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)
    # Deduplicate and drop empty entries before reporting
    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))
    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)
    repo_metadata = self._get_repo_metadata()
    # Phase 2: pick the best repo candidate for each package
    dl_list = {}
    for package in to_install:
        if package in file_map:
            # Local file: install immediately, nothing to download
            self._install_indv_pkg(package, file_map[package])
        else:
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        # Check package version, replace if newer version
                        if repo_ver == dl_list[package]['version']:
                            # Version is the same, check release
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                # Version and release are the same, give
                                # preference to local (file://) repos
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        dl_package = True
                    if dl_package is True:
                        # Put together download directory
                        cache_path = os.path.join(
                            self.opts['spm_cache_dir'],
                            repo
                        )
                        # Put together download paths
                        dl_url = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = os.path.join(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )
                        dl_list[package] = {
                            'version': repo_ver,
                            'release': repo_rel,
                            'source': dl_url,
                            'dest_dir': cache_path,
                            'dest_file': out_file,
                        }
    # Phase 3: download every selected package
    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']
        # Make sure download directory exists
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)
        # Download the package
        if dl_url.startswith('file://'):
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            # NOTE(review): ``repo_info`` here is whatever was left over
            # from the last iteration of the repo loop above, which may
            # not be the repo this package came from — verify.
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, repo_info['info']))
    # First we download everything, then we install
    for package in dl_list:
        out_file = dl_list[package]['dest_file']
        # Kick off the install
        self._install_indv_pkg(package, out_file)
    return
def _local_install(self, args, pkg_name=None):
    '''
    Install a package from a file

    NOTE(review): ``pkg_name`` is accepted but never used; kept for
    backward compatibility with existing callers.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package file must be specified')
    # _install() handles .spm file arguments directly
    self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
    '''
    Starting with one package, check all packages for dependencies

    Returns a 3-tuple of lists:
    ``(pkgs_to_install, optional_install, recommended_install)``.
    '''
    if pkg_file and not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
    self.repo_metadata = self._get_repo_metadata()
    if not formula_def:
        # No formula supplied; look the package up in the repo metadata
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            if pkg_name in self.repo_metadata[repo]['packages']:
                formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
    if not formula_def:
        raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
    # Check to see if the package is already installed
    pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    pkgs_to_install = []
    if pkg_info is None or self.opts['force']:
        pkgs_to_install.append(pkg_name)
    elif pkg_info is not None and not self.opts['force']:
        raise SPMPackageError(
            'Package {0} already installed, not installing again'.format(formula_def['name'])
        )
    optional_install = []
    recommended_install = []
    if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
        # Build a name -> repo map of every available package; used by
        # _resolve_deps (via self.avail_pkgs) to locate dependencies
        self.avail_pkgs = {}
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            for pkg in self.repo_metadata[repo]['packages']:
                self.avail_pkgs[pkg] = repo
        needs, unavail, optional, recommended = self._resolve_deps(formula_def)
        if unavail:
            raise SPMPackageError(
                'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                    formula_def['name'], '\n'.join(unavail))
            )
        if optional:
            # NOTE(review): each optional dep ends up in the returned list
            # twice — once from extend() and once (possibly tagged
            # '[Installed]') from append() below. Confirm the extend()
            # is intended.
            optional_install.extend(optional)
            for dep_pkg in optional:
                # NOTE(review): this queries the *parent* formula's name,
                # not dep_pkg — looks like a bug; verify.
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                optional_install.append(msg)
        if recommended:
            # NOTE(review): same duplication pattern as the optional list
            recommended_install.extend(recommended)
            for dep_pkg in recommended:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                recommended_install.append(msg)
        if needs:
            pkgs_to_install.extend(needs)
            for dep_pkg in needs:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                # NOTE(review): ``msg`` is computed here but never used
    return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package

    Reads the FORMULA from *pkg_file*, registers the package, runs any
    pre_local_state/pre_tgt_state scripts, extracts the member files
    (recording a SHA1 digest per file), then runs any
    post_local_state/post_tgt_state scripts.
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)
    # These fields are mandatory for a valid package
    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
    pkg_files = formula_tar.getmembers()
    # First pass: check for files that already exist
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )
    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        ret = self.caller.cmd('state.high', data=high_data)
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        tgt = formula_def['pre_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            # NOTE(review): 'timout' is misspelled ('timeout'); as written
            # this is passed as an unrecognized kwarg — verify against
            # LocalClient.run_job and fix.
            timout=self.opts['timeout'],
            data=high_data,
        )
    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]
    # Second pass: install the files
    for member in pkg_files:
        # Force ownership of extracted files to the resolved user/group
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname
        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                # Directories get an empty digest in the database
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)
    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        tgt = formula_def['post_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            # NOTE(review): same 'timout' misspelling as above
            timout=self.opts['timeout'],
            data=high_data,
        )
    formula_tar.close()
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies

    Returns ``(can_has, cant_has, optional, recommended)`` where
    ``can_has`` maps resolvable dependency names to the repo providing
    them and ``cant_has`` lists unresolvable names. Relies on
    ``self.avail_pkgs`` and ``self.repo_metadata`` being populated by
    _check_all_deps before this is called.
    '''
    # NOTE(review): ``pkg_info`` is computed here but never used below
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}
    can_has = {}
    cant_has = []
    # Dependencies are a comma-separated string; normalize None to ''
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Skip dependencies that are already installed
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue
        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)
    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')
    # Walk transitive dependencies recursively, merging their results
    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]
        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)
        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})
        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))
    return can_has, cant_has, optional, recommended
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http

    Returns the response body as text, or parsed YAML when *dl_path*
    points at an SPM-METADATA file. On failure the error is reported
    through the UI and None is returned.
    '''
    query = None
    response = None
    try:
        if 'username' in repo_info:
            try:
                # Basic-auth request; a username without a password is
                # treated as a configuration error
                if 'password' in repo_info:
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
    try:
        if query:
            if 'SPM-METADATA' in dl_path:
                # Metadata files are YAML; parse before returning
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
    return response
def _download_repo_metadata(self, args):
    '''
    Connect to all repos and download metadata

    ``args[1]``, when present, restricts the update to a single named
    repo.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

    def _update_metadata(repo, repo_info):
        # Fetch SPM-METADATA from the repo (local file or HTTP) and
        # store it in the cache under the repo's name
        dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
        if dl_path.startswith('file://'):
            dl_path = dl_path.replace('file://', '')
            with salt.utils.files.fopen(dl_path, 'r') as rpm:
                metadata = salt.utils.yaml.safe_load(rpm)
        else:
            metadata = self._query_http(dl_path, repo_info)
        cache.store('.', repo, metadata)

    repo_name = args[1] if len(args) > 1 else None
    self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata

    Returns a dict keyed by repo name, each entry carrying the repo's
    config under 'info' and its cached package index under 'packages'.
    The cache is refreshed for any repo not yet cached.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    metadata = {}

    def _read_metadata(repo, repo_info):
        # Refresh the cache on first use, then read from it
        if cache.updated('.', repo) is None:
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})
        metadata[repo] = {
            'info': repo_info,
            'packages': cache.fetch('.', repo),
        }

    self._traverse_repos(_read_metadata)
    return metadata
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    ``args[1]`` is the directory to scan ('.' for the current
    directory). When multiple versions of the same package are found,
    only the newest is indexed; the older files are ignored, archived,
    or deleted according to the ``spm_repo_dups`` option.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')
    if args[1] == '.':
        # BUG FIX: os.getcwdu() only exists on Python 2; fall back to
        # os.getcwd() (which already returns text) on Python 3.
        repo_path = getattr(os, 'getcwdu', os.getcwd)()
    else:
        repo_path = args[1]
    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # Package name is everything before "<version>-<release>"
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
            # BUG FIX: the tarfile handle was previously leaked
            spm_fh.close()
            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False
                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)
            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file
    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )
    log.debug('Wrote %s', metadata_filename)
    # Handle superseded package files per the spm_repo_dups policy
    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove a package

    Deletes only files whose on-disk hash still matches the recorded
    install-time hash, then removes emptied directories and
    unregisters the package from the database.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)
    for package in packages:
        self.ui.status('... removing {0}'.format(package))
        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))
        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                dirs.append(filerow[0])
                continue
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                # Unmodified since install; safe to delete
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
        # Clean up directories; reverse sort removes deepest paths first
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
    '''
    Display verbose information

    The message is always passed to the *level* logging callable; it
    is echoed through the UI only when the ``verbose`` option is on.
    '''
    if self.opts.get('verbose', False) is True:
        self.ui.status(msg)
    level(msg)
def _local_info(self, args):
    '''
    List info for a package file

    ``args[1]`` is the path to a local ``.spm`` file; its FORMULA
    metadata is read and displayed.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')
    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
    # Strip "<version>-<release>" and any directory prefix to get the name
    comps = pkg_file.split('-')
    comps = '-'.join(comps[:-2]).split('/')
    name = comps[-1]
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)
    self.ui.status(self._get_info(formula_def))
    formula_tar.close()
def _info(self, args):
    '''
    List info for a package

    ``args[1]`` is the name of an installed package; its metadata is
    read from the package database.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    package = args[1]
    pkg_info = self._pkgdb_fun('info', package, self.db_conn)
    if pkg_info is None:
        raise SPMPackageError('package {0} not installed'.format(package))
    self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
    '''
    Get package info

    Returns a multi-line description built from *formula_def*. Missing
    fields are filled in (in place, on the caller's dict) with the
    string 'None', and 'installed' defaults to 'Not installed'.
    '''
    fields = (
        'name',
        'os',
        'os_family',
        'release',
        'version',
        'dependencies',
        'os_dependencies',
        'os_family_dependencies',
        'summary',
        'description',
    )
    for item in fields:
        if item not in formula_def:
            formula_def[item] = 'None'
    if 'installed' not in formula_def:
        formula_def['installed'] = 'Not installed'
    return ('Name: {name}\n'
            'Version: {version}\n'
            'Release: {release}\n'
            'Install Date: {installed}\n'
            'Supported OSes: {os}\n'
            'Supported OS families: {os_family}\n'
            'Dependencies: {dependencies}\n'
            'OS Dependencies: {os_dependencies}\n'
            'OS Family Dependencies: {os_family_dependencies}\n'
            'Summary: {summary}\n'
            'Description:\n'
            '{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
'''
Build a package
'''
if len(args) < 2:
raise SPMInvocationError('A path to a formula must be specified')
self.abspath = args[1].rstrip('/')
comps = self.abspath.split('/')
self.relpath = comps[-1]
formula_path = '{0}/FORMULA'.format(self.abspath)
if not os.path.exists(formula_path):
raise SPMPackageError('Formula file {0} not found'.format(formula_path))
with salt.utils.files.fopen(formula_path) as fp_:
formula_conf = salt.utils.yaml.safe_load(fp_)
for field in ('name', 'version', 'release', 'summary', 'description'):
if field not in formula_conf:
raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
out_path = '{0}/{1}-{2}-{3}.spm'.format(
self.opts['spm_build_dir'],
formula_conf['name'],
formula_conf['version'],
formula_conf['release'],
)
if not os.path.exists(self.opts['spm_build_dir']):
os.mkdir(self.opts['spm_build_dir'])
self.formula_conf = formula_conf
formula_tar = tarfile.open(out_path, 'w:bz2')
if 'files' in formula_conf:
# This allows files to be added to the SPM file in a specific order.
# It also allows for files to be tagged as a certain type, as with
# RPM files. This tag is ignored here, but is used when installing
# the SPM file.
if isinstance(formula_conf['files'], list):
formula_dir = tarfile.TarInfo(formula_conf['name'])
formula_dir.type = tarfile.DIRTYPE
formula_tar.addfile(formula_dir)
for file_ in formula_conf['files']:
for ftype in FILE_TYPES:
if file_.startswith('{0}|'.format(ftype)):
file_ = file_.lstrip('{0}|'.format(ftype))
formula_tar.add(
os.path.join(os.getcwd(), file_),
os.path.join(formula_conf['name'], file_),
)
else:
# If no files are specified, then the whole directory will be added.
try:
formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
except TypeError:
formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
formula_tar.close()
self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
'''
Exclude based on opts
'''
if isinstance(member, string_types):
return None
for item in self.opts['spm_build_exclude']:
if member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], item)):
return None
elif member.name.startswith('{0}/{1}'.format(self.abspath, item)):
return None
return member
def _render(self, data, formula_def):
'''
Render a [pre|post]_local_state or [pre|post]_tgt_state script
'''
# FORMULA can contain a renderer option
renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
rend = salt.loader.render(self.opts, {})
blacklist = self.opts.get('renderer_blacklist')
whitelist = self.opts.get('renderer_whitelist')
template_vars = formula_def.copy()
template_vars['opts'] = self.opts.copy()
return compile_template(
':string:',
rend,
renderer,
blacklist,
whitelist,
input_data=data,
**template_vars
)
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._query_http
|
python
|
def _query_http(self, dl_path, repo_info):
'''
Download files via http
'''
query = None
response = None
try:
if 'username' in repo_info:
try:
if 'password' in repo_info:
query = http.query(
dl_path, text=True,
username=repo_info['username'],
password=repo_info['password']
)
else:
raise SPMException('Auth defined, but password is not set for username: \'{0}\''
.format(repo_info['username']))
except SPMException as exc:
self.ui.error(six.text_type(exc))
else:
query = http.query(dl_path, text=True)
except SPMException as exc:
self.ui.error(six.text_type(exc))
try:
if query:
if 'SPM-METADATA' in dl_path:
response = salt.utils.yaml.safe_load(query.get('text', '{}'))
else:
response = query.get('text')
else:
raise SPMException('Response is empty, please check for Errors above.')
except SPMException as exc:
self.ui.error(six.text_type(exc))
return response
|
Download files via http
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L646-L683
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None): # pylint: disable=W0231
self.ui = ui
if not opts:
opts = salt.config.spm_config(
os.path.join(syspaths.CONFIG_DIR, 'spm')
)
self.opts = opts
self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
self.files_prov = self.opts.get('spm_files_provider', 'local')
self._prep_pkgdb()
self._prep_pkgfiles()
self.db_conn = None
self.files_conn = None
self._init()
def _prep_pkgdb(self):
self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
if not self.db_conn:
self.db_conn = self._pkgdb_fun('init')
if not self.files_conn:
self.files_conn = self._pkgfiles_fun('init')
def _close(self):
if self.db_conn:
self.db_conn.close()
def run(self, args):
'''
Run the SPM command
'''
command = args[0]
try:
if command == 'install':
self._install(args)
elif command == 'local':
self._local(args)
elif command == 'repo':
self._repo(args)
elif command == 'remove':
self._remove(args)
elif command == 'build':
self._build(args)
elif command == 'update_repo':
self._download_repo_metadata(args)
elif command == 'create_repo':
self._create_repo(args)
elif command == 'files':
self._list_files(args)
elif command == 'info':
self._info(args)
elif command == 'list':
self._list(args)
elif command == 'close':
self._close()
else:
raise SPMInvocationError('Invalid command \'{0}\''.format(command))
except SPMException as exc:
self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args)
elif command == 'files':
self._list_files(args)
elif command == 'repos':
self._repo_list(args)
else:
raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'install':
self._local_install(args)
elif command == 'files':
self._local_list_files(args)
elif command == 'info':
self._local_info(args)
else:
raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
def _repo(self, args):
'''
Process repo commands
'''
args.pop(0)
command = args[0]
if command == 'list':
self._repo_list(args)
elif command == 'packages':
self._repo_packages(args)
elif command == 'search':
self._repo_packages(args, search=True)
elif command == 'update':
self._download_repo_metadata(args)
elif command == 'create':
self._create_repo(args)
else:
raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages
def _repo_list(self, args):
'''
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
'''
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
self.ui.status(repo)
def _install(self, args):
'''
Install a package from a repo
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
caller_opts = self.opts.copy()
caller_opts['file_client'] = 'local'
self.caller = salt.client.Caller(mopts=caller_opts)
self.client = salt.client.get_local_client(self.opts['conf_file'])
cache = salt.cache.Cache(self.opts)
packages = args[1:]
file_map = {}
optional = []
recommended = []
to_install = []
for pkg in packages:
if pkg.endswith('.spm'):
if self._pkgfiles_fun('path_exists', pkg):
comps = pkg.split('-')
comps = os.path.split('-'.join(comps[:-2]))
pkg_name = comps[-1]
formula_tar = tarfile.open(pkg, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
file_map[pkg_name] = pkg
to_, op_, re_ = self._check_all_deps(
pkg_name=pkg_name,
pkg_file=pkg,
formula_def=formula_def
)
to_install.extend(to_)
optional.extend(op_)
recommended.extend(re_)
formula_tar.close()
else:
raise SPMInvocationError('Package file {0} not found'.format(pkg))
else:
to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
to_install.extend(to_)
optional.extend(op_)
recommended.extend(re_)
optional = set(filter(len, optional))
if optional:
self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
'\n\t'.join(optional)
))
recommended = set(filter(len, recommended))
if recommended:
self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
'\n\t'.join(recommended)
))
to_install = set(filter(len, to_install))
msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
if not self.opts['assume_yes']:
self.ui.confirm(msg)
repo_metadata = self._get_repo_metadata()
dl_list = {}
for package in to_install:
if package in file_map:
self._install_indv_pkg(package, file_map[package])
else:
for repo in repo_metadata:
repo_info = repo_metadata[repo]
if package in repo_info['packages']:
dl_package = False
repo_ver = repo_info['packages'][package]['info']['version']
repo_rel = repo_info['packages'][package]['info']['release']
repo_url = repo_info['info']['url']
if package in dl_list:
# Check package version, replace if newer version
if repo_ver == dl_list[package]['version']:
# Version is the same, check release
if repo_rel > dl_list[package]['release']:
dl_package = True
elif repo_rel == dl_list[package]['release']:
# Version and release are the same, give
# preference to local (file://) repos
if dl_list[package]['source'].startswith('file://'):
if not repo_url.startswith('file://'):
dl_package = True
elif repo_ver > dl_list[package]['version']:
dl_package = True
else:
dl_package = True
if dl_package is True:
# Put together download directory
cache_path = os.path.join(
self.opts['spm_cache_dir'],
repo
)
# Put together download paths
dl_url = '{0}/{1}'.format(
repo_info['info']['url'],
repo_info['packages'][package]['filename']
)
out_file = os.path.join(
cache_path,
repo_info['packages'][package]['filename']
)
dl_list[package] = {
'version': repo_ver,
'release': repo_rel,
'source': dl_url,
'dest_dir': cache_path,
'dest_file': out_file,
}
for package in dl_list:
dl_url = dl_list[package]['source']
cache_path = dl_list[package]['dest_dir']
out_file = dl_list[package]['dest_file']
# Make sure download directory exists
if not os.path.exists(cache_path):
os.makedirs(cache_path)
# Download the package
if dl_url.startswith('file://'):
dl_url = dl_url.replace('file://', '')
shutil.copyfile(dl_url, out_file)
else:
with salt.utils.files.fopen(out_file, 'w') as outf:
outf.write(self._query_http(dl_url, repo_info['info']))
# First we download everything, then we install
for package in dl_list:
out_file = dl_list[package]['dest_file']
# Kick off the install
self._install_indv_pkg(package, out_file)
return
def _local_install(self, args, pkg_name=None):
'''
Install a package from a file
'''
if len(args) < 2:
raise SPMInvocationError('A package file must be specified')
self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
'''
Starting with one package, check all packages for dependencies
'''
if pkg_file and not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
self.repo_metadata = self._get_repo_metadata()
if not formula_def:
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
if pkg_name in self.repo_metadata[repo]['packages']:
formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
if not formula_def:
raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
# Check to see if the package is already installed
pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
pkgs_to_install = []
if pkg_info is None or self.opts['force']:
pkgs_to_install.append(pkg_name)
elif pkg_info is not None and not self.opts['force']:
raise SPMPackageError(
'Package {0} already installed, not installing again'.format(formula_def['name'])
)
optional_install = []
recommended_install = []
if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
self.avail_pkgs = {}
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
for pkg in self.repo_metadata[repo]['packages']:
self.avail_pkgs[pkg] = repo
needs, unavail, optional, recommended = self._resolve_deps(formula_def)
if unavail:
raise SPMPackageError(
'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
formula_def['name'], '\n'.join(unavail))
)
if optional:
optional_install.extend(optional)
for dep_pkg in optional:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
optional_install.append(msg)
if recommended:
recommended_install.extend(recommended)
for dep_pkg in recommended:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
recommended_install.append(msg)
if needs:
pkgs_to_install.extend(needs)
for dep_pkg in needs:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
'''
Install one individual package
'''
self.ui.status('... installing {0}'.format(pkg_name))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
for field in ('version', 'release', 'summary', 'description'):
if field not in formula_def:
raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
pkg_files = formula_tar.getmembers()
# First pass: check for files that already exist
existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
if existing_files and not self.opts['force']:
raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
pkg_name, '\n'.join(existing_files))
)
# We've decided to install
self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
# Run the pre_local_state script, if present
if 'pre_local_state' in formula_def:
high_data = self._render(formula_def['pre_local_state'], formula_def)
ret = self.caller.cmd('state.high', data=high_data)
if 'pre_tgt_state' in formula_def:
log.debug('Executing pre_tgt_state script')
high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
tgt = formula_def['pre_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['pre_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
# No defaults for this in config.py; default to the current running
# user and group
if salt.utils.platform.is_windows():
uname = gname = salt.utils.win_functions.get_current_user()
uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
uid = self.opts.get('spm_uid', uname_sid)
gid = self.opts.get('spm_gid', uname_sid)
else:
uid = self.opts.get('spm_uid', os.getuid())
gid = self.opts.get('spm_gid', os.getgid())
uname = pwd.getpwuid(uid)[0]
gname = grp.getgrgid(gid)[0]
# Second pass: install the files
for member in pkg_files:
member.uid = uid
member.gid = gid
member.uname = uname
member.gname = gname
out_path = self._pkgfiles_fun('install_file',
pkg_name,
formula_tar,
member,
formula_def,
self.files_conn)
if out_path is not False:
if member.isdir():
digest = ''
else:
self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
file_hash = hashlib.sha1()
digest = self._pkgfiles_fun('hash_file',
os.path.join(out_path, member.name),
file_hash,
self.files_conn)
self._pkgdb_fun('register_file',
pkg_name,
member,
out_path,
digest,
self.db_conn)
# Run the post_local_state script, if present
if 'post_local_state' in formula_def:
log.debug('Executing post_local_state script')
high_data = self._render(formula_def['post_local_state'], formula_def)
self.caller.cmd('state.high', data=high_data)
if 'post_tgt_state' in formula_def:
log.debug('Executing post_tgt_state script')
high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
tgt = formula_def['post_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['post_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
formula_tar.close()
def _resolve_deps(self, formula_def):
'''
Return a list of packages which need to be installed, to resolve all
dependencies
'''
pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
if not isinstance(pkg_info, dict):
pkg_info = {}
can_has = {}
cant_has = []
if 'dependencies' in formula_def and formula_def['dependencies'] is None:
formula_def['dependencies'] = ''
for dep in formula_def.get('dependencies', '').split(','):
dep = dep.strip()
if not dep:
continue
if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
continue
if dep in self.avail_pkgs:
can_has[dep] = self.avail_pkgs[dep]
else:
cant_has.append(dep)
optional = formula_def.get('optional', '').split(',')
recommended = formula_def.get('recommended', '').split(',')
inspected = []
to_inspect = can_has.copy()
while to_inspect:
dep = next(six.iterkeys(to_inspect))
del to_inspect[dep]
# Don't try to resolve the same package more than once
if dep in inspected:
continue
inspected.append(dep)
repo_contents = self.repo_metadata.get(can_has[dep], {})
repo_packages = repo_contents.get('packages', {})
dep_formula = repo_packages.get(dep, {}).get('info', {})
also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
can_has.update(also_can)
cant_has = sorted(set(cant_has + also_cant))
optional = sorted(set(optional + opt_dep))
recommended = sorted(set(recommended + rec_dep))
return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
'''
Traverse through all repo files and apply the functionality provided in
the callback to them
'''
repo_files = []
if os.path.exists(self.opts['spm_repos_config']):
repo_files.append(self.opts['spm_repos_config'])
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
for repo_file in filenames:
if not repo_file.endswith('.repo'):
continue
repo_files.append(repo_file)
for repo_file in repo_files:
repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
with salt.utils.files.fopen(repo_path) as rph:
repo_data = salt.utils.yaml.safe_load(rph)
for repo in repo_data:
if repo_data[repo].get('enabled', True) is False:
continue
if repo_name is not None and repo != repo_name:
continue
callback(repo, repo_data[repo])
def _download_repo_metadata(self, args):
'''
Connect to all repos and download metadata
'''
cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
def _update_metadata(repo, repo_info):
dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
if dl_path.startswith('file://'):
dl_path = dl_path.replace('file://', '')
with salt.utils.files.fopen(dl_path, 'r') as rpm:
metadata = salt.utils.yaml.safe_load(rpm)
else:
metadata = self._query_http(dl_path, repo_info)
cache.store('.', repo, metadata)
repo_name = args[1] if len(args) > 1 else None
self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
'''
Return cached repo metadata
'''
cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
metadata = {}
def _read_metadata(repo, repo_info):
if cache.updated('.', repo) is None:
log.warning('Updating repo metadata')
self._download_repo_metadata({})
metadata[repo] = {
'info': repo_info,
'packages': cache.fetch('.', repo),
}
self._traverse_repos(_read_metadata)
return metadata
def _create_repo(self, args):
'''
Scan a directory and create an SPM-METADATA file which describes
all of the SPM files in that directory.
'''
if len(args) < 2:
raise SPMInvocationError('A path to a directory must be specified')
if args[1] == '.':
repo_path = os.getcwdu()
else:
repo_path = args[1]
old_files = []
repo_metadata = {}
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
for spm_file in filenames:
if not spm_file.endswith('.spm'):
continue
spm_path = '{0}/{1}'.format(repo_path, spm_file)
if not tarfile.is_tarfile(spm_path):
continue
comps = spm_file.split('-')
spm_name = '-'.join(comps[:-2])
spm_fh = tarfile.open(spm_path, 'r:bz2')
formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
use_formula = True
if spm_name in repo_metadata:
# This package is already in the repo; use the latest
cur_info = repo_metadata[spm_name]['info']
new_info = formula_conf
if int(new_info['version']) == int(cur_info['version']):
# Version is the same, check release
if int(new_info['release']) < int(cur_info['release']):
# This is an old release; don't use it
use_formula = False
elif int(new_info['version']) < int(cur_info['version']):
# This is an old version; don't use it
use_formula = False
if use_formula is True:
# Ignore/archive/delete the old version
log.debug(
'%s %s-%s had been added, but %s-%s will replace it',
spm_name, cur_info['version'], cur_info['release'],
new_info['version'], new_info['release']
)
old_files.append(repo_metadata[spm_name]['filename'])
else:
# Ignore/archive/delete the new version
log.debug(
'%s %s-%s has been found, but is older than %s-%s',
spm_name, new_info['version'], new_info['release'],
cur_info['version'], cur_info['release']
)
old_files.append(spm_file)
if use_formula is True:
log.debug(
'adding %s-%s-%s to the repo',
formula_conf['name'], formula_conf['version'],
formula_conf['release']
)
repo_metadata[spm_name] = {
'info': formula_conf.copy(),
}
repo_metadata[spm_name]['filename'] = spm_file
metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
salt.utils.yaml.safe_dump(
repo_metadata,
mfh,
indent=4,
canonical=False,
default_flow_style=False,
)
log.debug('Wrote %s', metadata_filename)
for file_ in old_files:
if self.opts['spm_repo_dups'] == 'ignore':
# ignore old packages, but still only add the latest
log.debug('%s will be left in the directory', file_)
elif self.opts['spm_repo_dups'] == 'archive':
# spm_repo_archive_path is where old packages are moved
if not os.path.exists('./archive'):
try:
os.makedirs('./archive')
log.debug('%s has been archived', file_)
except IOError:
log.error('Unable to create archive directory')
try:
shutil.move(file_, './archive')
except (IOError, OSError):
log.error('Unable to archive %s', file_)
elif self.opts['spm_repo_dups'] == 'delete':
# delete old packages from the repo
try:
os.remove(file_)
log.debug('%s has been deleted', file_)
except IOError:
log.error('Unable to delete %s', file_)
except OSError:
# The file has already been deleted
pass
def _remove(self, args):
'''
Remove a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
packages = args[1:]
msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))
if not self.opts['assume_yes']:
self.ui.confirm(msg)
for package in packages:
self.ui.status('... removing {0}'.format(package))
if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
# Look at local repo index
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMInvocationError('Package {0} not installed'.format(package))
# Find files that have not changed and remove them
files = self._pkgdb_fun('list_files', package, self.db_conn)
dirs = []
for filerow in files:
if self._pkgfiles_fun('path_isdir', filerow[0]):
dirs.append(filerow[0])
continue
file_hash = hashlib.sha1()
digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
if filerow[1] == digest:
self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
else:
self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
# Clean up directories
for dir_ in sorted(dirs, reverse=True):
self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
try:
self._verbose('Removing directory {0}'.format(dir_), log.trace)
os.rmdir(dir_)
except OSError:
# Leave directories in place that still have files in them
self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
'''
Display verbose information
'''
if self.opts.get('verbose', False) is True:
self.ui.status(msg)
level(msg)
def _local_info(self, args):
'''
List info for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
comps = pkg_file.split('-')
comps = '-'.join(comps[:-2]).split('/')
name = comps[-1]
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
self.ui.status(self._get_info(formula_def))
formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
'''
Build a package
'''
if len(args) < 2:
raise SPMInvocationError('A path to a formula must be specified')
self.abspath = args[1].rstrip('/')
comps = self.abspath.split('/')
self.relpath = comps[-1]
formula_path = '{0}/FORMULA'.format(self.abspath)
if not os.path.exists(formula_path):
raise SPMPackageError('Formula file {0} not found'.format(formula_path))
with salt.utils.files.fopen(formula_path) as fp_:
formula_conf = salt.utils.yaml.safe_load(fp_)
for field in ('name', 'version', 'release', 'summary', 'description'):
if field not in formula_conf:
raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
out_path = '{0}/{1}-{2}-{3}.spm'.format(
self.opts['spm_build_dir'],
formula_conf['name'],
formula_conf['version'],
formula_conf['release'],
)
if not os.path.exists(self.opts['spm_build_dir']):
os.mkdir(self.opts['spm_build_dir'])
self.formula_conf = formula_conf
formula_tar = tarfile.open(out_path, 'w:bz2')
if 'files' in formula_conf:
# This allows files to be added to the SPM file in a specific order.
# It also allows for files to be tagged as a certain type, as with
# RPM files. This tag is ignored here, but is used when installing
# the SPM file.
if isinstance(formula_conf['files'], list):
formula_dir = tarfile.TarInfo(formula_conf['name'])
formula_dir.type = tarfile.DIRTYPE
formula_tar.addfile(formula_dir)
for file_ in formula_conf['files']:
for ftype in FILE_TYPES:
if file_.startswith('{0}|'.format(ftype)):
file_ = file_.lstrip('{0}|'.format(ftype))
formula_tar.add(
os.path.join(os.getcwd(), file_),
os.path.join(formula_conf['name'], file_),
)
else:
# If no files are specified, then the whole directory will be added.
try:
formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
except TypeError:
formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
formula_tar.close()
self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
'''
Exclude based on opts
'''
if isinstance(member, string_types):
return None
for item in self.opts['spm_build_exclude']:
if member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], item)):
return None
elif member.name.startswith('{0}/{1}'.format(self.abspath, item)):
return None
return member
def _render(self, data, formula_def):
'''
Render a [pre|post]_local_state or [pre|post]_tgt_state script
'''
# FORMULA can contain a renderer option
renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
rend = salt.loader.render(self.opts, {})
blacklist = self.opts.get('renderer_blacklist')
whitelist = self.opts.get('renderer_whitelist')
template_vars = formula_def.copy()
template_vars['opts'] = self.opts.copy()
return compile_template(
':string:',
rend,
renderer,
blacklist,
whitelist,
input_data=data,
**template_vars
)
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._download_repo_metadata
|
python
|
def _download_repo_metadata(self, args):
'''
Connect to all repos and download metadata
'''
cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
def _update_metadata(repo, repo_info):
dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
if dl_path.startswith('file://'):
dl_path = dl_path.replace('file://', '')
with salt.utils.files.fopen(dl_path, 'r') as rpm:
metadata = salt.utils.yaml.safe_load(rpm)
else:
metadata = self._query_http(dl_path, repo_info)
cache.store('.', repo, metadata)
repo_name = args[1] if len(args) > 1 else None
self._traverse_repos(_update_metadata, repo_name)
|
Connect to all repos and download metadata
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L685-L703
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None): # pylint: disable=W0231
self.ui = ui
if not opts:
opts = salt.config.spm_config(
os.path.join(syspaths.CONFIG_DIR, 'spm')
)
self.opts = opts
self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
self.files_prov = self.opts.get('spm_files_provider', 'local')
self._prep_pkgdb()
self._prep_pkgfiles()
self.db_conn = None
self.files_conn = None
self._init()
def _prep_pkgdb(self):
self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
if not self.db_conn:
self.db_conn = self._pkgdb_fun('init')
if not self.files_conn:
self.files_conn = self._pkgfiles_fun('init')
def _close(self):
if self.db_conn:
self.db_conn.close()
def run(self, args):
'''
Run the SPM command
'''
command = args[0]
try:
if command == 'install':
self._install(args)
elif command == 'local':
self._local(args)
elif command == 'repo':
self._repo(args)
elif command == 'remove':
self._remove(args)
elif command == 'build':
self._build(args)
elif command == 'update_repo':
self._download_repo_metadata(args)
elif command == 'create_repo':
self._create_repo(args)
elif command == 'files':
self._list_files(args)
elif command == 'info':
self._info(args)
elif command == 'list':
self._list(args)
elif command == 'close':
self._close()
else:
raise SPMInvocationError('Invalid command \'{0}\''.format(command))
except SPMException as exc:
self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args)
elif command == 'files':
self._list_files(args)
elif command == 'repos':
self._repo_list(args)
else:
raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'install':
self._local_install(args)
elif command == 'files':
self._local_list_files(args)
elif command == 'info':
self._local_info(args)
else:
raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
def _repo(self, args):
'''
Process repo commands
'''
args.pop(0)
command = args[0]
if command == 'list':
self._repo_list(args)
elif command == 'packages':
self._repo_packages(args)
elif command == 'search':
self._repo_packages(args, search=True)
elif command == 'update':
self._download_repo_metadata(args)
elif command == 'create':
self._create_repo(args)
else:
raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages
def _repo_list(self, args):
'''
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
'''
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
self.ui.status(repo)
def _install(self, args):
'''
Install a package from a repo
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
caller_opts = self.opts.copy()
caller_opts['file_client'] = 'local'
self.caller = salt.client.Caller(mopts=caller_opts)
self.client = salt.client.get_local_client(self.opts['conf_file'])
cache = salt.cache.Cache(self.opts)
packages = args[1:]
file_map = {}
optional = []
recommended = []
to_install = []
for pkg in packages:
if pkg.endswith('.spm'):
if self._pkgfiles_fun('path_exists', pkg):
comps = pkg.split('-')
comps = os.path.split('-'.join(comps[:-2]))
pkg_name = comps[-1]
formula_tar = tarfile.open(pkg, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
file_map[pkg_name] = pkg
to_, op_, re_ = self._check_all_deps(
pkg_name=pkg_name,
pkg_file=pkg,
formula_def=formula_def
)
to_install.extend(to_)
optional.extend(op_)
recommended.extend(re_)
formula_tar.close()
else:
raise SPMInvocationError('Package file {0} not found'.format(pkg))
else:
to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
to_install.extend(to_)
optional.extend(op_)
recommended.extend(re_)
optional = set(filter(len, optional))
if optional:
self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
'\n\t'.join(optional)
))
recommended = set(filter(len, recommended))
if recommended:
self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
'\n\t'.join(recommended)
))
to_install = set(filter(len, to_install))
msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
if not self.opts['assume_yes']:
self.ui.confirm(msg)
repo_metadata = self._get_repo_metadata()
dl_list = {}
for package in to_install:
if package in file_map:
self._install_indv_pkg(package, file_map[package])
else:
for repo in repo_metadata:
repo_info = repo_metadata[repo]
if package in repo_info['packages']:
dl_package = False
repo_ver = repo_info['packages'][package]['info']['version']
repo_rel = repo_info['packages'][package]['info']['release']
repo_url = repo_info['info']['url']
if package in dl_list:
# Check package version, replace if newer version
if repo_ver == dl_list[package]['version']:
# Version is the same, check release
if repo_rel > dl_list[package]['release']:
dl_package = True
elif repo_rel == dl_list[package]['release']:
# Version and release are the same, give
# preference to local (file://) repos
if dl_list[package]['source'].startswith('file://'):
if not repo_url.startswith('file://'):
dl_package = True
elif repo_ver > dl_list[package]['version']:
dl_package = True
else:
dl_package = True
if dl_package is True:
# Put together download directory
cache_path = os.path.join(
self.opts['spm_cache_dir'],
repo
)
# Put together download paths
dl_url = '{0}/{1}'.format(
repo_info['info']['url'],
repo_info['packages'][package]['filename']
)
out_file = os.path.join(
cache_path,
repo_info['packages'][package]['filename']
)
dl_list[package] = {
'version': repo_ver,
'release': repo_rel,
'source': dl_url,
'dest_dir': cache_path,
'dest_file': out_file,
}
for package in dl_list:
dl_url = dl_list[package]['source']
cache_path = dl_list[package]['dest_dir']
out_file = dl_list[package]['dest_file']
# Make sure download directory exists
if not os.path.exists(cache_path):
os.makedirs(cache_path)
# Download the package
if dl_url.startswith('file://'):
dl_url = dl_url.replace('file://', '')
shutil.copyfile(dl_url, out_file)
else:
with salt.utils.files.fopen(out_file, 'w') as outf:
outf.write(self._query_http(dl_url, repo_info['info']))
# First we download everything, then we install
for package in dl_list:
out_file = dl_list[package]['dest_file']
# Kick off the install
self._install_indv_pkg(package, out_file)
return
def _local_install(self, args, pkg_name=None):
'''
Install a package from a file
'''
if len(args) < 2:
raise SPMInvocationError('A package file must be specified')
self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
'''
Starting with one package, check all packages for dependencies
'''
if pkg_file and not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
self.repo_metadata = self._get_repo_metadata()
if not formula_def:
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
if pkg_name in self.repo_metadata[repo]['packages']:
formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
if not formula_def:
raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
# Check to see if the package is already installed
pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
pkgs_to_install = []
if pkg_info is None or self.opts['force']:
pkgs_to_install.append(pkg_name)
elif pkg_info is not None and not self.opts['force']:
raise SPMPackageError(
'Package {0} already installed, not installing again'.format(formula_def['name'])
)
optional_install = []
recommended_install = []
if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
self.avail_pkgs = {}
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
for pkg in self.repo_metadata[repo]['packages']:
self.avail_pkgs[pkg] = repo
needs, unavail, optional, recommended = self._resolve_deps(formula_def)
if unavail:
raise SPMPackageError(
'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
formula_def['name'], '\n'.join(unavail))
)
if optional:
optional_install.extend(optional)
for dep_pkg in optional:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
optional_install.append(msg)
if recommended:
recommended_install.extend(recommended)
for dep_pkg in recommended:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
recommended_install.append(msg)
if needs:
pkgs_to_install.extend(needs)
for dep_pkg in needs:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
'''
Install one individual package
'''
self.ui.status('... installing {0}'.format(pkg_name))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
for field in ('version', 'release', 'summary', 'description'):
if field not in formula_def:
raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
pkg_files = formula_tar.getmembers()
# First pass: check for files that already exist
existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
if existing_files and not self.opts['force']:
raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
pkg_name, '\n'.join(existing_files))
)
# We've decided to install
self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
# Run the pre_local_state script, if present
if 'pre_local_state' in formula_def:
high_data = self._render(formula_def['pre_local_state'], formula_def)
ret = self.caller.cmd('state.high', data=high_data)
if 'pre_tgt_state' in formula_def:
log.debug('Executing pre_tgt_state script')
high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
tgt = formula_def['pre_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['pre_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
# No defaults for this in config.py; default to the current running
# user and group
if salt.utils.platform.is_windows():
uname = gname = salt.utils.win_functions.get_current_user()
uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
uid = self.opts.get('spm_uid', uname_sid)
gid = self.opts.get('spm_gid', uname_sid)
else:
uid = self.opts.get('spm_uid', os.getuid())
gid = self.opts.get('spm_gid', os.getgid())
uname = pwd.getpwuid(uid)[0]
gname = grp.getgrgid(gid)[0]
# Second pass: install the files
for member in pkg_files:
member.uid = uid
member.gid = gid
member.uname = uname
member.gname = gname
out_path = self._pkgfiles_fun('install_file',
pkg_name,
formula_tar,
member,
formula_def,
self.files_conn)
if out_path is not False:
if member.isdir():
digest = ''
else:
self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
file_hash = hashlib.sha1()
digest = self._pkgfiles_fun('hash_file',
os.path.join(out_path, member.name),
file_hash,
self.files_conn)
self._pkgdb_fun('register_file',
pkg_name,
member,
out_path,
digest,
self.db_conn)
# Run the post_local_state script, if present
if 'post_local_state' in formula_def:
log.debug('Executing post_local_state script')
high_data = self._render(formula_def['post_local_state'], formula_def)
self.caller.cmd('state.high', data=high_data)
if 'post_tgt_state' in formula_def:
log.debug('Executing post_tgt_state script')
high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
tgt = formula_def['post_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['post_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
formula_tar.close()
def _resolve_deps(self, formula_def):
'''
Return a list of packages which need to be installed, to resolve all
dependencies
'''
pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
if not isinstance(pkg_info, dict):
pkg_info = {}
can_has = {}
cant_has = []
if 'dependencies' in formula_def and formula_def['dependencies'] is None:
formula_def['dependencies'] = ''
for dep in formula_def.get('dependencies', '').split(','):
dep = dep.strip()
if not dep:
continue
if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
continue
if dep in self.avail_pkgs:
can_has[dep] = self.avail_pkgs[dep]
else:
cant_has.append(dep)
optional = formula_def.get('optional', '').split(',')
recommended = formula_def.get('recommended', '').split(',')
inspected = []
to_inspect = can_has.copy()
while to_inspect:
dep = next(six.iterkeys(to_inspect))
del to_inspect[dep]
# Don't try to resolve the same package more than once
if dep in inspected:
continue
inspected.append(dep)
repo_contents = self.repo_metadata.get(can_has[dep], {})
repo_packages = repo_contents.get('packages', {})
dep_formula = repo_packages.get(dep, {}).get('info', {})
also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
can_has.update(also_can)
cant_has = sorted(set(cant_has + also_cant))
optional = sorted(set(optional + opt_dep))
recommended = sorted(set(recommended + rec_dep))
return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
'''
Traverse through all repo files and apply the functionality provided in
the callback to them
'''
repo_files = []
if os.path.exists(self.opts['spm_repos_config']):
repo_files.append(self.opts['spm_repos_config'])
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
for repo_file in filenames:
if not repo_file.endswith('.repo'):
continue
repo_files.append(repo_file)
for repo_file in repo_files:
repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
with salt.utils.files.fopen(repo_path) as rph:
repo_data = salt.utils.yaml.safe_load(rph)
for repo in repo_data:
if repo_data[repo].get('enabled', True) is False:
continue
if repo_name is not None and repo != repo_name:
continue
callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
'''
Download files via http
'''
query = None
response = None
try:
if 'username' in repo_info:
try:
if 'password' in repo_info:
query = http.query(
dl_path, text=True,
username=repo_info['username'],
password=repo_info['password']
)
else:
raise SPMException('Auth defined, but password is not set for username: \'{0}\''
.format(repo_info['username']))
except SPMException as exc:
self.ui.error(six.text_type(exc))
else:
query = http.query(dl_path, text=True)
except SPMException as exc:
self.ui.error(six.text_type(exc))
try:
if query:
if 'SPM-METADATA' in dl_path:
response = salt.utils.yaml.safe_load(query.get('text', '{}'))
else:
response = query.get('text')
else:
raise SPMException('Response is empty, please check for Errors above.')
except SPMException as exc:
self.ui.error(six.text_type(exc))
return response
def _get_repo_metadata(self):
'''
Return cached repo metadata
'''
cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
metadata = {}
def _read_metadata(repo, repo_info):
if cache.updated('.', repo) is None:
log.warning('Updating repo metadata')
self._download_repo_metadata({})
metadata[repo] = {
'info': repo_info,
'packages': cache.fetch('.', repo),
}
self._traverse_repos(_read_metadata)
return metadata
def _create_repo(self, args):
'''
Scan a directory and create an SPM-METADATA file which describes
all of the SPM files in that directory.
'''
if len(args) < 2:
raise SPMInvocationError('A path to a directory must be specified')
if args[1] == '.':
repo_path = os.getcwdu()
else:
repo_path = args[1]
old_files = []
repo_metadata = {}
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
for spm_file in filenames:
if not spm_file.endswith('.spm'):
continue
spm_path = '{0}/{1}'.format(repo_path, spm_file)
if not tarfile.is_tarfile(spm_path):
continue
comps = spm_file.split('-')
spm_name = '-'.join(comps[:-2])
spm_fh = tarfile.open(spm_path, 'r:bz2')
formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
use_formula = True
if spm_name in repo_metadata:
# This package is already in the repo; use the latest
cur_info = repo_metadata[spm_name]['info']
new_info = formula_conf
if int(new_info['version']) == int(cur_info['version']):
# Version is the same, check release
if int(new_info['release']) < int(cur_info['release']):
# This is an old release; don't use it
use_formula = False
elif int(new_info['version']) < int(cur_info['version']):
# This is an old version; don't use it
use_formula = False
if use_formula is True:
# Ignore/archive/delete the old version
log.debug(
'%s %s-%s had been added, but %s-%s will replace it',
spm_name, cur_info['version'], cur_info['release'],
new_info['version'], new_info['release']
)
old_files.append(repo_metadata[spm_name]['filename'])
else:
# Ignore/archive/delete the new version
log.debug(
'%s %s-%s has been found, but is older than %s-%s',
spm_name, new_info['version'], new_info['release'],
cur_info['version'], cur_info['release']
)
old_files.append(spm_file)
if use_formula is True:
log.debug(
'adding %s-%s-%s to the repo',
formula_conf['name'], formula_conf['version'],
formula_conf['release']
)
repo_metadata[spm_name] = {
'info': formula_conf.copy(),
}
repo_metadata[spm_name]['filename'] = spm_file
metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
salt.utils.yaml.safe_dump(
repo_metadata,
mfh,
indent=4,
canonical=False,
default_flow_style=False,
)
log.debug('Wrote %s', metadata_filename)
for file_ in old_files:
if self.opts['spm_repo_dups'] == 'ignore':
# ignore old packages, but still only add the latest
log.debug('%s will be left in the directory', file_)
elif self.opts['spm_repo_dups'] == 'archive':
# spm_repo_archive_path is where old packages are moved
if not os.path.exists('./archive'):
try:
os.makedirs('./archive')
log.debug('%s has been archived', file_)
except IOError:
log.error('Unable to create archive directory')
try:
shutil.move(file_, './archive')
except (IOError, OSError):
log.error('Unable to archive %s', file_)
elif self.opts['spm_repo_dups'] == 'delete':
# delete old packages from the repo
try:
os.remove(file_)
log.debug('%s has been deleted', file_)
except IOError:
log.error('Unable to delete %s', file_)
except OSError:
# The file has already been deleted
pass
def _remove(self, args):
'''
Remove a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
packages = args[1:]
msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))
if not self.opts['assume_yes']:
self.ui.confirm(msg)
for package in packages:
self.ui.status('... removing {0}'.format(package))
if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
# Look at local repo index
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMInvocationError('Package {0} not installed'.format(package))
# Find files that have not changed and remove them
files = self._pkgdb_fun('list_files', package, self.db_conn)
dirs = []
for filerow in files:
if self._pkgfiles_fun('path_isdir', filerow[0]):
dirs.append(filerow[0])
continue
file_hash = hashlib.sha1()
digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
if filerow[1] == digest:
self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
else:
self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
# Clean up directories
for dir_ in sorted(dirs, reverse=True):
self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
try:
self._verbose('Removing directory {0}'.format(dir_), log.trace)
os.rmdir(dir_)
except OSError:
# Leave directories in place that still have files in them
self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
'''
Display verbose information
'''
if self.opts.get('verbose', False) is True:
self.ui.status(msg)
level(msg)
def _local_info(self, args):
'''
List info for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
comps = pkg_file.split('-')
comps = '-'.join(comps[:-2]).split('/')
name = comps[-1]
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
self.ui.status(self._get_info(formula_def))
formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
'''
Build a package
'''
if len(args) < 2:
raise SPMInvocationError('A path to a formula must be specified')
self.abspath = args[1].rstrip('/')
comps = self.abspath.split('/')
self.relpath = comps[-1]
formula_path = '{0}/FORMULA'.format(self.abspath)
if not os.path.exists(formula_path):
raise SPMPackageError('Formula file {0} not found'.format(formula_path))
with salt.utils.files.fopen(formula_path) as fp_:
formula_conf = salt.utils.yaml.safe_load(fp_)
for field in ('name', 'version', 'release', 'summary', 'description'):
if field not in formula_conf:
raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
out_path = '{0}/{1}-{2}-{3}.spm'.format(
self.opts['spm_build_dir'],
formula_conf['name'],
formula_conf['version'],
formula_conf['release'],
)
if not os.path.exists(self.opts['spm_build_dir']):
os.mkdir(self.opts['spm_build_dir'])
self.formula_conf = formula_conf
formula_tar = tarfile.open(out_path, 'w:bz2')
if 'files' in formula_conf:
# This allows files to be added to the SPM file in a specific order.
# It also allows for files to be tagged as a certain type, as with
# RPM files. This tag is ignored here, but is used when installing
# the SPM file.
if isinstance(formula_conf['files'], list):
formula_dir = tarfile.TarInfo(formula_conf['name'])
formula_dir.type = tarfile.DIRTYPE
formula_tar.addfile(formula_dir)
for file_ in formula_conf['files']:
for ftype in FILE_TYPES:
if file_.startswith('{0}|'.format(ftype)):
file_ = file_.lstrip('{0}|'.format(ftype))
formula_tar.add(
os.path.join(os.getcwd(), file_),
os.path.join(formula_conf['name'], file_),
)
else:
# If no files are specified, then the whole directory will be added.
try:
formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
except TypeError:
formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
formula_tar.close()
self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
'''
Exclude based on opts
'''
if isinstance(member, string_types):
return None
for item in self.opts['spm_build_exclude']:
if member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], item)):
return None
elif member.name.startswith('{0}/{1}'.format(self.abspath, item)):
return None
return member
def _render(self, data, formula_def):
'''
Render a [pre|post]_local_state or [pre|post]_tgt_state script
'''
# FORMULA can contain a renderer option
renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
rend = salt.loader.render(self.opts, {})
blacklist = self.opts.get('renderer_blacklist')
whitelist = self.opts.get('renderer_whitelist')
template_vars = formula_def.copy()
template_vars['opts'] = self.opts.copy()
return compile_template(
':string:',
rend,
renderer,
blacklist,
whitelist,
input_data=data,
**template_vars
)
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._get_repo_metadata
|
python
|
def _get_repo_metadata(self):
'''
Return cached repo metadata
'''
cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
metadata = {}
def _read_metadata(repo, repo_info):
if cache.updated('.', repo) is None:
log.warning('Updating repo metadata')
self._download_repo_metadata({})
metadata[repo] = {
'info': repo_info,
'packages': cache.fetch('.', repo),
}
self._traverse_repos(_read_metadata)
return metadata
|
Return cached repo metadata
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L705-L723
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None): # pylint: disable=W0231
self.ui = ui
if not opts:
opts = salt.config.spm_config(
os.path.join(syspaths.CONFIG_DIR, 'spm')
)
self.opts = opts
self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
self.files_prov = self.opts.get('spm_files_provider', 'local')
self._prep_pkgdb()
self._prep_pkgfiles()
self.db_conn = None
self.files_conn = None
self._init()
def _prep_pkgdb(self):
self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
if not self.db_conn:
self.db_conn = self._pkgdb_fun('init')
if not self.files_conn:
self.files_conn = self._pkgfiles_fun('init')
def _close(self):
if self.db_conn:
self.db_conn.close()
def run(self, args):
'''
Run the SPM command
'''
command = args[0]
try:
if command == 'install':
self._install(args)
elif command == 'local':
self._local(args)
elif command == 'repo':
self._repo(args)
elif command == 'remove':
self._remove(args)
elif command == 'build':
self._build(args)
elif command == 'update_repo':
self._download_repo_metadata(args)
elif command == 'create_repo':
self._create_repo(args)
elif command == 'files':
self._list_files(args)
elif command == 'info':
self._info(args)
elif command == 'list':
self._list(args)
elif command == 'close':
self._close()
else:
raise SPMInvocationError('Invalid command \'{0}\''.format(command))
except SPMException as exc:
self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args)
elif command == 'files':
self._list_files(args)
elif command == 'repos':
self._repo_list(args)
else:
raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
    '''
    Dispatch ``spm local <subcommand>`` to the matching handler.
    '''
    args.pop(0)
    command = args[0]
    dispatch = {
        'install': self._local_install,
        'files': self._local_list_files,
        'info': self._local_info,
    }
    handler = dispatch.get(command)
    if handler is None:
        raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
    handler(args)
def _repo(self, args):
    '''
    Dispatch ``spm repo <subcommand>`` to the matching handler.
    '''
    args.pop(0)
    command = args[0]
    if command == 'search':
        # 'search' reuses the packages listing with the search flag set
        self._repo_packages(args, search=True)
        return
    dispatch = {
        'list': self._repo_list,
        'packages': self._repo_packages,
        'update': self._download_repo_metadata,
        'create': self._create_repo,
    }
    handler = dispatch.get(command)
    if handler is None:
        raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
    handler(args)
def _repo_packages(self, args, search=False):
    '''
    Print and return ``(pkg, version, release, repo)`` tuples for every
    package in any configured repo whose name contains ``args[1]``.

    NOTE: ``search`` is accepted for the ``repo search`` subcommand but
    does not currently change the matching behaviour.
    '''
    term = args[1]
    matches = []
    metadata = self._get_repo_metadata()
    for repo_name in metadata:
        for pkg_name in metadata[repo_name]['packages']:
            if term not in pkg_name:
                continue
            info = metadata[repo_name]['packages'][pkg_name]['info']
            matches.append((pkg_name, info['version'], info['release'], repo_name))
    for match in sorted(matches):
        self.ui.status('{0}\t{1}-{2}\t{3}'.format(*match))
    return matches
def _repo_list(self, args):
    '''
    Print the name of each configured repository.

    Reached via both ``spm repo list`` and ``spm list repos``.
    '''
    for repo_name in self._get_repo_metadata():
        self.ui.status(repo_name)
def _install(self, args):
    '''
    Install a package from a repo

    ``args`` is the CLI argument list: args[0] is the command, args[1:]
    are package names or local ``.spm`` file paths.  Resolves
    dependencies, downloads any needed packages into the SPM cache, and
    installs everything via ``_install_indv_pkg``.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    # Build a masterless caller (for local state scripts) and a local
    # client (for targeted state scripts) used during installation
    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    # NOTE(review): ``cache`` is created but never used in this method
    cache = salt.cache.Cache(self.opts)
    packages = args[1:]
    # package name -> local .spm path, for file-based installs
    file_map = {}
    optional = []
    recommended = []
    to_install = []
    for pkg in packages:
        if pkg.endswith('.spm'):
            # Local package file: read its FORMULA for dependency info
            if self._pkgfiles_fun('path_exists', pkg):
                comps = pkg.split('-')
                comps = os.path.split('-'.join(comps[:-2]))
                pkg_name = comps[-1]
                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = salt.utils.yaml.safe_load(formula_ref)
                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(
                    pkg_name=pkg_name,
                    pkg_file=pkg,
                    formula_def=formula_def
                )
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
                formula_tar.close()
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            # Repo package: dependency info comes from repo metadata
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)
    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))
    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)
    repo_metadata = self._get_repo_metadata()
    dl_list = {}
    for package in to_install:
        if package in file_map:
            # Already have the file locally; install it directly
            self._install_indv_pkg(package, file_map[package])
        else:
            # Pick the best candidate across all repos: newest
            # version/release, preferring local file:// repos on ties
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        # Check package version, replace if newer version
                        if repo_ver == dl_list[package]['version']:
                            # Version is the same, check release
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                # Version and release are the same, give
                                # preference to local (file://) repos
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        dl_package = True
                    if dl_package is True:
                        # Put together download directory
                        cache_path = os.path.join(
                            self.opts['spm_cache_dir'],
                            repo
                        )
                        # Put together download paths
                        dl_url = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = os.path.join(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )
                        dl_list[package] = {
                            'version': repo_ver,
                            'release': repo_rel,
                            'source': dl_url,
                            'dest_dir': cache_path,
                            'dest_file': out_file,
                        }
    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']
        # Make sure download directory exists
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)
        # Download the package
        if dl_url.startswith('file://'):
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            # NOTE(review): ``repo_info`` here is whatever the last loop
            # iteration left bound — confirm per-repo auth is correct
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, repo_info['info']))
    # First we download everything, then we install
    for package in dl_list:
        out_file = dl_list[package]['dest_file']
        # Kick off the install
        self._install_indv_pkg(package, out_file)
    return
def _local_install(self, args, pkg_name=None):
    '''
    Install a package from a local ``.spm`` file.

    Thin wrapper around :meth:`_install`; ``pkg_name`` is accepted for
    interface compatibility but not used.
    '''
    if len(args) > 1:
        self._install(args)
    else:
        raise SPMInvocationError('A package file must be specified')
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
    '''
    Starting with one package, check all packages for dependencies.

    Returns a 3-tuple of lists:
    ``(packages to install, optional dep descriptions, recommended dep
    descriptions)``.

    Raises SPMInvocationError when the package/formula cannot be found,
    and SPMPackageError when the package is already installed (without
    ``--force``) or has unresolvable hard dependencies.
    '''
    if pkg_file and not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
    self.repo_metadata = self._get_repo_metadata()
    if not formula_def:
        # Look the formula up in the configured repos
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            if pkg_name in self.repo_metadata[repo]['packages']:
                formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
    if not formula_def:
        raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
    # Check to see if the package is already installed
    pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    pkgs_to_install = []
    if pkg_info is None or self.opts['force']:
        pkgs_to_install.append(pkg_name)
    elif pkg_info is not None and not self.opts['force']:
        raise SPMPackageError(
            'Package {0} already installed, not installing again'.format(formula_def['name'])
        )
    optional_install = []
    recommended_install = []
    if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
        # Build a name -> repo map of every available package
        self.avail_pkgs = {}
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            for pkg in self.repo_metadata[repo]['packages']:
                self.avail_pkgs[pkg] = repo
        needs, unavail, optional, recommended = self._resolve_deps(formula_def)
        if unavail:
            raise SPMPackageError(
                'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                    formula_def['name'], '\n'.join(unavail))
            )

        def _describe(dep_pkg):
            # Describe a soft dependency, tagging it if already installed.
            # The previous implementation appended each dependency twice
            # (once untagged via extend()) and queried the parent package's
            # install state instead of the dependency's; both fixed here.
            if isinstance(self._pkgdb_fun('info', dep_pkg), dict):
                return '{0} [Installed]'.format(dep_pkg)
            return dep_pkg

        optional_install.extend(_describe(dep) for dep in optional)
        recommended_install.extend(_describe(dep) for dep in recommended)
        if needs:
            pkgs_to_install.extend(needs)
    return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package from a local ``.spm`` file.

    Validates the FORMULA, registers the package and each installed file
    in the package database, and runs any pre/post local or targeted
    state scripts declared in the formula.

    Raises SPMPackageError for an invalid formula or (without
    ``--force``) when existing files would be overwritten.
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)
    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
    pkg_files = formula_tar.getmembers()
    # First pass: check for files that already exist
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )
    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    # Run the pre_tgt_state script on its targets, if present
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            # was misspelled 'timout' and therefore never applied
            timeout=self.opts['timeout'],
            data=high_data,
        )
    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]
    # Second pass: install the files
    for member in pkg_files:
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname
        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                # Directories are registered without a content hash
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)
    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    # Run the post_tgt_state script on its targets, if present
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            # was misspelled 'timout' and therefore never applied
            timeout=self.opts['timeout'],
            data=high_data,
        )
    formula_tar.close()
def _resolve_deps(self, formula_def):
'''
Return a list of packages which need to be installed, to resolve all
dependencies
'''
pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
if not isinstance(pkg_info, dict):
pkg_info = {}
can_has = {}
cant_has = []
if 'dependencies' in formula_def and formula_def['dependencies'] is None:
formula_def['dependencies'] = ''
for dep in formula_def.get('dependencies', '').split(','):
dep = dep.strip()
if not dep:
continue
if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
continue
if dep in self.avail_pkgs:
can_has[dep] = self.avail_pkgs[dep]
else:
cant_has.append(dep)
optional = formula_def.get('optional', '').split(',')
recommended = formula_def.get('recommended', '').split(',')
inspected = []
to_inspect = can_has.copy()
while to_inspect:
dep = next(six.iterkeys(to_inspect))
del to_inspect[dep]
# Don't try to resolve the same package more than once
if dep in inspected:
continue
inspected.append(dep)
repo_contents = self.repo_metadata.get(can_has[dep], {})
repo_packages = repo_contents.get('packages', {})
dep_formula = repo_packages.get(dep, {}).get('info', {})
also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
can_has.update(also_can)
cant_has = sorted(set(cant_has + also_cant))
optional = sorted(set(optional + opt_dep))
recommended = sorted(set(recommended + rec_dep))
return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided
    in the callback to them.

    Reads the main ``spm_repos_config`` file plus every ``*.repo`` file
    under its ``.d`` directory, skipping repos marked ``enabled: False``.
    When ``repo_name`` is given, only that repo is passed to
    ``callback(name, config)``.
    '''
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            # Store the full path.  The original stored only the basename
            # and then rebuilt '<config>.d/<name>' for every entry, which
            # produced a broken path for the main config file appended
            # above and for files in nested directories.
            repo_files.append(os.path.join(dirpath, repo_file))
    for repo_path in repo_files:
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
        for repo in repo_data:
            if repo_data[repo].get('enabled', True) is False:
                continue
            if repo_name is not None and repo != repo_name:
                continue
            callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http

    Returns the response body — YAML-parsed when ``dl_path`` points at
    an SPM-METADATA file, raw text otherwise — or None when the request
    failed.  Errors are reported through the UI rather than raised.
    '''
    query = None
    response = None
    try:
        if 'username' in repo_info:
            try:
                if 'password' in repo_info:
                    # Authenticated request using the repo's credentials
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    # A username without a password is a config error
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
    try:
        if query:
            if 'SPM-METADATA' in dl_path:
                # Metadata files are YAML documents
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            # Query failed above; surface it instead of returning silently
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
    return response
def _download_repo_metadata(self, args):
    '''
    Connect to all repos and download metadata

    Refreshes the cached SPM-METADATA for every configured repo (or just
    the one named in ``args[1]``) into the SPM cache.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    def _update_metadata(repo, repo_info):
        # Fetch one repo's SPM-METADATA (local file or HTTP) and cache it
        dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
        if dl_path.startswith('file://'):
            dl_path = dl_path.replace('file://', '')
            with salt.utils.files.fopen(dl_path, 'r') as rpm:
                metadata = salt.utils.yaml.safe_load(rpm)
        else:
            metadata = self._query_http(dl_path, repo_info)
        cache.store('.', repo, metadata)
    repo_name = args[1] if len(args) > 1 else None
    self._traverse_repos(_update_metadata, repo_name)
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    ``args[1]`` is the directory to scan ('.' for the current
    directory).  When several versions of the same package are present,
    only the newest is indexed; older files are handled according to the
    ``spm_repo_dups`` option (ignore/archive/delete).
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')
    if args[1] == '.':
        # os.getcwdu() only exists on Python 2; os.getcwd() works on both
        repo_path = os.getcwd()
    else:
        repo_path = args[1]
    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # Package name is everything before '-<version>-<release>'
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
            # Close the archive handle; the original leaked one per package
            spm_fh.close()
            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False
                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)
            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file
    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )
    log.debug('Wrote %s', metadata_filename)
    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove a package

    Uninstalls each package named in ``args[1:]``: unmodified files are
    deleted, locally changed files are left in place, emptied
    directories are pruned, and the package is unregistered from the
    database.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)
    for package in packages:
        self.ui.status('... removing {0}'.format(package))
        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))
        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                # Directories are removed after all files, deepest first
                dirs.append(filerow[0])
                continue
            # Only delete files whose current hash matches the one
            # recorded at install time (i.e. not locally modified)
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
        # Clean up directories
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
    '''
    Display verbose information

    Echo ``msg`` to the UI when the ``verbose`` option is enabled, and
    always emit it through the given logging function (default:
    ``log.debug``).
    '''
    if self.opts.get('verbose', False) is True:
        self.ui.status(msg)
    level(msg)
def _local_info(self, args):
    '''
    Print the formula metadata for a local ``.spm`` package file.

    Raises SPMInvocationError if no filename is given or the file does
    not exist.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')
    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
    # Derive the package name: strip '-<version>-<release>' then take
    # the final path component
    comps = pkg_file.split('-')
    comps = '-'.join(comps[:-2]).split('/')
    name = comps[-1]
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    try:
        formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
        formula_def = salt.utils.yaml.safe_load(formula_ref)
        self.ui.status(self._get_info(formula_def))
    finally:
        # Always release the tar handle, even when FORMULA is missing or
        # malformed (the original leaked it on error)
        formula_tar.close()
def _info(self, args):
    '''
    Print metadata for an installed package named in ``args[1]``.

    Raises SPMInvocationError when no package is given and
    SPMPackageError when the package is not installed.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    package = args[1]
    info = self._pkgdb_fun('info', package, self.db_conn)
    if info is None:
        raise SPMPackageError('package {0} not installed'.format(package))
    self.ui.status(self._get_info(info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
    '''
    Build an SPM package from a formula directory.

    ``args[1]`` is the path to a directory containing a FORMULA file;
    the resulting ``<name>-<version>-<release>.spm`` is written to the
    configured ``spm_build_dir``.

    Raises SPMInvocationError when no path is given and SPMPackageError
    when the FORMULA is missing or incomplete.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')
    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]
    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)
    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )
    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])
    self.formula_conf = formula_conf
    formula_tar = tarfile.open(out_path, 'w:bz2')
    if 'files' in formula_conf:
        # This allows files to be added to the SPM file in a specific order.
        # It also allows for files to be tagged as a certain type, as with
        # RPM files. This tag is ignored here, but is used when installing
        # the SPM file.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    if file_.startswith('{0}|'.format(ftype)):
                        # Strip the '<type>|' prefix by slicing.  The
                        # original used str.lstrip(), which removes a
                        # *character set* and could also eat leading
                        # characters of the file name itself.
                        file_ = file_[len(ftype) + 1:]
                        break
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # If no files are specified, then the whole directory will be added.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            # Very old tarfile versions take 'exclude' instead of 'filter'
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()
    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
    '''
    Tar filter callback: return None to skip a member matching any
    configured ``spm_build_exclude`` entry, otherwise return the member
    unchanged.
    '''
    if isinstance(member, string_types):
        # Old tarfile 'exclude' callbacks receive plain path strings
        return None
    exclude_items = self.opts['spm_build_exclude']
    prefixes = ['{0}/{1}'.format(self.formula_conf['name'], item)
                for item in exclude_items]
    prefixes += ['{0}/{1}'.format(self.abspath, item)
                 for item in exclude_items]
    if any(member.name.startswith(prefix) for prefix in prefixes):
        return None
    return member
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script

    ``data`` is the raw script text; ``formula_def`` supplies the
    template variables (plus a copy of ``opts``).  The renderer defaults
    to the formula's ``renderer`` setting, then the config's, then
    'jinja|yaml'.
    '''
    # FORMULA can contain a renderer option
    renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
    rend = salt.loader.render(self.opts, {})
    blacklist = self.opts.get('renderer_blacklist')
    whitelist = self.opts.get('renderer_whitelist')
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()
    return compile_template(
        ':string:',
        rend,
        renderer,
        blacklist,
        whitelist,
        input_data=data,
        **template_vars
    )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._create_repo
|
python
|
def _create_repo(self, args):
'''
Scan a directory and create an SPM-METADATA file which describes
all of the SPM files in that directory.
'''
if len(args) < 2:
raise SPMInvocationError('A path to a directory must be specified')
if args[1] == '.':
repo_path = os.getcwdu()
else:
repo_path = args[1]
old_files = []
repo_metadata = {}
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
for spm_file in filenames:
if not spm_file.endswith('.spm'):
continue
spm_path = '{0}/{1}'.format(repo_path, spm_file)
if not tarfile.is_tarfile(spm_path):
continue
comps = spm_file.split('-')
spm_name = '-'.join(comps[:-2])
spm_fh = tarfile.open(spm_path, 'r:bz2')
formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
use_formula = True
if spm_name in repo_metadata:
# This package is already in the repo; use the latest
cur_info = repo_metadata[spm_name]['info']
new_info = formula_conf
if int(new_info['version']) == int(cur_info['version']):
# Version is the same, check release
if int(new_info['release']) < int(cur_info['release']):
# This is an old release; don't use it
use_formula = False
elif int(new_info['version']) < int(cur_info['version']):
# This is an old version; don't use it
use_formula = False
if use_formula is True:
# Ignore/archive/delete the old version
log.debug(
'%s %s-%s had been added, but %s-%s will replace it',
spm_name, cur_info['version'], cur_info['release'],
new_info['version'], new_info['release']
)
old_files.append(repo_metadata[spm_name]['filename'])
else:
# Ignore/archive/delete the new version
log.debug(
'%s %s-%s has been found, but is older than %s-%s',
spm_name, new_info['version'], new_info['release'],
cur_info['version'], cur_info['release']
)
old_files.append(spm_file)
if use_formula is True:
log.debug(
'adding %s-%s-%s to the repo',
formula_conf['name'], formula_conf['version'],
formula_conf['release']
)
repo_metadata[spm_name] = {
'info': formula_conf.copy(),
}
repo_metadata[spm_name]['filename'] = spm_file
metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
salt.utils.yaml.safe_dump(
repo_metadata,
mfh,
indent=4,
canonical=False,
default_flow_style=False,
)
log.debug('Wrote %s', metadata_filename)
for file_ in old_files:
if self.opts['spm_repo_dups'] == 'ignore':
# ignore old packages, but still only add the latest
log.debug('%s will be left in the directory', file_)
elif self.opts['spm_repo_dups'] == 'archive':
# spm_repo_archive_path is where old packages are moved
if not os.path.exists('./archive'):
try:
os.makedirs('./archive')
log.debug('%s has been archived', file_)
except IOError:
log.error('Unable to create archive directory')
try:
shutil.move(file_, './archive')
except (IOError, OSError):
log.error('Unable to archive %s', file_)
elif self.opts['spm_repo_dups'] == 'delete':
# delete old packages from the repo
try:
os.remove(file_)
log.debug('%s has been deleted', file_)
except IOError:
log.error('Unable to delete %s', file_)
except OSError:
# The file has already been deleted
pass
|
Scan a directory and create an SPM-METADATA file which describes
all of the SPM files in that directory.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L725-L832
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None): # pylint: disable=W0231
self.ui = ui
if not opts:
opts = salt.config.spm_config(
os.path.join(syspaths.CONFIG_DIR, 'spm')
)
self.opts = opts
self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
self.files_prov = self.opts.get('spm_files_provider', 'local')
self._prep_pkgdb()
self._prep_pkgfiles()
self.db_conn = None
self.files_conn = None
self._init()
def _prep_pkgdb(self):
self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
if not self.db_conn:
self.db_conn = self._pkgdb_fun('init')
if not self.files_conn:
self.files_conn = self._pkgfiles_fun('init')
def _close(self):
if self.db_conn:
self.db_conn.close()
def run(self, args):
'''
Run the SPM command
'''
command = args[0]
try:
if command == 'install':
self._install(args)
elif command == 'local':
self._local(args)
elif command == 'repo':
self._repo(args)
elif command == 'remove':
self._remove(args)
elif command == 'build':
self._build(args)
elif command == 'update_repo':
self._download_repo_metadata(args)
elif command == 'create_repo':
self._create_repo(args)
elif command == 'files':
self._list_files(args)
elif command == 'info':
self._info(args)
elif command == 'list':
self._list(args)
elif command == 'close':
self._close()
else:
raise SPMInvocationError('Invalid command \'{0}\''.format(command))
except SPMException as exc:
self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args)
elif command == 'files':
self._list_files(args)
elif command == 'repos':
self._repo_list(args)
else:
raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'install':
self._local_install(args)
elif command == 'files':
self._local_list_files(args)
elif command == 'info':
self._local_info(args)
else:
raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
def _repo(self, args):
'''
Process repo commands
'''
args.pop(0)
command = args[0]
if command == 'list':
self._repo_list(args)
elif command == 'packages':
self._repo_packages(args)
elif command == 'search':
self._repo_packages(args, search=True)
elif command == 'update':
self._download_repo_metadata(args)
elif command == 'create':
self._create_repo(args)
else:
raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages
def _repo_list(self, args):
'''
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
'''
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
self.ui.status(repo)
    def _install(self, args):
        '''
        Install one or more packages from configured repos and/or local
        .spm files.

        args: CLI argument list; args[1:] are package names, or paths to
            .spm files (recognised by their extension).

        Raises:
            SPMInvocationError: no package given, or a named .spm file
                does not exist.
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package must be specified')
        caller_opts = self.opts.copy()
        caller_opts['file_client'] = 'local'
        # caller/client are used later by _install_indv_pkg to run the
        # pre/post state scripts declared in a FORMULA.
        self.caller = salt.client.Caller(mopts=caller_opts)
        self.client = salt.client.get_local_client(self.opts['conf_file'])
        cache = salt.cache.Cache(self.opts)  # NOTE(review): unused in this method
        packages = args[1:]
        file_map = {}      # package name -> local .spm path
        optional = []
        recommended = []
        to_install = []
        for pkg in packages:
            if pkg.endswith('.spm'):
                if self._pkgfiles_fun('path_exists', pkg):
                    # Derive the package name by dropping the trailing
                    # '-<version>-<release>.spm' components of the file name.
                    comps = pkg.split('-')
                    comps = os.path.split('-'.join(comps[:-2]))
                    pkg_name = comps[-1]
                    formula_tar = tarfile.open(pkg, 'r:bz2')
                    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                    formula_def = salt.utils.yaml.safe_load(formula_ref)
                    file_map[pkg_name] = pkg
                    to_, op_, re_ = self._check_all_deps(
                        pkg_name=pkg_name,
                        pkg_file=pkg,
                        formula_def=formula_def
                    )
                    to_install.extend(to_)
                    optional.extend(op_)
                    recommended.extend(re_)
                    formula_tar.close()
                else:
                    raise SPMInvocationError('Package file {0} not found'.format(pkg))
            else:
                to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
        # Deduplicate and drop empty entries before reporting.
        optional = set(filter(len, optional))
        if optional:
            self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
                '\n\t'.join(optional)
            ))
        recommended = set(filter(len, recommended))
        if recommended:
            self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
                '\n\t'.join(recommended)
            ))
        to_install = set(filter(len, to_install))
        msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
        if not self.opts['assume_yes']:
            self.ui.confirm(msg)
        repo_metadata = self._get_repo_metadata()
        dl_list = {}
        for package in to_install:
            if package in file_map:
                # Local .spm files are installed directly, no download step.
                self._install_indv_pkg(package, file_map[package])
            else:
                # Select the best candidate across all repos carrying the
                # package (highest version, then highest release).
                for repo in repo_metadata:
                    repo_info = repo_metadata[repo]
                    if package in repo_info['packages']:
                        dl_package = False
                        repo_ver = repo_info['packages'][package]['info']['version']
                        repo_rel = repo_info['packages'][package]['info']['release']
                        repo_url = repo_info['info']['url']
                        if package in dl_list:
                            # Check package version, replace if newer version
                            if repo_ver == dl_list[package]['version']:
                                # Version is the same, check release
                                if repo_rel > dl_list[package]['release']:
                                    dl_package = True
                                elif repo_rel == dl_list[package]['release']:
                                    # Version and release are the same, give
                                    # preference to local (file://) repos
                                    # NOTE(review): the code below actually
                                    # *replaces* a file:// selection with a
                                    # non-file:// one - the comment above
                                    # contradicts the behaviour; confirm intent.
                                    if dl_list[package]['source'].startswith('file://'):
                                        if not repo_url.startswith('file://'):
                                            dl_package = True
                            elif repo_ver > dl_list[package]['version']:
                                dl_package = True
                        else:
                            dl_package = True
                        if dl_package is True:
                            # Put together download directory
                            cache_path = os.path.join(
                                self.opts['spm_cache_dir'],
                                repo
                            )
                            # Put together download paths
                            dl_url = '{0}/{1}'.format(
                                repo_info['info']['url'],
                                repo_info['packages'][package]['filename']
                            )
                            out_file = os.path.join(
                                cache_path,
                                repo_info['packages'][package]['filename']
                            )
                            dl_list[package] = {
                                'version': repo_ver,
                                'release': repo_rel,
                                'source': dl_url,
                                'dest_dir': cache_path,
                                'dest_file': out_file,
                            }
        for package in dl_list:
            dl_url = dl_list[package]['source']
            cache_path = dl_list[package]['dest_dir']
            out_file = dl_list[package]['dest_file']
            # Make sure download directory exists
            if not os.path.exists(cache_path):
                os.makedirs(cache_path)
            # Download the package
            if dl_url.startswith('file://'):
                dl_url = dl_url.replace('file://', '')
                shutil.copyfile(dl_url, out_file)
            else:
                # NOTE(review): repo_info is whatever repo the selection loop
                # ended on, not necessarily the repo this package came from -
                # auth settings may be taken from the wrong repo; confirm.
                with salt.utils.files.fopen(out_file, 'w') as outf:
                    outf.write(self._query_http(dl_url, repo_info['info']))
        # First we download everything, then we install
        for package in dl_list:
            out_file = dl_list[package]['dest_file']
            # Kick off the install
            self._install_indv_pkg(package, out_file)
        return
def _local_install(self, args, pkg_name=None):
'''
Install a package from a file
'''
if len(args) < 2:
raise SPMInvocationError('A package file must be specified')
self._install(args)
    def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
        '''
        Starting with one package, check all packages for dependencies.

        pkg_name: name of the package to resolve.
        pkg_file: optional path to a local .spm file for the package.
        formula_def: optional FORMULA metadata; looked up in the repo
            metadata when not supplied.

        Returns a 3-tuple of lists:
            (packages to install, optional deps, recommended deps)

        Raises:
            SPMInvocationError: missing package file or unknown formula.
            SPMPackageError: package already installed (without force), or
                hard dependencies that no repo can satisfy.
        '''
        if pkg_file and not os.path.exists(pkg_file):
            raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
        self.repo_metadata = self._get_repo_metadata()
        if not formula_def:
            for repo in self.repo_metadata:
                if not isinstance(self.repo_metadata[repo]['packages'], dict):
                    continue
                if pkg_name in self.repo_metadata[repo]['packages']:
                    formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
        if not formula_def:
            raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
        # Check to see if the package is already installed
        pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
        pkgs_to_install = []
        if pkg_info is None or self.opts['force']:
            pkgs_to_install.append(pkg_name)
        elif pkg_info is not None and not self.opts['force']:
            raise SPMPackageError(
                'Package {0} already installed, not installing again'.format(formula_def['name'])
            )
        optional_install = []
        recommended_install = []
        if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
            # Index every package available in any repo so _resolve_deps can
            # distinguish available from unavailable dependencies.
            self.avail_pkgs = {}
            for repo in self.repo_metadata:
                if not isinstance(self.repo_metadata[repo]['packages'], dict):
                    continue
                for pkg in self.repo_metadata[repo]['packages']:
                    self.avail_pkgs[pkg] = repo
            needs, unavail, optional, recommended = self._resolve_deps(formula_def)
            if unavail:
                raise SPMPackageError(
                    'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                        formula_def['name'], '\n'.join(unavail))
                )
            if optional:
                optional_install.extend(optional)
                for dep_pkg in optional:
                    # NOTE(review): this queries the *parent* package, not
                    # dep_pkg - looks like it should be info(dep_pkg); confirm.
                    pkg_info = self._pkgdb_fun('info', formula_def['name'])
                    msg = dep_pkg
                    if isinstance(pkg_info, dict):
                        msg = '{0} [Installed]'.format(dep_pkg)
                    # NOTE(review): appended on top of the extend() above, so
                    # each optional dep appears twice in the returned list.
                    optional_install.append(msg)
            if recommended:
                recommended_install.extend(recommended)
                for dep_pkg in recommended:
                    # NOTE(review): same parent-vs-dep lookup concern as above.
                    pkg_info = self._pkgdb_fun('info', formula_def['name'])
                    msg = dep_pkg
                    if isinstance(pkg_info, dict):
                        msg = '{0} [Installed]'.format(dep_pkg)
                    recommended_install.append(msg)
            if needs:
                pkgs_to_install.extend(needs)
                for dep_pkg in needs:
                    pkg_info = self._pkgdb_fun('info', formula_def['name'])
                    msg = dep_pkg
                    if isinstance(pkg_info, dict):
                        msg = '{0} [Installed]'.format(dep_pkg)
                    # NOTE(review): msg is computed here but never used.
        return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
'''
Install one individual package
'''
self.ui.status('... installing {0}'.format(pkg_name))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
for field in ('version', 'release', 'summary', 'description'):
if field not in formula_def:
raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
pkg_files = formula_tar.getmembers()
# First pass: check for files that already exist
existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
if existing_files and not self.opts['force']:
raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
pkg_name, '\n'.join(existing_files))
)
# We've decided to install
self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
# Run the pre_local_state script, if present
if 'pre_local_state' in formula_def:
high_data = self._render(formula_def['pre_local_state'], formula_def)
ret = self.caller.cmd('state.high', data=high_data)
if 'pre_tgt_state' in formula_def:
log.debug('Executing pre_tgt_state script')
high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
tgt = formula_def['pre_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['pre_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
# No defaults for this in config.py; default to the current running
# user and group
if salt.utils.platform.is_windows():
uname = gname = salt.utils.win_functions.get_current_user()
uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
uid = self.opts.get('spm_uid', uname_sid)
gid = self.opts.get('spm_gid', uname_sid)
else:
uid = self.opts.get('spm_uid', os.getuid())
gid = self.opts.get('spm_gid', os.getgid())
uname = pwd.getpwuid(uid)[0]
gname = grp.getgrgid(gid)[0]
# Second pass: install the files
for member in pkg_files:
member.uid = uid
member.gid = gid
member.uname = uname
member.gname = gname
out_path = self._pkgfiles_fun('install_file',
pkg_name,
formula_tar,
member,
formula_def,
self.files_conn)
if out_path is not False:
if member.isdir():
digest = ''
else:
self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
file_hash = hashlib.sha1()
digest = self._pkgfiles_fun('hash_file',
os.path.join(out_path, member.name),
file_hash,
self.files_conn)
self._pkgdb_fun('register_file',
pkg_name,
member,
out_path,
digest,
self.db_conn)
# Run the post_local_state script, if present
if 'post_local_state' in formula_def:
log.debug('Executing post_local_state script')
high_data = self._render(formula_def['post_local_state'], formula_def)
self.caller.cmd('state.high', data=high_data)
if 'post_tgt_state' in formula_def:
log.debug('Executing post_tgt_state script')
high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
tgt = formula_def['post_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['post_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
formula_tar.close()
    def _resolve_deps(self, formula_def):
        '''
        Return a list of packages which need to be installed, to resolve all
        dependencies.

        Recursively walks the ``dependencies`` field of *formula_def* and
        of every available, not-yet-installed dependency discovered along
        the way.

        Returns a 4-tuple:
            can_has (dict): dep name -> repo that provides it
            cant_has (list): deps not available in any repo
            optional (list): accumulated ``optional`` entries
            recommended (list): accumulated ``recommended`` entries
        '''
        pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
        if not isinstance(pkg_info, dict):
            pkg_info = {}
        can_has = {}
        cant_has = []
        # A bare 'dependencies:' key in a FORMULA loads from YAML as None.
        if 'dependencies' in formula_def and formula_def['dependencies'] is None:
            formula_def['dependencies'] = ''
        for dep in formula_def.get('dependencies', '').split(','):
            dep = dep.strip()
            if not dep:
                continue
            # Already-installed dependencies are considered satisfied.
            if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
                continue
            if dep in self.avail_pkgs:
                can_has[dep] = self.avail_pkgs[dep]
            else:
                cant_has.append(dep)
        # NOTE(review): unlike 'dependencies', a bare 'optional:' or
        # 'recommended:' key (None value) would make .split() raise here.
        optional = formula_def.get('optional', '').split(',')
        recommended = formula_def.get('recommended', '').split(',')
        inspected = []
        to_inspect = can_has.copy()
        while to_inspect:
            dep = next(six.iterkeys(to_inspect))
            del to_inspect[dep]
            # Don't try to resolve the same package more than once
            if dep in inspected:
                continue
            inspected.append(dep)
            # Recurse into the dependency's own formula (empty dict when the
            # repo metadata has no entry for it).
            repo_contents = self.repo_metadata.get(can_has[dep], {})
            repo_packages = repo_contents.get('packages', {})
            dep_formula = repo_packages.get(dep, {}).get('info', {})
            also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
            can_has.update(also_can)
            cant_has = sorted(set(cant_has + also_cant))
            optional = sorted(set(optional + opt_dep))
            recommended = sorted(set(recommended + rec_dep))
        return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
'''
Traverse through all repo files and apply the functionality provided in
the callback to them
'''
repo_files = []
if os.path.exists(self.opts['spm_repos_config']):
repo_files.append(self.opts['spm_repos_config'])
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
for repo_file in filenames:
if not repo_file.endswith('.repo'):
continue
repo_files.append(repo_file)
for repo_file in repo_files:
repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
with salt.utils.files.fopen(repo_path) as rph:
repo_data = salt.utils.yaml.safe_load(rph)
for repo in repo_data:
if repo_data[repo].get('enabled', True) is False:
continue
if repo_name is not None and repo != repo_name:
continue
callback(repo, repo_data[repo])
    def _query_http(self, dl_path, repo_info):
        '''
        Download files via http.

        dl_path: URL to fetch.
        repo_info: repo configuration; when a ``username`` key is present a
            ``password`` key is required too, and both are passed through
            for authentication.

        Returns the response body as text, or the parsed YAML document when
        the URL points at an SPM-METADATA file.  All errors are reported
        via the UI and yield a None return instead of an exception.
        '''
        query = None
        response = None
        try:
            if 'username' in repo_info:
                try:
                    if 'password' in repo_info:
                        query = http.query(
                            dl_path, text=True,
                            username=repo_info['username'],
                            password=repo_info['password']
                        )
                    else:
                        # Username without password is treated as a
                        # configuration error (reported, not raised).
                        raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                           .format(repo_info['username']))
                except SPMException as exc:
                    self.ui.error(six.text_type(exc))
            else:
                query = http.query(dl_path, text=True)
        except SPMException as exc:
            self.ui.error(six.text_type(exc))
        try:
            if query:
                if 'SPM-METADATA' in dl_path:
                    # Metadata files are YAML; parse before returning.
                    response = salt.utils.yaml.safe_load(query.get('text', '{}'))
                else:
                    response = query.get('text')
            else:
                raise SPMException('Response is empty, please check for Errors above.')
        except SPMException as exc:
            self.ui.error(six.text_type(exc))
        return response
    def _download_repo_metadata(self, args):
        '''
        Connect to all repos and download metadata.

        args: optional CLI argument list; when args[1] is present, only
            that repo's metadata is refreshed.  Fetched metadata is stored
            in the SPM cache, keyed by repo name.
        '''
        cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

        def _update_metadata(repo, repo_info):
            # Fetch SPM-METADATA either straight from disk (file:// repos)
            # or over HTTP, then cache it under the repo name.
            dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
            if dl_path.startswith('file://'):
                dl_path = dl_path.replace('file://', '')
                with salt.utils.files.fopen(dl_path, 'r') as rpm:
                    metadata = salt.utils.yaml.safe_load(rpm)
            else:
                metadata = self._query_http(dl_path, repo_info)
            cache.store('.', repo, metadata)

        repo_name = args[1] if len(args) > 1 else None
        self._traverse_repos(_update_metadata, repo_name)
    def _get_repo_metadata(self):
        '''
        Return cached repo metadata as a dict:
            {repo name: {'info': repo config, 'packages': cached metadata}}

        A metadata download is triggered for any repo that has no cache
        entry yet.
        '''
        cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
        metadata = {}

        def _read_metadata(repo, repo_info):
            if cache.updated('.', repo) is None:
                log.warning('Updating repo metadata')
                # NOTE(review): this refreshes *all* repos, not just the one
                # with the missing cache entry - confirm that is intended.
                self._download_repo_metadata({})
            metadata[repo] = {
                'info': repo_info,
                'packages': cache.fetch('.', repo),
            }

        self._traverse_repos(_read_metadata)
        return metadata
    def _remove(self, args):
        '''
        Remove one or more installed packages.

        args: CLI argument list; args[1:] are the package names.

        Files whose on-disk hash still matches the hash recorded at install
        time are deleted; locally-modified files are left in place but are
        still unregistered.  Empty directories belonging to the package are
        removed afterwards, then the package itself is unregistered.

        Raises:
            SPMInvocationError: no package given, or package not installed.
            SPMDatabaseError: the package database does not exist.
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package must be specified')
        packages = args[1:]
        msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))
        if not self.opts['assume_yes']:
            self.ui.confirm(msg)
        for package in packages:
            self.ui.status('... removing {0}'.format(package))
            if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
                raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
            # Look at local repo index
            pkg_info = self._pkgdb_fun('info', package, self.db_conn)
            if pkg_info is None:
                raise SPMInvocationError('Package {0} not installed'.format(package))
            # Find files that have not changed and remove them
            files = self._pkgdb_fun('list_files', package, self.db_conn)
            dirs = []
            for filerow in files:
                if self._pkgfiles_fun('path_isdir', filerow[0]):
                    # Directories are handled after all files are gone.
                    dirs.append(filerow[0])
                    continue
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
                if filerow[1] == digest:
                    # Hash unchanged since install: safe to delete.
                    self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                    self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
                else:
                    self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
                self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
            # Clean up directories
            for dir_ in sorted(dirs, reverse=True):
                self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
                try:
                    self._verbose('Removing directory {0}'.format(dir_), log.trace)
                    os.rmdir(dir_)
                except OSError:
                    # Leave directories in place that still have files in them
                    self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
            self._pkgdb_fun('unregister_pkg', package, self.db_conn)
    def _verbose(self, msg, level=log.debug):
        '''
        Display verbose information.

        msg: the message to show/log.
        level: logging callable (e.g. ``log.trace``); always invoked, even
            when verbose mode is off.
        '''
        # Only mirror the message to the UI when verbose mode is enabled.
        if self.opts.get('verbose', False) is True:
            self.ui.status(msg)
        level(msg)
    def _local_info(self, args):
        '''
        List info for a package file.

        args: CLI arguments; args[1] is the path to a local .spm file.  The
        package name is derived from the file name by dropping the trailing
        '-<version>-<release>' components.
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package filename must be specified')
        pkg_file = args[1]
        if not os.path.exists(pkg_file):
            raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
        comps = pkg_file.split('-')
        comps = '-'.join(comps[:-2]).split('/')
        name = comps[-1]
        formula_tar = tarfile.open(pkg_file, 'r:bz2')
        formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
        formula_def = salt.utils.yaml.safe_load(formula_ref)
        self.ui.status(self._get_info(formula_def))
        formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
'''
Build a package
'''
if len(args) < 2:
raise SPMInvocationError('A path to a formula must be specified')
self.abspath = args[1].rstrip('/')
comps = self.abspath.split('/')
self.relpath = comps[-1]
formula_path = '{0}/FORMULA'.format(self.abspath)
if not os.path.exists(formula_path):
raise SPMPackageError('Formula file {0} not found'.format(formula_path))
with salt.utils.files.fopen(formula_path) as fp_:
formula_conf = salt.utils.yaml.safe_load(fp_)
for field in ('name', 'version', 'release', 'summary', 'description'):
if field not in formula_conf:
raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
out_path = '{0}/{1}-{2}-{3}.spm'.format(
self.opts['spm_build_dir'],
formula_conf['name'],
formula_conf['version'],
formula_conf['release'],
)
if not os.path.exists(self.opts['spm_build_dir']):
os.mkdir(self.opts['spm_build_dir'])
self.formula_conf = formula_conf
formula_tar = tarfile.open(out_path, 'w:bz2')
if 'files' in formula_conf:
# This allows files to be added to the SPM file in a specific order.
# It also allows for files to be tagged as a certain type, as with
# RPM files. This tag is ignored here, but is used when installing
# the SPM file.
if isinstance(formula_conf['files'], list):
formula_dir = tarfile.TarInfo(formula_conf['name'])
formula_dir.type = tarfile.DIRTYPE
formula_tar.addfile(formula_dir)
for file_ in formula_conf['files']:
for ftype in FILE_TYPES:
if file_.startswith('{0}|'.format(ftype)):
file_ = file_.lstrip('{0}|'.format(ftype))
formula_tar.add(
os.path.join(os.getcwd(), file_),
os.path.join(formula_conf['name'], file_),
)
else:
# If no files are specified, then the whole directory will be added.
try:
formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
except TypeError:
formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
formula_tar.close()
self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
'''
Exclude based on opts
'''
if isinstance(member, string_types):
return None
for item in self.opts['spm_build_exclude']:
if member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], item)):
return None
elif member.name.startswith('{0}/{1}'.format(self.abspath, item)):
return None
return member
    def _render(self, data, formula_def):
        '''
        Render a [pre|post]_local_state or [pre|post]_tgt_state script.

        data: the raw state snippet from the FORMULA.
        formula_def: FORMULA metadata; its fields (plus a copy of ``opts``)
            are exposed as template variables, and its ``renderer`` key can
            override the configured render pipeline.

        Returns the compiled highstate data structure.
        '''
        # FORMULA can contain a renderer option
        renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
        rend = salt.loader.render(self.opts, {})
        blacklist = self.opts.get('renderer_blacklist')
        whitelist = self.opts.get('renderer_whitelist')
        template_vars = formula_def.copy()
        template_vars['opts'] = self.opts.copy()
        return compile_template(
            ':string:',
            rend,
            renderer,
            blacklist,
            whitelist,
            input_data=data,
            **template_vars
        )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._remove
|
python
|
def _remove(self, args):
'''
Remove a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
packages = args[1:]
msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))
if not self.opts['assume_yes']:
self.ui.confirm(msg)
for package in packages:
self.ui.status('... removing {0}'.format(package))
if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
# Look at local repo index
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMInvocationError('Package {0} not installed'.format(package))
# Find files that have not changed and remove them
files = self._pkgdb_fun('list_files', package, self.db_conn)
dirs = []
for filerow in files:
if self._pkgfiles_fun('path_isdir', filerow[0]):
dirs.append(filerow[0])
continue
file_hash = hashlib.sha1()
digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
if filerow[1] == digest:
self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
else:
self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
# Clean up directories
for dir_ in sorted(dirs, reverse=True):
self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
try:
self._verbose('Removing directory {0}'.format(dir_), log.trace)
os.rmdir(dir_)
except OSError:
# Leave directories in place that still have files in them
self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
self._pkgdb_fun('unregister_pkg', package, self.db_conn)
|
Remove a package
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L834-L884
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
    def __init__(self, ui, opts=None):  # pylint: disable=W0231
        '''
        Set up an SPM client.

        ui: user-interface object providing ``status``/``error``/``confirm``.
        opts: SPM configuration dict; loaded from the default SPM config
            file when not supplied.
        '''
        self.ui = ui
        if not opts:
            opts = salt.config.spm_config(
                os.path.join(syspaths.CONFIG_DIR, 'spm')
            )
        self.opts = opts
        # Provider names select which loader-backed backends implement the
        # package database and the package-file operations.
        self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
        self.files_prov = self.opts.get('spm_files_provider', 'local')
        self._prep_pkgdb()
        self._prep_pkgfiles()
        # Connections are opened lazily by _init().
        self.db_conn = None
        self.files_conn = None
        self._init()
    def _prep_pkgdb(self):
        # Load the package-database provider modules via the salt loader.
        self.pkgdb = salt.loader.pkgdb(self.opts)
    def _prep_pkgfiles(self):
        # Load the package-files provider modules via the salt loader.
        self.pkgfiles = salt.loader.pkgfiles(self.opts)
    def _init(self):
        # Open provider connections if they are not already open; each
        # provider's init() returns its connection/handle object.
        if not self.db_conn:
            self.db_conn = self._pkgdb_fun('init')
        if not self.files_conn:
            self.files_conn = self._pkgfiles_fun('init')
    def _close(self):
        # Close the package-database connection if one was opened.
        if self.db_conn:
            self.db_conn.close()
def run(self, args):
'''
Run the SPM command
'''
command = args[0]
try:
if command == 'install':
self._install(args)
elif command == 'local':
self._local(args)
elif command == 'repo':
self._repo(args)
elif command == 'remove':
self._remove(args)
elif command == 'build':
self._build(args)
elif command == 'update_repo':
self._download_repo_metadata(args)
elif command == 'create_repo':
self._create_repo(args)
elif command == 'files':
self._list_files(args)
elif command == 'info':
self._info(args)
elif command == 'list':
self._list(args)
elif command == 'close':
self._close()
else:
raise SPMInvocationError('Invalid command \'{0}\''.format(command))
except SPMException as exc:
self.ui.error(six.text_type(exc))
    def _pkgdb_fun(self, func, *args, **kwargs):
        '''
        Call *func* on the configured package-database provider, trying the
        attribute interface first and falling back to the loader-dict form
        keyed by ``'<prov>.<func>'``.
        '''
        try:
            return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
        except AttributeError:
            return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
    def _pkgfiles_fun(self, func, *args, **kwargs):
        '''
        Call *func* on the configured package-files provider, trying the
        attribute interface first and falling back to the loader-dict form
        keyed by ``'<prov>.<func>'``.
        '''
        try:
            return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
        except AttributeError:
            return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
    def _list(self, args):
        '''
        Process ``spm list`` subcommands (packages/files/repos).

        args[1] selects the subcommand; the list is shifted left by one
        before being handed to the handler.
        '''
        args.pop(0)
        command = args[0]
        if command == 'packages':
            self._list_packages(args)
        elif command == 'files':
            self._list_files(args)
        elif command == 'repos':
            self._repo_list(args)
        else:
            raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
    def _local(self, args):
        '''
        Process ``spm local`` subcommands (install/files/info).

        args[1] selects the subcommand; the list is shifted left by one
        before being handed to the handler.
        '''
        args.pop(0)
        command = args[0]
        if command == 'install':
            self._local_install(args)
        elif command == 'files':
            self._local_list_files(args)
        elif command == 'info':
            self._local_info(args)
        else:
            raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
    def _repo(self, args):
        '''
        Process ``spm repo`` subcommands (list/packages/search/update/create).

        args[1] selects the subcommand; ``search`` reuses the packages
        handler with ``search=True``.
        '''
        args.pop(0)
        command = args[0]
        if command == 'list':
            self._repo_list(args)
        elif command == 'packages':
            self._repo_packages(args)
        elif command == 'search':
            self._repo_packages(args, search=True)
        elif command == 'update':
            self._download_repo_metadata(args)
        elif command == 'create':
            self._create_repo(args)
        else:
            raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
    def _repo_packages(self, args, search=False):
        '''
        List packages for one or more configured repos.

        Prints every package whose name contains ``args[1]`` as
        ``name<TAB>version-release<TAB>repo`` and returns the list of
        ``(name, version, release, repo)`` tuples.

        NOTE(review): ``search`` is accepted but never used - search and
        packages currently behave identically.
        '''
        packages = []
        repo_metadata = self._get_repo_metadata()
        for repo in repo_metadata:
            for pkg in repo_metadata[repo]['packages']:
                if args[1] in pkg:
                    version = repo_metadata[repo]['packages'][pkg]['info']['version']
                    release = repo_metadata[repo]['packages'][pkg]['info']['release']
                    packages.append((pkg, version, release, repo))
        for pkg in sorted(packages):
            self.ui.status(
                '{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
            )
        return packages
    def _repo_list(self, args):
        '''
        List configured repos.

        This can be called either as a ``repo`` command or a ``list``
        command; it prints one repo name per line.
        '''
        repo_metadata = self._get_repo_metadata()
        for repo in repo_metadata:
            self.ui.status(repo)
    def _install(self, args):
        '''
        Install one or more packages from configured repos and/or local
        .spm files.

        args: CLI argument list; args[1:] are package names, or paths to
            .spm files (recognised by their extension).

        Raises:
            SPMInvocationError: no package given, or a named .spm file
                does not exist.
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package must be specified')
        caller_opts = self.opts.copy()
        caller_opts['file_client'] = 'local'
        # caller/client are used later by _install_indv_pkg to run the
        # pre/post state scripts declared in a FORMULA.
        self.caller = salt.client.Caller(mopts=caller_opts)
        self.client = salt.client.get_local_client(self.opts['conf_file'])
        cache = salt.cache.Cache(self.opts)  # NOTE(review): unused in this method
        packages = args[1:]
        file_map = {}      # package name -> local .spm path
        optional = []
        recommended = []
        to_install = []
        for pkg in packages:
            if pkg.endswith('.spm'):
                if self._pkgfiles_fun('path_exists', pkg):
                    # Derive the package name by dropping the trailing
                    # '-<version>-<release>.spm' components of the file name.
                    comps = pkg.split('-')
                    comps = os.path.split('-'.join(comps[:-2]))
                    pkg_name = comps[-1]
                    formula_tar = tarfile.open(pkg, 'r:bz2')
                    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                    formula_def = salt.utils.yaml.safe_load(formula_ref)
                    file_map[pkg_name] = pkg
                    to_, op_, re_ = self._check_all_deps(
                        pkg_name=pkg_name,
                        pkg_file=pkg,
                        formula_def=formula_def
                    )
                    to_install.extend(to_)
                    optional.extend(op_)
                    recommended.extend(re_)
                    formula_tar.close()
                else:
                    raise SPMInvocationError('Package file {0} not found'.format(pkg))
            else:
                to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
        # Deduplicate and drop empty entries before reporting.
        optional = set(filter(len, optional))
        if optional:
            self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
                '\n\t'.join(optional)
            ))
        recommended = set(filter(len, recommended))
        if recommended:
            self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
                '\n\t'.join(recommended)
            ))
        to_install = set(filter(len, to_install))
        msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
        if not self.opts['assume_yes']:
            self.ui.confirm(msg)
        repo_metadata = self._get_repo_metadata()
        dl_list = {}
        for package in to_install:
            if package in file_map:
                # Local .spm files are installed directly, no download step.
                self._install_indv_pkg(package, file_map[package])
            else:
                # Select the best candidate across all repos carrying the
                # package (highest version, then highest release).
                for repo in repo_metadata:
                    repo_info = repo_metadata[repo]
                    if package in repo_info['packages']:
                        dl_package = False
                        repo_ver = repo_info['packages'][package]['info']['version']
                        repo_rel = repo_info['packages'][package]['info']['release']
                        repo_url = repo_info['info']['url']
                        if package in dl_list:
                            # Check package version, replace if newer version
                            if repo_ver == dl_list[package]['version']:
                                # Version is the same, check release
                                if repo_rel > dl_list[package]['release']:
                                    dl_package = True
                                elif repo_rel == dl_list[package]['release']:
                                    # Version and release are the same, give
                                    # preference to local (file://) repos
                                    # NOTE(review): the code below actually
                                    # *replaces* a file:// selection with a
                                    # non-file:// one - the comment above
                                    # contradicts the behaviour; confirm intent.
                                    if dl_list[package]['source'].startswith('file://'):
                                        if not repo_url.startswith('file://'):
                                            dl_package = True
                            elif repo_ver > dl_list[package]['version']:
                                dl_package = True
                        else:
                            dl_package = True
                        if dl_package is True:
                            # Put together download directory
                            cache_path = os.path.join(
                                self.opts['spm_cache_dir'],
                                repo
                            )
                            # Put together download paths
                            dl_url = '{0}/{1}'.format(
                                repo_info['info']['url'],
                                repo_info['packages'][package]['filename']
                            )
                            out_file = os.path.join(
                                cache_path,
                                repo_info['packages'][package]['filename']
                            )
                            dl_list[package] = {
                                'version': repo_ver,
                                'release': repo_rel,
                                'source': dl_url,
                                'dest_dir': cache_path,
                                'dest_file': out_file,
                            }
        for package in dl_list:
            dl_url = dl_list[package]['source']
            cache_path = dl_list[package]['dest_dir']
            out_file = dl_list[package]['dest_file']
            # Make sure download directory exists
            if not os.path.exists(cache_path):
                os.makedirs(cache_path)
            # Download the package
            if dl_url.startswith('file://'):
                dl_url = dl_url.replace('file://', '')
                shutil.copyfile(dl_url, out_file)
            else:
                # NOTE(review): repo_info is whatever repo the selection loop
                # ended on, not necessarily the repo this package came from -
                # auth settings may be taken from the wrong repo; confirm.
                with salt.utils.files.fopen(out_file, 'w') as outf:
                    outf.write(self._query_http(dl_url, repo_info['info']))
        # First we download everything, then we install
        for package in dl_list:
            out_file = dl_list[package]['dest_file']
            # Kick off the install
            self._install_indv_pkg(package, out_file)
        return
    def _local_install(self, args, pkg_name=None):
        '''
        Install a package from a file.

        Validates that a file argument is present and defers to the shared
        ``_install`` path, which recognises .spm file paths itself.
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package file must be specified')
        self._install(args)
    def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
        '''
        Starting with one package, check all packages for dependencies

        Resolves the formula for ``pkg_name`` (from ``formula_def`` if given,
        otherwise by searching the configured repos), refuses to continue if
        the package is already installed (unless ``force`` is set), then
        resolves the dependency tree.

        Returns a 3-tuple of lists:
        ``(pkgs_to_install, optional_install, recommended_install)``.
        '''
        if pkg_file and not os.path.exists(pkg_file):
            raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
        self.repo_metadata = self._get_repo_metadata()
        if not formula_def:
            # Last repo searched wins if the package exists in several repos
            for repo in self.repo_metadata:
                if not isinstance(self.repo_metadata[repo]['packages'], dict):
                    continue
                if pkg_name in self.repo_metadata[repo]['packages']:
                    formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
        if not formula_def:
            raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
        # Check to see if the package is already installed
        pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
        pkgs_to_install = []
        if pkg_info is None or self.opts['force']:
            pkgs_to_install.append(pkg_name)
        elif pkg_info is not None and not self.opts['force']:
            raise SPMPackageError(
                'Package {0} already installed, not installing again'.format(formula_def['name'])
            )
        optional_install = []
        recommended_install = []
        if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
            # Build a name -> repo map of every package available anywhere
            self.avail_pkgs = {}
            for repo in self.repo_metadata:
                if not isinstance(self.repo_metadata[repo]['packages'], dict):
                    continue
                for pkg in self.repo_metadata[repo]['packages']:
                    self.avail_pkgs[pkg] = repo
            needs, unavail, optional, recommended = self._resolve_deps(formula_def)
            if unavail:
                raise SPMPackageError(
                    'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                        formula_def['name'], '\n'.join(unavail))
                )
            # NOTE(review): each optional dep ends up in the result twice --
            # once from extend() below and once as ``msg`` in the loop.  Also
            # the "[Installed]" check queries the *parent* formula's name
            # (without db_conn), not dep_pkg; confirm whether this is intended.
            if optional:
                optional_install.extend(optional)
                for dep_pkg in optional:
                    pkg_info = self._pkgdb_fun('info', formula_def['name'])
                    msg = dep_pkg
                    if isinstance(pkg_info, dict):
                        msg = '{0} [Installed]'.format(dep_pkg)
                    optional_install.append(msg)
            # NOTE(review): same duplicated-entry pattern as the optional loop
            if recommended:
                recommended_install.extend(recommended)
                for dep_pkg in recommended:
                    pkg_info = self._pkgdb_fun('info', formula_def['name'])
                    msg = dep_pkg
                    if isinstance(pkg_info, dict):
                        msg = '{0} [Installed]'.format(dep_pkg)
                    recommended_install.append(msg)
            if needs:
                pkgs_to_install.extend(needs)
                # NOTE(review): ``msg`` is computed here but never used; this
                # loop only re-queries the package DB per dependency.
                for dep_pkg in needs:
                    pkg_info = self._pkgdb_fun('info', formula_def['name'])
                    msg = dep_pkg
                    if isinstance(pkg_info, dict):
                        msg = '{0} [Installed]'.format(dep_pkg)
        return pkgs_to_install, optional_install, recommended_install
    def _install_indv_pkg(self, pkg_name, pkg_file):
        '''
        Install one individual package

        Reads the FORMULA out of the .spm tarball at ``pkg_file``, validates
        required fields, registers the package, runs optional pre/post
        local/target states, and installs each tar member via the configured
        pkgfiles provider, registering every installed file in the package DB.
        '''
        self.ui.status('... installing {0}'.format(pkg_name))
        formula_tar = tarfile.open(pkg_file, 'r:bz2')
        formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
        formula_def = salt.utils.yaml.safe_load(formula_ref)
        for field in ('version', 'release', 'summary', 'description'):
            if field not in formula_def:
                raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
        pkg_files = formula_tar.getmembers()
        # First pass: check for files that already exist
        existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
        if existing_files and not self.opts['force']:
            raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
                pkg_name, '\n'.join(existing_files))
            )
        # We've decided to install
        self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
        # Run the pre_local_state script, if present
        if 'pre_local_state' in formula_def:
            high_data = self._render(formula_def['pre_local_state'], formula_def)
            ret = self.caller.cmd('state.high', data=high_data)
        if 'pre_tgt_state' in formula_def:
            log.debug('Executing pre_tgt_state script')
            high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
            tgt = formula_def['pre_tgt_state']['tgt']
            # NOTE(review): 'timout' looks like a typo for 'timeout'; as
            # written it is forwarded as an extra keyword argument and the
            # job return value (ret) is ignored -- confirm before changing.
            ret = self.client.run_job(
                tgt=formula_def['pre_tgt_state']['tgt'],
                fun='state.high',
                tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
                timout=self.opts['timeout'],
                data=high_data,
            )
        # No defaults for this in config.py; default to the current running
        # user and group
        if salt.utils.platform.is_windows():
            uname = gname = salt.utils.win_functions.get_current_user()
            uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
            uid = self.opts.get('spm_uid', uname_sid)
            gid = self.opts.get('spm_gid', uname_sid)
        else:
            uid = self.opts.get('spm_uid', os.getuid())
            gid = self.opts.get('spm_gid', os.getgid())
            uname = pwd.getpwuid(uid)[0]
            gname = grp.getgrgid(gid)[0]
        # Second pass: install the files, forcing ownership on each member
        for member in pkg_files:
            member.uid = uid
            member.gid = gid
            member.uname = uname
            member.gname = gname
            out_path = self._pkgfiles_fun('install_file',
                                          pkg_name,
                                          formula_tar,
                                          member,
                                          formula_def,
                                          self.files_conn)
            # install_file returns False for members that were skipped
            if out_path is not False:
                if member.isdir():
                    digest = ''
                else:
                    self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                    file_hash = hashlib.sha1()
                    digest = self._pkgfiles_fun('hash_file',
                                                os.path.join(out_path, member.name),
                                                file_hash,
                                                self.files_conn)
                self._pkgdb_fun('register_file',
                                pkg_name,
                                member,
                                out_path,
                                digest,
                                self.db_conn)
        # Run the post_local_state script, if present
        if 'post_local_state' in formula_def:
            log.debug('Executing post_local_state script')
            high_data = self._render(formula_def['post_local_state'], formula_def)
            self.caller.cmd('state.high', data=high_data)
        if 'post_tgt_state' in formula_def:
            log.debug('Executing post_tgt_state script')
            high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
            tgt = formula_def['post_tgt_state']['tgt']
            # NOTE(review): same 'timout' spelling as the pre_tgt_state call
            ret = self.client.run_job(
                tgt=formula_def['post_tgt_state']['tgt'],
                fun='state.high',
                tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
                timout=self.opts['timeout'],
                data=high_data,
            )
        formula_tar.close()
    def _resolve_deps(self, formula_def):
        '''
        Return a list of packages which need to be installed, to resolve all
        dependencies

        Walks the comma-separated ``dependencies`` of ``formula_def``
        recursively, partitioning them into packages available in a repo
        (``can_has``, a dep -> repo map) and unavailable ones (``cant_has``).
        Also accumulates the ``optional`` and ``recommended`` lists.

        Returns ``(can_has, cant_has, optional, recommended)``.
        '''
        # NOTE(review): pkg_info is fetched and normalized here but never
        # used afterwards in this function.
        pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
        if not isinstance(pkg_info, dict):
            pkg_info = {}
        can_has = {}
        cant_has = []
        if 'dependencies' in formula_def and formula_def['dependencies'] is None:
            formula_def['dependencies'] = ''
        for dep in formula_def.get('dependencies', '').split(','):
            dep = dep.strip()
            if not dep:
                continue
            # Skip deps that are already installed
            if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
                continue
            if dep in self.avail_pkgs:
                can_has[dep] = self.avail_pkgs[dep]
            else:
                cant_has.append(dep)
        optional = formula_def.get('optional', '').split(',')
        recommended = formula_def.get('recommended', '').split(',')
        inspected = []
        to_inspect = can_has.copy()
        while to_inspect:
            dep = next(six.iterkeys(to_inspect))
            del to_inspect[dep]
            # Don't try to resolve the same package more than once
            if dep in inspected:
                continue
            inspected.append(dep)
            repo_contents = self.repo_metadata.get(can_has[dep], {})
            repo_packages = repo_contents.get('packages', {})
            dep_formula = repo_packages.get(dep, {}).get('info', {})
            # NOTE(review): if the dep's info is missing, dep_formula is {}
            # and the recursive call will KeyError on ['name'] -- confirm.
            also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
            can_has.update(also_can)
            cant_has = sorted(set(cant_has + also_cant))
            optional = sorted(set(optional + opt_dep))
            recommended = sorted(set(recommended + rec_dep))
        return can_has, cant_has, optional, recommended
    def _traverse_repos(self, callback, repo_name=None):
        '''
        Traverse through all repo files and apply the functionality provided in
        the callback to them

        Reads the main ``spm_repos_config`` file plus every ``*.repo`` file in
        the matching ``.d`` directory, skips repos with ``enabled: False``,
        and invokes ``callback(repo, repo_config)`` for each repo (or only for
        ``repo_name`` when given).
        '''
        repo_files = []
        if os.path.exists(self.opts['spm_repos_config']):
            # NOTE(review): the *full path* of the main config is appended
            # here, but below it is re-joined as '<config>.d/<full path>',
            # which looks wrong -- confirm how the main config is meant to
            # be read.
            repo_files.append(self.opts['spm_repos_config'])
        for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
            for repo_file in filenames:
                if not repo_file.endswith('.repo'):
                    continue
                # Only the bare filename is stored (dirpath is dropped), so
                # .repo files in nested subdirectories would not resolve
                repo_files.append(repo_file)
        for repo_file in repo_files:
            repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
            with salt.utils.files.fopen(repo_path) as rph:
                repo_data = salt.utils.yaml.safe_load(rph)
            for repo in repo_data:
                if repo_data[repo].get('enabled', True) is False:
                    continue
                if repo_name is not None and repo != repo_name:
                    continue
                callback(repo, repo_data[repo])
    def _query_http(self, dl_path, repo_info):
        '''
        Download files via http

        Fetches ``dl_path``, using basic auth when ``repo_info`` carries a
        username (a username without a password is reported as an error).
        SPM-METADATA responses are parsed as YAML; anything else is returned
        as raw text.  All SPM errors are reported through the UI and result
        in ``None`` being returned instead of raising.
        '''
        query = None
        response = None
        try:
            if 'username' in repo_info:
                try:
                    if 'password' in repo_info:
                        query = http.query(
                            dl_path, text=True,
                            username=repo_info['username'],
                            password=repo_info['password']
                        )
                    else:
                        raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                           .format(repo_info['username']))
                except SPMException as exc:
                    self.ui.error(six.text_type(exc))
            else:
                query = http.query(dl_path, text=True)
        except SPMException as exc:
            self.ui.error(six.text_type(exc))
        try:
            if query:
                if 'SPM-METADATA' in dl_path:
                    # Metadata downloads are parsed; '{}' guards a missing body
                    response = salt.utils.yaml.safe_load(query.get('text', '{}'))
                else:
                    response = query.get('text')
            else:
                raise SPMException('Response is empty, please check for Errors above.')
        except SPMException as exc:
            self.ui.error(six.text_type(exc))
        return response
    def _download_repo_metadata(self, args):
        '''
        Connect to all repos and download metadata

        For each configured repo (or only ``args[1]`` when given), fetches
        its SPM-METADATA file -- directly from disk for ``file://`` repos,
        via HTTP otherwise -- and stores the parsed result in the SPM cache.
        '''
        cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

        def _update_metadata(repo, repo_info):
            # Per-repo callback used by _traverse_repos
            dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
            if dl_path.startswith('file://'):
                dl_path = dl_path.replace('file://', '')
                with salt.utils.files.fopen(dl_path, 'r') as rpm:
                    metadata = salt.utils.yaml.safe_load(rpm)
            else:
                metadata = self._query_http(dl_path, repo_info)
            cache.store('.', repo, metadata)

        repo_name = args[1] if len(args) > 1 else None
        self._traverse_repos(_update_metadata, repo_name)
    def _get_repo_metadata(self):
        '''
        Return cached repo metadata

        Returns a dict keyed by repo name; each value holds the repo's
        configuration under ``info`` and its cached package metadata under
        ``packages``.  A repo with no cache entry triggers a full metadata
        download first.
        '''
        cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
        metadata = {}

        def _read_metadata(repo, repo_info):
            # Per-repo callback used by _traverse_repos
            if cache.updated('.', repo) is None:
                log.warning('Updating repo metadata')
                self._download_repo_metadata({})
            metadata[repo] = {
                'info': repo_info,
                'packages': cache.fetch('.', repo),
            }

        self._traverse_repos(_read_metadata)
        return metadata
def _create_repo(self, args):
'''
Scan a directory and create an SPM-METADATA file which describes
all of the SPM files in that directory.
'''
if len(args) < 2:
raise SPMInvocationError('A path to a directory must be specified')
if args[1] == '.':
repo_path = os.getcwdu()
else:
repo_path = args[1]
old_files = []
repo_metadata = {}
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
for spm_file in filenames:
if not spm_file.endswith('.spm'):
continue
spm_path = '{0}/{1}'.format(repo_path, spm_file)
if not tarfile.is_tarfile(spm_path):
continue
comps = spm_file.split('-')
spm_name = '-'.join(comps[:-2])
spm_fh = tarfile.open(spm_path, 'r:bz2')
formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
use_formula = True
if spm_name in repo_metadata:
# This package is already in the repo; use the latest
cur_info = repo_metadata[spm_name]['info']
new_info = formula_conf
if int(new_info['version']) == int(cur_info['version']):
# Version is the same, check release
if int(new_info['release']) < int(cur_info['release']):
# This is an old release; don't use it
use_formula = False
elif int(new_info['version']) < int(cur_info['version']):
# This is an old version; don't use it
use_formula = False
if use_formula is True:
# Ignore/archive/delete the old version
log.debug(
'%s %s-%s had been added, but %s-%s will replace it',
spm_name, cur_info['version'], cur_info['release'],
new_info['version'], new_info['release']
)
old_files.append(repo_metadata[spm_name]['filename'])
else:
# Ignore/archive/delete the new version
log.debug(
'%s %s-%s has been found, but is older than %s-%s',
spm_name, new_info['version'], new_info['release'],
cur_info['version'], cur_info['release']
)
old_files.append(spm_file)
if use_formula is True:
log.debug(
'adding %s-%s-%s to the repo',
formula_conf['name'], formula_conf['version'],
formula_conf['release']
)
repo_metadata[spm_name] = {
'info': formula_conf.copy(),
}
repo_metadata[spm_name]['filename'] = spm_file
metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
salt.utils.yaml.safe_dump(
repo_metadata,
mfh,
indent=4,
canonical=False,
default_flow_style=False,
)
log.debug('Wrote %s', metadata_filename)
for file_ in old_files:
if self.opts['spm_repo_dups'] == 'ignore':
# ignore old packages, but still only add the latest
log.debug('%s will be left in the directory', file_)
elif self.opts['spm_repo_dups'] == 'archive':
# spm_repo_archive_path is where old packages are moved
if not os.path.exists('./archive'):
try:
os.makedirs('./archive')
log.debug('%s has been archived', file_)
except IOError:
log.error('Unable to create archive directory')
try:
shutil.move(file_, './archive')
except (IOError, OSError):
log.error('Unable to archive %s', file_)
elif self.opts['spm_repo_dups'] == 'delete':
# delete old packages from the repo
try:
os.remove(file_)
log.debug('%s has been deleted', file_)
except IOError:
log.error('Unable to delete %s', file_)
except OSError:
# The file has already been deleted
pass
def _verbose(self, msg, level=log.debug):
'''
Display verbose information
'''
if self.opts.get('verbose', False) is True:
self.ui.status(msg)
level(msg)
def _local_info(self, args):
'''
List info for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
comps = pkg_file.split('-')
comps = '-'.join(comps[:-2]).split('/')
name = comps[-1]
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
self.ui.status(self._get_info(formula_def))
formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
'''
Build a package
'''
if len(args) < 2:
raise SPMInvocationError('A path to a formula must be specified')
self.abspath = args[1].rstrip('/')
comps = self.abspath.split('/')
self.relpath = comps[-1]
formula_path = '{0}/FORMULA'.format(self.abspath)
if not os.path.exists(formula_path):
raise SPMPackageError('Formula file {0} not found'.format(formula_path))
with salt.utils.files.fopen(formula_path) as fp_:
formula_conf = salt.utils.yaml.safe_load(fp_)
for field in ('name', 'version', 'release', 'summary', 'description'):
if field not in formula_conf:
raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
out_path = '{0}/{1}-{2}-{3}.spm'.format(
self.opts['spm_build_dir'],
formula_conf['name'],
formula_conf['version'],
formula_conf['release'],
)
if not os.path.exists(self.opts['spm_build_dir']):
os.mkdir(self.opts['spm_build_dir'])
self.formula_conf = formula_conf
formula_tar = tarfile.open(out_path, 'w:bz2')
if 'files' in formula_conf:
# This allows files to be added to the SPM file in a specific order.
# It also allows for files to be tagged as a certain type, as with
# RPM files. This tag is ignored here, but is used when installing
# the SPM file.
if isinstance(formula_conf['files'], list):
formula_dir = tarfile.TarInfo(formula_conf['name'])
formula_dir.type = tarfile.DIRTYPE
formula_tar.addfile(formula_dir)
for file_ in formula_conf['files']:
for ftype in FILE_TYPES:
if file_.startswith('{0}|'.format(ftype)):
file_ = file_.lstrip('{0}|'.format(ftype))
formula_tar.add(
os.path.join(os.getcwd(), file_),
os.path.join(formula_conf['name'], file_),
)
else:
# If no files are specified, then the whole directory will be added.
try:
formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
except TypeError:
formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
formula_tar.close()
self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
'''
Exclude based on opts
'''
if isinstance(member, string_types):
return None
for item in self.opts['spm_build_exclude']:
if member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], item)):
return None
elif member.name.startswith('{0}/{1}'.format(self.abspath, item)):
return None
return member
def _render(self, data, formula_def):
'''
Render a [pre|post]_local_state or [pre|post]_tgt_state script
'''
# FORMULA can contain a renderer option
renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
rend = salt.loader.render(self.opts, {})
blacklist = self.opts.get('renderer_blacklist')
whitelist = self.opts.get('renderer_whitelist')
template_vars = formula_def.copy()
template_vars['opts'] = self.opts.copy()
return compile_template(
':string:',
rend,
renderer,
blacklist,
whitelist,
input_data=data,
**template_vars
)
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._verbose
|
python
|
def _verbose(self, msg, level=log.debug):
'''
Display verbose information
'''
if self.opts.get('verbose', False) is True:
self.ui.status(msg)
level(msg)
|
Display verbose information
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L886-L892
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
    def __init__(self, ui, opts=None):  # pylint: disable=W0231
        '''
        Set up the SPM client.

        ui is the user-interaction object (status/error/confirm); opts is a
        Salt SPM configuration dict, loaded from the default SPM config file
        when not supplied.  Loads the configured package-database and
        package-files providers and opens their connections.
        '''
        self.ui = ui
        if not opts:
            opts = salt.config.spm_config(
                os.path.join(syspaths.CONFIG_DIR, 'spm')
            )
        self.opts = opts
        # Provider names used to index the loader dictionaries
        self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
        self.files_prov = self.opts.get('spm_files_provider', 'local')
        # Providers must be loaded before _init() can open connections
        self._prep_pkgdb()
        self._prep_pkgfiles()
        self.db_conn = None
        self.files_conn = None
        self._init()
    def _prep_pkgdb(self):
        # Load the package-database provider modules via the Salt loader
        self.pkgdb = salt.loader.pkgdb(self.opts)
    def _prep_pkgfiles(self):
        # Load the package-files provider modules via the Salt loader
        self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
if not self.db_conn:
self.db_conn = self._pkgdb_fun('init')
if not self.files_conn:
self.files_conn = self._pkgfiles_fun('init')
def _close(self):
if self.db_conn:
self.db_conn.close()
def run(self, args):
'''
Run the SPM command
'''
command = args[0]
try:
if command == 'install':
self._install(args)
elif command == 'local':
self._local(args)
elif command == 'repo':
self._repo(args)
elif command == 'remove':
self._remove(args)
elif command == 'build':
self._build(args)
elif command == 'update_repo':
self._download_repo_metadata(args)
elif command == 'create_repo':
self._create_repo(args)
elif command == 'files':
self._list_files(args)
elif command == 'info':
self._info(args)
elif command == 'list':
self._list(args)
elif command == 'close':
self._close()
else:
raise SPMInvocationError('Invalid command \'{0}\''.format(command))
except SPMException as exc:
self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args)
elif command == 'files':
self._list_files(args)
elif command == 'repos':
self._repo_list(args)
else:
raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'install':
self._local_install(args)
elif command == 'files':
self._local_list_files(args)
elif command == 'info':
self._local_info(args)
else:
raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
def _repo(self, args):
'''
Process repo commands
'''
args.pop(0)
command = args[0]
if command == 'list':
self._repo_list(args)
elif command == 'packages':
self._repo_packages(args)
elif command == 'search':
self._repo_packages(args, search=True)
elif command == 'update':
self._download_repo_metadata(args)
elif command == 'create':
self._create_repo(args)
else:
raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages
def _repo_list(self, args):
'''
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
'''
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
self.ui.status(repo)
    def _install(self, args):
        '''
        Install a package from a repo

        ``args[1:]`` may mix package names and ``.spm`` file paths.  For
        every argument the dependency tree is resolved first; then local
        files are installed directly and everything else is downloaded from
        the best-matching repo before being installed.
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package must be specified')
        # Local caller/client are used later by the pre/post state hooks
        caller_opts = self.opts.copy()
        caller_opts['file_client'] = 'local'
        self.caller = salt.client.Caller(mopts=caller_opts)
        self.client = salt.client.get_local_client(self.opts['conf_file'])
        # NOTE(review): this cache object is never used in this function
        cache = salt.cache.Cache(self.opts)
        packages = args[1:]
        file_map = {}
        optional = []
        recommended = []
        to_install = []
        for pkg in packages:
            if pkg.endswith('.spm'):
                if self._pkgfiles_fun('path_exists', pkg):
                    # Strip '<version>-<release>' then take the basename to
                    # recover the package name
                    comps = pkg.split('-')
                    comps = os.path.split('-'.join(comps[:-2]))
                    pkg_name = comps[-1]
                    formula_tar = tarfile.open(pkg, 'r:bz2')
                    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                    formula_def = salt.utils.yaml.safe_load(formula_ref)
                    file_map[pkg_name] = pkg
                    to_, op_, re_ = self._check_all_deps(
                        pkg_name=pkg_name,
                        pkg_file=pkg,
                        formula_def=formula_def
                    )
                    to_install.extend(to_)
                    optional.extend(op_)
                    recommended.extend(re_)
                    formula_tar.close()
                else:
                    raise SPMInvocationError('Package file {0} not found'.format(pkg))
            else:
                to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
        # filter(len, ...) drops empty-string entries before deduplication
        optional = set(filter(len, optional))
        if optional:
            self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
                '\n\t'.join(optional)
            ))
        recommended = set(filter(len, recommended))
        if recommended:
            self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
                '\n\t'.join(recommended)
            ))
        to_install = set(filter(len, to_install))
        msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
        if not self.opts['assume_yes']:
            self.ui.confirm(msg)
        repo_metadata = self._get_repo_metadata()
        # dl_list maps package name -> chosen download source/destination
        dl_list = {}
        for package in to_install:
            if package in file_map:
                # Local .spm files are installed directly, no download needed
                self._install_indv_pkg(package, file_map[package])
            else:
                for repo in repo_metadata:
                    repo_info = repo_metadata[repo]
                    if package in repo_info['packages']:
                        dl_package = False
                        repo_ver = repo_info['packages'][package]['info']['version']
                        repo_rel = repo_info['packages'][package]['info']['release']
                        repo_url = repo_info['info']['url']
                        if package in dl_list:
                            # Check package version, replace if newer version
                            if repo_ver == dl_list[package]['version']:
                                # Version is the same, check release
                                if repo_rel > dl_list[package]['release']:
                                    dl_package = True
                                elif repo_rel == dl_list[package]['release']:
                                    # Version and release are the same, give
                                    # preference to local (file://) repos
                                    # NOTE(review): as written this *replaces*
                                    # a file:// source with a non-file://
                                    # one, the opposite of the comment above
                                    # -- confirm intent.
                                    if dl_list[package]['source'].startswith('file://'):
                                        if not repo_url.startswith('file://'):
                                            dl_package = True
                            elif repo_ver > dl_list[package]['version']:
                                dl_package = True
                        else:
                            dl_package = True
                        if dl_package is True:
                            # Put together download directory
                            cache_path = os.path.join(
                                self.opts['spm_cache_dir'],
                                repo
                            )
                            # Put together download paths
                            dl_url = '{0}/{1}'.format(
                                repo_info['info']['url'],
                                repo_info['packages'][package]['filename']
                            )
                            out_file = os.path.join(
                                cache_path,
                                repo_info['packages'][package]['filename']
                            )
                            dl_list[package] = {
                                'version': repo_ver,
                                'release': repo_rel,
                                'source': dl_url,
                                'dest_dir': cache_path,
                                'dest_file': out_file,
                            }
        for package in dl_list:
            dl_url = dl_list[package]['source']
            cache_path = dl_list[package]['dest_dir']
            out_file = dl_list[package]['dest_file']
            # Make sure download directory exists
            if not os.path.exists(cache_path):
                os.makedirs(cache_path)
            # Download the package
            if dl_url.startswith('file://'):
                dl_url = dl_url.replace('file://', '')
                shutil.copyfile(dl_url, out_file)
            else:
                # NOTE(review): repo_info here is whatever value was left by
                # the selection loop above (the last repo iterated), not
                # necessarily the repo this package was chosen from --
                # auth settings may be taken from the wrong repo; confirm.
                with salt.utils.files.fopen(out_file, 'w') as outf:
                    outf.write(self._query_http(dl_url, repo_info['info']))
        # First we download everything, then we install
        for package in dl_list:
            out_file = dl_list[package]['dest_file']
            # Kick off the install
            self._install_indv_pkg(package, out_file)
        return
def _local_install(self, args, pkg_name=None):
'''
Install a package from a file
'''
if len(args) < 2:
raise SPMInvocationError('A package file must be specified')
self._install(args)
    def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
        '''
        Starting with one package, check all packages for dependencies

        Resolves the formula for ``pkg_name`` (from ``formula_def`` if given,
        otherwise by searching the configured repos), refuses to continue if
        the package is already installed (unless ``force`` is set), then
        resolves the dependency tree.

        Returns a 3-tuple of lists:
        ``(pkgs_to_install, optional_install, recommended_install)``.
        '''
        if pkg_file and not os.path.exists(pkg_file):
            raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
        self.repo_metadata = self._get_repo_metadata()
        if not formula_def:
            # Last repo searched wins if the package exists in several repos
            for repo in self.repo_metadata:
                if not isinstance(self.repo_metadata[repo]['packages'], dict):
                    continue
                if pkg_name in self.repo_metadata[repo]['packages']:
                    formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
        if not formula_def:
            raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
        # Check to see if the package is already installed
        pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
        pkgs_to_install = []
        if pkg_info is None or self.opts['force']:
            pkgs_to_install.append(pkg_name)
        elif pkg_info is not None and not self.opts['force']:
            raise SPMPackageError(
                'Package {0} already installed, not installing again'.format(formula_def['name'])
            )
        optional_install = []
        recommended_install = []
        if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
            # Build a name -> repo map of every package available anywhere
            self.avail_pkgs = {}
            for repo in self.repo_metadata:
                if not isinstance(self.repo_metadata[repo]['packages'], dict):
                    continue
                for pkg in self.repo_metadata[repo]['packages']:
                    self.avail_pkgs[pkg] = repo
            needs, unavail, optional, recommended = self._resolve_deps(formula_def)
            if unavail:
                raise SPMPackageError(
                    'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                        formula_def['name'], '\n'.join(unavail))
                )
            # NOTE(review): each optional dep ends up in the result twice --
            # once from extend() below and once as ``msg`` in the loop.  Also
            # the "[Installed]" check queries the *parent* formula's name
            # (without db_conn), not dep_pkg; confirm whether this is intended.
            if optional:
                optional_install.extend(optional)
                for dep_pkg in optional:
                    pkg_info = self._pkgdb_fun('info', formula_def['name'])
                    msg = dep_pkg
                    if isinstance(pkg_info, dict):
                        msg = '{0} [Installed]'.format(dep_pkg)
                    optional_install.append(msg)
            # NOTE(review): same duplicated-entry pattern as the optional loop
            if recommended:
                recommended_install.extend(recommended)
                for dep_pkg in recommended:
                    pkg_info = self._pkgdb_fun('info', formula_def['name'])
                    msg = dep_pkg
                    if isinstance(pkg_info, dict):
                        msg = '{0} [Installed]'.format(dep_pkg)
                    recommended_install.append(msg)
            if needs:
                pkgs_to_install.extend(needs)
                # NOTE(review): ``msg`` is computed here but never used; this
                # loop only re-queries the package DB per dependency.
                for dep_pkg in needs:
                    pkg_info = self._pkgdb_fun('info', formula_def['name'])
                    msg = dep_pkg
                    if isinstance(pkg_info, dict):
                        msg = '{0} [Installed]'.format(dep_pkg)
        return pkgs_to_install, optional_install, recommended_install
    def _install_indv_pkg(self, pkg_name, pkg_file):
        '''
        Install one individual package

        Reads the FORMULA out of the .spm tarball at ``pkg_file``, validates
        required fields, registers the package, runs optional pre/post
        local/target states, and installs each tar member via the configured
        pkgfiles provider, registering every installed file in the package DB.
        '''
        self.ui.status('... installing {0}'.format(pkg_name))
        formula_tar = tarfile.open(pkg_file, 'r:bz2')
        formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
        formula_def = salt.utils.yaml.safe_load(formula_ref)
        for field in ('version', 'release', 'summary', 'description'):
            if field not in formula_def:
                raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
        pkg_files = formula_tar.getmembers()
        # First pass: check for files that already exist
        existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
        if existing_files and not self.opts['force']:
            raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
                pkg_name, '\n'.join(existing_files))
            )
        # We've decided to install
        self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
        # Run the pre_local_state script, if present
        if 'pre_local_state' in formula_def:
            high_data = self._render(formula_def['pre_local_state'], formula_def)
            ret = self.caller.cmd('state.high', data=high_data)
        if 'pre_tgt_state' in formula_def:
            log.debug('Executing pre_tgt_state script')
            high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
            tgt = formula_def['pre_tgt_state']['tgt']
            # NOTE(review): 'timout' looks like a typo for 'timeout'; as
            # written it is forwarded as an extra keyword argument and the
            # job return value (ret) is ignored -- confirm before changing.
            ret = self.client.run_job(
                tgt=formula_def['pre_tgt_state']['tgt'],
                fun='state.high',
                tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
                timout=self.opts['timeout'],
                data=high_data,
            )
        # No defaults for this in config.py; default to the current running
        # user and group
        if salt.utils.platform.is_windows():
            uname = gname = salt.utils.win_functions.get_current_user()
            uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
            uid = self.opts.get('spm_uid', uname_sid)
            gid = self.opts.get('spm_gid', uname_sid)
        else:
            uid = self.opts.get('spm_uid', os.getuid())
            gid = self.opts.get('spm_gid', os.getgid())
            uname = pwd.getpwuid(uid)[0]
            gname = grp.getgrgid(gid)[0]
        # Second pass: install the files, forcing ownership on each member
        for member in pkg_files:
            member.uid = uid
            member.gid = gid
            member.uname = uname
            member.gname = gname
            out_path = self._pkgfiles_fun('install_file',
                                          pkg_name,
                                          formula_tar,
                                          member,
                                          formula_def,
                                          self.files_conn)
            # install_file returns False for members that were skipped
            if out_path is not False:
                if member.isdir():
                    digest = ''
                else:
                    self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                    file_hash = hashlib.sha1()
                    digest = self._pkgfiles_fun('hash_file',
                                                os.path.join(out_path, member.name),
                                                file_hash,
                                                self.files_conn)
                self._pkgdb_fun('register_file',
                                pkg_name,
                                member,
                                out_path,
                                digest,
                                self.db_conn)
        # Run the post_local_state script, if present
        if 'post_local_state' in formula_def:
            log.debug('Executing post_local_state script')
            high_data = self._render(formula_def['post_local_state'], formula_def)
            self.caller.cmd('state.high', data=high_data)
        if 'post_tgt_state' in formula_def:
            log.debug('Executing post_tgt_state script')
            high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
            tgt = formula_def['post_tgt_state']['tgt']
            # NOTE(review): same 'timout' spelling as the pre_tgt_state call
            ret = self.client.run_job(
                tgt=formula_def['post_tgt_state']['tgt'],
                fun='state.high',
                tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
                timout=self.opts['timeout'],
                data=high_data,
            )
        formula_tar.close()
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies

    Returns a 4-tuple:
        can_has     -- dict mapping resolvable dependency name -> repo name
        cant_has    -- sorted list of dependencies found in no repo
        optional    -- sorted list of optional dependency names
        recommended -- sorted list of recommended dependency names

    Requires ``self.avail_pkgs`` (package -> repo map) and
    ``self.repo_metadata`` to have been populated by the caller
    (see ``_check_all_deps``).
    '''
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}

    can_has = {}
    cant_has = []
    # A FORMULA may declare ``dependencies:`` with no value; treat as empty
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Dependencies already installed locally need no action
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue

        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)

    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')

    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]

        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)

        # Recurse into this dependency's own FORMULA from its repo metadata
        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})

        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))

    return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided in
    the callback to them

    callback  -- called as ``callback(repo, repo_data)`` for every enabled
        repo definition found
    repo_name -- if given, only the repo with this name is passed to the
        callback
    '''
    repo_files = []
    # The top-level spm.repos config file, if present ...
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])

    # ... plus any *.repo file under the spm.repos.d directory.
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            # Store the full path so every entry can be opened uniformly
            # below. The previous code stored bare filenames and then
            # re-joined all entries (including the absolute top-level
            # config path above) under '<config>.d/', which produced
            # broken paths for the top-level file and for nested dirs.
            repo_files.append(os.path.join(dirpath, repo_file))

    for repo_path in repo_files:
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
            for repo in repo_data:
                # Repos can be switched off with ``enabled: False``
                if repo_data[repo].get('enabled', True) is False:
                    continue
                if repo_name is not None and repo != repo_name:
                    continue
                callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http

    dl_path   -- full URL to fetch
    repo_info -- repo configuration dict; ``username``/``password`` keys
        enable HTTP auth (a username without a password is an error)

    Returns parsed YAML when ``dl_path`` names an SPM-METADATA file, the
    raw response text otherwise, or None on error. Errors are reported
    via ``self.ui.error`` rather than raised to the caller.
    '''
    query = None
    response = None

    try:
        if 'username' in repo_info:
            try:
                if 'password' in repo_info:
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    try:
        if query:
            if 'SPM-METADATA' in dl_path:
                # Metadata files are YAML; parse before returning
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    return response
def _download_repo_metadata(self, args):
    '''
    Connect to all repos and download metadata

    args -- optional; ``args[1]``, when present, limits the update to a
        single repo name. Fetched metadata is stored in the SPM cache,
        keyed by repo name.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

    def _update_metadata(repo, repo_info):
        # Fetch the repo's SPM-METADATA: straight from disk for file://
        # URLs, over HTTP otherwise
        dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
        if dl_path.startswith('file://'):
            dl_path = dl_path.replace('file://', '')
            with salt.utils.files.fopen(dl_path, 'r') as rpm:
                metadata = salt.utils.yaml.safe_load(rpm)
        else:
            metadata = self._query_http(dl_path, repo_info)

        cache.store('.', repo, metadata)

    repo_name = args[1] if len(args) > 1 else None
    self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata

    Returns a dict mapping repo name to
    ``{'info': <repo config>, 'packages': <cached SPM-METADATA>}``.
    A repo with no cache entry triggers a metadata download first.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    metadata = {}

    def _read_metadata(repo, repo_info):
        if cache.updated('.', repo) is None:
            # Nothing cached for this repo yet.
            # NOTE(review): this refreshes metadata for every repo, not
            # just ``repo`` -- confirm whether that is intended.
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})

        metadata[repo] = {
            'info': repo_info,
            'packages': cache.fetch('.', repo),
        }

    self._traverse_repos(_read_metadata)
    return metadata
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    args -- ``args[1]`` is the directory to scan ('.' for the cwd).

    When multiple versions of a package are present, only the newest is
    recorded; the older files are handled per the ``spm_repo_dups``
    option: 'ignore' (leave in place), 'archive' (move to ./archive) or
    'delete'.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')

    if args[1] == '.':
        # NOTE(review): os.getcwdu() exists on Python 2 only; under
        # Python 3 this branch raises AttributeError. Confirm and switch
        # to os.getcwd() if Python 3 support is required.
        repo_path = os.getcwdu()
    else:
        repo_path = args[1]

    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # Package name is everything before the "-<version>-<release>"
            # suffix of the filename
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())

            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                # NOTE(review): the int() casts assume purely numeric
                # version/release strings; a '1.2'-style version raises
                # ValueError here -- confirm acceptable inputs.
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False
                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)
            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file

    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )

    log.debug('Wrote %s', metadata_filename)

    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove a package

    args -- ``args[1:]`` are names of installed packages to remove.

    Only files whose current hash still matches the hash recorded at
    install time are deleted; locally modified files are left in place.
    Directories are removed afterwards if they ended up empty.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))

    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    for package in packages:
        self.ui.status('... removing {0}'.format(package))

        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))

        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))

        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                dirs.append(filerow[0])
                continue
            # filerow is (path, recorded_digest, ...); compare against the
            # file's current hash
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                # Hash unchanged since install; safe to delete
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            # Unregister the file either way
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)

        # Clean up directories, deepest paths first
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)

        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _local_info(self, args):
    '''
    Display the FORMULA metadata contained in a local package file.

    args -- ``args[1]`` is the path to the ``.spm`` file to inspect.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')

    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    # The package name is everything before the "-<version>-<release>"
    # suffix, with leading directory components removed.
    name = '-'.join(pkg_file.split('-')[:-2]).split('/')[-1]

    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_def = salt.utils.yaml.safe_load(
        formula_tar.extractfile('{0}/FORMULA'.format(name))
    )
    self.ui.status(self._get_info(formula_def))
    formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
    '''
    Build a package

    args -- ``args[1]`` is the path to the formula directory, which must
        contain a FORMULA file. The resulting ``.spm`` tarball is written
        to ``spm_build_dir`` as ``<name>-<version>-<release>.spm``.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')

    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]

    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)

    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))

    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )

    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])

    self.formula_conf = formula_conf

    formula_tar = tarfile.open(out_path, 'w:bz2')

    if 'files' in formula_conf:
        # This allows files to be added to the SPM file in a specific order.
        # It also allows for files to be tagged as a certain type, as with
        # RPM files. This tag is ignored here, but is used when installing
        # the SPM file.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    if file_.startswith('{0}|'.format(ftype)):
                        # Remove only the leading "<ftype>|" tag. The
                        # previous str.lstrip() call treated its argument
                        # as a *set of characters* and could also strip
                        # leading characters of the filename itself.
                        file_ = file_[len(ftype) + 1:]
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # If no files are specified, then the whole directory will be added.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            # Older tarfile versions take ``exclude`` instead of ``filter``
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()
    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
    '''
    Tar filter used while building a package: return None to drop any
    member matching an entry in ``spm_build_exclude``, otherwise return
    the member unchanged.
    '''
    if isinstance(member, string_types):
        return None

    # Excluded entries may be given relative to the package name or to
    # the formula's absolute path; check both prefixes per entry.
    prefixes = tuple(
        '{0}/{1}'.format(base, item)
        for item in self.opts['spm_build_exclude']
        for base in (self.formula_conf['name'], self.abspath)
    )
    if prefixes and member.name.startswith(prefixes):
        return None
    return member
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script

    data        -- raw script text taken from the FORMULA
    formula_def -- the FORMULA dict; supplies the renderer choice and is
        used (plus ``opts``) as the template context
    '''
    # FORMULA can contain a renderer option
    renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
    rend = salt.loader.render(self.opts, {})
    blacklist = self.opts.get('renderer_blacklist')
    whitelist = self.opts.get('renderer_whitelist')
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()
    return compile_template(
        ':string:',
        rend,
        renderer,
        blacklist,
        whitelist,
        input_data=data,
        **template_vars
    )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._local_info
|
python
|
def _local_info(self, args):
'''
List info for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
comps = pkg_file.split('-')
comps = '-'.join(comps[:-2]).split('/')
name = comps[-1]
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
self.ui.status(self._get_info(formula_def))
formula_tar.close()
|
List info for a package file
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L894-L915
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None):  # pylint: disable=W0231
    '''
    ui   -- user-interface object providing status/error/confirm output
    opts -- optional pre-built SPM config dict; loaded from the default
        SPM config path when not supplied
    '''
    self.ui = ui
    if not opts:
        opts = salt.config.spm_config(
            os.path.join(syspaths.CONFIG_DIR, 'spm')
        )
    self.opts = opts
    # Pluggable providers for the package database and file storage
    self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
    self.files_prov = self.opts.get('spm_files_provider', 'local')
    self._prep_pkgdb()
    self._prep_pkgfiles()
    # Provider connections; opened lazily by _init()
    self.db_conn = None
    self.files_conn = None
    self._init()
def _prep_pkgdb(self):
    # Load the package-database provider modules via the salt loader
    self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
    # Load the package-files provider modules via the salt loader
    self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
    '''
    Lazily open the package-database and package-files provider
    connections; already-open connections are left untouched.
    '''
    self.db_conn = self.db_conn or self._pkgdb_fun('init')
    self.files_conn = self.files_conn or self._pkgfiles_fun('init')
def _close(self):
    '''
    Close the package database connection, if one was opened.
    '''
    conn = self.db_conn
    if conn:
        conn.close()
def run(self, args):
    '''
    Run the SPM command

    args -- full argument list; ``args[0]`` selects the subcommand.
    SPMException errors are reported through the UI instead of raised.
    '''
    # Map each subcommand to its handler; every handler takes the full
    # args list except 'close', which is dispatched separately below.
    dispatch = {
        'install': self._install,
        'local': self._local,
        'repo': self._repo,
        'remove': self._remove,
        'build': self._build,
        'update_repo': self._download_repo_metadata,
        'create_repo': self._create_repo,
        'files': self._list_files,
        'info': self._info,
        'list': self._list,
    }
    command = args[0]
    try:
        if command == 'close':
            self._close()
        elif command in dispatch:
            dispatch[command](args)
        else:
            raise SPMInvocationError('Invalid command \'{0}\''.format(command))
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
    # Dispatch ``func`` to the configured package-database provider,
    # preferring attribute access and falling back to the loader dict.
    # NOTE(review): the except also fires if the provider call itself
    # raises AttributeError, re-invoking the function via the loader
    # dict -- confirm that double invocation is acceptable.
    try:
        return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
    except AttributeError:
        return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
    # Dispatch ``func`` to the configured package-files provider; same
    # pattern (and same AttributeError caveat) as _pkgdb_fun above.
    try:
        return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
    except AttributeError:
        return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
    '''
    Process ``spm list`` subcommands (packages/files/repos).
    '''
    args.pop(0)
    command = args[0]
    handlers = {
        'packages': self._list_packages,
        'files': self._list_files,
        'repos': self._repo_list,
    }
    if command not in handlers:
        raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
    handlers[command](args)
def _local(self, args):
    '''
    Process ``spm local`` subcommands, which operate on package files
    rather than repos (install/files/info).
    '''
    args.pop(0)
    command = args[0]
    handlers = {
        'install': self._local_install,
        'files': self._local_list_files,
        'info': self._local_info,
    }
    if command not in handlers:
        raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
    handlers[command](args)
def _repo(self, args):
    '''
    Process ``spm repo`` subcommands
    (list/packages/search/update/create).
    '''
    args.pop(0)
    command = args[0]
    handlers = {
        'list': self._repo_list,
        'packages': self._repo_packages,
        'update': self._download_repo_metadata,
        'create': self._create_repo,
    }
    if command == 'search':
        # 'search' shares the packages listing with search semantics
        self._repo_packages(args, search=True)
    elif command in handlers:
        handlers[command](args)
    else:
        raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
    '''
    List packages across all configured repos whose name contains
    ``args[1]``. Prints each match sorted and returns the list of
    (name, version, release, repo) tuples.
    '''
    matches = []
    repo_metadata = self._get_repo_metadata()
    for repo, repo_data in repo_metadata.items():
        for pkg, pkg_data in repo_data['packages'].items():
            if args[1] not in pkg:
                continue
            info = pkg_data['info']
            matches.append((pkg, info['version'], info['release'], repo))
    for entry in sorted(matches):
        self.ui.status('{0}\t{1}-{2}\t{3}'.format(*entry))
    return matches
def _repo_list(self, args):
    '''
    List configured repos

    This can be called either as a ``repo`` command or a ``list`` command
    '''
    for repo_name in self._get_repo_metadata():
        self.ui.status(repo_name)
def _install(self, args):
    '''
    Install a package from a repo

    args -- ``args[1:]`` are package names, or paths ending in ``.spm``
        for local package files. Dependencies are resolved first, the
        user is asked to confirm (unless ``assume_yes``), then each
        package is downloaded from the best matching repo and installed.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    # Local caller/runner clients used by _install_indv_pkg for the
    # pre/post state scripts
    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    cache = salt.cache.Cache(self.opts)

    packages = args[1:]
    file_map = {}  # package name -> local .spm path
    optional = []
    recommended = []
    to_install = []
    for pkg in packages:
        if pkg.endswith('.spm'):
            # Local package file: read its FORMULA directly
            if self._pkgfiles_fun('path_exists', pkg):
                comps = pkg.split('-')
                comps = os.path.split('-'.join(comps[:-2]))
                pkg_name = comps[-1]

                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = salt.utils.yaml.safe_load(formula_ref)

                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(
                    pkg_name=pkg_name,
                    pkg_file=pkg,
                    formula_def=formula_def
                )
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
                formula_tar.close()
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            # Repo package: FORMULA is looked up from repo metadata
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)

    # Drop empty entries and report optional/recommended dependencies
    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))

    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    repo_metadata = self._get_repo_metadata()

    dl_list = {}
    for package in to_install:
        if package in file_map:
            # Local file: install immediately, nothing to download
            self._install_indv_pkg(package, file_map[package])
        else:
            # Scan all repos for the best candidate: highest
            # version/release, preferring file:// repos on an exact tie
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        # Check package version, replace if newer version
                        if repo_ver == dl_list[package]['version']:
                            # Version is the same, check release
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                # Version and release are the same, give
                                # preference to local (file://) repos
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        dl_package = True

                    if dl_package is True:
                        # Put together download directory
                        cache_path = os.path.join(
                            self.opts['spm_cache_dir'],
                            repo
                        )

                        # Put together download paths
                        dl_url = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = os.path.join(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )
                        dl_list[package] = {
                            'version': repo_ver,
                            'release': repo_rel,
                            'source': dl_url,
                            'dest_dir': cache_path,
                            'dest_file': out_file,
                        }

    # Download every selected package into the cache
    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']

        # Make sure download directory exists
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)

        # Download the package
        if dl_url.startswith('file://'):
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            # NOTE(review): ``repo_info`` here is whatever the last
            # iteration of the selection loop above left behind, which
            # may not be the repo this package was selected from --
            # verify the auth/config passed to _query_http.
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, repo_info['info']))

    # First we download everything, then we install
    for package in dl_list:
        out_file = dl_list[package]['dest_file']

        # Kick off the install
        self._install_indv_pkg(package, out_file)
    return
def _local_install(self, args, pkg_name=None):
    '''
    Install a package from a file

    args     -- ``args[1:]`` are paths to ``.spm`` files; validation and
        installation are delegated to ``_install``, which handles paths
        ending in ``.spm`` directly.
    pkg_name -- unused; kept for interface compatibility
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package file must be specified')

    self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
    '''
    Starting with one package, check all packages for dependencies

    pkg_name    -- name of the package to resolve
    pkg_file    -- optional path to a local .spm file (existence checked)
    formula_def -- optional FORMULA dict; when absent it is looked up by
        ``pkg_name`` in the repo metadata

    Returns a 3-tuple of lists:
        (packages to install, optional deps, recommended deps)
    '''
    if pkg_file and not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    self.repo_metadata = self._get_repo_metadata()
    if not formula_def:
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            if pkg_name in self.repo_metadata[repo]['packages']:
                formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']

    if not formula_def:
        raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))

    # Check to see if the package is already installed
    pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    pkgs_to_install = []
    if pkg_info is None or self.opts['force']:
        pkgs_to_install.append(pkg_name)
    elif pkg_info is not None and not self.opts['force']:
        raise SPMPackageError(
            'Package {0} already installed, not installing again'.format(formula_def['name'])
        )

    optional_install = []
    recommended_install = []
    if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
        # _resolve_deps relies on self.avail_pkgs (package -> repo map)
        self.avail_pkgs = {}
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            for pkg in self.repo_metadata[repo]['packages']:
                self.avail_pkgs[pkg] = repo

        needs, unavail, optional, recommended = self._resolve_deps(formula_def)

        if unavail:
            raise SPMPackageError(
                'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                    formula_def['name'], '\n'.join(unavail))
            )

        if optional:
            optional_install.extend(optional)
            for dep_pkg in optional:
                # NOTE(review): queries the parent package's info rather
                # than dep_pkg -- looks like it should be
                # self._pkgdb_fun('info', dep_pkg); confirm before changing.
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                # NOTE(review): each optional dep ends up listed twice
                # (once via extend() above, once here) -- suspected bug.
                optional_install.append(msg)

        if recommended:
            recommended_install.extend(recommended)
            for dep_pkg in recommended:
                # NOTE(review): same parent-vs-dep lookup and duplication
                # concerns as the optional branch above.
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                recommended_install.append(msg)

        if needs:
            pkgs_to_install.extend(needs)
            for dep_pkg in needs:
                # NOTE(review): msg is computed but never used in this
                # loop (no append) -- dead code.
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)

    return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package

    pkg_name -- the package's name (also the top-level directory inside
        the tarball)
    pkg_file -- path to the ``.spm`` (bz2 tarball) to install

    Runs any pre_local_state/pre_tgt_state scripts from the FORMULA,
    extracts and registers the files, then runs the post_* scripts.
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)

    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))

    pkg_files = formula_tar.getmembers()

    # First pass: check for files that already exist
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)

    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )

    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)

    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        ret = self.caller.cmd('state.high', data=high_data)
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        tgt = formula_def['pre_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            # run_job's keyword is ``timeout`` -- the previous ``timout``
            # misspelling was silently passed through as a job kwarg
            timeout=self.opts['timeout'],
            data=high_data,
        )

    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]

    # Second pass: install the files
    for member in pkg_files:
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname

        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)

    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        tgt = formula_def['post_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            # same ``timout`` -> ``timeout`` fix as the pre_tgt_state call
            timeout=self.opts['timeout'],
            data=high_data,
        )

    formula_tar.close()
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies

    Returns a 4-tuple:
        can_has     -- dict mapping resolvable dependency name -> repo name
        cant_has    -- sorted list of dependencies found in no repo
        optional    -- sorted list of optional dependency names
        recommended -- sorted list of recommended dependency names

    Requires ``self.avail_pkgs`` and ``self.repo_metadata`` to have been
    populated by the caller (see ``_check_all_deps``).
    '''
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}

    can_has = {}
    cant_has = []
    # A FORMULA may declare ``dependencies:`` with no value; treat as empty
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Dependencies already installed locally need no action
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue

        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)

    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')

    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]

        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)

        # Recurse into this dependency's own FORMULA from its repo metadata
        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})

        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))

    return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided in
    the callback to them

    callback  -- called as ``callback(repo, repo_data)`` for every enabled
        repo definition found
    repo_name -- if given, only the repo with this name is passed to the
        callback
    '''
    repo_files = []
    # The top-level spm.repos config file, if present ...
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])

    # ... plus any *.repo file under the spm.repos.d directory.
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            # Store the full path so every entry can be opened uniformly
            # below. The previous code stored bare filenames and then
            # re-joined all entries (including the absolute top-level
            # config path above) under '<config>.d/', which produced
            # broken paths for the top-level file and for nested dirs.
            repo_files.append(os.path.join(dirpath, repo_file))

    for repo_path in repo_files:
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
            for repo in repo_data:
                # Repos can be switched off with ``enabled: False``
                if repo_data[repo].get('enabled', True) is False:
                    continue
                if repo_name is not None and repo != repo_name:
                    continue
                callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http

    dl_path   -- full URL to fetch
    repo_info -- repo configuration dict; ``username``/``password`` keys
        enable HTTP auth (a username without a password is an error)

    Returns parsed YAML when ``dl_path`` names an SPM-METADATA file, the
    raw response text otherwise, or None on error. Errors are reported
    via ``self.ui.error`` rather than raised to the caller.
    '''
    query = None
    response = None

    try:
        if 'username' in repo_info:
            try:
                if 'password' in repo_info:
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    try:
        if query:
            if 'SPM-METADATA' in dl_path:
                # Metadata files are YAML; parse before returning
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    return response
def _download_repo_metadata(self, args):
    '''
    Connect to all repos and download metadata

    args[1] (optional) restricts the update to a single repo name.
    Metadata is stored in the SPM cache under the '.' bank, keyed by
    repo name.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

    def _update_metadata(repo, repo_info):
        dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
        if dl_path.startswith('file://'):
            # Local repos are read straight off the filesystem
            dl_path = dl_path.replace('file://', '')
            with salt.utils.files.fopen(dl_path, 'r') as rpm:
                metadata = salt.utils.yaml.safe_load(rpm)
        else:
            metadata = self._query_http(dl_path, repo_info)
        cache.store('.', repo, metadata)

    repo_name = args[1] if len(args) > 1 else None
    self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata

    Yields a dict keyed by repo name, each entry carrying the repo's
    configuration ('info') and its cached package index ('packages').
    '''
    repo_cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    collected = {}

    def _collect(repo, repo_info):
        # A repo that has never been cached must be fetched first
        if repo_cache.updated('.', repo) is None:
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})
        entry = {}
        entry['info'] = repo_info
        entry['packages'] = repo_cache.fetch('.', repo)
        collected[repo] = entry

    self._traverse_repos(_collect)
    return collected
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    args[1] is the directory to scan ('.' means the current directory).
    When several builds of the same package are found, only the newest
    is indexed; the superseded files are ignored, archived or deleted
    according to the ``spm_repo_dups`` option.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')

    if args[1] == '.':
        # os.getcwdu() only exists on Python 2 (returns unicode); on
        # Python 3 it was removed and getcwd() already returns text
        repo_path = os.getcwdu() if hasattr(os, 'getcwdu') else os.getcwd()
    else:
        repo_path = args[1]

    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # Package names may themselves contain dashes; the last two
            # dash-separated components are always <version>-<release>
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())

            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False
                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)
            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file

    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )

    log.debug('Wrote %s', metadata_filename)

    # Handle the files that were superseded above
    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove a package

    args[1:] are the names of the packages to remove. Unless
    ``assume_yes`` is set, the user must confirm first. Files are only
    deleted when their on-disk hash still matches the hash recorded at
    install time; locally modified files are left in place.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))

    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    for package in packages:
        self.ui.status('... removing {0}'.format(package))

        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))

        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))

        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            # Directories are collected and cleaned up after all files
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                dirs.append(filerow[0])
                continue
            # filerow[1] is the hash recorded at install time
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            # The DB entry is dropped either way
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)

        # Clean up directories (deepest first, so children go before parents)
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)

        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
    '''
    Display verbose information

    msg: the message to report
    level: logger method used to record the message; defaults to
        ``log.debug`` (bound at function definition time)
    '''
    # Echo to the UI only in verbose mode; always log
    if self.opts.get('verbose', False) is True:
        self.ui.status(msg)
    level(msg)
def _info(self, args):
    '''
    List info for a package
    '''
    # A package name must follow the command
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    pkg_name = args[1]
    db_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    if db_info is None:
        raise SPMPackageError('package {0} not installed'.format(pkg_name))
    self.ui.status(self._get_info(db_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
    '''
    List files for a package file
    '''
    # A path to a .spm file must follow the command
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')

    pkg_path = args[1]
    if not os.path.exists(pkg_path):
        raise SPMPackageError('Package file {0} not found'.format(pkg_path))

    pkg_tar = tarfile.open(pkg_path, 'r:bz2')
    for entry in pkg_tar.getmembers():
        self.ui.status(entry.name)
def _list_packages(self, args):
    '''
    List files for an installed package
    '''
    for pkg in self._pkgdb_fun('list_packages', self.db_conn):
        # Verbose mode shows the full DB row; otherwise just the name
        if self.opts['verbose']:
            line = ','.join(pkg)
        else:
            line = pkg[0]
        self.ui.status(line)
def _list_files(self, args):
    '''
    List files for an installed package
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package name must be specified')

    pkg_name = args[-1]
    pkg_files = self._pkgdb_fun('list_files', pkg_name, self.db_conn)
    if pkg_files is None:
        raise SPMPackageError('package {0} not installed'.format(pkg_name))

    for row in pkg_files:
        # Verbose mode shows the full DB row; otherwise just the path
        if self.opts['verbose']:
            line = ','.join(row)
        else:
            line = row[0]
        self.ui.status(line)
def _build(self, args):
    '''
    Build a package

    args[1] is the path to a directory containing a FORMULA file. The
    resulting .spm (a bzip2 tarball) is written to ``spm_build_dir``.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')

    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]

    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)

    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))

    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )

    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])

    self.formula_conf = formula_conf

    formula_tar = tarfile.open(out_path, 'w:bz2')

    if 'files' in formula_conf:
        # This allows files to be added to the SPM file in a specific order.
        # It also allows for files to be tagged as a certain type, as with
        # RPM files. This tag is ignored here, but is used when installing
        # the SPM file.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    # Strip a leading '<type>|' tag exactly once. The
                    # original used str.lstrip(), which strips a character
                    # *set* and could eat leading filename characters too.
                    tag = '{0}|'.format(ftype)
                    if file_.startswith(tag):
                        file_ = file_[len(tag):]
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # If no files are specified, then the whole directory will be added.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            # Older tarfile versions only support the 'exclude' callback
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()

    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
    '''
    Exclude based on opts
    '''
    # Plain strings come from the old tarfile 'exclude' API; always skip
    if isinstance(member, string_types):
        return None

    name = member.name
    for pattern in self.opts['spm_build_exclude']:
        skip_prefixes = (
            '{0}/{1}'.format(self.formula_conf['name'], pattern),
            '{0}/{1}'.format(self.abspath, pattern),
        )
        if name.startswith(skip_prefixes):
            return None
    return member
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script

    data: the raw template text from the FORMULA
    formula_def: the parsed FORMULA dict; its fields are exposed as
        template variables, together with a copy of the current opts

    Returns the compiled template output.
    '''
    # FORMULA can contain a renderer option
    renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
    rend = salt.loader.render(self.opts, {})
    blacklist = self.opts.get('renderer_blacklist')
    whitelist = self.opts.get('renderer_whitelist')
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()
    return compile_template(
        ':string:',
        rend,
        renderer,
        blacklist,
        whitelist,
        input_data=data,
        **template_vars
    )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._info
|
python
|
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
|
List info for a package
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L917-L929
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None):  # pylint: disable=W0231
    '''
    ui: UI object providing status/error/confirm output
    opts: pre-built SPM configuration dict; loaded from the default spm
        config file when not supplied
    '''
    self.ui = ui
    if not opts:
        opts = salt.config.spm_config(
            os.path.join(syspaths.CONFIG_DIR, 'spm')
        )
    self.opts = opts
    # Pluggable backends for the package database and for file storage
    self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
    self.files_prov = self.opts.get('spm_files_provider', 'local')
    self._prep_pkgdb()
    self._prep_pkgfiles()
    # Connections are opened by _init() below
    self.db_conn = None
    self.files_conn = None
    self._init()
def _prep_pkgdb(self):
    # Load the package-database provider modules via the salt loader
    self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
    # Load the package-files provider modules via the salt loader
    self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
    # Open the DB and file-store connections lazily, only once
    if not self.db_conn:
        self.db_conn = self._pkgdb_fun('init')
    if not self.files_conn:
        self.files_conn = self._pkgfiles_fun('init')
def _close(self):
    '''
    Close the package database connection, if one was opened.
    '''
    conn = self.db_conn
    if conn:
        conn.close()
def run(self, args):
    '''
    Run the SPM command
    '''
    command = args[0]
    # Map each CLI command to its handler; 'close' takes no args
    handlers = {
        'install': self._install,
        'local': self._local,
        'repo': self._repo,
        'remove': self._remove,
        'build': self._build,
        'update_repo': self._download_repo_metadata,
        'create_repo': self._create_repo,
        'files': self._list_files,
        'info': self._info,
        'list': self._list,
    }
    try:
        if command == 'close':
            self._close()
        elif command in handlers:
            handlers[command](args)
        else:
            raise SPMInvocationError('Invalid command \'{0}\''.format(command))
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
    '''
    Call ``func`` on the configured package-database provider,
    forwarding all arguments and returning its result.
    '''
    try:
        # Prefer plain attribute access on the provider module
        return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
    except AttributeError:
        # Fall back to the loader's '<provider>.<func>' dict lookup
        return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
    '''
    Call ``func`` on the configured package-files provider,
    forwarding all arguments and returning its result.
    '''
    try:
        # Prefer plain attribute access on the provider module
        return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
    except AttributeError:
        # Fall back to the loader's '<provider>.<func>' dict lookup
        return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
    '''
    Process list commands
    '''
    args.pop(0)
    sub_command = args[0]
    dispatch = {
        'packages': self._list_packages,
        'files': self._list_files,
        'repos': self._repo_list,
    }
    handler = dispatch.get(sub_command)
    if handler is None:
        raise SPMInvocationError('Invalid list command \'{0}\''.format(sub_command))
    handler(args)
def _local(self, args):
    '''
    Process local commands
    '''
    args.pop(0)
    sub_command = args[0]
    dispatch = {
        'install': self._local_install,
        'files': self._local_list_files,
        'info': self._local_info,
    }
    handler = dispatch.get(sub_command)
    if handler is None:
        raise SPMInvocationError('Invalid local command \'{0}\''.format(sub_command))
    handler(args)
def _repo(self, args):
    '''
    Process repo commands
    '''
    args.pop(0)
    sub_command = args[0]
    dispatch = {
        'list': self._repo_list,
        'packages': self._repo_packages,
        # 'search' is the same handler in fuzzy-match mode
        'search': lambda a: self._repo_packages(a, search=True),
        'update': self._download_repo_metadata,
        'create': self._create_repo,
    }
    handler = dispatch.get(sub_command)
    if handler is None:
        raise SPMInvocationError('Invalid repo command \'{0}\''.format(sub_command))
    handler(args)
def _repo_packages(self, args, search=False):
    '''
    List packages for one or more configured repos
    '''
    matches = []
    metadata = self._get_repo_metadata()
    # args[1] acts as a substring filter against package names
    for repo_name in metadata:
        repo_pkgs = metadata[repo_name]['packages']
        for pkg_name in repo_pkgs:
            if args[1] not in pkg_name:
                continue
            pkg_info = repo_pkgs[pkg_name]['info']
            matches.append((pkg_name, pkg_info['version'], pkg_info['release'], repo_name))
    for name, version, release, repo_name in sorted(matches):
        self.ui.status(
            '{0}\t{1}-{2}\t{3}'.format(name, version, release, repo_name)
        )
    return matches
def _repo_list(self, args):
    '''
    List configured repos

    This can be called either as a ``repo`` command or a ``list`` command
    '''
    for repo_name in self._get_repo_metadata():
        self.ui.status(repo_name)
def _install(self, args):
    '''
    Install a package from a repo

    args[1:] may be .spm file paths (installed directly) or package
    names (resolved against the configured repos). Dependencies are
    resolved first, then everything is downloaded, then installed.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    # Clients used by [pre|post]_*_state scripts during install
    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    cache = salt.cache.Cache(self.opts)

    packages = args[1:]
    file_map = {}       # pkg_name -> local .spm path (skips download)
    optional = []
    recommended = []
    to_install = []
    for pkg in packages:
        if pkg.endswith('.spm'):
            if self._pkgfiles_fun('path_exists', pkg):
                # Derive the package name from the file name: everything
                # before the trailing -<version>-<release>
                comps = pkg.split('-')
                comps = os.path.split('-'.join(comps[:-2]))
                pkg_name = comps[-1]

                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = salt.utils.yaml.safe_load(formula_ref)

                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(
                    pkg_name=pkg_name,
                    pkg_file=pkg,
                    formula_def=formula_def
                )
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
                formula_tar.close()
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)

    # Drop empty entries and duplicates before reporting
    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))

    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    repo_metadata = self._get_repo_metadata()

    dl_list = {}
    for package in to_install:
        if package in file_map:
            # Local .spm files skip the download phase entirely
            self._install_indv_pkg(package, file_map[package])
        else:
            # Pick the best candidate across all repos that carry it
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        # Check package version, replace if newer version
                        if repo_ver == dl_list[package]['version']:
                            # Version is the same, check release
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                # Version and release are the same, give
                                # preference to local (file://) repos
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        dl_package = True

                    if dl_package is True:
                        # Put together download directory
                        cache_path = os.path.join(
                            self.opts['spm_cache_dir'],
                            repo
                        )

                        # Put together download paths
                        dl_url = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = os.path.join(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )

                        dl_list[package] = {
                            'version': repo_ver,
                            'release': repo_rel,
                            'source': dl_url,
                            'dest_dir': cache_path,
                            'dest_file': out_file,
                        }

    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']

        # Make sure download directory exists
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)

        # Download the package
        if dl_url.startswith('file://'):
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, repo_info['info']))

    # First we download everything, then we install
    for package in dl_list:
        out_file = dl_list[package]['dest_file']

        # Kick off the install
        self._install_indv_pkg(package, out_file)
    return
def _local_install(self, args, pkg_name=None):
    '''
    Install a package from a file

    NOTE(review): pkg_name is accepted but unused here; all work is
    delegated to _install(), which handles .spm paths directly.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package file must be specified')

    self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
    '''
    Starting with one package, check all packages for dependencies

    pkg_name: the package's name
    pkg_file: path to a local .spm file, when installing from a file
    formula_def: the package's parsed FORMULA; looked up in the repos
        when not supplied

    Returns a 3-tuple of lists:
        (packages to install, optional deps, recommended deps)
    Optional/recommended entries already installed locally are tagged
    with '[Installed]'.
    '''
    if pkg_file and not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    self.repo_metadata = self._get_repo_metadata()
    if not formula_def:
        # No local formula supplied; look the package up in the repos
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            if pkg_name in self.repo_metadata[repo]['packages']:
                formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']

    if not formula_def:
        raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))

    # Check to see if the package is already installed
    pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    pkgs_to_install = []
    if pkg_info is None or self.opts['force']:
        pkgs_to_install.append(pkg_name)
    elif pkg_info is not None and not self.opts['force']:
        raise SPMPackageError(
            'Package {0} already installed, not installing again'.format(formula_def['name'])
        )

    optional_install = []
    recommended_install = []
    if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
        # Build a name -> repo index of every available package
        self.avail_pkgs = {}
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            for pkg in self.repo_metadata[repo]['packages']:
                self.avail_pkgs[pkg] = repo

        needs, unavail, optional, recommended = self._resolve_deps(formula_def)

        if unavail:
            raise SPMPackageError(
                'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                    formula_def['name'], '\n'.join(unavail))
            )

        def _annotate(dep_pkgs):
            # Tag each dependency that is already installed. The original
            # code queried the parent package instead of each dependency,
            # and also listed every entry twice (extend + append).
            annotated = []
            for dep_pkg in dep_pkgs:
                dep_info = self._pkgdb_fun('info', dep_pkg)
                msg = dep_pkg
                if isinstance(dep_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                annotated.append(msg)
            return annotated

        optional_install.extend(_annotate(optional))
        recommended_install.extend(_annotate(recommended))

        if needs:
            pkgs_to_install.extend(needs)

    return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package

    pkg_name: the package's name (also the top-level directory inside
        the tarball)
    pkg_file: path to the downloaded or local .spm file (bzip2 tarball)
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)

    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))

    pkg_files = formula_tar.getmembers()

    # First pass: check for files that already exist
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)

    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )

    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)

    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        ret = self.caller.cmd('state.high', data=high_data)
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        tgt = formula_def['pre_tgt_state']['tgt']
        # NOTE(review): 'timout' looks like a typo for 'timeout'; as
        # written it is swallowed by run_job's kwargs and the configured
        # timeout is not applied — confirm against salt.client run_job
        ret = self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            timout=self.opts['timeout'],
            data=high_data,
        )

    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]

    # Second pass: install the files
    for member in pkg_files:
        # Force ownership on every extracted entry
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname

        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                # Directories get no content hash
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)

    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        tgt = formula_def['post_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            timout=self.opts['timeout'],
            data=high_data,
        )

    formula_tar.close()
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies

    Returns a 4-tuple:
        (resolvable deps as {name: repo}, unresolvable dep names,
         optional dep names, recommended dep names)

    NOTE(review): relies on self.avail_pkgs and self.repo_metadata being
    populated by the caller (_check_all_deps) — confirm before reusing
    this method elsewhere.
    '''
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}

    can_has = {}
    cant_has = []
    # Normalize 'dependencies: None' (empty YAML value) to an empty string
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Dependencies that are already installed need no action
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue

        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)

    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')

    # Recurse through each resolvable dependency's own formula
    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]

        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)

        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})

        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))

    return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided in
    the callback to them

    callback: callable invoked as ``callback(repo, repo_config)`` for each
        enabled repo definition found
    repo_name: when given, only the repo with this name is passed to the
        callback
    '''
    # Collect the full paths of all repo configuration files: the main
    # spm_repos_config file (if present) plus any *.repo file inside the
    # matching "<spm_repos_config>.d" directory.
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])

    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            # Store the full path. The original stored only the bare file
            # name and later re-joined every entry as '<cfg>.d/<entry>',
            # which mangled the path of the main spm_repos_config file
            # appended above.
            repo_files.append(os.path.join(dirpath, repo_file))

    for repo_path in repo_files:
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
            for repo in repo_data:
                # Repos can be disabled with 'enabled: False'
                if repo_data[repo].get('enabled', True) is False:
                    continue
                if repo_name is not None and repo != repo_name:
                    continue
                callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http

    dl_path: the URL to fetch (an SPM-METADATA URL or a package URL)
    repo_info: the repo's configuration dict; may carry ``username`` and
        ``password`` for authenticated repos

    Returns parsed YAML when fetching SPM-METADATA, the raw body text
    otherwise, or None on failure. Errors are reported through the UI
    rather than raised to the caller.
    '''
    query = None
    response = None

    try:
        if 'username' in repo_info:
            try:
                # A username without a password is a configuration error
                if 'password' in repo_info:
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    try:
        if query:
            # Metadata downloads are parsed as YAML; everything else is
            # returned as raw text
            if 'SPM-METADATA' in dl_path:
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            # query stayed None above; surface a hint and fall through to
            # returning None
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    return response
return response
def _download_repo_metadata(self, args):
'''
Connect to all repos and download metadata
'''
cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
def _update_metadata(repo, repo_info):
dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
if dl_path.startswith('file://'):
dl_path = dl_path.replace('file://', '')
with salt.utils.files.fopen(dl_path, 'r') as rpm:
metadata = salt.utils.yaml.safe_load(rpm)
else:
metadata = self._query_http(dl_path, repo_info)
cache.store('.', repo, metadata)
repo_name = args[1] if len(args) > 1 else None
self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata

    Yields a dict keyed by repo name, each entry carrying the repo's
    configuration ('info') and its cached package index ('packages').
    '''
    repo_cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    collected = {}

    def _collect(repo, repo_info):
        # A repo that has never been cached must be fetched first
        if repo_cache.updated('.', repo) is None:
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})
        entry = {}
        entry['info'] = repo_info
        entry['packages'] = repo_cache.fetch('.', repo)
        collected[repo] = entry

    self._traverse_repos(_collect)
    return collected
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    args[1] is the directory to scan ('.' means the current directory).
    When several builds of the same package are found, only the newest
    is indexed; the superseded files are ignored, archived or deleted
    according to the ``spm_repo_dups`` option.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')

    if args[1] == '.':
        # os.getcwdu() only exists on Python 2 (returns unicode); on
        # Python 3 it was removed and getcwd() already returns text
        repo_path = os.getcwdu() if hasattr(os, 'getcwdu') else os.getcwd()
    else:
        repo_path = args[1]

    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # Package names may themselves contain dashes; the last two
            # dash-separated components are always <version>-<release>
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())

            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False
                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)
            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file

    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )

    log.debug('Wrote %s', metadata_filename)

    # Handle the files that were superseded above
    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove one or more installed packages.

    ``args[0]`` is the command word; ``args[1:]`` are package names.
    Raises SPMInvocationError when no package is given or a package is
    not installed, and SPMDatabaseError when the package DB is missing.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)
    for package in packages:
        self.ui.status('... removing {0}'.format(package))
        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))
        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                # Directories are handled in a second pass below
                dirs.append(filerow[0])
                continue
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                # Hash matches the one recorded at install time: the file
                # is unmodified, safe to delete from disk
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                # File changed since install; leave it on disk but still
                # drop its registration from the package DB below
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
        # Clean up directories; reverse-sort removes children before parents
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
'''
Display verbose information
'''
if self.opts.get('verbose', False) is True:
self.ui.status(msg)
level(msg)
def _local_info(self, args):
    '''
    Display metadata for a local package file (``spm local info <file>``).
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')
    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
    # Derive the package name: strip the trailing "-<version>-<release>"
    # and any leading directory components
    name = '-'.join(pkg_file.split('-')[:-2]).split('/')[-1]
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_def = salt.utils.yaml.safe_load(
        formula_tar.extractfile('{0}/FORMULA'.format(name))
    )
    self.ui.status(self._get_info(formula_def))
    formula_tar.close()
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
    '''
    Build an SPM package from a formula directory.

    ``args[1]`` is the path to a directory containing a FORMULA file.
    The resulting tarball is written to
    ``<spm_build_dir>/<name>-<version>-<release>.spm``.

    Raises SPMInvocationError when no path is given, and SPMPackageError
    when the FORMULA file is missing or lacks a required field.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')
    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]
    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)
    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )
    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])
    self.formula_conf = formula_conf
    formula_tar = tarfile.open(out_path, 'w:bz2')
    if 'files' in formula_conf:
        # This allows files to be added to the SPM file in a specific order.
        # It also allows for files to be tagged as a certain type, as with
        # RPM files. This tag is ignored here, but is used when installing
        # the SPM file.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    tag = '{0}|'.format(ftype)
                    if file_.startswith(tag):
                        # Remove the exact "<type>|" prefix. str.lstrip()
                        # must NOT be used here: it strips a *character
                        # set*, so e.g. lstrip('c|') would also eat the
                        # leading 'c' of a file named 'config'.
                        file_ = file_[len(tag):]
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # If no files are specified, then the whole directory will be added.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            # Older Pythons used 'exclude' instead of 'filter'
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()
    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
'''
Exclude based on opts
'''
if isinstance(member, string_types):
return None
for item in self.opts['spm_build_exclude']:
if member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], item)):
return None
elif member.name.startswith('{0}/{1}'.format(self.abspath, item)):
return None
return member
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script

    The script text in ``data`` is run through Salt's renderer pipeline
    (default ``jinja|yaml``, overridable by a ``renderer`` key in the
    FORMULA or in the config) with the formula definition plus a copy of
    the runtime opts available as template variables.
    '''
    # FORMULA can contain a renderer option
    renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
    rend = salt.loader.render(self.opts, {})
    blacklist = self.opts.get('renderer_blacklist')
    whitelist = self.opts.get('renderer_whitelist')
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()
    return compile_template(
        ':string:',
        rend,
        renderer,
        blacklist,
        whitelist,
        input_data=data,
        **template_vars
    )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._get_info
|
python
|
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
|
Get package info
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L931-L965
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None):  # pylint: disable=W0231
    '''
    :param ui: user-interface object providing status/error/confirm
    :param opts: optional pre-built SPM config dict; when omitted, the
        config is loaded from the standard SPM config file location
    '''
    self.ui = ui
    if not opts:
        opts = salt.config.spm_config(
            os.path.join(syspaths.CONFIG_DIR, 'spm')
        )
    self.opts = opts
    # Pluggable backends for the package database and the file store
    self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
    self.files_prov = self.opts.get('spm_files_provider', 'local')
    self._prep_pkgdb()
    self._prep_pkgfiles()
    # Connections are opened lazily by _init()
    self.db_conn = None
    self.files_conn = None
    self._init()
def _prep_pkgdb(self):
    '''
    Load the package-database provider modules via the Salt loader.
    '''
    self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
    '''
    Load the package file-store provider modules via the Salt loader.
    '''
    self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
if not self.db_conn:
self.db_conn = self._pkgdb_fun('init')
if not self.files_conn:
self.files_conn = self._pkgfiles_fun('init')
def _close(self):
if self.db_conn:
self.db_conn.close()
def run(self, args):
'''
Run the SPM command
'''
command = args[0]
try:
if command == 'install':
self._install(args)
elif command == 'local':
self._local(args)
elif command == 'repo':
self._repo(args)
elif command == 'remove':
self._remove(args)
elif command == 'build':
self._build(args)
elif command == 'update_repo':
self._download_repo_metadata(args)
elif command == 'create_repo':
self._create_repo(args)
elif command == 'files':
self._list_files(args)
elif command == 'info':
self._info(args)
elif command == 'list':
self._list(args)
elif command == 'close':
self._close()
else:
raise SPMInvocationError('Invalid command \'{0}\''.format(command))
except SPMException as exc:
self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args)
elif command == 'files':
self._list_files(args)
elif command == 'repos':
self._repo_list(args)
else:
raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'install':
self._local_install(args)
elif command == 'files':
self._local_list_files(args)
elif command == 'info':
self._local_info(args)
else:
raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
def _repo(self, args):
'''
Process repo commands
'''
args.pop(0)
command = args[0]
if command == 'list':
self._repo_list(args)
elif command == 'packages':
self._repo_packages(args)
elif command == 'search':
self._repo_packages(args, search=True)
elif command == 'update':
self._download_repo_metadata(args)
elif command == 'create':
self._create_repo(args)
else:
raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages
def _repo_list(self, args):
'''
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
'''
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
self.ui.status(repo)
def _install(self, args):
    '''
    Install a package from a repo

    ``args[1:]`` may be local ``.spm`` files or repo package names.
    Resolves dependencies for everything requested, confirms with the
    user (unless ``assume_yes``), downloads each repo package to the
    cache, then installs everything via _install_indv_pkg().
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    # Local-mode caller/client used later to run FORMULA pre/post states
    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    cache = salt.cache.Cache(self.opts)
    packages = args[1:]
    file_map = {}      # pkg_name -> local .spm path (skips download)
    optional = []
    recommended = []
    to_install = []
    for pkg in packages:
        if pkg.endswith('.spm'):
            if self._pkgfiles_fun('path_exists', pkg):
                # Derive pkg name by dropping "-<version>-<release>" and dirs
                comps = pkg.split('-')
                comps = os.path.split('-'.join(comps[:-2]))
                pkg_name = comps[-1]
                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = salt.utils.yaml.safe_load(formula_ref)
                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(
                    pkg_name=pkg_name,
                    pkg_file=pkg,
                    formula_def=formula_def
                )
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
                formula_tar.close()
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)
    # filter(len, ...) drops empty strings left by dependency parsing
    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))
    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)
    repo_metadata = self._get_repo_metadata()
    dl_list = {}
    for package in to_install:
        if package in file_map:
            # Local file supplied on the command line: install directly
            self._install_indv_pkg(package, file_map[package])
        else:
            # Pick the best candidate across all repos
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        # Check package version, replace if newer version
                        # NOTE(review): versions/releases are compared as
                        # strings (lexicographic), not numerically — confirm
                        # this matches the repo versioning scheme
                        if repo_ver == dl_list[package]['version']:
                            # Version is the same, check release
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                # Version and release are the same, give
                                # preference to local (file://) repos
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        dl_package = True
                    if dl_package is True:
                        # Put together download directory
                        cache_path = os.path.join(
                            self.opts['spm_cache_dir'],
                            repo
                        )
                        # Put together download paths
                        dl_url = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = os.path.join(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )
                        dl_list[package] = {
                            'version': repo_ver,
                            'release': repo_rel,
                            'source': dl_url,
                            'dest_dir': cache_path,
                            'dest_file': out_file,
                        }
    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']
        # Make sure download directory exists
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)
        # Download the package
        if dl_url.startswith('file://'):
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            # NOTE(review): repo_info here is whatever the last iteration of
            # the selection loop above left bound, which may not be the repo
            # this package is downloaded from — verify auth info is correct
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, repo_info['info']))
    # First we download everything, then we install
    for package in dl_list:
        out_file = dl_list[package]['dest_file']
        # Kick off the install
        self._install_indv_pkg(package, out_file)
    return
def _local_install(self, args, pkg_name=None):
'''
Install a package from a file
'''
if len(args) < 2:
raise SPMInvocationError('A package file must be specified')
self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
    '''
    Starting with one package, check all packages for dependencies

    Returns a 3-tuple of lists:
    (packages to install, optional deps, recommended deps).
    Raises SPMInvocationError when the formula can't be found and
    SPMPackageError when the package is already installed (without
    ``force``) or required deps are unavailable.
    '''
    if pkg_file and not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
    self.repo_metadata = self._get_repo_metadata()
    if not formula_def:
        # No FORMULA supplied: look the package up in the repo metadata
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            if pkg_name in self.repo_metadata[repo]['packages']:
                formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
    if not formula_def:
        raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
    # Check to see if the package is already installed
    pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    pkgs_to_install = []
    if pkg_info is None or self.opts['force']:
        pkgs_to_install.append(pkg_name)
    elif pkg_info is not None and not self.opts['force']:
        raise SPMPackageError(
            'Package {0} already installed, not installing again'.format(formula_def['name'])
        )
    optional_install = []
    recommended_install = []
    if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
        # Build an index of every package available in any repo
        self.avail_pkgs = {}
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            for pkg in self.repo_metadata[repo]['packages']:
                self.avail_pkgs[pkg] = repo
        needs, unavail, optional, recommended = self._resolve_deps(formula_def)
        if unavail:
            raise SPMPackageError(
                'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                    formula_def['name'], '\n'.join(unavail))
            )
        if optional:
            optional_install.extend(optional)
            for dep_pkg in optional:
                # NOTE(review): queries the parent package's info rather
                # than dep_pkg — looks suspicious; confirm intent
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                optional_install.append(msg)
        if recommended:
            recommended_install.extend(recommended)
            for dep_pkg in recommended:
                # NOTE(review): same parent-vs-dep query concern as above
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                recommended_install.append(msg)
        if needs:
            pkgs_to_install.extend(needs)
            for dep_pkg in needs:
                # NOTE(review): msg is computed here but never used —
                # possibly leftover code
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
    return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package from a local .spm file.

    Registers the package and each extracted file in the package DB,
    installs the files through the configured file-store provider, and
    runs any pre/post local or targeted states declared in the FORMULA.

    :param pkg_name: name of the package being installed
    :param pkg_file: path to the .spm tarball
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)
    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
    pkg_files = formula_tar.getmembers()
    # First pass: check for files that already exist
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )
    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        log.debug('Executing pre_local_state script')
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        ret = self.caller.cmd('state.high', data=high_data)
    # Run the pre_tgt_state script on its targeted minions, if present
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        tgt = formula_def['pre_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            # Was previously misspelled 'timout', which sent the value as a
            # stray job kwarg instead of bounding the job wait time
            timeout=self.opts['timeout'],
            data=high_data,
        )
    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]
    # Second pass: install the files
    for member in pkg_files:
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname
        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                # Directories get no content hash
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)
    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    # Run the post_tgt_state script on its targeted minions, if present
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        tgt = formula_def['post_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            # Same 'timout' typo fixed here
            timeout=self.opts['timeout'],
            data=high_data,
        )
    formula_tar.close()
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies

    Recursively walks the comma-separated ``dependencies`` of
    *formula_def* and returns a 4-tuple:
    (resolvable deps {name: repo}, unresolvable dep names,
    optional dep names, recommended dep names).
    '''
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}
    can_has = {}
    cant_has = []
    # Normalize a YAML null dependencies entry to an empty string
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Already installed: nothing to resolve for this dep
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue
        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)
    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')
    # Breadth-style walk over resolvable deps, recursing into each one's
    # own formula to pick up transitive dependencies
    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]
        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)
        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})
        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))
    return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided in
    the callback to them

    Reads the main ``spm_repos_config`` file (if present) plus every
    ``*.repo`` file under the matching ``.d`` directory. For each enabled
    repo entry (optionally filtered to *repo_name*), invokes
    ``callback(repo, repo_config)``.
    '''
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            # Store the full path so it can be opened directly below.
            # Previously only the bare filename was stored and every entry
            # was re-joined as '<config>.d/<entry>', which mangled the main
            # config entry (already a full path) and broke .repo files in
            # nested subdirectories.
            repo_files.append(os.path.join(dirpath, repo_file))
    for repo_path in repo_files:
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
        for repo in repo_data:
            if repo_data[repo].get('enabled', True) is False:
                continue
            if repo_name is not None and repo != repo_name:
                continue
            callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http

    Fetches *dl_path* (with basic auth when *repo_info* carries a
    username/password pair). SPM-METADATA responses are YAML-parsed;
    anything else is returned as text. Errors are reported through the
    UI and result in a None return rather than an exception.
    '''
    query = None
    response = None
    try:
        if 'username' in repo_info:
            try:
                if 'password' in repo_info:
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    # A username without a password is a config error
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
    try:
        if query:
            if 'SPM-METADATA' in dl_path:
                # Metadata files are YAML; parse before returning
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
    return response
def _download_repo_metadata(self, args):
    '''
    Connect to all repos and download metadata

    ``args[1]``, when present, restricts the update to a single repo.
    Each repo's SPM-METADATA file is fetched (from disk for file://
    repos, over HTTP otherwise) and stored in the SPM cache.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    def _update_metadata(repo, repo_info):
        # Per-repo callback invoked by _traverse_repos()
        dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
        if dl_path.startswith('file://'):
            dl_path = dl_path.replace('file://', '')
            with salt.utils.files.fopen(dl_path, 'r') as rpm:
                metadata = salt.utils.yaml.safe_load(rpm)
        else:
            metadata = self._query_http(dl_path, repo_info)
        cache.store('.', repo, metadata)
    repo_name = args[1] if len(args) > 1 else None
    self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata

    Builds a dict of ``{repo: {'info': repo_config, 'packages': ...}}``
    from the SPM cache, triggering a metadata download for any repo
    whose cache entry is missing or stale.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    metadata = {}
    def _read_metadata(repo, repo_info):
        # Per-repo callback invoked by _traverse_repos()
        if cache.updated('.', repo) is None:
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})
        metadata[repo] = {
            'info': repo_info,
            'packages': cache.fetch('.', repo),
        }
    self._traverse_repos(_read_metadata)
    return metadata
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    When two builds of the same package are found, only the newest
    version/release is indexed; the older file is ignored, archived or
    deleted according to the ``spm_repo_dups`` option.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')
    if args[1] == '.':
        # os.getcwdu() exists only on Python 2 and raises AttributeError
        # on Python 3; os.getcwd() is the portable spelling
        repo_path = os.getcwd()
    else:
        repo_path = args[1]
    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                # NOTE(review): int() here assumes purely numeric
                # version/release strings — confirm against repo policy
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False
                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)
            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file
    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )
    log.debug('Wrote %s', metadata_filename)
    # Dispose of superseded package files per spm_repo_dups
    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove a package

    ``args[1:]`` are package names. Raises SPMInvocationError when no
    package is given or a package is not installed, and
    SPMDatabaseError when the package DB is missing.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)
    for package in packages:
        self.ui.status('... removing {0}'.format(package))
        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))
        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                # Directories are handled in a second pass below
                dirs.append(filerow[0])
                continue
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                # Unmodified since install: safe to delete from disk
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                # Modified since install: leave on disk, but still drop
                # the registration from the package DB below
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
        # Clean up directories; reverse-sort removes children before parents
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
'''
Display verbose information
'''
if self.opts.get('verbose', False) is True:
self.ui.status(msg)
level(msg)
def _local_info(self, args):
    '''
    Display metadata for a local package file (``spm local info <file>``).
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')
    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
    # Derive the package name: strip the trailing "-<version>-<release>"
    # and any leading directory components
    name = '-'.join(pkg_file.split('-')[:-2]).split('/')[-1]
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_def = salt.utils.yaml.safe_load(
        formula_tar.extractfile('{0}/FORMULA'.format(name))
    )
    self.ui.status(self._get_info(formula_def))
    formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
    def _list_packages(self, args):
        '''
        List the packages currently recorded in the local package database.

        In verbose mode each database row is printed in full (comma-joined);
        otherwise only the package name (first column) is shown.
        '''
        packages = self._pkgdb_fun('list_packages', self.db_conn)
        for package in packages:
            if self.opts['verbose']:
                status_msg = ','.join(package)
            else:
                status_msg = package[0]
            self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
'''
Build a package
'''
if len(args) < 2:
raise SPMInvocationError('A path to a formula must be specified')
self.abspath = args[1].rstrip('/')
comps = self.abspath.split('/')
self.relpath = comps[-1]
formula_path = '{0}/FORMULA'.format(self.abspath)
if not os.path.exists(formula_path):
raise SPMPackageError('Formula file {0} not found'.format(formula_path))
with salt.utils.files.fopen(formula_path) as fp_:
formula_conf = salt.utils.yaml.safe_load(fp_)
for field in ('name', 'version', 'release', 'summary', 'description'):
if field not in formula_conf:
raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
out_path = '{0}/{1}-{2}-{3}.spm'.format(
self.opts['spm_build_dir'],
formula_conf['name'],
formula_conf['version'],
formula_conf['release'],
)
if not os.path.exists(self.opts['spm_build_dir']):
os.mkdir(self.opts['spm_build_dir'])
self.formula_conf = formula_conf
formula_tar = tarfile.open(out_path, 'w:bz2')
if 'files' in formula_conf:
# This allows files to be added to the SPM file in a specific order.
# It also allows for files to be tagged as a certain type, as with
# RPM files. This tag is ignored here, but is used when installing
# the SPM file.
if isinstance(formula_conf['files'], list):
formula_dir = tarfile.TarInfo(formula_conf['name'])
formula_dir.type = tarfile.DIRTYPE
formula_tar.addfile(formula_dir)
for file_ in formula_conf['files']:
for ftype in FILE_TYPES:
if file_.startswith('{0}|'.format(ftype)):
file_ = file_.lstrip('{0}|'.format(ftype))
formula_tar.add(
os.path.join(os.getcwd(), file_),
os.path.join(formula_conf['name'], file_),
)
else:
# If no files are specified, then the whole directory will be added.
try:
formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
except TypeError:
formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
formula_tar.close()
self.ui.status('Built package {0}'.format(out_path))
    def _exclude(self, member):
        '''
        Tar ``filter``/``exclude`` callback used by ``_build``.

        Returns ``None`` to drop *member* from the archive, or the member
        itself to keep it. A member is dropped when its path falls under any
        entry of the ``spm_build_exclude`` option, relative either to the
        package name or to the formula's absolute path.
        '''
        # Legacy tarfile ``exclude=`` callbacks receive the path as a plain
        # string rather than a TarInfo.
        # NOTE(review): under the ``exclude=`` API a falsey return means
        # *keep*, so returning None here never excludes anything in that
        # mode — confirm this is the intended behavior.
        if isinstance(member, string_types):
            return None
        for item in self.opts['spm_build_exclude']:
            if member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], item)):
                return None
            elif member.name.startswith('{0}/{1}'.format(self.abspath, item)):
                return None
        return member
    def _render(self, data, formula_def):
        '''
        Render a [pre|post]_local_state or [pre|post]_tgt_state script

        *data* is the raw template text from the FORMULA; *formula_def*
        supplies the template variables, augmented with a copy of the client
        opts as ``opts``. Returns the structure produced by
        ``compile_template`` for the chosen renderer pipeline.
        '''
        # FORMULA can contain a renderer option
        renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
        rend = salt.loader.render(self.opts, {})
        blacklist = self.opts.get('renderer_blacklist')
        whitelist = self.opts.get('renderer_whitelist')
        template_vars = formula_def.copy()
        # Expose a copy of the client configuration to the template.
        template_vars['opts'] = self.opts.copy()
        return compile_template(
            ':string:',
            rend,
            renderer,
            blacklist,
            whitelist,
            input_data=data,
            **template_vars
        )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._local_list_files
|
python
|
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
|
List files for a package file
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L967-L981
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None): # pylint: disable=W0231
self.ui = ui
if not opts:
opts = salt.config.spm_config(
os.path.join(syspaths.CONFIG_DIR, 'spm')
)
self.opts = opts
self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
self.files_prov = self.opts.get('spm_files_provider', 'local')
self._prep_pkgdb()
self._prep_pkgfiles()
self.db_conn = None
self.files_conn = None
self._init()
def _prep_pkgdb(self):
self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
if not self.db_conn:
self.db_conn = self._pkgdb_fun('init')
if not self.files_conn:
self.files_conn = self._pkgfiles_fun('init')
def _close(self):
if self.db_conn:
self.db_conn.close()
def run(self, args):
'''
Run the SPM command
'''
command = args[0]
try:
if command == 'install':
self._install(args)
elif command == 'local':
self._local(args)
elif command == 'repo':
self._repo(args)
elif command == 'remove':
self._remove(args)
elif command == 'build':
self._build(args)
elif command == 'update_repo':
self._download_repo_metadata(args)
elif command == 'create_repo':
self._create_repo(args)
elif command == 'files':
self._list_files(args)
elif command == 'info':
self._info(args)
elif command == 'list':
self._list(args)
elif command == 'close':
self._close()
else:
raise SPMInvocationError('Invalid command \'{0}\''.format(command))
except SPMException as exc:
self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args)
elif command == 'files':
self._list_files(args)
elif command == 'repos':
self._repo_list(args)
else:
raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'install':
self._local_install(args)
elif command == 'files':
self._local_list_files(args)
elif command == 'info':
self._local_info(args)
else:
raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
def _repo(self, args):
'''
Process repo commands
'''
args.pop(0)
command = args[0]
if command == 'list':
self._repo_list(args)
elif command == 'packages':
self._repo_packages(args)
elif command == 'search':
self._repo_packages(args, search=True)
elif command == 'update':
self._download_repo_metadata(args)
elif command == 'create':
self._create_repo(args)
else:
raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages
def _repo_list(self, args):
'''
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
'''
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
self.ui.status(repo)
def _install(self, args):
'''
Install a package from a repo
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
caller_opts = self.opts.copy()
caller_opts['file_client'] = 'local'
self.caller = salt.client.Caller(mopts=caller_opts)
self.client = salt.client.get_local_client(self.opts['conf_file'])
cache = salt.cache.Cache(self.opts)
packages = args[1:]
file_map = {}
optional = []
recommended = []
to_install = []
for pkg in packages:
if pkg.endswith('.spm'):
if self._pkgfiles_fun('path_exists', pkg):
comps = pkg.split('-')
comps = os.path.split('-'.join(comps[:-2]))
pkg_name = comps[-1]
formula_tar = tarfile.open(pkg, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
file_map[pkg_name] = pkg
to_, op_, re_ = self._check_all_deps(
pkg_name=pkg_name,
pkg_file=pkg,
formula_def=formula_def
)
to_install.extend(to_)
optional.extend(op_)
recommended.extend(re_)
formula_tar.close()
else:
raise SPMInvocationError('Package file {0} not found'.format(pkg))
else:
to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
to_install.extend(to_)
optional.extend(op_)
recommended.extend(re_)
optional = set(filter(len, optional))
if optional:
self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
'\n\t'.join(optional)
))
recommended = set(filter(len, recommended))
if recommended:
self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
'\n\t'.join(recommended)
))
to_install = set(filter(len, to_install))
msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
if not self.opts['assume_yes']:
self.ui.confirm(msg)
repo_metadata = self._get_repo_metadata()
dl_list = {}
for package in to_install:
if package in file_map:
self._install_indv_pkg(package, file_map[package])
else:
for repo in repo_metadata:
repo_info = repo_metadata[repo]
if package in repo_info['packages']:
dl_package = False
repo_ver = repo_info['packages'][package]['info']['version']
repo_rel = repo_info['packages'][package]['info']['release']
repo_url = repo_info['info']['url']
if package in dl_list:
# Check package version, replace if newer version
if repo_ver == dl_list[package]['version']:
# Version is the same, check release
if repo_rel > dl_list[package]['release']:
dl_package = True
elif repo_rel == dl_list[package]['release']:
# Version and release are the same, give
# preference to local (file://) repos
if dl_list[package]['source'].startswith('file://'):
if not repo_url.startswith('file://'):
dl_package = True
elif repo_ver > dl_list[package]['version']:
dl_package = True
else:
dl_package = True
if dl_package is True:
# Put together download directory
cache_path = os.path.join(
self.opts['spm_cache_dir'],
repo
)
# Put together download paths
dl_url = '{0}/{1}'.format(
repo_info['info']['url'],
repo_info['packages'][package]['filename']
)
out_file = os.path.join(
cache_path,
repo_info['packages'][package]['filename']
)
dl_list[package] = {
'version': repo_ver,
'release': repo_rel,
'source': dl_url,
'dest_dir': cache_path,
'dest_file': out_file,
}
for package in dl_list:
dl_url = dl_list[package]['source']
cache_path = dl_list[package]['dest_dir']
out_file = dl_list[package]['dest_file']
# Make sure download directory exists
if not os.path.exists(cache_path):
os.makedirs(cache_path)
# Download the package
if dl_url.startswith('file://'):
dl_url = dl_url.replace('file://', '')
shutil.copyfile(dl_url, out_file)
else:
with salt.utils.files.fopen(out_file, 'w') as outf:
outf.write(self._query_http(dl_url, repo_info['info']))
# First we download everything, then we install
for package in dl_list:
out_file = dl_list[package]['dest_file']
# Kick off the install
self._install_indv_pkg(package, out_file)
return
def _local_install(self, args, pkg_name=None):
'''
Install a package from a file
'''
if len(args) < 2:
raise SPMInvocationError('A package file must be specified')
self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
'''
Starting with one package, check all packages for dependencies
'''
if pkg_file and not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
self.repo_metadata = self._get_repo_metadata()
if not formula_def:
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
if pkg_name in self.repo_metadata[repo]['packages']:
formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
if not formula_def:
raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
# Check to see if the package is already installed
pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
pkgs_to_install = []
if pkg_info is None or self.opts['force']:
pkgs_to_install.append(pkg_name)
elif pkg_info is not None and not self.opts['force']:
raise SPMPackageError(
'Package {0} already installed, not installing again'.format(formula_def['name'])
)
optional_install = []
recommended_install = []
if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
self.avail_pkgs = {}
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
for pkg in self.repo_metadata[repo]['packages']:
self.avail_pkgs[pkg] = repo
needs, unavail, optional, recommended = self._resolve_deps(formula_def)
if unavail:
raise SPMPackageError(
'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
formula_def['name'], '\n'.join(unavail))
)
if optional:
optional_install.extend(optional)
for dep_pkg in optional:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
optional_install.append(msg)
if recommended:
recommended_install.extend(recommended)
for dep_pkg in recommended:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
recommended_install.append(msg)
if needs:
pkgs_to_install.extend(needs)
for dep_pkg in needs:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
'''
Install one individual package
'''
self.ui.status('... installing {0}'.format(pkg_name))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
for field in ('version', 'release', 'summary', 'description'):
if field not in formula_def:
raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
pkg_files = formula_tar.getmembers()
# First pass: check for files that already exist
existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
if existing_files and not self.opts['force']:
raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
pkg_name, '\n'.join(existing_files))
)
# We've decided to install
self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
# Run the pre_local_state script, if present
if 'pre_local_state' in formula_def:
high_data = self._render(formula_def['pre_local_state'], formula_def)
ret = self.caller.cmd('state.high', data=high_data)
if 'pre_tgt_state' in formula_def:
log.debug('Executing pre_tgt_state script')
high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
tgt = formula_def['pre_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['pre_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
# No defaults for this in config.py; default to the current running
# user and group
if salt.utils.platform.is_windows():
uname = gname = salt.utils.win_functions.get_current_user()
uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
uid = self.opts.get('spm_uid', uname_sid)
gid = self.opts.get('spm_gid', uname_sid)
else:
uid = self.opts.get('spm_uid', os.getuid())
gid = self.opts.get('spm_gid', os.getgid())
uname = pwd.getpwuid(uid)[0]
gname = grp.getgrgid(gid)[0]
# Second pass: install the files
for member in pkg_files:
member.uid = uid
member.gid = gid
member.uname = uname
member.gname = gname
out_path = self._pkgfiles_fun('install_file',
pkg_name,
formula_tar,
member,
formula_def,
self.files_conn)
if out_path is not False:
if member.isdir():
digest = ''
else:
self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
file_hash = hashlib.sha1()
digest = self._pkgfiles_fun('hash_file',
os.path.join(out_path, member.name),
file_hash,
self.files_conn)
self._pkgdb_fun('register_file',
pkg_name,
member,
out_path,
digest,
self.db_conn)
# Run the post_local_state script, if present
if 'post_local_state' in formula_def:
log.debug('Executing post_local_state script')
high_data = self._render(formula_def['post_local_state'], formula_def)
self.caller.cmd('state.high', data=high_data)
if 'post_tgt_state' in formula_def:
log.debug('Executing post_tgt_state script')
high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
tgt = formula_def['post_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['post_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
formula_tar.close()
def _resolve_deps(self, formula_def):
'''
Return a list of packages which need to be installed, to resolve all
dependencies
'''
pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
if not isinstance(pkg_info, dict):
pkg_info = {}
can_has = {}
cant_has = []
if 'dependencies' in formula_def and formula_def['dependencies'] is None:
formula_def['dependencies'] = ''
for dep in formula_def.get('dependencies', '').split(','):
dep = dep.strip()
if not dep:
continue
if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
continue
if dep in self.avail_pkgs:
can_has[dep] = self.avail_pkgs[dep]
else:
cant_has.append(dep)
optional = formula_def.get('optional', '').split(',')
recommended = formula_def.get('recommended', '').split(',')
inspected = []
to_inspect = can_has.copy()
while to_inspect:
dep = next(six.iterkeys(to_inspect))
del to_inspect[dep]
# Don't try to resolve the same package more than once
if dep in inspected:
continue
inspected.append(dep)
repo_contents = self.repo_metadata.get(can_has[dep], {})
repo_packages = repo_contents.get('packages', {})
dep_formula = repo_packages.get(dep, {}).get('info', {})
also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
can_has.update(also_can)
cant_has = sorted(set(cant_has + also_cant))
optional = sorted(set(optional + opt_dep))
recommended = sorted(set(recommended + rec_dep))
return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
'''
Traverse through all repo files and apply the functionality provided in
the callback to them
'''
repo_files = []
if os.path.exists(self.opts['spm_repos_config']):
repo_files.append(self.opts['spm_repos_config'])
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
for repo_file in filenames:
if not repo_file.endswith('.repo'):
continue
repo_files.append(repo_file)
for repo_file in repo_files:
repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
with salt.utils.files.fopen(repo_path) as rph:
repo_data = salt.utils.yaml.safe_load(rph)
for repo in repo_data:
if repo_data[repo].get('enabled', True) is False:
continue
if repo_name is not None and repo != repo_name:
continue
callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
'''
Download files via http
'''
query = None
response = None
try:
if 'username' in repo_info:
try:
if 'password' in repo_info:
query = http.query(
dl_path, text=True,
username=repo_info['username'],
password=repo_info['password']
)
else:
raise SPMException('Auth defined, but password is not set for username: \'{0}\''
.format(repo_info['username']))
except SPMException as exc:
self.ui.error(six.text_type(exc))
else:
query = http.query(dl_path, text=True)
except SPMException as exc:
self.ui.error(six.text_type(exc))
try:
if query:
if 'SPM-METADATA' in dl_path:
response = salt.utils.yaml.safe_load(query.get('text', '{}'))
else:
response = query.get('text')
else:
raise SPMException('Response is empty, please check for Errors above.')
except SPMException as exc:
self.ui.error(six.text_type(exc))
return response
def _download_repo_metadata(self, args):
'''
Connect to all repos and download metadata
'''
cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
def _update_metadata(repo, repo_info):
dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
if dl_path.startswith('file://'):
dl_path = dl_path.replace('file://', '')
with salt.utils.files.fopen(dl_path, 'r') as rpm:
metadata = salt.utils.yaml.safe_load(rpm)
else:
metadata = self._query_http(dl_path, repo_info)
cache.store('.', repo, metadata)
repo_name = args[1] if len(args) > 1 else None
self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
'''
Return cached repo metadata
'''
cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
metadata = {}
def _read_metadata(repo, repo_info):
if cache.updated('.', repo) is None:
log.warning('Updating repo metadata')
self._download_repo_metadata({})
metadata[repo] = {
'info': repo_info,
'packages': cache.fetch('.', repo),
}
self._traverse_repos(_read_metadata)
return metadata
def _create_repo(self, args):
'''
Scan a directory and create an SPM-METADATA file which describes
all of the SPM files in that directory.
'''
if len(args) < 2:
raise SPMInvocationError('A path to a directory must be specified')
if args[1] == '.':
repo_path = os.getcwdu()
else:
repo_path = args[1]
old_files = []
repo_metadata = {}
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
for spm_file in filenames:
if not spm_file.endswith('.spm'):
continue
spm_path = '{0}/{1}'.format(repo_path, spm_file)
if not tarfile.is_tarfile(spm_path):
continue
comps = spm_file.split('-')
spm_name = '-'.join(comps[:-2])
spm_fh = tarfile.open(spm_path, 'r:bz2')
formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
use_formula = True
if spm_name in repo_metadata:
# This package is already in the repo; use the latest
cur_info = repo_metadata[spm_name]['info']
new_info = formula_conf
if int(new_info['version']) == int(cur_info['version']):
# Version is the same, check release
if int(new_info['release']) < int(cur_info['release']):
# This is an old release; don't use it
use_formula = False
elif int(new_info['version']) < int(cur_info['version']):
# This is an old version; don't use it
use_formula = False
if use_formula is True:
# Ignore/archive/delete the old version
log.debug(
'%s %s-%s had been added, but %s-%s will replace it',
spm_name, cur_info['version'], cur_info['release'],
new_info['version'], new_info['release']
)
old_files.append(repo_metadata[spm_name]['filename'])
else:
# Ignore/archive/delete the new version
log.debug(
'%s %s-%s has been found, but is older than %s-%s',
spm_name, new_info['version'], new_info['release'],
cur_info['version'], cur_info['release']
)
old_files.append(spm_file)
if use_formula is True:
log.debug(
'adding %s-%s-%s to the repo',
formula_conf['name'], formula_conf['version'],
formula_conf['release']
)
repo_metadata[spm_name] = {
'info': formula_conf.copy(),
}
repo_metadata[spm_name]['filename'] = spm_file
metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
salt.utils.yaml.safe_dump(
repo_metadata,
mfh,
indent=4,
canonical=False,
default_flow_style=False,
)
log.debug('Wrote %s', metadata_filename)
for file_ in old_files:
if self.opts['spm_repo_dups'] == 'ignore':
# ignore old packages, but still only add the latest
log.debug('%s will be left in the directory', file_)
elif self.opts['spm_repo_dups'] == 'archive':
# spm_repo_archive_path is where old packages are moved
if not os.path.exists('./archive'):
try:
os.makedirs('./archive')
log.debug('%s has been archived', file_)
except IOError:
log.error('Unable to create archive directory')
try:
shutil.move(file_, './archive')
except (IOError, OSError):
log.error('Unable to archive %s', file_)
elif self.opts['spm_repo_dups'] == 'delete':
# delete old packages from the repo
try:
os.remove(file_)
log.debug('%s has been deleted', file_)
except IOError:
log.error('Unable to delete %s', file_)
except OSError:
# The file has already been deleted
pass
def _remove(self, args):
'''
Remove a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
packages = args[1:]
msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))
if not self.opts['assume_yes']:
self.ui.confirm(msg)
for package in packages:
self.ui.status('... removing {0}'.format(package))
if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
# Look at local repo index
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMInvocationError('Package {0} not installed'.format(package))
# Find files that have not changed and remove them
files = self._pkgdb_fun('list_files', package, self.db_conn)
dirs = []
for filerow in files:
if self._pkgfiles_fun('path_isdir', filerow[0]):
dirs.append(filerow[0])
continue
file_hash = hashlib.sha1()
digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
if filerow[1] == digest:
self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
else:
self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
# Clean up directories
for dir_ in sorted(dirs, reverse=True):
self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
try:
self._verbose('Removing directory {0}'.format(dir_), log.trace)
os.rmdir(dir_)
except OSError:
# Leave directories in place that still have files in them
self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
'''
Display verbose information
'''
if self.opts.get('verbose', False) is True:
self.ui.status(msg)
level(msg)
def _local_info(self, args):
'''
List info for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
comps = pkg_file.split('-')
comps = '-'.join(comps[:-2]).split('/')
name = comps[-1]
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
self.ui.status(self._get_info(formula_def))
formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
    def _list_packages(self, args):
        '''
        List the packages currently recorded in the local package database.

        In verbose mode each database row is printed in full (comma-joined);
        otherwise only the package name (first column) is shown.
        '''
        packages = self._pkgdb_fun('list_packages', self.db_conn)
        for package in packages:
            if self.opts['verbose']:
                status_msg = ','.join(package)
            else:
                status_msg = package[0]
            self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
'''
Build a package
'''
if len(args) < 2:
raise SPMInvocationError('A path to a formula must be specified')
self.abspath = args[1].rstrip('/')
comps = self.abspath.split('/')
self.relpath = comps[-1]
formula_path = '{0}/FORMULA'.format(self.abspath)
if not os.path.exists(formula_path):
raise SPMPackageError('Formula file {0} not found'.format(formula_path))
with salt.utils.files.fopen(formula_path) as fp_:
formula_conf = salt.utils.yaml.safe_load(fp_)
for field in ('name', 'version', 'release', 'summary', 'description'):
if field not in formula_conf:
raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
out_path = '{0}/{1}-{2}-{3}.spm'.format(
self.opts['spm_build_dir'],
formula_conf['name'],
formula_conf['version'],
formula_conf['release'],
)
if not os.path.exists(self.opts['spm_build_dir']):
os.mkdir(self.opts['spm_build_dir'])
self.formula_conf = formula_conf
formula_tar = tarfile.open(out_path, 'w:bz2')
if 'files' in formula_conf:
# This allows files to be added to the SPM file in a specific order.
# It also allows for files to be tagged as a certain type, as with
# RPM files. This tag is ignored here, but is used when installing
# the SPM file.
if isinstance(formula_conf['files'], list):
formula_dir = tarfile.TarInfo(formula_conf['name'])
formula_dir.type = tarfile.DIRTYPE
formula_tar.addfile(formula_dir)
for file_ in formula_conf['files']:
for ftype in FILE_TYPES:
if file_.startswith('{0}|'.format(ftype)):
file_ = file_.lstrip('{0}|'.format(ftype))
formula_tar.add(
os.path.join(os.getcwd(), file_),
os.path.join(formula_conf['name'], file_),
)
else:
# If no files are specified, then the whole directory will be added.
try:
formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
except TypeError:
formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
formula_tar.close()
self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
'''
Exclude based on opts
'''
if isinstance(member, string_types):
return None
for item in self.opts['spm_build_exclude']:
if member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], item)):
return None
elif member.name.startswith('{0}/{1}'.format(self.abspath, item)):
return None
return member
def _render(self, data, formula_def):
'''
Render a [pre|post]_local_state or [pre|post]_tgt_state script
'''
# FORMULA can contain a renderer option
renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
rend = salt.loader.render(self.opts, {})
blacklist = self.opts.get('renderer_blacklist')
whitelist = self.opts.get('renderer_whitelist')
template_vars = formula_def.copy()
template_vars['opts'] = self.opts.copy()
return compile_template(
':string:',
rend,
renderer,
blacklist,
whitelist,
input_data=data,
**template_vars
)
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._list_packages
|
python
|
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
|
List files for an installed package
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L983-L993
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None): # pylint: disable=W0231
self.ui = ui
if not opts:
opts = salt.config.spm_config(
os.path.join(syspaths.CONFIG_DIR, 'spm')
)
self.opts = opts
self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
self.files_prov = self.opts.get('spm_files_provider', 'local')
self._prep_pkgdb()
self._prep_pkgfiles()
self.db_conn = None
self.files_conn = None
self._init()
def _prep_pkgdb(self):
self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
if not self.db_conn:
self.db_conn = self._pkgdb_fun('init')
if not self.files_conn:
self.files_conn = self._pkgfiles_fun('init')
def _close(self):
if self.db_conn:
self.db_conn.close()
def run(self, args):
'''
Run the SPM command
'''
command = args[0]
try:
if command == 'install':
self._install(args)
elif command == 'local':
self._local(args)
elif command == 'repo':
self._repo(args)
elif command == 'remove':
self._remove(args)
elif command == 'build':
self._build(args)
elif command == 'update_repo':
self._download_repo_metadata(args)
elif command == 'create_repo':
self._create_repo(args)
elif command == 'files':
self._list_files(args)
elif command == 'info':
self._info(args)
elif command == 'list':
self._list(args)
elif command == 'close':
self._close()
else:
raise SPMInvocationError('Invalid command \'{0}\''.format(command))
except SPMException as exc:
self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args)
elif command == 'files':
self._list_files(args)
elif command == 'repos':
self._repo_list(args)
else:
raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'install':
self._local_install(args)
elif command == 'files':
self._local_list_files(args)
elif command == 'info':
self._local_info(args)
else:
raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
def _repo(self, args):
'''
Process repo commands
'''
args.pop(0)
command = args[0]
if command == 'list':
self._repo_list(args)
elif command == 'packages':
self._repo_packages(args)
elif command == 'search':
self._repo_packages(args, search=True)
elif command == 'update':
self._download_repo_metadata(args)
elif command == 'create':
self._create_repo(args)
else:
raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages
def _repo_list(self, args):
'''
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
'''
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
self.ui.status(repo)
def _install(self, args):
'''
Install a package from a repo
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
caller_opts = self.opts.copy()
caller_opts['file_client'] = 'local'
self.caller = salt.client.Caller(mopts=caller_opts)
self.client = salt.client.get_local_client(self.opts['conf_file'])
cache = salt.cache.Cache(self.opts)
packages = args[1:]
file_map = {}
optional = []
recommended = []
to_install = []
for pkg in packages:
if pkg.endswith('.spm'):
if self._pkgfiles_fun('path_exists', pkg):
comps = pkg.split('-')
comps = os.path.split('-'.join(comps[:-2]))
pkg_name = comps[-1]
formula_tar = tarfile.open(pkg, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
file_map[pkg_name] = pkg
to_, op_, re_ = self._check_all_deps(
pkg_name=pkg_name,
pkg_file=pkg,
formula_def=formula_def
)
to_install.extend(to_)
optional.extend(op_)
recommended.extend(re_)
formula_tar.close()
else:
raise SPMInvocationError('Package file {0} not found'.format(pkg))
else:
to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
to_install.extend(to_)
optional.extend(op_)
recommended.extend(re_)
optional = set(filter(len, optional))
if optional:
self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
'\n\t'.join(optional)
))
recommended = set(filter(len, recommended))
if recommended:
self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
'\n\t'.join(recommended)
))
to_install = set(filter(len, to_install))
msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
if not self.opts['assume_yes']:
self.ui.confirm(msg)
repo_metadata = self._get_repo_metadata()
dl_list = {}
for package in to_install:
if package in file_map:
self._install_indv_pkg(package, file_map[package])
else:
for repo in repo_metadata:
repo_info = repo_metadata[repo]
if package in repo_info['packages']:
dl_package = False
repo_ver = repo_info['packages'][package]['info']['version']
repo_rel = repo_info['packages'][package]['info']['release']
repo_url = repo_info['info']['url']
if package in dl_list:
# Check package version, replace if newer version
if repo_ver == dl_list[package]['version']:
# Version is the same, check release
if repo_rel > dl_list[package]['release']:
dl_package = True
elif repo_rel == dl_list[package]['release']:
# Version and release are the same, give
# preference to local (file://) repos
if dl_list[package]['source'].startswith('file://'):
if not repo_url.startswith('file://'):
dl_package = True
elif repo_ver > dl_list[package]['version']:
dl_package = True
else:
dl_package = True
if dl_package is True:
# Put together download directory
cache_path = os.path.join(
self.opts['spm_cache_dir'],
repo
)
# Put together download paths
dl_url = '{0}/{1}'.format(
repo_info['info']['url'],
repo_info['packages'][package]['filename']
)
out_file = os.path.join(
cache_path,
repo_info['packages'][package]['filename']
)
dl_list[package] = {
'version': repo_ver,
'release': repo_rel,
'source': dl_url,
'dest_dir': cache_path,
'dest_file': out_file,
}
for package in dl_list:
dl_url = dl_list[package]['source']
cache_path = dl_list[package]['dest_dir']
out_file = dl_list[package]['dest_file']
# Make sure download directory exists
if not os.path.exists(cache_path):
os.makedirs(cache_path)
# Download the package
if dl_url.startswith('file://'):
dl_url = dl_url.replace('file://', '')
shutil.copyfile(dl_url, out_file)
else:
with salt.utils.files.fopen(out_file, 'w') as outf:
outf.write(self._query_http(dl_url, repo_info['info']))
# First we download everything, then we install
for package in dl_list:
out_file = dl_list[package]['dest_file']
# Kick off the install
self._install_indv_pkg(package, out_file)
return
def _local_install(self, args, pkg_name=None):
'''
Install a package from a file
'''
if len(args) < 2:
raise SPMInvocationError('A package file must be specified')
self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
'''
Starting with one package, check all packages for dependencies
'''
if pkg_file and not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
self.repo_metadata = self._get_repo_metadata()
if not formula_def:
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
if pkg_name in self.repo_metadata[repo]['packages']:
formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
if not formula_def:
raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
# Check to see if the package is already installed
pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
pkgs_to_install = []
if pkg_info is None or self.opts['force']:
pkgs_to_install.append(pkg_name)
elif pkg_info is not None and not self.opts['force']:
raise SPMPackageError(
'Package {0} already installed, not installing again'.format(formula_def['name'])
)
optional_install = []
recommended_install = []
if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
self.avail_pkgs = {}
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
for pkg in self.repo_metadata[repo]['packages']:
self.avail_pkgs[pkg] = repo
needs, unavail, optional, recommended = self._resolve_deps(formula_def)
if unavail:
raise SPMPackageError(
'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
formula_def['name'], '\n'.join(unavail))
)
if optional:
optional_install.extend(optional)
for dep_pkg in optional:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
optional_install.append(msg)
if recommended:
recommended_install.extend(recommended)
for dep_pkg in recommended:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
recommended_install.append(msg)
if needs:
pkgs_to_install.extend(needs)
for dep_pkg in needs:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
'''
Install one individual package
'''
self.ui.status('... installing {0}'.format(pkg_name))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
for field in ('version', 'release', 'summary', 'description'):
if field not in formula_def:
raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
pkg_files = formula_tar.getmembers()
# First pass: check for files that already exist
existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
if existing_files and not self.opts['force']:
raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
pkg_name, '\n'.join(existing_files))
)
# We've decided to install
self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
# Run the pre_local_state script, if present
if 'pre_local_state' in formula_def:
high_data = self._render(formula_def['pre_local_state'], formula_def)
ret = self.caller.cmd('state.high', data=high_data)
if 'pre_tgt_state' in formula_def:
log.debug('Executing pre_tgt_state script')
high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
tgt = formula_def['pre_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['pre_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
# No defaults for this in config.py; default to the current running
# user and group
if salt.utils.platform.is_windows():
uname = gname = salt.utils.win_functions.get_current_user()
uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
uid = self.opts.get('spm_uid', uname_sid)
gid = self.opts.get('spm_gid', uname_sid)
else:
uid = self.opts.get('spm_uid', os.getuid())
gid = self.opts.get('spm_gid', os.getgid())
uname = pwd.getpwuid(uid)[0]
gname = grp.getgrgid(gid)[0]
# Second pass: install the files
for member in pkg_files:
member.uid = uid
member.gid = gid
member.uname = uname
member.gname = gname
out_path = self._pkgfiles_fun('install_file',
pkg_name,
formula_tar,
member,
formula_def,
self.files_conn)
if out_path is not False:
if member.isdir():
digest = ''
else:
self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
file_hash = hashlib.sha1()
digest = self._pkgfiles_fun('hash_file',
os.path.join(out_path, member.name),
file_hash,
self.files_conn)
self._pkgdb_fun('register_file',
pkg_name,
member,
out_path,
digest,
self.db_conn)
# Run the post_local_state script, if present
if 'post_local_state' in formula_def:
log.debug('Executing post_local_state script')
high_data = self._render(formula_def['post_local_state'], formula_def)
self.caller.cmd('state.high', data=high_data)
if 'post_tgt_state' in formula_def:
log.debug('Executing post_tgt_state script')
high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
tgt = formula_def['post_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['post_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
formula_tar.close()
def _resolve_deps(self, formula_def):
'''
Return a list of packages which need to be installed, to resolve all
dependencies
'''
pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
if not isinstance(pkg_info, dict):
pkg_info = {}
can_has = {}
cant_has = []
if 'dependencies' in formula_def and formula_def['dependencies'] is None:
formula_def['dependencies'] = ''
for dep in formula_def.get('dependencies', '').split(','):
dep = dep.strip()
if not dep:
continue
if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
continue
if dep in self.avail_pkgs:
can_has[dep] = self.avail_pkgs[dep]
else:
cant_has.append(dep)
optional = formula_def.get('optional', '').split(',')
recommended = formula_def.get('recommended', '').split(',')
inspected = []
to_inspect = can_has.copy()
while to_inspect:
dep = next(six.iterkeys(to_inspect))
del to_inspect[dep]
# Don't try to resolve the same package more than once
if dep in inspected:
continue
inspected.append(dep)
repo_contents = self.repo_metadata.get(can_has[dep], {})
repo_packages = repo_contents.get('packages', {})
dep_formula = repo_packages.get(dep, {}).get('info', {})
also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
can_has.update(also_can)
cant_has = sorted(set(cant_has + also_cant))
optional = sorted(set(optional + opt_dep))
recommended = sorted(set(recommended + rec_dep))
return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
'''
Traverse through all repo files and apply the functionality provided in
the callback to them
'''
repo_files = []
if os.path.exists(self.opts['spm_repos_config']):
repo_files.append(self.opts['spm_repos_config'])
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
for repo_file in filenames:
if not repo_file.endswith('.repo'):
continue
repo_files.append(repo_file)
for repo_file in repo_files:
repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
with salt.utils.files.fopen(repo_path) as rph:
repo_data = salt.utils.yaml.safe_load(rph)
for repo in repo_data:
if repo_data[repo].get('enabled', True) is False:
continue
if repo_name is not None and repo != repo_name:
continue
callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
'''
Download files via http
'''
query = None
response = None
try:
if 'username' in repo_info:
try:
if 'password' in repo_info:
query = http.query(
dl_path, text=True,
username=repo_info['username'],
password=repo_info['password']
)
else:
raise SPMException('Auth defined, but password is not set for username: \'{0}\''
.format(repo_info['username']))
except SPMException as exc:
self.ui.error(six.text_type(exc))
else:
query = http.query(dl_path, text=True)
except SPMException as exc:
self.ui.error(six.text_type(exc))
try:
if query:
if 'SPM-METADATA' in dl_path:
response = salt.utils.yaml.safe_load(query.get('text', '{}'))
else:
response = query.get('text')
else:
raise SPMException('Response is empty, please check for Errors above.')
except SPMException as exc:
self.ui.error(six.text_type(exc))
return response
def _download_repo_metadata(self, args):
'''
Connect to all repos and download metadata
'''
cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
def _update_metadata(repo, repo_info):
dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
if dl_path.startswith('file://'):
dl_path = dl_path.replace('file://', '')
with salt.utils.files.fopen(dl_path, 'r') as rpm:
metadata = salt.utils.yaml.safe_load(rpm)
else:
metadata = self._query_http(dl_path, repo_info)
cache.store('.', repo, metadata)
repo_name = args[1] if len(args) > 1 else None
self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
'''
Return cached repo metadata
'''
cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
metadata = {}
def _read_metadata(repo, repo_info):
if cache.updated('.', repo) is None:
log.warning('Updating repo metadata')
self._download_repo_metadata({})
metadata[repo] = {
'info': repo_info,
'packages': cache.fetch('.', repo),
}
self._traverse_repos(_read_metadata)
return metadata
def _create_repo(self, args):
'''
Scan a directory and create an SPM-METADATA file which describes
all of the SPM files in that directory.
'''
if len(args) < 2:
raise SPMInvocationError('A path to a directory must be specified')
if args[1] == '.':
repo_path = os.getcwdu()
else:
repo_path = args[1]
old_files = []
repo_metadata = {}
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
for spm_file in filenames:
if not spm_file.endswith('.spm'):
continue
spm_path = '{0}/{1}'.format(repo_path, spm_file)
if not tarfile.is_tarfile(spm_path):
continue
comps = spm_file.split('-')
spm_name = '-'.join(comps[:-2])
spm_fh = tarfile.open(spm_path, 'r:bz2')
formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
use_formula = True
if spm_name in repo_metadata:
# This package is already in the repo; use the latest
cur_info = repo_metadata[spm_name]['info']
new_info = formula_conf
if int(new_info['version']) == int(cur_info['version']):
# Version is the same, check release
if int(new_info['release']) < int(cur_info['release']):
# This is an old release; don't use it
use_formula = False
elif int(new_info['version']) < int(cur_info['version']):
# This is an old version; don't use it
use_formula = False
if use_formula is True:
# Ignore/archive/delete the old version
log.debug(
'%s %s-%s had been added, but %s-%s will replace it',
spm_name, cur_info['version'], cur_info['release'],
new_info['version'], new_info['release']
)
old_files.append(repo_metadata[spm_name]['filename'])
else:
# Ignore/archive/delete the new version
log.debug(
'%s %s-%s has been found, but is older than %s-%s',
spm_name, new_info['version'], new_info['release'],
cur_info['version'], cur_info['release']
)
old_files.append(spm_file)
if use_formula is True:
log.debug(
'adding %s-%s-%s to the repo',
formula_conf['name'], formula_conf['version'],
formula_conf['release']
)
repo_metadata[spm_name] = {
'info': formula_conf.copy(),
}
repo_metadata[spm_name]['filename'] = spm_file
metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
salt.utils.yaml.safe_dump(
repo_metadata,
mfh,
indent=4,
canonical=False,
default_flow_style=False,
)
log.debug('Wrote %s', metadata_filename)
for file_ in old_files:
if self.opts['spm_repo_dups'] == 'ignore':
# ignore old packages, but still only add the latest
log.debug('%s will be left in the directory', file_)
elif self.opts['spm_repo_dups'] == 'archive':
# spm_repo_archive_path is where old packages are moved
if not os.path.exists('./archive'):
try:
os.makedirs('./archive')
log.debug('%s has been archived', file_)
except IOError:
log.error('Unable to create archive directory')
try:
shutil.move(file_, './archive')
except (IOError, OSError):
log.error('Unable to archive %s', file_)
elif self.opts['spm_repo_dups'] == 'delete':
# delete old packages from the repo
try:
os.remove(file_)
log.debug('%s has been deleted', file_)
except IOError:
log.error('Unable to delete %s', file_)
except OSError:
# The file has already been deleted
pass
def _remove(self, args):
'''
Remove a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
packages = args[1:]
msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))
if not self.opts['assume_yes']:
self.ui.confirm(msg)
for package in packages:
self.ui.status('... removing {0}'.format(package))
if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
# Look at local repo index
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMInvocationError('Package {0} not installed'.format(package))
# Find files that have not changed and remove them
files = self._pkgdb_fun('list_files', package, self.db_conn)
dirs = []
for filerow in files:
if self._pkgfiles_fun('path_isdir', filerow[0]):
dirs.append(filerow[0])
continue
file_hash = hashlib.sha1()
digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
if filerow[1] == digest:
self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
else:
self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
# Clean up directories
for dir_ in sorted(dirs, reverse=True):
self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
try:
self._verbose('Removing directory {0}'.format(dir_), log.trace)
os.rmdir(dir_)
except OSError:
# Leave directories in place that still have files in them
self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
'''
Display verbose information
'''
if self.opts.get('verbose', False) is True:
self.ui.status(msg)
level(msg)
def _local_info(self, args):
'''
List info for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
comps = pkg_file.split('-')
comps = '-'.join(comps[:-2]).split('/')
name = comps[-1]
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
self.ui.status(self._get_info(formula_def))
formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
'''
Build a package
'''
if len(args) < 2:
raise SPMInvocationError('A path to a formula must be specified')
self.abspath = args[1].rstrip('/')
comps = self.abspath.split('/')
self.relpath = comps[-1]
formula_path = '{0}/FORMULA'.format(self.abspath)
if not os.path.exists(formula_path):
raise SPMPackageError('Formula file {0} not found'.format(formula_path))
with salt.utils.files.fopen(formula_path) as fp_:
formula_conf = salt.utils.yaml.safe_load(fp_)
for field in ('name', 'version', 'release', 'summary', 'description'):
if field not in formula_conf:
raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
out_path = '{0}/{1}-{2}-{3}.spm'.format(
self.opts['spm_build_dir'],
formula_conf['name'],
formula_conf['version'],
formula_conf['release'],
)
if not os.path.exists(self.opts['spm_build_dir']):
os.mkdir(self.opts['spm_build_dir'])
self.formula_conf = formula_conf
formula_tar = tarfile.open(out_path, 'w:bz2')
if 'files' in formula_conf:
# This allows files to be added to the SPM file in a specific order.
# It also allows for files to be tagged as a certain type, as with
# RPM files. This tag is ignored here, but is used when installing
# the SPM file.
if isinstance(formula_conf['files'], list):
formula_dir = tarfile.TarInfo(formula_conf['name'])
formula_dir.type = tarfile.DIRTYPE
formula_tar.addfile(formula_dir)
for file_ in formula_conf['files']:
for ftype in FILE_TYPES:
if file_.startswith('{0}|'.format(ftype)):
file_ = file_.lstrip('{0}|'.format(ftype))
formula_tar.add(
os.path.join(os.getcwd(), file_),
os.path.join(formula_conf['name'], file_),
)
else:
# If no files are specified, then the whole directory will be added.
try:
formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
except TypeError:
formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
formula_tar.close()
self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
'''
Exclude based on opts
'''
if isinstance(member, string_types):
return None
for item in self.opts['spm_build_exclude']:
if member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], item)):
return None
elif member.name.startswith('{0}/{1}'.format(self.abspath, item)):
return None
return member
def _render(self, data, formula_def):
'''
Render a [pre|post]_local_state or [pre|post]_tgt_state script
'''
# FORMULA can contain a renderer option
renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
rend = salt.loader.render(self.opts, {})
blacklist = self.opts.get('renderer_blacklist')
whitelist = self.opts.get('renderer_whitelist')
template_vars = formula_def.copy()
template_vars['opts'] = self.opts.copy()
return compile_template(
':string:',
rend,
renderer,
blacklist,
whitelist,
input_data=data,
**template_vars
)
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._list_files
|
python
|
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
|
List files for an installed package
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L995-L1013
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None): # pylint: disable=W0231
self.ui = ui
if not opts:
opts = salt.config.spm_config(
os.path.join(syspaths.CONFIG_DIR, 'spm')
)
self.opts = opts
self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
self.files_prov = self.opts.get('spm_files_provider', 'local')
self._prep_pkgdb()
self._prep_pkgfiles()
self.db_conn = None
self.files_conn = None
self._init()
def _prep_pkgdb(self):
self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
if not self.db_conn:
self.db_conn = self._pkgdb_fun('init')
if not self.files_conn:
self.files_conn = self._pkgfiles_fun('init')
def _close(self):
if self.db_conn:
self.db_conn.close()
def run(self, args):
'''
Run the SPM command
'''
command = args[0]
try:
if command == 'install':
self._install(args)
elif command == 'local':
self._local(args)
elif command == 'repo':
self._repo(args)
elif command == 'remove':
self._remove(args)
elif command == 'build':
self._build(args)
elif command == 'update_repo':
self._download_repo_metadata(args)
elif command == 'create_repo':
self._create_repo(args)
elif command == 'files':
self._list_files(args)
elif command == 'info':
self._info(args)
elif command == 'list':
self._list(args)
elif command == 'close':
self._close()
else:
raise SPMInvocationError('Invalid command \'{0}\''.format(command))
except SPMException as exc:
self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
try:
return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
except AttributeError:
return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args)
elif command == 'files':
self._list_files(args)
elif command == 'repos':
self._repo_list(args)
else:
raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'install':
self._local_install(args)
elif command == 'files':
self._local_list_files(args)
elif command == 'info':
self._local_info(args)
else:
raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
def _repo(self, args):
'''
Process repo commands
'''
args.pop(0)
command = args[0]
if command == 'list':
self._repo_list(args)
elif command == 'packages':
self._repo_packages(args)
elif command == 'search':
self._repo_packages(args, search=True)
elif command == 'update':
self._download_repo_metadata(args)
elif command == 'create':
self._create_repo(args)
else:
raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
    def _repo_packages(self, args, search=False):
        '''
        List packages across all configured repos whose name contains
        ``args[1]`` (substring match), printing name, version-release and
        repo, and return the matching tuples.

        NOTE(review): the ``search`` flag is currently unused -- listing
        and searching behave identically. Confirm before wiring it up.
        '''
        packages = []
        repo_metadata = self._get_repo_metadata()
        for repo in repo_metadata:
            for pkg in repo_metadata[repo]['packages']:
                # Substring match, not exact-name match
                if args[1] in pkg:
                    version = repo_metadata[repo]['packages'][pkg]['info']['version']
                    release = repo_metadata[repo]['packages'][pkg]['info']['release']
                    packages.append((pkg, version, release, repo))
        for pkg in sorted(packages):
            self.ui.status(
                '{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
            )
        return packages
def _repo_list(self, args):
'''
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
'''
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
self.ui.status(repo)
    def _install(self, args):
        '''
        Install one or more packages, either from configured repos or from
        local ``.spm`` files given on the command line.

        args -- CLI argument list; ``args[1:]`` are package names or .spm
        paths. Raises SPMInvocationError for missing arguments or missing
        package files.
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package must be specified')
        # Local caller/client used later for pre/post FORMULA states
        caller_opts = self.opts.copy()
        caller_opts['file_client'] = 'local'
        self.caller = salt.client.Caller(mopts=caller_opts)
        self.client = salt.client.get_local_client(self.opts['conf_file'])
        # NOTE(review): `cache` appears unused in this method; downloads are
        # written directly below. Confirm before removing.
        cache = salt.cache.Cache(self.opts)
        packages = args[1:]
        file_map = {}  # package name -> local .spm path
        optional = []
        recommended = []
        to_install = []
        for pkg in packages:
            if pkg.endswith('.spm'):
                # Installing from a local file: derive the package name from
                # the filename ('name-version-release.spm') and read its
                # FORMULA to resolve dependencies.
                if self._pkgfiles_fun('path_exists', pkg):
                    comps = pkg.split('-')
                    comps = os.path.split('-'.join(comps[:-2]))
                    pkg_name = comps[-1]
                    formula_tar = tarfile.open(pkg, 'r:bz2')
                    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                    formula_def = salt.utils.yaml.safe_load(formula_ref)
                    file_map[pkg_name] = pkg
                    to_, op_, re_ = self._check_all_deps(
                        pkg_name=pkg_name,
                        pkg_file=pkg,
                        formula_def=formula_def
                    )
                    to_install.extend(to_)
                    optional.extend(op_)
                    recommended.extend(re_)
                    formula_tar.close()
                else:
                    raise SPMInvocationError('Package file {0} not found'.format(pkg))
            else:
                # Repo package: dependency info comes from repo metadata
                to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
        # Empty strings are placeholder entries; drop them before display
        optional = set(filter(len, optional))
        if optional:
            self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
                '\n\t'.join(optional)
            ))
        recommended = set(filter(len, recommended))
        if recommended:
            self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
                '\n\t'.join(recommended)
            ))
        to_install = set(filter(len, to_install))
        msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
        if not self.opts['assume_yes']:
            self.ui.confirm(msg)
        repo_metadata = self._get_repo_metadata()
        dl_list = {}  # package -> best download candidate across repos
        for package in to_install:
            if package in file_map:
                # Local package files are installed immediately
                self._install_indv_pkg(package, file_map[package])
            else:
                # Pick the newest copy across repos; on exact ties prefer
                # file:// repos over remote ones.
                for repo in repo_metadata:
                    repo_info = repo_metadata[repo]
                    if package in repo_info['packages']:
                        dl_package = False
                        repo_ver = repo_info['packages'][package]['info']['version']
                        repo_rel = repo_info['packages'][package]['info']['release']
                        repo_url = repo_info['info']['url']
                        if package in dl_list:
                            # Check package version, replace if newer version
                            if repo_ver == dl_list[package]['version']:
                                # Version is the same, check release
                                if repo_rel > dl_list[package]['release']:
                                    dl_package = True
                                elif repo_rel == dl_list[package]['release']:
                                    # Version and release are the same, give
                                    # preference to local (file://) repos
                                    if dl_list[package]['source'].startswith('file://'):
                                        if not repo_url.startswith('file://'):
                                            dl_package = True
                            elif repo_ver > dl_list[package]['version']:
                                dl_package = True
                        else:
                            dl_package = True
                        if dl_package is True:
                            # Put together download directory
                            cache_path = os.path.join(
                                self.opts['spm_cache_dir'],
                                repo
                            )
                            # Put together download paths
                            dl_url = '{0}/{1}'.format(
                                repo_info['info']['url'],
                                repo_info['packages'][package]['filename']
                            )
                            out_file = os.path.join(
                                cache_path,
                                repo_info['packages'][package]['filename']
                            )
                            dl_list[package] = {
                                'version': repo_ver,
                                'release': repo_rel,
                                'source': dl_url,
                                'dest_dir': cache_path,
                                'dest_file': out_file,
                            }
        for package in dl_list:
            dl_url = dl_list[package]['source']
            cache_path = dl_list[package]['dest_dir']
            out_file = dl_list[package]['dest_file']
            # Make sure download directory exists
            if not os.path.exists(cache_path):
                os.makedirs(cache_path)
            # Download the package
            if dl_url.startswith('file://'):
                dl_url = dl_url.replace('file://', '')
                shutil.copyfile(dl_url, out_file)
            else:
                # NOTE(review): `repo_info` here is whatever the last repo
                # loop iteration left behind, not necessarily the repo this
                # package came from -- looks like a latent bug for repos
                # with per-repo auth; confirm.
                with salt.utils.files.fopen(out_file, 'w') as outf:
                    outf.write(self._query_http(dl_url, repo_info['info']))
        # First we download everything, then we install
        for package in dl_list:
            out_file = dl_list[package]['dest_file']
            # Kick off the install
            self._install_indv_pkg(package, out_file)
        return
def _local_install(self, args, pkg_name=None):
'''
Install a package from a file
'''
if len(args) < 2:
raise SPMInvocationError('A package file must be specified')
self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
'''
Starting with one package, check all packages for dependencies
'''
if pkg_file and not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
self.repo_metadata = self._get_repo_metadata()
if not formula_def:
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
if pkg_name in self.repo_metadata[repo]['packages']:
formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
if not formula_def:
raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
# Check to see if the package is already installed
pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
pkgs_to_install = []
if pkg_info is None or self.opts['force']:
pkgs_to_install.append(pkg_name)
elif pkg_info is not None and not self.opts['force']:
raise SPMPackageError(
'Package {0} already installed, not installing again'.format(formula_def['name'])
)
optional_install = []
recommended_install = []
if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
self.avail_pkgs = {}
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
for pkg in self.repo_metadata[repo]['packages']:
self.avail_pkgs[pkg] = repo
needs, unavail, optional, recommended = self._resolve_deps(formula_def)
if unavail:
raise SPMPackageError(
'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
formula_def['name'], '\n'.join(unavail))
)
if optional:
optional_install.extend(optional)
for dep_pkg in optional:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
optional_install.append(msg)
if recommended:
recommended_install.extend(recommended)
for dep_pkg in recommended:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
recommended_install.append(msg)
if needs:
pkgs_to_install.extend(needs)
for dep_pkg in needs:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
'''
Install one individual package
'''
self.ui.status('... installing {0}'.format(pkg_name))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
for field in ('version', 'release', 'summary', 'description'):
if field not in formula_def:
raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
pkg_files = formula_tar.getmembers()
# First pass: check for files that already exist
existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
if existing_files and not self.opts['force']:
raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
pkg_name, '\n'.join(existing_files))
)
# We've decided to install
self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
# Run the pre_local_state script, if present
if 'pre_local_state' in formula_def:
high_data = self._render(formula_def['pre_local_state'], formula_def)
ret = self.caller.cmd('state.high', data=high_data)
if 'pre_tgt_state' in formula_def:
log.debug('Executing pre_tgt_state script')
high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
tgt = formula_def['pre_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['pre_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
# No defaults for this in config.py; default to the current running
# user and group
if salt.utils.platform.is_windows():
uname = gname = salt.utils.win_functions.get_current_user()
uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
uid = self.opts.get('spm_uid', uname_sid)
gid = self.opts.get('spm_gid', uname_sid)
else:
uid = self.opts.get('spm_uid', os.getuid())
gid = self.opts.get('spm_gid', os.getgid())
uname = pwd.getpwuid(uid)[0]
gname = grp.getgrgid(gid)[0]
# Second pass: install the files
for member in pkg_files:
member.uid = uid
member.gid = gid
member.uname = uname
member.gname = gname
out_path = self._pkgfiles_fun('install_file',
pkg_name,
formula_tar,
member,
formula_def,
self.files_conn)
if out_path is not False:
if member.isdir():
digest = ''
else:
self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
file_hash = hashlib.sha1()
digest = self._pkgfiles_fun('hash_file',
os.path.join(out_path, member.name),
file_hash,
self.files_conn)
self._pkgdb_fun('register_file',
pkg_name,
member,
out_path,
digest,
self.db_conn)
# Run the post_local_state script, if present
if 'post_local_state' in formula_def:
log.debug('Executing post_local_state script')
high_data = self._render(formula_def['post_local_state'], formula_def)
self.caller.cmd('state.high', data=high_data)
if 'post_tgt_state' in formula_def:
log.debug('Executing post_tgt_state script')
high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
tgt = formula_def['post_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['post_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
formula_tar.close()
def _resolve_deps(self, formula_def):
'''
Return a list of packages which need to be installed, to resolve all
dependencies
'''
pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
if not isinstance(pkg_info, dict):
pkg_info = {}
can_has = {}
cant_has = []
if 'dependencies' in formula_def and formula_def['dependencies'] is None:
formula_def['dependencies'] = ''
for dep in formula_def.get('dependencies', '').split(','):
dep = dep.strip()
if not dep:
continue
if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
continue
if dep in self.avail_pkgs:
can_has[dep] = self.avail_pkgs[dep]
else:
cant_has.append(dep)
optional = formula_def.get('optional', '').split(',')
recommended = formula_def.get('recommended', '').split(',')
inspected = []
to_inspect = can_has.copy()
while to_inspect:
dep = next(six.iterkeys(to_inspect))
del to_inspect[dep]
# Don't try to resolve the same package more than once
if dep in inspected:
continue
inspected.append(dep)
repo_contents = self.repo_metadata.get(can_has[dep], {})
repo_packages = repo_contents.get('packages', {})
dep_formula = repo_packages.get(dep, {}).get('info', {})
also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
can_has.update(also_can)
cant_has = sorted(set(cant_has + also_cant))
optional = sorted(set(optional + opt_dep))
recommended = sorted(set(recommended + rec_dep))
return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
'''
Traverse through all repo files and apply the functionality provided in
the callback to them
'''
repo_files = []
if os.path.exists(self.opts['spm_repos_config']):
repo_files.append(self.opts['spm_repos_config'])
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
for repo_file in filenames:
if not repo_file.endswith('.repo'):
continue
repo_files.append(repo_file)
for repo_file in repo_files:
repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
with salt.utils.files.fopen(repo_path) as rph:
repo_data = salt.utils.yaml.safe_load(rph)
for repo in repo_data:
if repo_data[repo].get('enabled', True) is False:
continue
if repo_name is not None and repo != repo_name:
continue
callback(repo, repo_data[repo])
    def _query_http(self, dl_path, repo_info):
        '''
        Download ``dl_path`` over HTTP(S), honoring optional basic-auth
        credentials from ``repo_info``.

        Returns the response body (parsed as YAML for SPM-METADATA files,
        raw text otherwise), or None when any SPM-level error occurred.

        NOTE(review): only SPMException is trapped here; transport-level
        errors raised by ``http.query`` itself will propagate to the
        caller.
        '''
        query = None
        response = None
        try:
            if 'username' in repo_info:
                try:
                    # A username without a password is a config error
                    if 'password' in repo_info:
                        query = http.query(
                            dl_path, text=True,
                            username=repo_info['username'],
                            password=repo_info['password']
                        )
                    else:
                        raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                           .format(repo_info['username']))
                except SPMException as exc:
                    self.ui.error(six.text_type(exc))
            else:
                query = http.query(dl_path, text=True)
        except SPMException as exc:
            self.ui.error(six.text_type(exc))
        try:
            if query:
                if 'SPM-METADATA' in dl_path:
                    # Metadata files are YAML documents
                    response = salt.utils.yaml.safe_load(query.get('text', '{}'))
                else:
                    response = query.get('text')
            else:
                raise SPMException('Response is empty, please check for Errors above.')
        except SPMException as exc:
            self.ui.error(six.text_type(exc))
        return response
    def _download_repo_metadata(self, args):
        '''
        Fetch the SPM-METADATA document for each configured repo (or only
        ``args[1]`` when given) and store it in the local metadata cache.
        '''
        cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

        def _update_metadata(repo, repo_info):
            # Callback invoked per-repo by _traverse_repos
            dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
            if dl_path.startswith('file://'):
                # Local repos are read straight from disk
                dl_path = dl_path.replace('file://', '')
                with salt.utils.files.fopen(dl_path, 'r') as rpm:
                    metadata = salt.utils.yaml.safe_load(rpm)
            else:
                metadata = self._query_http(dl_path, repo_info)
            cache.store('.', repo, metadata)

        repo_name = args[1] if len(args) > 1 else None
        self._traverse_repos(_update_metadata, repo_name)
    def _get_repo_metadata(self):
        '''
        Return repo metadata for all configured repos, keyed by repo name,
        each entry holding the repo config under ``info`` and the cached
        package index under ``packages``. The cache is refreshed on first
        use.
        '''
        cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
        metadata = {}

        def _read_metadata(repo, repo_info):
            # Populate the cache if this repo has never been fetched
            if cache.updated('.', repo) is None:
                log.warning('Updating repo metadata')
                self._download_repo_metadata({})
            metadata[repo] = {
                'info': repo_info,
                'packages': cache.fetch('.', repo),
            }

        self._traverse_repos(_read_metadata)
        return metadata
def _create_repo(self, args):
'''
Scan a directory and create an SPM-METADATA file which describes
all of the SPM files in that directory.
'''
if len(args) < 2:
raise SPMInvocationError('A path to a directory must be specified')
if args[1] == '.':
repo_path = os.getcwdu()
else:
repo_path = args[1]
old_files = []
repo_metadata = {}
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
for spm_file in filenames:
if not spm_file.endswith('.spm'):
continue
spm_path = '{0}/{1}'.format(repo_path, spm_file)
if not tarfile.is_tarfile(spm_path):
continue
comps = spm_file.split('-')
spm_name = '-'.join(comps[:-2])
spm_fh = tarfile.open(spm_path, 'r:bz2')
formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
use_formula = True
if spm_name in repo_metadata:
# This package is already in the repo; use the latest
cur_info = repo_metadata[spm_name]['info']
new_info = formula_conf
if int(new_info['version']) == int(cur_info['version']):
# Version is the same, check release
if int(new_info['release']) < int(cur_info['release']):
# This is an old release; don't use it
use_formula = False
elif int(new_info['version']) < int(cur_info['version']):
# This is an old version; don't use it
use_formula = False
if use_formula is True:
# Ignore/archive/delete the old version
log.debug(
'%s %s-%s had been added, but %s-%s will replace it',
spm_name, cur_info['version'], cur_info['release'],
new_info['version'], new_info['release']
)
old_files.append(repo_metadata[spm_name]['filename'])
else:
# Ignore/archive/delete the new version
log.debug(
'%s %s-%s has been found, but is older than %s-%s',
spm_name, new_info['version'], new_info['release'],
cur_info['version'], cur_info['release']
)
old_files.append(spm_file)
if use_formula is True:
log.debug(
'adding %s-%s-%s to the repo',
formula_conf['name'], formula_conf['version'],
formula_conf['release']
)
repo_metadata[spm_name] = {
'info': formula_conf.copy(),
}
repo_metadata[spm_name]['filename'] = spm_file
metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
salt.utils.yaml.safe_dump(
repo_metadata,
mfh,
indent=4,
canonical=False,
default_flow_style=False,
)
log.debug('Wrote %s', metadata_filename)
for file_ in old_files:
if self.opts['spm_repo_dups'] == 'ignore':
# ignore old packages, but still only add the latest
log.debug('%s will be left in the directory', file_)
elif self.opts['spm_repo_dups'] == 'archive':
# spm_repo_archive_path is where old packages are moved
if not os.path.exists('./archive'):
try:
os.makedirs('./archive')
log.debug('%s has been archived', file_)
except IOError:
log.error('Unable to create archive directory')
try:
shutil.move(file_, './archive')
except (IOError, OSError):
log.error('Unable to archive %s', file_)
elif self.opts['spm_repo_dups'] == 'delete':
# delete old packages from the repo
try:
os.remove(file_)
log.debug('%s has been deleted', file_)
except IOError:
log.error('Unable to delete %s', file_)
except OSError:
# The file has already been deleted
pass
    def _remove(self, args):
        '''
        Uninstall one or more packages.

        Files are removed only when their on-disk hash still matches the
        hash recorded at install time (i.e. the file was not modified);
        directories are removed only when empty. The package and all of its
        file records are unregistered from the package database.
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package must be specified')
        packages = args[1:]
        msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))
        if not self.opts['assume_yes']:
            self.ui.confirm(msg)
        for package in packages:
            self.ui.status('... removing {0}'.format(package))
            if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
                raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
            # Look at local repo index
            pkg_info = self._pkgdb_fun('info', package, self.db_conn)
            if pkg_info is None:
                raise SPMInvocationError('Package {0} not installed'.format(package))
            # Find files that have not changed and remove them
            files = self._pkgdb_fun('list_files', package, self.db_conn)
            dirs = []
            for filerow in files:
                if self._pkgfiles_fun('path_isdir', filerow[0]):
                    # Directories are handled after all files are gone
                    dirs.append(filerow[0])
                    continue
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
                if filerow[1] == digest:
                    # Unmodified since install: safe to delete
                    self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                    self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
                else:
                    # Locally modified: leave the file on disk
                    self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
                self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
            # Clean up directories
            for dir_ in sorted(dirs, reverse=True):
                self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
                try:
                    self._verbose('Removing directory {0}'.format(dir_), log.trace)
                    os.rmdir(dir_)
                except OSError:
                    # Leave directories in place that still have files in them
                    self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
            self._pkgdb_fun('unregister_pkg', package, self.db_conn)
    def _verbose(self, msg, level=log.debug):
        '''
        Emit ``msg`` to the UI when verbose mode is enabled, and always
        send it to the logger at ``level`` (default: debug).
        '''
        if self.opts.get('verbose', False) is True:
            self.ui.status(msg)
        # The message is logged unconditionally, regardless of verbosity
        level(msg)
def _local_info(self, args):
'''
List info for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
comps = pkg_file.split('-')
comps = '-'.join(comps[:-2]).split('/')
name = comps[-1]
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
self.ui.status(self._get_info(formula_def))
formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _build(self, args):
'''
Build a package
'''
if len(args) < 2:
raise SPMInvocationError('A path to a formula must be specified')
self.abspath = args[1].rstrip('/')
comps = self.abspath.split('/')
self.relpath = comps[-1]
formula_path = '{0}/FORMULA'.format(self.abspath)
if not os.path.exists(formula_path):
raise SPMPackageError('Formula file {0} not found'.format(formula_path))
with salt.utils.files.fopen(formula_path) as fp_:
formula_conf = salt.utils.yaml.safe_load(fp_)
for field in ('name', 'version', 'release', 'summary', 'description'):
if field not in formula_conf:
raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
out_path = '{0}/{1}-{2}-{3}.spm'.format(
self.opts['spm_build_dir'],
formula_conf['name'],
formula_conf['version'],
formula_conf['release'],
)
if not os.path.exists(self.opts['spm_build_dir']):
os.mkdir(self.opts['spm_build_dir'])
self.formula_conf = formula_conf
formula_tar = tarfile.open(out_path, 'w:bz2')
if 'files' in formula_conf:
# This allows files to be added to the SPM file in a specific order.
# It also allows for files to be tagged as a certain type, as with
# RPM files. This tag is ignored here, but is used when installing
# the SPM file.
if isinstance(formula_conf['files'], list):
formula_dir = tarfile.TarInfo(formula_conf['name'])
formula_dir.type = tarfile.DIRTYPE
formula_tar.addfile(formula_dir)
for file_ in formula_conf['files']:
for ftype in FILE_TYPES:
if file_.startswith('{0}|'.format(ftype)):
file_ = file_.lstrip('{0}|'.format(ftype))
formula_tar.add(
os.path.join(os.getcwd(), file_),
os.path.join(formula_conf['name'], file_),
)
else:
# If no files are specified, then the whole directory will be added.
try:
formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
except TypeError:
formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
formula_tar.close()
self.ui.status('Built package {0}'.format(out_path))
    def _exclude(self, member):
        '''
        Tar member filter used by ``_build``: return None to drop a member
        whose path matches any entry in ``spm_build_exclude``, otherwise
        return the member unchanged.

        NOTE(review): when invoked through tarfile's legacy ``exclude=``
        callback, ``member`` is a path string; returning None there means
        "falsy -> do not exclude", so exclusion only takes effect with the
        ``filter=`` callback (TarInfo argument). Confirm intended behavior
        on old tarfile versions.
        '''
        if isinstance(member, string_types):
            return None
        for item in self.opts['spm_build_exclude']:
            # Match against both the in-archive name and the source path
            if member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], item)):
                return None
            elif member.name.startswith('{0}/{1}'.format(self.abspath, item)):
                return None
        return member
    def _render(self, data, formula_def):
        '''
        Render a [pre|post]_local_state or [pre|post]_tgt_state script
        through salt's template pipeline and return the compiled high data.

        data -- raw template string from the FORMULA
        formula_def -- FORMULA data; exposed to the template along with a
            copy of the runtime opts (as ``opts``)
        '''
        # FORMULA can contain a renderer option
        renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
        rend = salt.loader.render(self.opts, {})
        blacklist = self.opts.get('renderer_blacklist')
        whitelist = self.opts.get('renderer_whitelist')
        template_vars = formula_def.copy()
        template_vars['opts'] = self.opts.copy()
        return compile_template(
            ':string:',
            rend,
            renderer,
            blacklist,
            whitelist,
            input_data=data,
            **template_vars
        )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._build
|
python
|
def _build(self, args):
'''
Build a package
'''
if len(args) < 2:
raise SPMInvocationError('A path to a formula must be specified')
self.abspath = args[1].rstrip('/')
comps = self.abspath.split('/')
self.relpath = comps[-1]
formula_path = '{0}/FORMULA'.format(self.abspath)
if not os.path.exists(formula_path):
raise SPMPackageError('Formula file {0} not found'.format(formula_path))
with salt.utils.files.fopen(formula_path) as fp_:
formula_conf = salt.utils.yaml.safe_load(fp_)
for field in ('name', 'version', 'release', 'summary', 'description'):
if field not in formula_conf:
raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
out_path = '{0}/{1}-{2}-{3}.spm'.format(
self.opts['spm_build_dir'],
formula_conf['name'],
formula_conf['version'],
formula_conf['release'],
)
if not os.path.exists(self.opts['spm_build_dir']):
os.mkdir(self.opts['spm_build_dir'])
self.formula_conf = formula_conf
formula_tar = tarfile.open(out_path, 'w:bz2')
if 'files' in formula_conf:
# This allows files to be added to the SPM file in a specific order.
# It also allows for files to be tagged as a certain type, as with
# RPM files. This tag is ignored here, but is used when installing
# the SPM file.
if isinstance(formula_conf['files'], list):
formula_dir = tarfile.TarInfo(formula_conf['name'])
formula_dir.type = tarfile.DIRTYPE
formula_tar.addfile(formula_dir)
for file_ in formula_conf['files']:
for ftype in FILE_TYPES:
if file_.startswith('{0}|'.format(ftype)):
file_ = file_.lstrip('{0}|'.format(ftype))
formula_tar.add(
os.path.join(os.getcwd(), file_),
os.path.join(formula_conf['name'], file_),
)
else:
# If no files are specified, then the whole directory will be added.
try:
formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
except TypeError:
formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
formula_tar.close()
self.ui.status('Built package {0}'.format(out_path))
|
Build a package
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L1015-L1077
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
    def __init__(self, ui, opts=None): # pylint: disable=W0231
        # ui: UI object used for all status/error/confirm output
        self.ui = ui
        # Fall back to the default SPM configuration when none is given
        if not opts:
            opts = salt.config.spm_config(
                os.path.join(syspaths.CONFIG_DIR, 'spm')
            )
        self.opts = opts
        # Pluggable backends for the package database and package files
        self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
        self.files_prov = self.opts.get('spm_files_provider', 'local')
        self._prep_pkgdb()
        self._prep_pkgfiles()
        # Backend connections are created by _init()
        self.db_conn = None
        self.files_conn = None
        self._init()
    def _prep_pkgdb(self):
        # Load the package-database provider modules via the salt loader
        self.pkgdb = salt.loader.pkgdb(self.opts)
    def _prep_pkgfiles(self):
        # Load the package-files provider modules via the salt loader
        self.pkgfiles = salt.loader.pkgfiles(self.opts)
    def _init(self):
        # Open backend connections on first use; keep existing ones
        if not self.db_conn:
            self.db_conn = self._pkgdb_fun('init')
        if not self.files_conn:
            self.files_conn = self._pkgfiles_fun('init')
    def _close(self):
        # Only the package database holds a closable connection
        if self.db_conn:
            self.db_conn.close()
    def run(self, args):
        '''
        Run the SPM command given in ``args[0]``.

        SPM errors are reported through the UI rather than propagating to
        the caller.
        '''
        command = args[0]
        try:
            if command == 'install':
                self._install(args)
            elif command == 'local':
                self._local(args)
            elif command == 'repo':
                self._repo(args)
            elif command == 'remove':
                self._remove(args)
            elif command == 'build':
                self._build(args)
            elif command == 'update_repo':
                self._download_repo_metadata(args)
            elif command == 'create_repo':
                self._create_repo(args)
            elif command == 'files':
                self._list_files(args)
            elif command == 'info':
                self._info(args)
            elif command == 'list':
                self._list(args)
            elif command == 'close':
                self._close()
            else:
                raise SPMInvocationError('Invalid command \'{0}\''.format(command))
        except SPMException as exc:
            self.ui.error(six.text_type(exc))
    def _pkgdb_fun(self, func, *args, **kwargs):
        '''
        Invoke ``func`` on the package-database provider: attribute access
        first, loader dict-style lookup on AttributeError.
        '''
        try:
            return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
        except AttributeError:
            # NOTE(review): an AttributeError raised *inside* the provider
            # function causes it to be invoked a second time here.
            return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
    def _pkgfiles_fun(self, func, *args, **kwargs):
        '''
        Invoke ``func`` on the package-files provider; mirrors
        ``_pkgdb_fun``'s lookup/fallback behavior.
        '''
        try:
            return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
        except AttributeError:
            return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
    def _list(self, args):
        '''
        Process ``spm list`` subcommands: packages, files, or repos.

        (The original docstring said "local commands" -- a copy-paste
        error from ``_local``.)
        '''
        # Drop the 'list' token so args[0] is the subcommand
        args.pop(0)
        command = args[0]
        if command == 'packages':
            self._list_packages(args)
        elif command == 'files':
            self._list_files(args)
        elif command == 'repos':
            self._repo_list(args)
        else:
            raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
    def _local(self, args):
        '''
        Process ``spm local`` subcommands (install/files/info), which
        operate on package files on disk rather than repositories.
        '''
        # Drop the 'local' token so args[0] is the subcommand
        args.pop(0)
        command = args[0]
        if command == 'install':
            self._local_install(args)
        elif command == 'files':
            self._local_list_files(args)
        elif command == 'info':
            self._local_info(args)
        else:
            raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
    def _repo(self, args):
        '''
        Process ``spm repo`` subcommands: list, packages, search, update,
        or create.
        '''
        # Drop the 'repo' token so args[0] is the subcommand
        args.pop(0)
        command = args[0]
        if command == 'list':
            self._repo_list(args)
        elif command == 'packages':
            self._repo_packages(args)
        elif command == 'search':
            self._repo_packages(args, search=True)
        elif command == 'update':
            self._download_repo_metadata(args)
        elif command == 'create':
            self._create_repo(args)
        else:
            raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
    def _repo_packages(self, args, search=False):
        '''
        List packages across all configured repos whose name contains
        ``args[1]`` (substring match) and return the matching tuples.

        NOTE(review): the ``search`` flag is unused -- listing and
        searching behave identically.
        '''
        packages = []
        repo_metadata = self._get_repo_metadata()
        for repo in repo_metadata:
            for pkg in repo_metadata[repo]['packages']:
                # Substring match, not exact-name match
                if args[1] in pkg:
                    version = repo_metadata[repo]['packages'][pkg]['info']['version']
                    release = repo_metadata[repo]['packages'][pkg]['info']['release']
                    packages.append((pkg, version, release, repo))
        for pkg in sorted(packages):
            self.ui.status(
                '{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
            )
        return packages
    def _repo_list(self, args):
        '''
        Print the name of every configured repo.

        Reachable both as ``spm repo list`` and ``spm list repos``.
        '''
        repo_metadata = self._get_repo_metadata()
        for repo in repo_metadata:
            self.ui.status(repo)
    def _install(self, args):
        '''
        Install one or more packages, resolving dependencies first.

        Everything after ``args[0]`` is either a package name (looked up
        in the configured repos) or a path to a local ``.spm`` file.

        Raises SPMInvocationError when no package is given or a named
        ``.spm`` file does not exist.
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package must be specified')
        # Set up a masterless caller (for local states) and a local client
        # (for targeted states); both are used later by _install_indv_pkg
        caller_opts = self.opts.copy()
        caller_opts['file_client'] = 'local'
        self.caller = salt.client.Caller(mopts=caller_opts)
        self.client = salt.client.get_local_client(self.opts['conf_file'])
        # NOTE(review): this Cache instance is never used below -- confirm
        # whether it can be removed
        cache = salt.cache.Cache(self.opts)
        packages = args[1:]
        # Maps package name -> local .spm path for file-based installs
        file_map = {}
        optional = []
        recommended = []
        to_install = []
        for pkg in packages:
            if pkg.endswith('.spm'):
                if self._pkgfiles_fun('path_exists', pkg):
                    # Derive the package name by stripping the trailing
                    # "-<version>-<release>" from the file name
                    comps = pkg.split('-')
                    comps = os.path.split('-'.join(comps[:-2]))
                    pkg_name = comps[-1]
                    formula_tar = tarfile.open(pkg, 'r:bz2')
                    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                    formula_def = salt.utils.yaml.safe_load(formula_ref)
                    file_map[pkg_name] = pkg
                    to_, op_, re_ = self._check_all_deps(
                        pkg_name=pkg_name,
                        pkg_file=pkg,
                        formula_def=formula_def
                    )
                    to_install.extend(to_)
                    optional.extend(op_)
                    recommended.extend(re_)
                    formula_tar.close()
                else:
                    raise SPMInvocationError('Package file {0} not found'.format(pkg))
            else:
                # Repo-based package; the formula is looked up in repo metadata
                to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
        # Drop empty entries and de-duplicate before reporting
        optional = set(filter(len, optional))
        if optional:
            self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
                '\n\t'.join(optional)
            ))
        recommended = set(filter(len, recommended))
        if recommended:
            self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
                '\n\t'.join(recommended)
            ))
        to_install = set(filter(len, to_install))
        msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
        if not self.opts['assume_yes']:
            self.ui.confirm(msg)
        repo_metadata = self._get_repo_metadata()
        # dl_list maps package name -> download/dest info for the best
        # candidate: newest version/release, preferring file:// repos on ties
        dl_list = {}
        for package in to_install:
            if package in file_map:
                # Local files install directly; no download step needed
                self._install_indv_pkg(package, file_map[package])
            else:
                for repo in repo_metadata:
                    repo_info = repo_metadata[repo]
                    if package in repo_info['packages']:
                        dl_package = False
                        repo_ver = repo_info['packages'][package]['info']['version']
                        repo_rel = repo_info['packages'][package]['info']['release']
                        repo_url = repo_info['info']['url']
                        if package in dl_list:
                            # Check package version, replace if newer version
                            if repo_ver == dl_list[package]['version']:
                                # Version is the same, check release
                                if repo_rel > dl_list[package]['release']:
                                    dl_package = True
                                elif repo_rel == dl_list[package]['release']:
                                    # Version and release are the same, give
                                    # preference to local (file://) repos
                                    if dl_list[package]['source'].startswith('file://'):
                                        if not repo_url.startswith('file://'):
                                            dl_package = True
                            elif repo_ver > dl_list[package]['version']:
                                dl_package = True
                        else:
                            dl_package = True
                        if dl_package is True:
                            # Put together download directory
                            cache_path = os.path.join(
                                self.opts['spm_cache_dir'],
                                repo
                            )
                            # Put together download paths
                            dl_url = '{0}/{1}'.format(
                                repo_info['info']['url'],
                                repo_info['packages'][package]['filename']
                            )
                            out_file = os.path.join(
                                cache_path,
                                repo_info['packages'][package]['filename']
                            )
                            dl_list[package] = {
                                'version': repo_ver,
                                'release': repo_rel,
                                'source': dl_url,
                                'dest_dir': cache_path,
                                'dest_file': out_file,
                            }
        for package in dl_list:
            dl_url = dl_list[package]['source']
            cache_path = dl_list[package]['dest_dir']
            out_file = dl_list[package]['dest_file']
            # Make sure download directory exists
            if not os.path.exists(cache_path):
                os.makedirs(cache_path)
            # Download the package
            if dl_url.startswith('file://'):
                dl_url = dl_url.replace('file://', '')
                shutil.copyfile(dl_url, out_file)
            else:
                # NOTE(review): repo_info here is whatever repo the selection
                # loop above ended on, not necessarily the repo this package
                # was chosen from -- confirm auth settings still match
                with salt.utils.files.fopen(out_file, 'w') as outf:
                    outf.write(self._query_http(dl_url, repo_info['info']))
        # First we download everything, then we install
        for package in dl_list:
            out_file = dl_list[package]['dest_file']
            # Kick off the install
            self._install_indv_pkg(package, out_file)
        return
    def _local_install(self, args, pkg_name=None):
        '''
        Install a package from a local ``.spm`` file.

        Thin wrapper around :meth:`_install`, which already treats
        arguments ending in ``.spm`` as local files.

        pkg_name -- unused; kept for interface compatibility.
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package file must be specified')
        self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
'''
Starting with one package, check all packages for dependencies
'''
if pkg_file and not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
self.repo_metadata = self._get_repo_metadata()
if not formula_def:
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
if pkg_name in self.repo_metadata[repo]['packages']:
formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
if not formula_def:
raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
# Check to see if the package is already installed
pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
pkgs_to_install = []
if pkg_info is None or self.opts['force']:
pkgs_to_install.append(pkg_name)
elif pkg_info is not None and not self.opts['force']:
raise SPMPackageError(
'Package {0} already installed, not installing again'.format(formula_def['name'])
)
optional_install = []
recommended_install = []
if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
self.avail_pkgs = {}
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
for pkg in self.repo_metadata[repo]['packages']:
self.avail_pkgs[pkg] = repo
needs, unavail, optional, recommended = self._resolve_deps(formula_def)
if unavail:
raise SPMPackageError(
'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
formula_def['name'], '\n'.join(unavail))
)
if optional:
optional_install.extend(optional)
for dep_pkg in optional:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
optional_install.append(msg)
if recommended:
recommended_install.extend(recommended)
for dep_pkg in recommended:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
recommended_install.append(msg)
if needs:
pkgs_to_install.extend(needs)
for dep_pkg in needs:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
'''
Install one individual package
'''
self.ui.status('... installing {0}'.format(pkg_name))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
for field in ('version', 'release', 'summary', 'description'):
if field not in formula_def:
raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
pkg_files = formula_tar.getmembers()
# First pass: check for files that already exist
existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
if existing_files and not self.opts['force']:
raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
pkg_name, '\n'.join(existing_files))
)
# We've decided to install
self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
# Run the pre_local_state script, if present
if 'pre_local_state' in formula_def:
high_data = self._render(formula_def['pre_local_state'], formula_def)
ret = self.caller.cmd('state.high', data=high_data)
if 'pre_tgt_state' in formula_def:
log.debug('Executing pre_tgt_state script')
high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
tgt = formula_def['pre_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['pre_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
# No defaults for this in config.py; default to the current running
# user and group
if salt.utils.platform.is_windows():
uname = gname = salt.utils.win_functions.get_current_user()
uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
uid = self.opts.get('spm_uid', uname_sid)
gid = self.opts.get('spm_gid', uname_sid)
else:
uid = self.opts.get('spm_uid', os.getuid())
gid = self.opts.get('spm_gid', os.getgid())
uname = pwd.getpwuid(uid)[0]
gname = grp.getgrgid(gid)[0]
# Second pass: install the files
for member in pkg_files:
member.uid = uid
member.gid = gid
member.uname = uname
member.gname = gname
out_path = self._pkgfiles_fun('install_file',
pkg_name,
formula_tar,
member,
formula_def,
self.files_conn)
if out_path is not False:
if member.isdir():
digest = ''
else:
self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
file_hash = hashlib.sha1()
digest = self._pkgfiles_fun('hash_file',
os.path.join(out_path, member.name),
file_hash,
self.files_conn)
self._pkgdb_fun('register_file',
pkg_name,
member,
out_path,
digest,
self.db_conn)
# Run the post_local_state script, if present
if 'post_local_state' in formula_def:
log.debug('Executing post_local_state script')
high_data = self._render(formula_def['post_local_state'], formula_def)
self.caller.cmd('state.high', data=high_data)
if 'post_tgt_state' in formula_def:
log.debug('Executing post_tgt_state script')
high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
tgt = formula_def['post_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['post_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
formula_tar.close()
    def _resolve_deps(self, formula_def):
        '''
        Return a list of packages which need to be installed, to resolve all
        dependencies

        Recursively walks the comma-separated ``dependencies`` of
        ``formula_def`` using ``self.avail_pkgs`` (name -> repo map, set up
        by the caller) and ``self.repo_metadata``.

        Returns a 4-tuple of (can_has, cant_has, optional, recommended):
        resolvable deps as a name -> repo dict, unresolvable dep names,
        and the accumulated optional/recommended dep names.
        '''
        pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
        if not isinstance(pkg_info, dict):
            pkg_info = {}
        can_has = {}
        cant_has = []
        # Normalize an explicit null dependencies key to an empty string
        if 'dependencies' in formula_def and formula_def['dependencies'] is None:
            formula_def['dependencies'] = ''
        for dep in formula_def.get('dependencies', '').split(','):
            dep = dep.strip()
            if not dep:
                continue
            # Already-installed deps need no action
            if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
                continue
            if dep in self.avail_pkgs:
                can_has[dep] = self.avail_pkgs[dep]
            else:
                cant_has.append(dep)
        # NOTE(review): splitting '' yields ['']; callers filter empties
        # out with filter(len)
        optional = formula_def.get('optional', '').split(',')
        recommended = formula_def.get('recommended', '').split(',')
        inspected = []
        # Breadth-style worklist over the resolvable deps, recursing into
        # each dep's own formula
        to_inspect = can_has.copy()
        while to_inspect:
            dep = next(six.iterkeys(to_inspect))
            del to_inspect[dep]
            # Don't try to resolve the same package more than once
            if dep in inspected:
                continue
            inspected.append(dep)
            repo_contents = self.repo_metadata.get(can_has[dep], {})
            repo_packages = repo_contents.get('packages', {})
            dep_formula = repo_packages.get(dep, {}).get('info', {})
            also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
            can_has.update(also_can)
            cant_has = sorted(set(cant_has + also_cant))
            optional = sorted(set(optional + opt_dep))
            recommended = sorted(set(recommended + rec_dep))
        return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
'''
Traverse through all repo files and apply the functionality provided in
the callback to them
'''
repo_files = []
if os.path.exists(self.opts['spm_repos_config']):
repo_files.append(self.opts['spm_repos_config'])
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
for repo_file in filenames:
if not repo_file.endswith('.repo'):
continue
repo_files.append(repo_file)
for repo_file in repo_files:
repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
with salt.utils.files.fopen(repo_path) as rph:
repo_data = salt.utils.yaml.safe_load(rph)
for repo in repo_data:
if repo_data[repo].get('enabled', True) is False:
continue
if repo_name is not None and repo != repo_name:
continue
callback(repo, repo_data[repo])
    def _query_http(self, dl_path, repo_info):
        '''
        Download files via http

        dl_path -- URL to fetch
        repo_info -- repo config dict; optional ``username``/``password``
            keys enable HTTP auth (a username without a password is an
            error)

        Returns the parsed YAML when fetching an SPM-METADATA URL, the
        raw response text otherwise, or None when the query failed (the
        error is reported through ``self.ui`` rather than raised).
        '''
        query = None
        response = None
        try:
            if 'username' in repo_info:
                try:
                    if 'password' in repo_info:
                        query = http.query(
                            dl_path, text=True,
                            username=repo_info['username'],
                            password=repo_info['password']
                        )
                    else:
                        raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                           .format(repo_info['username']))
                except SPMException as exc:
                    self.ui.error(six.text_type(exc))
            else:
                query = http.query(dl_path, text=True)
        except SPMException as exc:
            self.ui.error(six.text_type(exc))
        try:
            if query:
                if 'SPM-METADATA' in dl_path:
                    # Metadata endpoints return YAML; parse before returning
                    response = salt.utils.yaml.safe_load(query.get('text', '{}'))
                else:
                    response = query.get('text')
            else:
                raise SPMException('Response is empty, please check for Errors above.')
        except SPMException as exc:
            self.ui.error(six.text_type(exc))
        return response
    def _download_repo_metadata(self, args):
        '''
        Connect to all repos and download metadata

        Fetches ``SPM-METADATA`` from each configured repo (or only the
        repo named in ``args[1]``, if given) and stores it in the SPM
        cache under the repo's name.
        '''
        cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
        def _update_metadata(repo, repo_info):
            # file:// repos are read straight from disk; everything else
            # goes through _query_http
            dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
            if dl_path.startswith('file://'):
                dl_path = dl_path.replace('file://', '')
                with salt.utils.files.fopen(dl_path, 'r') as rpm:
                    metadata = salt.utils.yaml.safe_load(rpm)
            else:
                metadata = self._query_http(dl_path, repo_info)
            cache.store('.', repo, metadata)
        repo_name = args[1] if len(args) > 1 else None
        self._traverse_repos(_update_metadata, repo_name)
    def _get_repo_metadata(self):
        '''
        Return cached repo metadata

        Returns a dict mapping repo name -> ``{'info': repo config,
        'packages': cached package metadata}``. Triggers a full metadata
        download when the cache has never been populated for a repo.
        '''
        cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
        metadata = {}
        def _read_metadata(repo, repo_info):
            if cache.updated('.', repo) is None:
                log.warning('Updating repo metadata')
                self._download_repo_metadata({})
            metadata[repo] = {
                'info': repo_info,
                'packages': cache.fetch('.', repo),
            }
        self._traverse_repos(_read_metadata)
        return metadata
def _create_repo(self, args):
'''
Scan a directory and create an SPM-METADATA file which describes
all of the SPM files in that directory.
'''
if len(args) < 2:
raise SPMInvocationError('A path to a directory must be specified')
if args[1] == '.':
repo_path = os.getcwdu()
else:
repo_path = args[1]
old_files = []
repo_metadata = {}
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
for spm_file in filenames:
if not spm_file.endswith('.spm'):
continue
spm_path = '{0}/{1}'.format(repo_path, spm_file)
if not tarfile.is_tarfile(spm_path):
continue
comps = spm_file.split('-')
spm_name = '-'.join(comps[:-2])
spm_fh = tarfile.open(spm_path, 'r:bz2')
formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
use_formula = True
if spm_name in repo_metadata:
# This package is already in the repo; use the latest
cur_info = repo_metadata[spm_name]['info']
new_info = formula_conf
if int(new_info['version']) == int(cur_info['version']):
# Version is the same, check release
if int(new_info['release']) < int(cur_info['release']):
# This is an old release; don't use it
use_formula = False
elif int(new_info['version']) < int(cur_info['version']):
# This is an old version; don't use it
use_formula = False
if use_formula is True:
# Ignore/archive/delete the old version
log.debug(
'%s %s-%s had been added, but %s-%s will replace it',
spm_name, cur_info['version'], cur_info['release'],
new_info['version'], new_info['release']
)
old_files.append(repo_metadata[spm_name]['filename'])
else:
# Ignore/archive/delete the new version
log.debug(
'%s %s-%s has been found, but is older than %s-%s',
spm_name, new_info['version'], new_info['release'],
cur_info['version'], cur_info['release']
)
old_files.append(spm_file)
if use_formula is True:
log.debug(
'adding %s-%s-%s to the repo',
formula_conf['name'], formula_conf['version'],
formula_conf['release']
)
repo_metadata[spm_name] = {
'info': formula_conf.copy(),
}
repo_metadata[spm_name]['filename'] = spm_file
metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
salt.utils.yaml.safe_dump(
repo_metadata,
mfh,
indent=4,
canonical=False,
default_flow_style=False,
)
log.debug('Wrote %s', metadata_filename)
for file_ in old_files:
if self.opts['spm_repo_dups'] == 'ignore':
# ignore old packages, but still only add the latest
log.debug('%s will be left in the directory', file_)
elif self.opts['spm_repo_dups'] == 'archive':
# spm_repo_archive_path is where old packages are moved
if not os.path.exists('./archive'):
try:
os.makedirs('./archive')
log.debug('%s has been archived', file_)
except IOError:
log.error('Unable to create archive directory')
try:
shutil.move(file_, './archive')
except (IOError, OSError):
log.error('Unable to archive %s', file_)
elif self.opts['spm_repo_dups'] == 'delete':
# delete old packages from the repo
try:
os.remove(file_)
log.debug('%s has been deleted', file_)
except IOError:
log.error('Unable to delete %s', file_)
except OSError:
# The file has already been deleted
pass
    def _remove(self, args):
        '''
        Remove a package

        Everything after ``args[0]`` is a package name. For each package:
        files whose on-disk hash still matches the registered hash are
        deleted (modified files are left in place), emptied directories
        are removed, and the package is unregistered from the DB.

        Raises SPMInvocationError for a missing argument or a package
        that is not installed, and SPMDatabaseError when the package DB
        does not exist.
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package must be specified')
        packages = args[1:]
        msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))
        if not self.opts['assume_yes']:
            self.ui.confirm(msg)
        for package in packages:
            self.ui.status('... removing {0}'.format(package))
            if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
                raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
            # Look at local repo index
            pkg_info = self._pkgdb_fun('info', package, self.db_conn)
            if pkg_info is None:
                raise SPMInvocationError('Package {0} not installed'.format(package))
            # Find files that have not changed and remove them
            files = self._pkgdb_fun('list_files', package, self.db_conn)
            dirs = []
            for filerow in files:
                # filerow[0] is the path, filerow[1] the registered hash
                if self._pkgfiles_fun('path_isdir', filerow[0]):
                    dirs.append(filerow[0])
                    continue
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
                if filerow[1] == digest:
                    self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                    self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
                else:
                    # Hash mismatch: the file was modified after install,
                    # so leave it on disk but still unregister it
                    self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
                self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
            # Clean up directories, deepest first
            for dir_ in sorted(dirs, reverse=True):
                self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
                try:
                    self._verbose('Removing directory {0}'.format(dir_), log.trace)
                    os.rmdir(dir_)
                except OSError:
                    # Leave directories in place that still have files in them
                    self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
            self._pkgdb_fun('unregister_pkg', package, self.db_conn)
    def _verbose(self, msg, level=log.debug):
        '''
        Display verbose information

        msg -- the message to emit
        level -- a logger method (e.g. ``log.trace``); note it is bound
            at function-definition time as the default

        Always logs at ``level``; additionally prints to the UI when the
        ``verbose`` option is set.
        '''
        if self.opts.get('verbose', False) is True:
            self.ui.status(msg)
        level(msg)
def _local_info(self, args):
'''
List info for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
comps = pkg_file.split('-')
comps = '-'.join(comps[:-2]).split('/')
name = comps[-1]
formula_tar = tarfile.open(pkg_file, 'r:bz2')
formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
formula_def = salt.utils.yaml.safe_load(formula_ref)
self.ui.status(self._get_info(formula_def))
formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _exclude(self, member):
'''
Exclude based on opts
'''
if isinstance(member, string_types):
return None
for item in self.opts['spm_build_exclude']:
if member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], item)):
return None
elif member.name.startswith('{0}/{1}'.format(self.abspath, item)):
return None
return member
    def _render(self, data, formula_def):
        '''
        Render a [pre|post]_local_state or [pre|post]_tgt_state script

        data -- raw template text from the FORMULA
        formula_def -- parsed FORMULA dict; its keys (plus ``opts``) are
            exposed as template variables

        Returns the compiled highstate data structure.
        '''
        # FORMULA can contain a renderer option
        renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
        rend = salt.loader.render(self.opts, {})
        blacklist = self.opts.get('renderer_blacklist')
        whitelist = self.opts.get('renderer_whitelist')
        template_vars = formula_def.copy()
        template_vars['opts'] = self.opts.copy()
        return compile_template(
            ':string:',
            rend,
            renderer,
            blacklist,
            whitelist,
            input_data=data,
            **template_vars
        )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._exclude
|
python
|
def _exclude(self, member):
'''
Exclude based on opts
'''
if isinstance(member, string_types):
return None
for item in self.opts['spm_build_exclude']:
if member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], item)):
return None
elif member.name.startswith('{0}/{1}'.format(self.abspath, item)):
return None
return member
|
Exclude based on opts
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L1079-L1091
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
    def __init__(self, ui, opts=None): # pylint: disable=W0231
        '''
        ui -- SPMUserInterface instance used for status/error output
        opts -- salt configuration dict; loaded from the default spm
            config file when not supplied
        '''
        self.ui = ui
        if not opts:
            opts = salt.config.spm_config(
                os.path.join(syspaths.CONFIG_DIR, 'spm')
            )
        self.opts = opts
        # Pluggable package-database and package-files provider names
        self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
        self.files_prov = self.opts.get('spm_files_provider', 'local')
        self._prep_pkgdb()
        self._prep_pkgfiles()
        # Connections are opened lazily by _init()
        self.db_conn = None
        self.files_conn = None
        self._init()
    def _prep_pkgdb(self):
        # Load the pluggable package-database backends via the salt loader
        self.pkgdb = salt.loader.pkgdb(self.opts)
    def _prep_pkgfiles(self):
        # Load the pluggable package-files backends via the salt loader
        self.pkgfiles = salt.loader.pkgfiles(self.opts)
    def _init(self):
        # Lazily open the provider connections; repeated calls are no-ops
        if not self.db_conn:
            self.db_conn = self._pkgdb_fun('init')
        if not self.files_conn:
            self.files_conn = self._pkgfiles_fun('init')
    def _close(self):
        # Close the package-database connection.
        # NOTE(review): files_conn is never closed here -- confirm the
        # files provider requires no teardown
        if self.db_conn:
            self.db_conn.close()
def run(self, args):
'''
Run the SPM command
'''
command = args[0]
try:
if command == 'install':
self._install(args)
elif command == 'local':
self._local(args)
elif command == 'repo':
self._repo(args)
elif command == 'remove':
self._remove(args)
elif command == 'build':
self._build(args)
elif command == 'update_repo':
self._download_repo_metadata(args)
elif command == 'create_repo':
self._create_repo(args)
elif command == 'files':
self._list_files(args)
elif command == 'info':
self._info(args)
elif command == 'list':
self._list(args)
elif command == 'close':
self._close()
else:
raise SPMInvocationError('Invalid command \'{0}\''.format(command))
except SPMException as exc:
self.ui.error(six.text_type(exc))
    def _pkgdb_fun(self, func, *args, **kwargs):
        '''
        Call ``func`` on the configured package-database provider.

        Tries attribute-style access on the loaded provider first, then
        falls back to the loader's dict-style '<provider>.<func>' lookup.
        '''
        try:
            return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
        except AttributeError:
            return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
    def _pkgfiles_fun(self, func, *args, **kwargs):
        '''
        Call ``func`` on the configured package-files provider.

        Tries attribute-style access on the loaded provider first, then
        falls back to the loader's dict-style '<provider>.<func>' lookup.
        '''
        try:
            return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
        except AttributeError:
            return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
    def _list(self, args):
        '''
        Process local commands

        Dispatches the ``spm list`` subcommand in ``args[1]`` to the
        matching handler; raises SPMInvocationError otherwise.
        '''
        args.pop(0)
        command = args[0]
        if command == 'packages':
            self._list_packages(args)
        elif command == 'files':
            self._list_files(args)
        elif command == 'repos':
            self._repo_list(args)
        else:
            raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
    def _local(self, args):
        '''
        Process local commands

        Dispatches the ``spm local`` subcommand in ``args[1]`` to the
        matching handler; raises SPMInvocationError otherwise.
        '''
        args.pop(0)
        command = args[0]
        if command == 'install':
            self._local_install(args)
        elif command == 'files':
            self._local_list_files(args)
        elif command == 'info':
            self._local_info(args)
        else:
            raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
    def _repo(self, args):
        '''
        Process repo commands

        Dispatches the ``spm repo`` subcommand in ``args[1]`` to the
        matching handler; raises SPMInvocationError otherwise.
        '''
        args.pop(0)
        command = args[0]
        if command == 'list':
            self._repo_list(args)
        elif command == 'packages':
            self._repo_packages(args)
        elif command == 'search':
            # Search reuses the packages listing in search mode
            self._repo_packages(args, search=True)
        elif command == 'update':
            self._download_repo_metadata(args)
        elif command == 'create':
            self._create_repo(args)
        else:
            raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
    def _repo_packages(self, args, search=False):
        '''
        List packages for one or more configured repos

        ``args[1]`` is matched as a substring against package names.
        Returns the list of (name, version, release, repo) tuples.

        NOTE(review): ``search`` is accepted but unused (matching is
        substring-based either way), and a missing ``args[1]`` raises
        IndexError rather than SPMInvocationError -- confirm intent.
        '''
        packages = []
        repo_metadata = self._get_repo_metadata()
        for repo in repo_metadata:
            for pkg in repo_metadata[repo]['packages']:
                if args[1] in pkg:
                    version = repo_metadata[repo]['packages'][pkg]['info']['version']
                    release = repo_metadata[repo]['packages'][pkg]['info']['release']
                    packages.append((pkg, version, release, repo))
        for pkg in sorted(packages):
            self.ui.status(
                '{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
            )
        return packages
    def _repo_list(self, args):
        '''
        List configured repos
        This can be called either as a ``repo`` command or a ``list`` command

        ``args`` is accepted for interface uniformity but not used.
        '''
        repo_metadata = self._get_repo_metadata()
        for repo in repo_metadata:
            self.ui.status(repo)
    def _install(self, args):
        '''
        Install one or more packages, resolving dependencies first.

        Everything after ``args[0]`` is either a package name (looked up
        in the configured repos) or a path to a local ``.spm`` file.

        Raises SPMInvocationError when no package is given or a named
        ``.spm`` file does not exist.
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package must be specified')
        # Set up a masterless caller (for local states) and a local client
        # (for targeted states); both are used later by _install_indv_pkg
        caller_opts = self.opts.copy()
        caller_opts['file_client'] = 'local'
        self.caller = salt.client.Caller(mopts=caller_opts)
        self.client = salt.client.get_local_client(self.opts['conf_file'])
        # NOTE(review): this Cache instance is never used below -- confirm
        # whether it can be removed
        cache = salt.cache.Cache(self.opts)
        packages = args[1:]
        # Maps package name -> local .spm path for file-based installs
        file_map = {}
        optional = []
        recommended = []
        to_install = []
        for pkg in packages:
            if pkg.endswith('.spm'):
                if self._pkgfiles_fun('path_exists', pkg):
                    # Derive the package name by stripping the trailing
                    # "-<version>-<release>" from the file name
                    comps = pkg.split('-')
                    comps = os.path.split('-'.join(comps[:-2]))
                    pkg_name = comps[-1]
                    formula_tar = tarfile.open(pkg, 'r:bz2')
                    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                    formula_def = salt.utils.yaml.safe_load(formula_ref)
                    file_map[pkg_name] = pkg
                    to_, op_, re_ = self._check_all_deps(
                        pkg_name=pkg_name,
                        pkg_file=pkg,
                        formula_def=formula_def
                    )
                    to_install.extend(to_)
                    optional.extend(op_)
                    recommended.extend(re_)
                    formula_tar.close()
                else:
                    raise SPMInvocationError('Package file {0} not found'.format(pkg))
            else:
                # Repo-based package; the formula is looked up in repo metadata
                to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
        # Drop empty entries and de-duplicate before reporting
        optional = set(filter(len, optional))
        if optional:
            self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
                '\n\t'.join(optional)
            ))
        recommended = set(filter(len, recommended))
        if recommended:
            self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
                '\n\t'.join(recommended)
            ))
        to_install = set(filter(len, to_install))
        msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
        if not self.opts['assume_yes']:
            self.ui.confirm(msg)
        repo_metadata = self._get_repo_metadata()
        # dl_list maps package name -> download/dest info for the best
        # candidate: newest version/release, preferring file:// repos on ties
        dl_list = {}
        for package in to_install:
            if package in file_map:
                # Local files install directly; no download step needed
                self._install_indv_pkg(package, file_map[package])
            else:
                for repo in repo_metadata:
                    repo_info = repo_metadata[repo]
                    if package in repo_info['packages']:
                        dl_package = False
                        repo_ver = repo_info['packages'][package]['info']['version']
                        repo_rel = repo_info['packages'][package]['info']['release']
                        repo_url = repo_info['info']['url']
                        if package in dl_list:
                            # Check package version, replace if newer version
                            if repo_ver == dl_list[package]['version']:
                                # Version is the same, check release
                                if repo_rel > dl_list[package]['release']:
                                    dl_package = True
                                elif repo_rel == dl_list[package]['release']:
                                    # Version and release are the same, give
                                    # preference to local (file://) repos
                                    if dl_list[package]['source'].startswith('file://'):
                                        if not repo_url.startswith('file://'):
                                            dl_package = True
                            elif repo_ver > dl_list[package]['version']:
                                dl_package = True
                        else:
                            dl_package = True
                        if dl_package is True:
                            # Put together download directory
                            cache_path = os.path.join(
                                self.opts['spm_cache_dir'],
                                repo
                            )
                            # Put together download paths
                            dl_url = '{0}/{1}'.format(
                                repo_info['info']['url'],
                                repo_info['packages'][package]['filename']
                            )
                            out_file = os.path.join(
                                cache_path,
                                repo_info['packages'][package]['filename']
                            )
                            dl_list[package] = {
                                'version': repo_ver,
                                'release': repo_rel,
                                'source': dl_url,
                                'dest_dir': cache_path,
                                'dest_file': out_file,
                            }
        for package in dl_list:
            dl_url = dl_list[package]['source']
            cache_path = dl_list[package]['dest_dir']
            out_file = dl_list[package]['dest_file']
            # Make sure download directory exists
            if not os.path.exists(cache_path):
                os.makedirs(cache_path)
            # Download the package
            if dl_url.startswith('file://'):
                dl_url = dl_url.replace('file://', '')
                shutil.copyfile(dl_url, out_file)
            else:
                # NOTE(review): repo_info here is whatever repo the selection
                # loop above ended on, not necessarily the repo this package
                # was chosen from -- confirm auth settings still match
                with salt.utils.files.fopen(out_file, 'w') as outf:
                    outf.write(self._query_http(dl_url, repo_info['info']))
        # First we download everything, then we install
        for package in dl_list:
            out_file = dl_list[package]['dest_file']
            # Kick off the install
            self._install_indv_pkg(package, out_file)
        return
    def _local_install(self, args, pkg_name=None):
        '''
        Install a package from a local ``.spm`` file.

        Thin wrapper around :meth:`_install`, which already treats
        arguments ending in ``.spm`` as local files.

        pkg_name -- unused; kept for interface compatibility.
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package file must be specified')
        self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
'''
Starting with one package, check all packages for dependencies
'''
if pkg_file and not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
self.repo_metadata = self._get_repo_metadata()
if not formula_def:
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
if pkg_name in self.repo_metadata[repo]['packages']:
formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
if not formula_def:
raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
# Check to see if the package is already installed
pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
pkgs_to_install = []
if pkg_info is None or self.opts['force']:
pkgs_to_install.append(pkg_name)
elif pkg_info is not None and not self.opts['force']:
raise SPMPackageError(
'Package {0} already installed, not installing again'.format(formula_def['name'])
)
optional_install = []
recommended_install = []
if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
self.avail_pkgs = {}
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
for pkg in self.repo_metadata[repo]['packages']:
self.avail_pkgs[pkg] = repo
needs, unavail, optional, recommended = self._resolve_deps(formula_def)
if unavail:
raise SPMPackageError(
'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
formula_def['name'], '\n'.join(unavail))
)
if optional:
optional_install.extend(optional)
for dep_pkg in optional:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
optional_install.append(msg)
if recommended:
recommended_install.extend(recommended)
for dep_pkg in recommended:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
recommended_install.append(msg)
if needs:
pkgs_to_install.extend(needs)
for dep_pkg in needs:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package from a local SPM tarball.

    pkg_name: package name (the top-level directory inside the tar)
    pkg_file: path to the ``.spm`` (bzip2 tar) file

    Raises SPMPackageError when the FORMULA is incomplete, or when
    files already exist on disk and ``force`` is not set.
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)

    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))

    pkg_files = formula_tar.getmembers()

    # First pass: check for files that already exist
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)

    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )

    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)

    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            # BUG FIX: was misspelled 'timout', which passed an unknown
            # kwarg through to the remote function instead of bounding
            # the job.
            timeout=self.opts['timeout'],
            data=high_data,
        )

    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]

    # Second pass: install the files, stamping ownership on each member
    for member in pkg_files:
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname

        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                # Directories are registered without a content hash
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)

    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            timeout=self.opts['timeout'],  # BUG FIX: was 'timout'
            data=high_data,
        )

    formula_tar.close()
def _resolve_deps(self, formula_def):
'''
Return a list of packages which need to be installed, to resolve all
dependencies
'''
pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
if not isinstance(pkg_info, dict):
pkg_info = {}
can_has = {}
cant_has = []
if 'dependencies' in formula_def and formula_def['dependencies'] is None:
formula_def['dependencies'] = ''
for dep in formula_def.get('dependencies', '').split(','):
dep = dep.strip()
if not dep:
continue
if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
continue
if dep in self.avail_pkgs:
can_has[dep] = self.avail_pkgs[dep]
else:
cant_has.append(dep)
optional = formula_def.get('optional', '').split(',')
recommended = formula_def.get('recommended', '').split(',')
inspected = []
to_inspect = can_has.copy()
while to_inspect:
dep = next(six.iterkeys(to_inspect))
del to_inspect[dep]
# Don't try to resolve the same package more than once
if dep in inspected:
continue
inspected.append(dep)
repo_contents = self.repo_metadata.get(can_has[dep], {})
repo_packages = repo_contents.get('packages', {})
dep_formula = repo_packages.get(dep, {}).get('info', {})
also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
can_has.update(also_can)
cant_has = sorted(set(cant_has + also_cant))
optional = sorted(set(optional + opt_dep))
recommended = sorted(set(recommended + rec_dep))
return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided in
    the callback to them

    Reads the main ``spm_repos_config`` file (if present) plus every
    ``*.repo`` file under ``<spm_repos_config>.d/``. Repos with
    ``enabled: False`` are skipped; when ``repo_name`` is given, only
    that repo triggers the callback.
    '''
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])

    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            repo_files.append(repo_file)

    for repo_file in repo_files:
        if repo_file == self.opts['spm_repos_config']:
            # BUG FIX: the main config file is stored as a full path;
            # the original re-joined it under the ``.d`` directory,
            # producing a bogus path like ``/etc/.../spm.repos.d//etc/...``.
            repo_path = repo_file
        else:
            repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
            for repo in repo_data:
                if repo_data[repo].get('enabled', True) is False:
                    continue
                if repo_name is not None and repo != repo_name:
                    continue
                callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http

    Returns the response body as text, or parsed YAML when ``dl_path``
    points at an SPM-METADATA file. Returns None after reporting an
    error through the UI when the request fails or auth is incomplete.
    '''
    query = None
    response = None

    try:
        if 'username' in repo_info:
            try:
                if 'password' in repo_info:
                    # Authenticated request
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    # A username without a password is a configuration error
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            # Anonymous request
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    try:
        if query:
            if 'SPM-METADATA' in dl_path:
                # Metadata downloads are parsed straight into a dict
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            # Errors above leave ``query`` as None
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    return response
def _download_repo_metadata(self, args):
    '''
    Connect to all repos and download metadata

    ``args[1]``, when present, limits the refresh to a single repo;
    otherwise every configured repo is updated.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

    def _update_metadata(repo, repo_info):
        # Per-repo callback: fetch SPM-METADATA and store it in the cache
        dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
        if dl_path.startswith('file://'):
            # Local repo: read the metadata file directly from disk
            dl_path = dl_path.replace('file://', '')
            with salt.utils.files.fopen(dl_path, 'r') as rpm:
                metadata = salt.utils.yaml.safe_load(rpm)
        else:
            # Remote repo: fetch (and YAML-parse) over HTTP
            metadata = self._query_http(dl_path, repo_info)
        cache.store('.', repo, metadata)

    repo_name = args[1] if len(args) > 1 else None
    self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata

    The result maps repo name to ``{'info': <repo config>,
    'packages': <cached SPM-METADATA>}``. Repos with no cache entry
    yet trigger a metadata download first.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    metadata = {}

    def _read_metadata(repo, repo_info):
        # Refresh the cache first if this repo has never been fetched
        if cache.updated('.', repo) is None:
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})

        metadata[repo] = {
            'info': repo_info,
            'packages': cache.fetch('.', repo),
        }

    self._traverse_repos(_read_metadata)
    return metadata
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    When two builds of the same package are present, only the newest
    version/release is indexed; the older file is handled according to
    the ``spm_repo_dups`` option (``ignore``, ``archive`` or ``delete``).
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')

    if args[1] == '.':
        # BUG FIX: os.getcwdu() is Python 2 only (AttributeError on
        # Python 3); os.getcwd() works on both versions.
        repo_path = os.getcwd()
    else:
        repo_path = args[1]

    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # SPM files are named <name>-<version>-<release>.spm
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
            # BUG FIX: the original leaked one tar handle per scanned file
            spm_fh.close()

            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                # NOTE(review): int() comparison assumes purely numeric
                # version/release fields; dotted versions would raise
                # ValueError here -- confirm against the FORMULA spec.
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False
                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)
            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file
    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )

    log.debug('Wrote %s', metadata_filename)

    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove a package

    Every package named in ``args[1:]`` is removed: unchanged files are
    deleted (files modified since install are left in place), emptied
    directories are pruned deepest-first, and the package is
    unregistered from the database.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))

    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    for package in packages:
        self.ui.status('... removing {0}'.format(package))

        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))

        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))

        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                # Directories are pruned after the files, below
                dirs.append(filerow[0])
                continue
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                # Hash matches the recorded one: file unmodified, safe to remove
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                # File was changed after install; leave it on disk
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)

        # Clean up directories (reverse sort removes the deepest paths first)
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)

        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
    '''
    Emit ``msg`` through the UI and the given log method, but only when
    the ``verbose`` option is enabled.
    '''
    if self.opts.get('verbose', False) is not True:
        return
    self.ui.status(msg)
    level(msg)
def _local_info(self, args):
    '''
    List info for a package file

    ``args[1]`` is the path to a local ``.spm`` file; its FORMULA is
    read and rendered via :meth:`_get_info`.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')

    pkg_file = args[1]

    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    # Package files are named <name>-<version>-<release>.spm; strip the
    # trailing version/release fields and any leading directories.
    comps = pkg_file.split('-')
    comps = '-'.join(comps[:-2]).split('/')
    name = comps[-1]

    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    try:
        formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
        formula_def = salt.utils.yaml.safe_load(formula_ref)
        self.ui.status(self._get_info(formula_def))
    finally:
        # BUG FIX: the original leaked the tar handle when the FORMULA
        # was missing or malformed; always close it.
        formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
    '''
    Build a package

    ``args[1]`` is the path to a formula directory containing a FORMULA
    file. The resulting ``<name>-<version>-<release>.spm`` tarball is
    written to ``spm_build_dir``.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')

    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]

    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)

    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))

    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )

    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])

    self.formula_conf = formula_conf

    formula_tar = tarfile.open(out_path, 'w:bz2')

    if 'files' in formula_conf:
        # This allows files to be added to the SPM file in a specific order.
        # It also allows for files to be tagged as a certain type, as with
        # RPM files. This tag is ignored here, but is used when installing
        # the SPM file.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    if file_.startswith('{0}|'.format(ftype)):
                        # BUG FIX: the original used str.lstrip(), which
                        # strips a *character set*, not a prefix -- e.g.
                        # tag 'c' on 'c|conf' would also eat the leading
                        # 'c' of the filename. Slice the '<tag>|' prefix
                        # off instead, and stop after the first match.
                        file_ = file_[len(ftype) + 1:]
                        break
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # If no files are specified, then the whole directory will be added.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            # Old tarfile versions take ``exclude`` instead of ``filter``
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()

    self.ui.status('Built package {0}'.format(out_path))
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script

    The renderer pipeline is taken from the FORMULA when set, falling
    back to the configured ``renderer`` option (default ``jinja|yaml``).
    The formula fields plus a copy of ``opts`` are exposed as template
    variables.
    '''
    # FORMULA can contain a renderer option
    renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
    rend = salt.loader.render(self.opts, {})
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()
    return compile_template(
        ':string:',
        rend,
        renderer,
        self.opts.get('renderer_blacklist'),
        self.opts.get('renderer_whitelist'),
        input_data=data,
        **template_vars
    )
|
saltstack/salt
|
salt/spm/__init__.py
|
SPMClient._render
|
python
|
def _render(self, data, formula_def):
'''
Render a [pre|post]_local_state or [pre|post]_tgt_state script
'''
# FORMULA can contain a renderer option
renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
rend = salt.loader.render(self.opts, {})
blacklist = self.opts.get('renderer_blacklist')
whitelist = self.opts.get('renderer_whitelist')
template_vars = formula_def.copy()
template_vars['opts'] = self.opts.copy()
return compile_template(
':string:',
rend,
renderer,
blacklist,
whitelist,
input_data=data,
**template_vars
)
|
Render a [pre|post]_local_state or [pre|post]_tgt_state script
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L1093-L1112
| null |
class SPMClient(object):
'''
Provide an SPM Client
'''
def __init__(self, ui, opts=None):  # pylint: disable=W0231
    '''
    Create an SPM client.

    ui: an SPM user-interface object providing status/error output.
    opts: optional pre-built configuration dict; when omitted the spm
    config is loaded from the default CONFIG_DIR.
    '''
    self.ui = ui
    if not opts:
        opts = salt.config.spm_config(
            os.path.join(syspaths.CONFIG_DIR, 'spm')
        )
    self.opts = opts
    # Pluggable backends: package metadata database and file storage
    self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
    self.files_prov = self.opts.get('spm_files_provider', 'local')
    self._prep_pkgdb()
    self._prep_pkgfiles()
    # Provider connections are opened lazily by _init()
    self.db_conn = None
    self.files_conn = None
    self._init()
def _prep_pkgdb(self):
    # Load the package-database provider modules via the salt loader
    self.pkgdb = salt.loader.pkgdb(self.opts)
def _prep_pkgfiles(self):
    # Load the package-file storage provider modules via the salt loader
    self.pkgfiles = salt.loader.pkgfiles(self.opts)
def _init(self):
    # Lazily open the provider connections; safe to call more than once
    if not self.db_conn:
        self.db_conn = self._pkgdb_fun('init')
    if not self.files_conn:
        self.files_conn = self._pkgfiles_fun('init')
def _close(self):
    # Close the database connection if one was opened.
    # NOTE(review): files_conn is never closed here -- confirm whether
    # the files provider needs explicit cleanup.
    if self.db_conn:
        self.db_conn.close()
def run(self, args):
    '''
    Run the SPM command

    ``args[0]`` selects the subcommand; SPM errors are reported through
    the UI rather than propagated.
    '''
    command = args[0]
    dispatch = {
        'install': self._install,
        'local': self._local,
        'repo': self._repo,
        'remove': self._remove,
        'build': self._build,
        'update_repo': self._download_repo_metadata,
        'create_repo': self._create_repo,
        'files': self._list_files,
        'info': self._info,
        'list': self._list,
    }
    try:
        if command == 'close':
            # 'close' takes no argument list
            self._close()
        elif command in dispatch:
            dispatch[command](args)
        else:
            raise SPMInvocationError('Invalid command \'{0}\''.format(command))
    except SPMException as exc:
        self.ui.error(six.text_type(exc))
def _pkgdb_fun(self, func, *args, **kwargs):
    '''
    Call ``func`` on the configured package-database provider.
    '''
    try:
        # Prefer direct attribute dispatch on the loaded provider module
        return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
    except AttributeError:
        # Fall back to LazyLoader-style 'provider.func' key lookup.
        # NOTE(review): an AttributeError raised *inside* the provider
        # function also lands here and re-invokes the function via the
        # loader -- confirm that double-call is intended.
        return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)
def _pkgfiles_fun(self, func, *args, **kwargs):
    '''
    Call ``func`` on the configured package-files provider.
    '''
    try:
        # Prefer direct attribute dispatch on the loaded provider module
        return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
    except AttributeError:
        # Fall back to LazyLoader-style 'provider.func' key lookup.
        # NOTE(review): an AttributeError raised *inside* the provider
        # function also lands here and re-invokes the function via the
        # loader -- confirm that double-call is intended.
        return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)
def _list(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'packages':
self._list_packages(args)
elif command == 'files':
self._list_files(args)
elif command == 'repos':
self._repo_list(args)
else:
raise SPMInvocationError('Invalid list command \'{0}\''.format(command))
def _local(self, args):
'''
Process local commands
'''
args.pop(0)
command = args[0]
if command == 'install':
self._local_install(args)
elif command == 'files':
self._local_list_files(args)
elif command == 'info':
self._local_info(args)
else:
raise SPMInvocationError('Invalid local command \'{0}\''.format(command))
def _repo(self, args):
'''
Process repo commands
'''
args.pop(0)
command = args[0]
if command == 'list':
self._repo_list(args)
elif command == 'packages':
self._repo_packages(args)
elif command == 'search':
self._repo_packages(args, search=True)
elif command == 'update':
self._download_repo_metadata(args)
elif command == 'create':
self._create_repo(args)
else:
raise SPMInvocationError('Invalid repo command \'{0}\''.format(command))
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages
def _repo_list(self, args):
'''
List configured repos
This can be called either as a ``repo`` command or a ``list`` command
'''
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
self.ui.status(repo)
def _install(self, args):
    '''
    Install a package from a repo

    ``args[1:]`` may mix local ``.spm`` file paths and repo package
    names. Dependencies are resolved first, the user is asked to
    confirm (unless ``assume_yes``), then every repo package is
    downloaded into the SPM cache and finally each package is
    installed individually.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    cache = salt.cache.Cache(self.opts)

    packages = args[1:]
    file_map = {}  # package name -> local .spm path, for file-based installs
    optional = []
    recommended = []
    to_install = []
    for pkg in packages:
        if pkg.endswith('.spm'):
            # Install from a local package file
            if self._pkgfiles_fun('path_exists', pkg):
                # File names follow <name>-<version>-<release>.spm
                comps = pkg.split('-')
                comps = os.path.split('-'.join(comps[:-2]))
                pkg_name = comps[-1]

                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = salt.utils.yaml.safe_load(formula_ref)

                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(
                    pkg_name=pkg_name,
                    pkg_file=pkg,
                    formula_def=formula_def
                )
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
                formula_tar.close()
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            # Install from a configured repo
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)

    # filter(len, ...) drops empty strings left over from dep resolution
    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))

    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    repo_metadata = self._get_repo_metadata()

    # Pick, for each repo package, the best candidate across all repos
    dl_list = {}
    for package in to_install:
        if package in file_map:
            # Local file: install immediately, no download step needed
            self._install_indv_pkg(package, file_map[package])
        else:
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        # Check package version, replace if newer version
                        # NOTE(review): versions/releases compare as
                        # strings here, so '10' < '9' -- confirm fields
                        # are fixed-width or numeric by convention.
                        if repo_ver == dl_list[package]['version']:
                            # Version is the same, check release
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                # Version and release are the same, give
                                # preference to local (file://) repos
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        # First repo seen offering this package
                        dl_package = True

                    if dl_package is True:
                        # Put together download directory
                        cache_path = os.path.join(
                            self.opts['spm_cache_dir'],
                            repo
                        )

                        # Put together download paths
                        dl_url = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = os.path.join(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )

                        dl_list[package] = {
                            'version': repo_ver,
                            'release': repo_rel,
                            'source': dl_url,
                            'dest_dir': cache_path,
                            'dest_file': out_file,
                        }

    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']

        # Make sure download directory exists
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)

        # Download the package
        if dl_url.startswith('file://'):
            # Local repo: plain file copy
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            # NOTE(review): the package is fetched as text and written in
            # text mode ('w'); confirm binary .spm payloads survive this
            # on Python 3.
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, repo_info['info']))

    # First we download everything, then we install
    for package in dl_list:
        out_file = dl_list[package]['dest_file']

        # Kick off the install
        self._install_indv_pkg(package, out_file)
    return
def _local_install(self, args, pkg_name=None):
'''
Install a package from a file
'''
if len(args) < 2:
raise SPMInvocationError('A package file must be specified')
self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
'''
Starting with one package, check all packages for dependencies
'''
if pkg_file and not os.path.exists(pkg_file):
raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
self.repo_metadata = self._get_repo_metadata()
if not formula_def:
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
if pkg_name in self.repo_metadata[repo]['packages']:
formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
if not formula_def:
raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))
# Check to see if the package is already installed
pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
pkgs_to_install = []
if pkg_info is None or self.opts['force']:
pkgs_to_install.append(pkg_name)
elif pkg_info is not None and not self.opts['force']:
raise SPMPackageError(
'Package {0} already installed, not installing again'.format(formula_def['name'])
)
optional_install = []
recommended_install = []
if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
self.avail_pkgs = {}
for repo in self.repo_metadata:
if not isinstance(self.repo_metadata[repo]['packages'], dict):
continue
for pkg in self.repo_metadata[repo]['packages']:
self.avail_pkgs[pkg] = repo
needs, unavail, optional, recommended = self._resolve_deps(formula_def)
if unavail:
raise SPMPackageError(
'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
formula_def['name'], '\n'.join(unavail))
)
if optional:
optional_install.extend(optional)
for dep_pkg in optional:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
optional_install.append(msg)
if recommended:
recommended_install.extend(recommended)
for dep_pkg in recommended:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
recommended_install.append(msg)
if needs:
pkgs_to_install.extend(needs)
for dep_pkg in needs:
pkg_info = self._pkgdb_fun('info', formula_def['name'])
msg = dep_pkg
if isinstance(pkg_info, dict):
msg = '{0} [Installed]'.format(dep_pkg)
return pkgs_to_install, optional_install, recommended_install
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package from a local ``.spm`` file.

    Args:
        pkg_name (str): the package name; also the top-level directory
            inside the tarball that holds the FORMULA file.
        pkg_file (str): path to the bzip2-compressed package tarball.

    Raises:
        SPMPackageError: when the FORMULA is missing a required field, or
            existing files would be overwritten without ``force``.
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)

    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))

    pkg_files = formula_tar.getmembers()

    # First pass: check for files that already exist
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)

    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
            pkg_name, '\n'.join(existing_files))
        )

    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)

    # Run the pre_local_state script, if present
    if 'pre_local_state' in formula_def:
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        ret = self.caller.cmd('state.high', data=high_data)
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        tgt = formula_def['pre_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['pre_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
            # Fixed: this kwarg was misspelled 'timout', so the configured
            # timeout was silently dropped by run_job.
            timeout=self.opts['timeout'],
            data=high_data,
        )

    # No defaults for this in config.py; default to the current running
    # user and group
    if salt.utils.platform.is_windows():
        uname = gname = salt.utils.win_functions.get_current_user()
        uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
        uid = self.opts.get('spm_uid', uname_sid)
        gid = self.opts.get('spm_gid', uname_sid)
    else:
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]

    # Second pass: install the files, stamping ownership on each member
    for member in pkg_files:
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname

        out_path = self._pkgfiles_fun('install_file',
                                      pkg_name,
                                      formula_tar,
                                      member,
                                      formula_def,
                                      self.files_conn)
        if out_path is not False:
            if member.isdir():
                # Directories get no content hash
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file',
                                            os.path.join(out_path, member.name),
                                            file_hash,
                                            self.files_conn)
            self._pkgdb_fun('register_file',
                            pkg_name,
                            member,
                            out_path,
                            digest,
                            self.db_conn)

    # Run the post_local_state script, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        tgt = formula_def['post_tgt_state']['tgt']
        ret = self.client.run_job(
            tgt=formula_def['post_tgt_state']['tgt'],
            fun='state.high',
            tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
            # Fixed: 'timeout', not 'timout' (same typo as the pre-state call)
            timeout=self.opts['timeout'],
            data=high_data,
        )

    formula_tar.close()
def _resolve_deps(self, formula_def):
    '''
    Resolve the dependency tree for a formula.

    Args:
        formula_def (dict): FORMULA metadata; the comma-separated
            ``dependencies``, ``optional`` and ``recommended`` fields
            are consulted.

    Returns:
        tuple: ``(can_has, cant_has, optional, recommended)`` where
        ``can_has`` maps each installable dependency to the repo that
        provides it, ``cant_has`` is a sorted list of dependencies no
        known repo provides, and ``optional``/``recommended`` are the
        accumulated optional/recommended package names.
    '''
    # NOTE(review): pkg_info is computed here but never used afterwards —
    # confirm whether it can be dropped.
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}

    can_has = {}
    cant_has = []
    # A bare `dependencies:` key in the FORMULA loads from YAML as None;
    # normalize it to an empty string before splitting.
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Skip dependencies that are already installed locally.
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue
        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)

    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')

    # Walk the dependency graph breadth-wise, recursing into each
    # dependency's own FORMULA metadata from its providing repo.
    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]

        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)

        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})

        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))

    return can_has, cant_has, optional, recommended
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided in
    the callback to them.

    Args:
        callback (callable): invoked as ``callback(repo, repo_data[repo])``
            for every enabled repo definition found.
        repo_name (str): when given, only the repo with this name is
            passed to the callback.
    '''
    # Collect the main config file plus every *.repo file under the
    # accompanying ``<spm_repos_config>.d`` directory.
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])

    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            # Store the full path. The previous code stored the bare
            # filename and re-joined '{cfg}.d/{name}' later, which broke
            # nested subdirectories and mangled the main config file's
            # entry into '{cfg}.d/{full_path}'.
            repo_files.append(os.path.join(dirpath, repo_file))

    for repo_path in repo_files:
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
        for repo in repo_data:
            # Repos default to enabled unless explicitly disabled.
            if repo_data[repo].get('enabled', True) is False:
                continue
            if repo_name is not None and repo != repo_name:
                continue
            callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download files via http(s).

    Args:
        dl_path (str): full URL to fetch.
        repo_info (dict): repo definition; ``username``/``password`` keys
            enable basic authentication.

    Returns:
        The parsed YAML document when ``dl_path`` points at an
        ``SPM-METADATA`` file, the raw response text otherwise, or
        ``None`` on failure (errors are reported via ``self.ui.error``
        instead of being raised to the caller).
    '''
    query = None
    response = None

    try:
        if 'username' in repo_info:
            try:
                if 'password' in repo_info:
                    query = http.query(
                        dl_path, text=True,
                        username=repo_info['username'],
                        password=repo_info['password']
                    )
                else:
                    # A username without a password is a configuration error.
                    raise SPMException('Auth defined, but password is not set for username: \'{0}\''
                                       .format(repo_info['username']))
            except SPMException as exc:
                self.ui.error(six.text_type(exc))
        else:
            query = http.query(dl_path, text=True)
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    try:
        if query:
            if 'SPM-METADATA' in dl_path:
                # Metadata endpoints return YAML documents.
                response = salt.utils.yaml.safe_load(query.get('text', '{}'))
            else:
                response = query.get('text')
        else:
            # Reached when the request above failed (query is still None).
            raise SPMException('Response is empty, please check for Errors above.')
    except SPMException as exc:
        self.ui.error(six.text_type(exc))

    return response
def _download_repo_metadata(self, args):
    '''
    Connect to all repos and download metadata into the local cache.

    Args:
        args (list): CLI arguments; ``args[1]``, when present, limits the
            update to that single repo name.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

    def _update_metadata(repo, repo_info):
        # Fetch SPM-METADATA either from the local filesystem (file://
        # URLs) or over HTTP.
        dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
        if dl_path.startswith('file://'):
            dl_path = dl_path.replace('file://', '')
            with salt.utils.files.fopen(dl_path, 'r') as rpm:
                metadata = salt.utils.yaml.safe_load(rpm)
        else:
            metadata = self._query_http(dl_path, repo_info)

        # Persist the metadata in the cache, keyed by repo name.
        cache.store('.', repo, metadata)

    repo_name = args[1] if len(args) > 1 else None
    self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata for every configured repo.

    Returns:
        dict: maps each repo name to ``{'info': <repo config>,
        'packages': <cached SPM-METADATA contents>}``.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    metadata = {}

    def _read_metadata(repo, repo_info):
        # Populate the cache on first use (or when it was never stored).
        if cache.updated('.', repo) is None:
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})

        metadata[repo] = {
            'info': repo_info,
            'packages': cache.fetch('.', repo),
        }

    self._traverse_repos(_read_metadata)
    return metadata
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes
    all of the SPM files in that directory.

    Args:
        args (list): CLI arguments; ``args[1]`` is the repo directory
            (``.`` for the current working directory).

    Raises:
        SPMInvocationError: when no directory is given.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')

    if args[1] == '.':
        # Fixed: os.getcwdu() only exists on Python 2; os.getcwd() works
        # on both Python 2 and 3.
        repo_path = os.getcwd()
    else:
        repo_path = args[1]

    old_files = []
    repo_metadata = {}
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # Package files are named <name>-<version>-<release>.spm
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
            formula_conf = salt.utils.yaml.safe_load(formula_handle.read())
            # Fixed: close the tarball instead of leaking one open file
            # handle per package in the repo.
            spm_fh.close()

            use_formula = True
            if spm_name in repo_metadata:
                # This package is already in the repo; use the latest
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    # Version is the same, check release
                    if int(new_info['release']) < int(cur_info['release']):
                        # This is an old release; don't use it
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    # This is an old version; don't use it
                    use_formula = False

                if use_formula is True:
                    # Ignore/archive/delete the old version
                    log.debug(
                        '%s %s-%s had been added, but %s-%s will replace it',
                        spm_name, cur_info['version'], cur_info['release'],
                        new_info['version'], new_info['release']
                    )
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    # Ignore/archive/delete the new version
                    log.debug(
                        '%s %s-%s has been found, but is older than %s-%s',
                        spm_name, new_info['version'], new_info['release'],
                        cur_info['version'], cur_info['release']
                    )
                    old_files.append(spm_file)

            if use_formula is True:
                log.debug(
                    'adding %s-%s-%s to the repo',
                    formula_conf['name'], formula_conf['version'],
                    formula_conf['release']
                )
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file

    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        salt.utils.yaml.safe_dump(
            repo_metadata,
            mfh,
            indent=4,
            canonical=False,
            default_flow_style=False,
        )

    log.debug('Wrote %s', metadata_filename)

    # Handle superseded package files per the spm_repo_dups policy.
    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            # ignore old packages, but still only add the latest
            log.debug('%s will be left in the directory', file_)
        elif self.opts['spm_repo_dups'] == 'archive':
            # spm_repo_archive_path is where old packages are moved
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('%s has been archived', file_)
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive %s', file_)
        elif self.opts['spm_repo_dups'] == 'delete':
            # delete old packages from the repo
            try:
                os.remove(file_)
                log.debug('%s has been deleted', file_)
            except IOError:
                log.error('Unable to delete %s', file_)
            except OSError:
                # The file has already been deleted
                pass
def _remove(self, args):
    '''
    Remove one or more installed packages.

    Args:
        args (list): CLI arguments; ``args[1:]`` are the package names.

    Raises:
        SPMInvocationError: when no package is given or a package is not
            installed.
        SPMDatabaseError: when the package database does not exist.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    packages = args[1:]
    msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))

    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    for package in packages:
        self.ui.status('... removing {0}'.format(package))

        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))

        # Look at local repo index
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))

        # Find files that have not changed and remove them
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                dirs.append(filerow[0])
                continue
            # Only delete files whose on-disk hash still matches the hash
            # recorded at install time, i.e. locally modified files are
            # left behind (but still unregistered from the database).
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)

        # Clean up directories, deepest paths first
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                # Leave directories in place that still have files in them
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)

        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
    '''
    Record *msg* through the logger at the given level, and echo it on
    the interactive UI when the ``verbose`` option is enabled.
    '''
    # Echo to the UI only for an explicit boolean True setting.
    verbose_enabled = self.opts.get('verbose', False)
    if verbose_enabled is True:
        self.ui.status(msg)
    # Always emit the message at the requested log level.
    level(msg)
def _local_info(self, args):
    '''
    Display the FORMULA metadata contained in a local package file.

    Args:
        args (list): CLI arguments; ``args[1]`` is the path to a
            ``.spm`` file.

    Raises:
        SPMInvocationError: when no filename is given or the file does
            not exist.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')

    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    # Package files are named <name>-<version>-<release>.spm; strip the
    # trailing version/release components and any leading directories to
    # recover the package name (the top-level dir inside the tarball).
    comps = pkg_file.split('-')
    comps = '-'.join(comps[:-2]).split('/')
    name = comps[-1]

    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
    formula_def = salt.utils.yaml.safe_load(formula_ref)

    self.ui.status(self._get_info(formula_def))
    formula_tar.close()
def _info(self, args):
'''
List info for a package
'''
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
package = args[1]
pkg_info = self._pkgdb_fun('info', package, self.db_conn)
if pkg_info is None:
raise SPMPackageError('package {0} not installed'.format(package))
self.ui.status(self._get_info(pkg_info))
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def)
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
def _list_packages(self, args):
'''
List files for an installed package
'''
packages = self._pkgdb_fun('list_packages', self.db_conn)
for package in packages:
if self.opts['verbose']:
status_msg = ','.join(package)
else:
status_msg = package[0]
self.ui.status(status_msg)
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg)
def _build(self, args):
    '''
    Build a package from a formula directory.

    Args:
        args (list): CLI arguments; ``args[1]`` is the path to the
            formula directory, which must contain a FORMULA file.

    Raises:
        SPMInvocationError: when no path is given.
        SPMPackageError: when the FORMULA file is missing or lacks a
            required field.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')

    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]

    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = salt.utils.yaml.safe_load(fp_)

    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))

    out_path = '{0}/{1}-{2}-{3}.spm'.format(
        self.opts['spm_build_dir'],
        formula_conf['name'],
        formula_conf['version'],
        formula_conf['release'],
    )

    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])

    self.formula_conf = formula_conf

    formula_tar = tarfile.open(out_path, 'w:bz2')

    if 'files' in formula_conf:
        # This allows files to be added to the SPM file in a specific order.
        # It also allows for files to be tagged as a certain type, as with
        # RPM files. This tag is ignored here, but is used when installing
        # the SPM file.
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    if file_.startswith('{0}|'.format(ftype)):
                        # Fixed: str.lstrip() strips a *character set*,
                        # not a prefix, so it could also eat leading
                        # characters of the filename itself. Slice the
                        # '<type>|' tag off instead.
                        file_ = file_[len(ftype) + 1:]
                formula_tar.add(
                    os.path.join(os.getcwd(), file_),
                    os.path.join(formula_conf['name'], file_),
                )
    else:
        # If no files are specified, then the whole directory will be added.
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            # Older tarfile (pre-2.7) only supports the exclude= callback
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()

    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
    '''
    Tar add() callback: return ``None`` for entries matching one of the
    ``spm_build_exclude`` patterns (so they are skipped), or the member
    itself to include it.
    '''
    # The exclude= form of tarfile.add() passes names as strings;
    # those are never matched here.
    if isinstance(member, string_types):
        return None

    for item in self.opts['spm_build_exclude']:
        name_prefix = '{0}/{1}'.format(self.formula_conf['name'], item)
        path_prefix = '{0}/{1}'.format(self.abspath, item)
        # Exclude when the archive name matches under either the package
        # name or the absolute source path.
        if member.name.startswith((name_prefix, path_prefix)):
            return None
    return member
|
saltstack/salt
|
salt/modules/boto_sqs.py
|
_preprocess_attributes
|
python
|
def _preprocess_attributes(attributes):
'''
Pre-process incoming queue attributes before setting them
'''
if isinstance(attributes, six.string_types):
attributes = salt.utils.json.loads(attributes)
def stringified(val):
# Some attributes take full json policy documents, but they take them
# as json strings. Convert the value back into a json string.
if isinstance(val, dict):
return salt.utils.json.dumps(val)
return val
return dict(
(attr, stringified(val)) for attr, val in six.iteritems(attributes)
)
|
Pre-process incoming queue attributes before setting them
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_sqs.py#L88-L104
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n"
] |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon SQS
.. versionadded:: 2014.7.0
:configuration: This module accepts explicit sqs credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
sqs.keyid: GKTADJGHEIQSXMKKRBJ08H
sqs.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
sqs.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
# Import Salt libs
import salt.utils.json
import salt.utils.versions
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module
log = logging.getLogger(__name__)
__func_alias__ = {
'list_': 'list',
}
# Import third party libs
try:
# pylint: disable=unused-import
import boto3
import botocore
# pylint: enable=unused-import
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def __virtual__():
    '''
    Only load if boto3 libraries exist.
    '''
    reqs_check = salt.utils.versions.check_boto_reqs()
    # Wire the generic boto3 helpers into this module only when the
    # requirement check passed outright (it may also return an error
    # string, which is propagated to the loader below).
    if reqs_check is True:
        __utils__['boto3.assign_funcs'](__name__, 'sqs')
    return reqs_check
def exists(name, region=None, key=None, keyid=None, profile=None):
    '''
    Check to see if a queue exists.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_sqs.exists myqueue region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        conn.get_queue_url(QueueName=name)
        return {'result': True}
    except botocore.exceptions.ClientError as e:
        # SQS signals "no such queue" with a dedicated error code;
        # anything else is a genuine failure.
        missing_code = 'AWS.SimpleQueueService.NonExistentQueue'
        error_code = e.response.get('Error', {}).get('Code')
        if error_code == missing_code:
            return {'result': False}
        return {'error': __utils__['boto3.get_error'](e)}
def create(
    name,
    attributes=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    '''
    Create an SQS queue.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_sqs.create myqueue region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    # Normalize to a dict and JSON-stringify any document-valued attributes.
    queue_attributes = _preprocess_attributes({} if attributes is None else attributes)
    try:
        conn.create_queue(QueueName=name, Attributes=queue_attributes)
    except botocore.exceptions.ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
    return {'result': True}
def delete(name, region=None, key=None, keyid=None, profile=None):
    '''
    Delete an SQS queue.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_sqs.delete myqueue region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        # The delete API is keyed on queue URL, so resolve the name first.
        queue_url = conn.get_queue_url(QueueName=name)['QueueUrl']
        conn.delete_queue(QueueUrl=queue_url)
    except botocore.exceptions.ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
    return {'result': True}
def list_(prefix='', region=None, key=None, keyid=None, profile=None):
    '''
    Return a list of the names of all visible queues.

    .. versionadded:: 2016.11.0

    CLI Example:

    .. code-block:: bash

        salt myminion boto_sqs.list region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        listing = conn.list_queues(QueueNamePrefix=prefix)
    except botocore.exceptions.ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
    # The 'QueueUrls' attribute is missing if there are no queues.
    # The queue name is the third path segment of each URL
    # (note: this logic taken from boto, so should be safe).
    queue_urls = listing.get('QueueUrls', [])
    return {'result': [_urlparse(u).path.split('/')[2] for u in queue_urls]}
def get_attributes(name, region=None, key=None, keyid=None, profile=None):
    '''
    Return attributes currently set on an SQS queue.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_sqs.get_attributes myqueue
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        # Resolve the queue name to its URL, then fetch every attribute.
        queue_url = conn.get_queue_url(QueueName=name)['QueueUrl']
        resp = conn.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'])
    except botocore.exceptions.ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
    return {'result': resp['Attributes']}
def set_attributes(
    name,
    attributes,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    '''
    Set attributes on an SQS queue.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_sqs.set_attributes myqueue '{ReceiveMessageWaitTimeSeconds: 20}' region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    # Normalize string input and JSON-stringify document-valued attributes.
    queue_attributes = _preprocess_attributes(attributes)
    try:
        queue_url = conn.get_queue_url(QueueName=name)['QueueUrl']
        conn.set_queue_attributes(QueueUrl=queue_url, Attributes=queue_attributes)
    except botocore.exceptions.ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
    return {'result': True}
|
saltstack/salt
|
salt/modules/boto_sqs.py
|
exists
|
python
|
def exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if a queue exists.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.exists myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.get_queue_url(QueueName=name)
except botocore.exceptions.ClientError as e:
missing_code = 'AWS.SimpleQueueService.NonExistentQueue'
if e.response.get('Error', {}).get('Code') == missing_code:
return {'result': False}
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
|
Check to see if a queue exists.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.exists myqueue region=us-east-1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_sqs.py#L107-L126
| null |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon SQS
.. versionadded:: 2014.7.0
:configuration: This module accepts explicit sqs credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
sqs.keyid: GKTADJGHEIQSXMKKRBJ08H
sqs.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
sqs.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
# Import Salt libs
import salt.utils.json
import salt.utils.versions
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module
log = logging.getLogger(__name__)
__func_alias__ = {
'list_': 'list',
}
# Import third party libs
try:
# pylint: disable=unused-import
import boto3
import botocore
# pylint: enable=unused-import
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def __virtual__():
'''
Only load if boto3 libraries exist.
'''
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
__utils__['boto3.assign_funcs'](__name__, 'sqs')
return has_boto_reqs
def _preprocess_attributes(attributes):
    '''
    Pre-process incoming queue attributes before setting them
    '''
    # Accept a JSON string (e.g. from the CLI) as well as a dict.
    if isinstance(attributes, six.string_types):
        attributes = salt.utils.json.loads(attributes)

    def _as_string(value):
        # Some attributes take full json policy documents, but the API
        # wants them as json strings — serialize dicts back to JSON.
        return salt.utils.json.dumps(value) if isinstance(value, dict) else value

    return {attr: _as_string(val) for attr, val in six.iteritems(attributes)}
def create(
name,
attributes=None,
region=None,
key=None,
keyid=None,
profile=None,
):
'''
Create an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.create myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if attributes is None:
attributes = {}
attributes = _preprocess_attributes(attributes)
try:
conn.create_queue(QueueName=name, Attributes=attributes)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
def delete(name, region=None, key=None, keyid=None, profile=None):
'''
Delete an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.delete myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
conn.delete_queue(QueueUrl=url)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
def list_(prefix='', region=None, key=None, keyid=None, profile=None):
'''
Return a list of the names of all visible queues.
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.list region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
def extract_name(queue_url):
# Note: this logic taken from boto, so should be safe
return _urlparse(queue_url).path.split('/')[2]
try:
r = conn.list_queues(QueueNamePrefix=prefix)
# The 'QueueUrls' attribute is missing if there are no queues
urls = r.get('QueueUrls', [])
return {'result': [extract_name(url) for url in urls]}
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
def get_attributes(name, region=None, key=None, keyid=None, profile=None):
'''
Return attributes currently set on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.get_attributes myqueue
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
r = conn.get_queue_attributes(QueueUrl=url, AttributeNames=['All'])
return {'result': r['Attributes']}
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
def set_attributes(
name,
attributes,
region=None,
key=None,
keyid=None,
profile=None,
):
'''
Set attributes on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.set_attributes myqueue '{ReceiveMessageWaitTimeSeconds: 20}' region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
attributes = _preprocess_attributes(attributes)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
conn.set_queue_attributes(QueueUrl=url, Attributes=attributes)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
|
saltstack/salt
|
salt/modules/boto_sqs.py
|
create
|
python
|
def create(
name,
attributes=None,
region=None,
key=None,
keyid=None,
profile=None,
):
'''
Create an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.create myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if attributes is None:
attributes = {}
attributes = _preprocess_attributes(attributes)
try:
conn.create_queue(QueueName=name, Attributes=attributes)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
|
Create an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.create myqueue region=us-east-1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_sqs.py#L129-L156
|
[
"def _preprocess_attributes(attributes):\n '''\n Pre-process incoming queue attributes before setting them\n '''\n if isinstance(attributes, six.string_types):\n attributes = salt.utils.json.loads(attributes)\n\n def stringified(val):\n # Some attributes take full json policy documents, but they take them\n # as json strings. Convert the value back into a json string.\n if isinstance(val, dict):\n return salt.utils.json.dumps(val)\n return val\n\n return dict(\n (attr, stringified(val)) for attr, val in six.iteritems(attributes)\n )\n"
] |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon SQS
.. versionadded:: 2014.7.0
:configuration: This module accepts explicit sqs credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
sqs.keyid: GKTADJGHEIQSXMKKRBJ08H
sqs.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
sqs.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
# Import Salt libs
import salt.utils.json
import salt.utils.versions
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module
log = logging.getLogger(__name__)
__func_alias__ = {
'list_': 'list',
}
# Import third party libs
try:
# pylint: disable=unused-import
import boto3
import botocore
# pylint: enable=unused-import
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def __virtual__():
'''
Only load if boto3 libraries exist.
'''
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
__utils__['boto3.assign_funcs'](__name__, 'sqs')
return has_boto_reqs
def _preprocess_attributes(attributes):
'''
Pre-process incoming queue attributes before setting them
'''
if isinstance(attributes, six.string_types):
attributes = salt.utils.json.loads(attributes)
def stringified(val):
# Some attributes take full json policy documents, but they take them
# as json strings. Convert the value back into a json string.
if isinstance(val, dict):
return salt.utils.json.dumps(val)
return val
return dict(
(attr, stringified(val)) for attr, val in six.iteritems(attributes)
)
def exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if a queue exists.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.exists myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.get_queue_url(QueueName=name)
except botocore.exceptions.ClientError as e:
missing_code = 'AWS.SimpleQueueService.NonExistentQueue'
if e.response.get('Error', {}).get('Code') == missing_code:
return {'result': False}
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
def delete(name, region=None, key=None, keyid=None, profile=None):
'''
Delete an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.delete myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
conn.delete_queue(QueueUrl=url)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
def list_(prefix='', region=None, key=None, keyid=None, profile=None):
'''
Return a list of the names of all visible queues.
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.list region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
def extract_name(queue_url):
# Note: this logic taken from boto, so should be safe
return _urlparse(queue_url).path.split('/')[2]
try:
r = conn.list_queues(QueueNamePrefix=prefix)
# The 'QueueUrls' attribute is missing if there are no queues
urls = r.get('QueueUrls', [])
return {'result': [extract_name(url) for url in urls]}
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
def get_attributes(name, region=None, key=None, keyid=None, profile=None):
'''
Return attributes currently set on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.get_attributes myqueue
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
r = conn.get_queue_attributes(QueueUrl=url, AttributeNames=['All'])
return {'result': r['Attributes']}
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
def set_attributes(
name,
attributes,
region=None,
key=None,
keyid=None,
profile=None,
):
'''
Set attributes on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.set_attributes myqueue '{ReceiveMessageWaitTimeSeconds: 20}' region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
attributes = _preprocess_attributes(attributes)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
conn.set_queue_attributes(QueueUrl=url, Attributes=attributes)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
|
saltstack/salt
|
salt/modules/boto_sqs.py
|
delete
|
python
|
def delete(name, region=None, key=None, keyid=None, profile=None):
'''
Delete an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.delete myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
conn.delete_queue(QueueUrl=url)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
|
Delete an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.delete myqueue region=us-east-1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_sqs.py#L159-L176
| null |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon SQS
.. versionadded:: 2014.7.0
:configuration: This module accepts explicit sqs credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
sqs.keyid: GKTADJGHEIQSXMKKRBJ08H
sqs.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
sqs.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
# Import Salt libs
import salt.utils.json
import salt.utils.versions
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module
log = logging.getLogger(__name__)
__func_alias__ = {
'list_': 'list',
}
# Import third party libs
try:
# pylint: disable=unused-import
import boto3
import botocore
# pylint: enable=unused-import
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def __virtual__():
'''
Only load if boto3 libraries exist.
'''
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
__utils__['boto3.assign_funcs'](__name__, 'sqs')
return has_boto_reqs
def _preprocess_attributes(attributes):
'''
Pre-process incoming queue attributes before setting them
'''
if isinstance(attributes, six.string_types):
attributes = salt.utils.json.loads(attributes)
def stringified(val):
# Some attributes take full json policy documents, but they take them
# as json strings. Convert the value back into a json string.
if isinstance(val, dict):
return salt.utils.json.dumps(val)
return val
return dict(
(attr, stringified(val)) for attr, val in six.iteritems(attributes)
)
def exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if a queue exists.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.exists myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.get_queue_url(QueueName=name)
except botocore.exceptions.ClientError as e:
missing_code = 'AWS.SimpleQueueService.NonExistentQueue'
if e.response.get('Error', {}).get('Code') == missing_code:
return {'result': False}
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
def create(
name,
attributes=None,
region=None,
key=None,
keyid=None,
profile=None,
):
'''
Create an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.create myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if attributes is None:
attributes = {}
attributes = _preprocess_attributes(attributes)
try:
conn.create_queue(QueueName=name, Attributes=attributes)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
def list_(prefix='', region=None, key=None, keyid=None, profile=None):
'''
Return a list of the names of all visible queues.
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.list region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
def extract_name(queue_url):
# Note: this logic taken from boto, so should be safe
return _urlparse(queue_url).path.split('/')[2]
try:
r = conn.list_queues(QueueNamePrefix=prefix)
# The 'QueueUrls' attribute is missing if there are no queues
urls = r.get('QueueUrls', [])
return {'result': [extract_name(url) for url in urls]}
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
def get_attributes(name, region=None, key=None, keyid=None, profile=None):
'''
Return attributes currently set on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.get_attributes myqueue
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
r = conn.get_queue_attributes(QueueUrl=url, AttributeNames=['All'])
return {'result': r['Attributes']}
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
def set_attributes(
name,
attributes,
region=None,
key=None,
keyid=None,
profile=None,
):
'''
Set attributes on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.set_attributes myqueue '{ReceiveMessageWaitTimeSeconds: 20}' region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
attributes = _preprocess_attributes(attributes)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
conn.set_queue_attributes(QueueUrl=url, Attributes=attributes)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
|
saltstack/salt
|
salt/modules/boto_sqs.py
|
list_
|
python
|
def list_(prefix='', region=None, key=None, keyid=None, profile=None):
'''
Return a list of the names of all visible queues.
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.list region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
def extract_name(queue_url):
# Note: this logic taken from boto, so should be safe
return _urlparse(queue_url).path.split('/')[2]
try:
r = conn.list_queues(QueueNamePrefix=prefix)
# The 'QueueUrls' attribute is missing if there are no queues
urls = r.get('QueueUrls', [])
return {'result': [extract_name(url) for url in urls]}
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
|
Return a list of the names of all visible queues.
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.list region=us-east-1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_sqs.py#L179-L203
| null |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon SQS
.. versionadded:: 2014.7.0
:configuration: This module accepts explicit sqs credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
sqs.keyid: GKTADJGHEIQSXMKKRBJ08H
sqs.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
sqs.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
# Import Salt libs
import salt.utils.json
import salt.utils.versions
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module
log = logging.getLogger(__name__)
__func_alias__ = {
'list_': 'list',
}
# Import third party libs
try:
# pylint: disable=unused-import
import boto3
import botocore
# pylint: enable=unused-import
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def __virtual__():
'''
Only load if boto3 libraries exist.
'''
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
__utils__['boto3.assign_funcs'](__name__, 'sqs')
return has_boto_reqs
def _preprocess_attributes(attributes):
'''
Pre-process incoming queue attributes before setting them
'''
if isinstance(attributes, six.string_types):
attributes = salt.utils.json.loads(attributes)
def stringified(val):
# Some attributes take full json policy documents, but they take them
# as json strings. Convert the value back into a json string.
if isinstance(val, dict):
return salt.utils.json.dumps(val)
return val
return dict(
(attr, stringified(val)) for attr, val in six.iteritems(attributes)
)
def exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if a queue exists.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.exists myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.get_queue_url(QueueName=name)
except botocore.exceptions.ClientError as e:
missing_code = 'AWS.SimpleQueueService.NonExistentQueue'
if e.response.get('Error', {}).get('Code') == missing_code:
return {'result': False}
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
def create(
name,
attributes=None,
region=None,
key=None,
keyid=None,
profile=None,
):
'''
Create an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.create myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if attributes is None:
attributes = {}
attributes = _preprocess_attributes(attributes)
try:
conn.create_queue(QueueName=name, Attributes=attributes)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
def delete(name, region=None, key=None, keyid=None, profile=None):
'''
Delete an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.delete myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
conn.delete_queue(QueueUrl=url)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
def get_attributes(name, region=None, key=None, keyid=None, profile=None):
'''
Return attributes currently set on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.get_attributes myqueue
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
r = conn.get_queue_attributes(QueueUrl=url, AttributeNames=['All'])
return {'result': r['Attributes']}
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
def set_attributes(
name,
attributes,
region=None,
key=None,
keyid=None,
profile=None,
):
'''
Set attributes on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.set_attributes myqueue '{ReceiveMessageWaitTimeSeconds: 20}' region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
attributes = _preprocess_attributes(attributes)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
conn.set_queue_attributes(QueueUrl=url, Attributes=attributes)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
|
saltstack/salt
|
salt/modules/boto_sqs.py
|
get_attributes
|
python
|
def get_attributes(name, region=None, key=None, keyid=None, profile=None):
'''
Return attributes currently set on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.get_attributes myqueue
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
r = conn.get_queue_attributes(QueueUrl=url, AttributeNames=['All'])
return {'result': r['Attributes']}
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
|
Return attributes currently set on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.get_attributes myqueue
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_sqs.py#L206-L223
| null |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon SQS
.. versionadded:: 2014.7.0
:configuration: This module accepts explicit sqs credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
sqs.keyid: GKTADJGHEIQSXMKKRBJ08H
sqs.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
sqs.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
# Import Salt libs
import salt.utils.json
import salt.utils.versions
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module
log = logging.getLogger(__name__)
__func_alias__ = {
'list_': 'list',
}
# Import third party libs
try:
# pylint: disable=unused-import
import boto3
import botocore
# pylint: enable=unused-import
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def __virtual__():
'''
Only load if boto3 libraries exist.
'''
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
__utils__['boto3.assign_funcs'](__name__, 'sqs')
return has_boto_reqs
def _preprocess_attributes(attributes):
'''
Pre-process incoming queue attributes before setting them
'''
if isinstance(attributes, six.string_types):
attributes = salt.utils.json.loads(attributes)
def stringified(val):
# Some attributes take full json policy documents, but they take them
# as json strings. Convert the value back into a json string.
if isinstance(val, dict):
return salt.utils.json.dumps(val)
return val
return dict(
(attr, stringified(val)) for attr, val in six.iteritems(attributes)
)
def exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if a queue exists.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.exists myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.get_queue_url(QueueName=name)
except botocore.exceptions.ClientError as e:
missing_code = 'AWS.SimpleQueueService.NonExistentQueue'
if e.response.get('Error', {}).get('Code') == missing_code:
return {'result': False}
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
def create(
name,
attributes=None,
region=None,
key=None,
keyid=None,
profile=None,
):
'''
Create an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.create myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if attributes is None:
attributes = {}
attributes = _preprocess_attributes(attributes)
try:
conn.create_queue(QueueName=name, Attributes=attributes)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
def delete(name, region=None, key=None, keyid=None, profile=None):
'''
Delete an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.delete myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
conn.delete_queue(QueueUrl=url)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
def list_(prefix='', region=None, key=None, keyid=None, profile=None):
'''
Return a list of the names of all visible queues.
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.list region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
def extract_name(queue_url):
# Note: this logic taken from boto, so should be safe
return _urlparse(queue_url).path.split('/')[2]
try:
r = conn.list_queues(QueueNamePrefix=prefix)
# The 'QueueUrls' attribute is missing if there are no queues
urls = r.get('QueueUrls', [])
return {'result': [extract_name(url) for url in urls]}
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
def set_attributes(
name,
attributes,
region=None,
key=None,
keyid=None,
profile=None,
):
'''
Set attributes on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.set_attributes myqueue '{ReceiveMessageWaitTimeSeconds: 20}' region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
attributes = _preprocess_attributes(attributes)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
conn.set_queue_attributes(QueueUrl=url, Attributes=attributes)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
|
saltstack/salt
|
salt/modules/boto_sqs.py
|
set_attributes
|
python
|
def set_attributes(
name,
attributes,
region=None,
key=None,
keyid=None,
profile=None,
):
'''
Set attributes on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.set_attributes myqueue '{ReceiveMessageWaitTimeSeconds: 20}' region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
attributes = _preprocess_attributes(attributes)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
conn.set_queue_attributes(QueueUrl=url, Attributes=attributes)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
|
Set attributes on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.set_attributes myqueue '{ReceiveMessageWaitTimeSeconds: 20}' region=us-east-1
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_sqs.py#L226-L252
|
[
"def _preprocess_attributes(attributes):\n '''\n Pre-process incoming queue attributes before setting them\n '''\n if isinstance(attributes, six.string_types):\n attributes = salt.utils.json.loads(attributes)\n\n def stringified(val):\n # Some attributes take full json policy documents, but they take them\n # as json strings. Convert the value back into a json string.\n if isinstance(val, dict):\n return salt.utils.json.dumps(val)\n return val\n\n return dict(\n (attr, stringified(val)) for attr, val in six.iteritems(attributes)\n )\n"
] |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon SQS
.. versionadded:: 2014.7.0
:configuration: This module accepts explicit sqs credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
sqs.keyid: GKTADJGHEIQSXMKKRBJ08H
sqs.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
sqs.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
# Import Salt libs
import salt.utils.json
import salt.utils.versions
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module
log = logging.getLogger(__name__)
__func_alias__ = {
'list_': 'list',
}
# Import third party libs
try:
# pylint: disable=unused-import
import boto3
import botocore
# pylint: enable=unused-import
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def __virtual__():
'''
Only load if boto3 libraries exist.
'''
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
__utils__['boto3.assign_funcs'](__name__, 'sqs')
return has_boto_reqs
def _preprocess_attributes(attributes):
'''
Pre-process incoming queue attributes before setting them
'''
if isinstance(attributes, six.string_types):
attributes = salt.utils.json.loads(attributes)
def stringified(val):
# Some attributes take full json policy documents, but they take them
# as json strings. Convert the value back into a json string.
if isinstance(val, dict):
return salt.utils.json.dumps(val)
return val
return dict(
(attr, stringified(val)) for attr, val in six.iteritems(attributes)
)
def exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if a queue exists.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.exists myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.get_queue_url(QueueName=name)
except botocore.exceptions.ClientError as e:
missing_code = 'AWS.SimpleQueueService.NonExistentQueue'
if e.response.get('Error', {}).get('Code') == missing_code:
return {'result': False}
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
def create(
name,
attributes=None,
region=None,
key=None,
keyid=None,
profile=None,
):
'''
Create an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.create myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if attributes is None:
attributes = {}
attributes = _preprocess_attributes(attributes)
try:
conn.create_queue(QueueName=name, Attributes=attributes)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
def delete(name, region=None, key=None, keyid=None, profile=None):
'''
Delete an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.delete myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
conn.delete_queue(QueueUrl=url)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
def list_(prefix='', region=None, key=None, keyid=None, profile=None):
'''
Return a list of the names of all visible queues.
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.list region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
def extract_name(queue_url):
# Note: this logic taken from boto, so should be safe
return _urlparse(queue_url).path.split('/')[2]
try:
r = conn.list_queues(QueueNamePrefix=prefix)
# The 'QueueUrls' attribute is missing if there are no queues
urls = r.get('QueueUrls', [])
return {'result': [extract_name(url) for url in urls]}
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
def get_attributes(name, region=None, key=None, keyid=None, profile=None):
'''
Return attributes currently set on an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.get_attributes myqueue
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
r = conn.get_queue_attributes(QueueUrl=url, AttributeNames=['All'])
return {'result': r['Attributes']}
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
|
saltstack/salt
|
salt/states/netconfig.py
|
_update_config
|
python
|
def _update_config(template_name,
template_source=None,
template_hash=None,
template_hash_name=None,
template_user='root',
template_group='root',
template_mode='755',
template_attrs='--------------e----',
saltenv=None,
template_engine='jinja',
skip_verify=False,
defaults=None,
test=False,
commit=True,
debug=False,
replace=False,
**template_vars):
'''
Call the necessary functions in order to execute the state.
For the moment this only calls the ``net.load_template`` function from the
:mod:`Network-related basic features execution module <salt.modules.napalm_network>`, but this may change in time.
'''
return __salt__['net.load_template'](template_name,
template_source=template_source,
template_hash=template_hash,
template_hash_name=template_hash_name,
template_user=template_user,
template_group=template_group,
template_mode=template_mode,
template_attrs=template_attrs,
saltenv=saltenv,
template_engine=template_engine,
skip_verify=skip_verify,
defaults=defaults,
test=test,
commit=commit,
debug=debug,
replace=replace,
**template_vars)
|
Call the necessary functions in order to execute the state.
For the moment this only calls the ``net.load_template`` function from the
:mod:`Network-related basic features execution module <salt.modules.napalm_network>`, but this may change in time.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/netconfig.py#L56-L95
| null |
# -*- coding: utf-8 -*-
'''
Network Config
==============
Manage the configuration on a network device given a specific static config or template.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com> & Jerome Fleury <jf@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`NAPALM proxy minion <salt.proxy.napalm>`
- :mod:`Network-related basic features execution module <salt.modules.napalm_network>`
.. versionadded:: 2017.7.0
'''
# Import Salt libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
# import Salt libs
import salt.utils.napalm
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'netconfig'
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
    '''
    NAPALM library must be installed for this module to work and run in a (proxy) minion.
    '''
    # Delegate the availability check to the shared NAPALM helper, which
    # decides whether this state module should be loaded on this minion.
    return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def replace_pattern(name,
                    pattern,
                    repl,
                    count=0,
                    flags=8,
                    bufsize=1,
                    append_if_not_found=False,
                    prepend_if_not_found=False,
                    not_found_content=None,
                    search_only=False,
                    show_changes=True,
                    backslash_literal=False,
                    source='running',
                    path=None,
                    test=False,
                    replace=True,
                    debug=False,
                    commit=True):
    '''
    .. versionadded:: 2019.2.0

    Replace occurrences of a pattern in the configuration read from
    ``source``, using a pure Python implementation that wraps Python's
    :py:func:`~re.sub`. When ``show_changes`` is ``True`` a diff of the
    changes is returned; otherwise a boolean indicates whether anything
    changed.

    pattern
        A regular expression, matched using Python's :py:func:`~re.search`.

    repl
        The replacement text.

    count: ``0``
        Maximum number of occurrences to replace; ``0`` replaces all of them.

    flags (list or int): ``8``
        Flags from the :py:mod:`re` module, given either as a list of
        human-friendly flag names (e.g. ``['IGNORECASE', 'MULTILINE']``) or
        as an int obtained by OR-ing (``|``) the desired flags together.
        Defaults to ``8`` (``MULTILINE``).

    bufsize (int or str): ``1``
        How much of the configuration to buffer into memory at once: ``1``
        processes one line at a time, while the special value ``file`` reads
        the entire file into memory before processing.

    append_if_not_found: ``False``
        Append the content to the file when the pattern is not found.

    prepend_if_not_found: ``False``
        Prepend the content to the file when the pattern is not found.

    not_found_content
        Content used for append/prepend when the pattern is not found. If
        ``None`` (default), ``repl`` is used; useful when ``repl`` contains
        group references.

    search_only: ``False``
        If set to true, no changes will be performed; simply return ``True``
        when the pattern matched and ``False`` otherwise.

    show_changes: ``True``
        Return a diff of the changes made instead of a plain boolean.

    backslash_literal: ``False``
        Interpret backslashes in ``repl`` literally instead of as escape
        characters, so append/prepend content survives a second state run
        unchanged.

    source: ``running``
        The configuration source: ``running``, ``candidate`` or ``startup``.

    path
        Save the temporary configuration to this specific path, then read
        from there.

    test: ``False``
        Dry run: apply the config, then discard it and return the changes
        instead of committing them on the device.

    commit: ``True``
        Commit the configuration changes.

    debug: ``False``
        Add a ``loaded_config`` key to the output, containing the raw
        configuration loaded on the device.

    replace: ``True``
        Load and replace the configuration instead of merging it.

    State SLS Example:

    .. code-block:: yaml

        update_policy_name:
          netconfig.replace_pattern:
            - pattern: OLD-POLICY-NAME
            - repl: new-policy-name
            - debug: true
    '''
    ret = salt.utils.napalm.default_ret(name)
    # Equivalent CLI arguments (e.g. ``test=True``) take precedence over the
    # state parameters, so merge them in before calling the execution module.
    test = __salt__['config.merge']('test', test)
    debug = __salt__['config.merge']('debug', debug)
    commit = __salt__['config.merge']('commit', commit)
    replace = __salt__['config.merge']('replace', replace)  # this might be a bit risky
    replaced = __salt__['net.replace_pattern'](pattern,
                                               repl,
                                               count=count,
                                               flags=flags,
                                               bufsize=bufsize,
                                               append_if_not_found=append_if_not_found,
                                               prepend_if_not_found=prepend_if_not_found,
                                               not_found_content=not_found_content,
                                               search_only=search_only,
                                               show_changes=show_changes,
                                               backslash_literal=backslash_literal,
                                               source=source,
                                               path=path,
                                               test=test,
                                               replace=replace,
                                               debug=debug,
                                               commit=commit)
    return salt.utils.napalm.loaded_ret(ret, replaced, test, debug)
def saved(name,
          source='running',
          user=None,
          group=None,
          mode=None,
          attrs=None,
          makedirs=False,
          dir_mode=None,
          replace=True,
          backup='',
          show_changes=True,
          create=True,
          tmp_dir='',
          tmp_ext='',
          encoding=None,
          encoding_errors='strict',
          allow_empty=False,
          follow_symlinks=True,
          check_cmd=None,
          win_owner=None,
          win_perms=None,
          win_deny_perms=None,
          win_inheritance=True,
          win_perms_reset=False,
          **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Save the configuration to a file on the local file system. To push the
    saved files to the Master, use the
    :mod:`cp.push <salt.modules.cp.push>` execution function.

    name
        Absolute path of the file where the configuration is saved.

    source: ``running``
        The configuration source: ``running``, ``candidate`` or ``startup``.

    user
        Owner of the file; defaults to the user Salt runs as on the minion.

    group
        Group ownership of the file; defaults to the group Salt runs as on
        the minion. Ignored on Windows.

    mode
        File permissions, e.g. ``644``, ``0775`` or ``4664``. New files use
        the umask of the salt process; existing files keep their mode unless
        this is set.

        .. note::
            This option is **not** supported on Windows.

    attrs
        File attributes, any combination of ``aAcCdDeijPsStTu``.

        .. note::
            This option is **not** supported on Windows.

    makedirs: ``False``
        Create missing parent directories; when ``False`` the state fails
        if the parent directory of the destination does not exist.

    dir_mode
        Permissions for directories created via ``makedirs``; if unset,
        directories get the file mode plus the execute bit.

    replace: ``True``
        When ``False`` an existing file is left unmodified; permissions and
        ownership are still enforced.

    backup
        Backup mode for this file; see the
        :ref:`backup_mode documentation <file-state-backups>`.

    show_changes: ``True``
        Output a unified diff of the old and the new file; when ``False``
        return a boolean indicating whether changes were made.

    create: ``True``
        When ``False``, only manage the file if it already exists.

    encoding
        File encoding; defaults to the system locale (usually UTF-8). See
        https://docs.python.org/3/library/codecs.html#standard-encodings
        for available encodings.

    encoding_errors: ``'strict'``
        Error handling scheme for the encoding.

    allow_empty: ``True``
        When ``False`` the state fails if the specified contents are empty.

    follow_symlinks: ``True``
        When the destination is a symlink, follow it and modify its target.

    check_cmd
        Command run with an appended argument of a *temporary* file holding
        the new contents; a non-zero exit code fails the state and leaves
        the destination untouched.

    tmp_dir
        Directory for the temp file created by ``check_cmd``.

    tmp_ext
        Suffix for the temp file created by ``check_cmd``.

    win_owner: ``None``
        Windows owner of the file; falls back to ``user``, then to the
        account Salt runs under.

    win_perms: ``None``
        Dictionary of permissions to grant, e.g.
        ``{'Administrators': {'perms': 'full_control'}}``. ``perms`` must be
        specified; ``applies_to`` does not apply to file objects.

    win_deny_perms: ``None``
        Dictionary of permissions to deny, same format as ``win_perms``.

    win_inheritance: ``True``
        Inherit permissions from the parent directory.

    win_perms_reset: ``False``
        When ``True`` the existing DACL is cleared and replaced; otherwise
        new entries are appended to it.

    State SLS Example:

    .. code-block:: yaml

        /var/backups/{{ opts.id }}/{{ salt.status.time('%s') }}.cfg:
          netconfig.saved:
            - source: running
            - makedirs: true

    The example above backs up the config grouped by Minion ID, in
    chronologically named files, e.g.
    ``/var/backups/core01.lon01/1533316558.cfg``.
    '''
    # Fetch the requested configuration source from the device first; if
    # that fails there is nothing to write, so fail the state immediately.
    config_ret = __salt__['net.config'](source=source)
    if not config_ret['result']:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': config_ret['comment']}
    # Hand the retrieved text over to file.managed, which handles all the
    # filesystem concerns (ownership, perms, diffs, check_cmd, ...).
    return __states__['file.managed'](name,
                                      user=user,
                                      group=group,
                                      mode=mode,
                                      attrs=attrs,
                                      makedirs=makedirs,
                                      dir_mode=dir_mode,
                                      replace=replace,
                                      backup=backup,
                                      show_changes=show_changes,
                                      create=create,
                                      contents=config_ret['out'][source],
                                      tmp_dir=tmp_dir,
                                      tmp_ext=tmp_ext,
                                      encoding=encoding,
                                      encoding_errors=encoding_errors,
                                      allow_empty=allow_empty,
                                      follow_symlinks=follow_symlinks,
                                      check_cmd=check_cmd,
                                      win_owner=win_owner,
                                      win_perms=win_perms,
                                      win_deny_perms=win_deny_perms,
                                      win_inheritance=win_inheritance,
                                      win_perms_reset=win_perms_reset,
                                      **kwargs)
def managed(name,
            template_name=None,
            template_source=None,
            template_hash=None,
            template_hash_name=None,
            saltenv='base',
            template_engine='jinja',
            skip_verify=False,
            context=None,
            defaults=None,
            test=False,
            commit=True,
            debug=False,
            replace=False,
            commit_in=None,
            commit_at=None,
            revert_in=None,
            revert_at=None,
            **template_vars):
    '''
    Manages the configuration on network devices.

    By default this state commits the changes on the device. When no changes
    are required nothing is committed and the ``already_configured`` field of
    the output dictionary is set to ``True``. Set ``test`` to ``True``
    (state parameter or CLI argument) to dry run and discard the changes.
    Set ``commit`` to ``False`` to load without committing — use only in
    exceptional cases, as the config DB stays locked and the candidate
    buffer is neither cleared nor merged. Set ``replace`` to ``True`` to
    replace the config instead of merging — use with caution!

    template_name
        Path to the template source, either local (absolute path, or a
        ``salt://`` URI relative to the ``file_roots``) or remote
        (``http://``, ``https://``, ``ftp://``). Examples:

        - ``salt://my_template.jinja``
        - ``/absolute/path/to/my_template.jinja``
        - ``http://example.com/template.cheetah``

        .. versionchanged:: 2019.2.0
            This argument can also be a list of templates; the rendered
            results are loaded at once, as a single configuration chunk.

    template_source: None
        Inline config template to be rendered and loaded on the device.

    template_hash: None
        Hash of the template file. Format:
        ``{hash_type: 'md5', 'hsum': <md5sum>}``

    template_hash_name: None
        When ``template_hash`` refers to a remote file, the filename to look
        for in that file.

    saltenv: base
        Template environment; influences relative imports inside templates.

    template_engine: jinja
        One of the supported engines: :mod:`cheetah<salt.renderers.cheetah>`,
        :mod:`genshi<salt.renderers.genshi>`,
        :mod:`jinja<salt.renderers.jinja>`,
        :mod:`mako<salt.renderers.mako>`, :mod:`py<salt.renderers.py>`,
        :mod:`wempy<salt.renderers.wempy>`.

    skip_verify: False
        Skip hash verification of remote file sources (``http://``,
        ``https://``, ``ftp://``) and ignore the ``source_hash`` argument.

        .. versionchanged:: 2017.7.1

    test: False
        Dry run: apply the config, then discard it and return the changes.

    commit: True
        Commit the changes on the device.

    debug: False
        Add a ``loaded_config`` key to the output, containing the raw result
        of the template rendering.

        .. note::
            This argument cannot be used directly on the command line; pass
            it through the ``pillar`` variable when executing
            :py:func:`state.sls <salt.modules.state.sls>` or
            :py:func:`state.apply <salt.modules.state.apply>`.

    commit_in: ``None``
        Commit the changes after a given interval, e.g. ``5`` (minutes),
        ``2m``, ``1h``, ``5h30m``. Platform independent. To discard a
        scheduled commit, use
        :py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>`
        with the returned commit ID. Note the diff may change in the
        meantime if the device configuration is modified by other means.

        .. versionadded:: 2019.2.0

    commit_at: ``None``
        Commit the changes at a specific time, e.g. ``1am``, ``13:20``,
        ``1:20am``. Same notes as for ``commit_in`` apply.

        .. versionadded:: 2019.2.0

    revert_in: ``None``
        Commit, then revert after the given interval (same formats as
        ``commit_in``) unless confirmed via
        :mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>`
        with the returned commit ID.

        .. warning::
            Only Junos supports commit-confirmed natively; on all other
            platforms the revert is performed by Salt, so the device must be
            reachable at revert time and any intermediate configuration
            changes will be reverted as well.

        .. versionadded:: 2019.2.0

    revert_at: ``None``
        Commit, then revert at a specific time (same formats as
        ``commit_at``) unless confirmed; same warnings as ``revert_in``.

        .. versionadded:: 2019.2.0

    replace: False
        Load and replace the configuration instead of load merge.

    context: None
        Overrides default context variables passed to the template.

        .. versionadded:: 2019.2.0

    defaults: None
        Default variables/context passed to the template.

    template_vars
        Any other variables forwarded to the template rendering system.
        Prefer the ``context`` argument to avoid conflicts with other
        arguments.

    SLS Example:

    .. code-block:: yaml

        ntp_peers_example:
          netconfig.managed:
            - template_name: salt://templates/ntp_peers.jinja
            - debug: true
            - peers:
              - 192.168.0.1
              - 192.168.0.2

    Usage example:

    .. code-block:: bash

        $ sudo salt 'juniper.device' state.sls router.config test=True
    '''
    ret = salt.utils.napalm.default_ret(name)
    # Equivalent CLI arguments have higher precedence than the state
    # parameters, so merge them in before rendering/loading anything.
    test = __salt__['config.merge']('test', test)
    debug = __salt__['config.merge']('debug', debug)
    commit = __salt__['config.merge']('commit', commit)
    replace = __salt__['config.merge']('replace', replace)  # this might be a bit risky
    skip_verify = __salt__['config.merge']('skip_verify', skip_verify)
    commit_in = __salt__['config.merge']('commit_in', commit_in)
    commit_at = __salt__['config.merge']('commit_at', commit_at)
    revert_in = __salt__['config.merge']('revert_in', revert_in)
    revert_at = __salt__['config.merge']('revert_at', revert_at)
    loaded = _update_config(template_name=template_name,
                            template_source=template_source,
                            template_hash=template_hash,
                            template_hash_name=template_hash_name,
                            saltenv=saltenv,
                            template_engine=template_engine,
                            skip_verify=skip_verify,
                            context=context,
                            defaults=defaults,
                            test=test,
                            commit=commit,
                            commit_in=commit_in,
                            commit_at=commit_at,
                            revert_in=revert_in,
                            revert_at=revert_at,
                            debug=debug,
                            replace=replace,
                            **template_vars)
    return salt.utils.napalm.loaded_ret(ret, loaded, test, debug)
def commit_cancelled(name):
    '''
    .. versionadded:: 2019.2.0

    Cancel a commit scheduled through the ``commit_in`` and ``commit_at``
    arguments of the
    :py:func:`net.load_template <salt.modules.napalm_network.load_template>`
    or :py:func:`net.load_config <salt.modules.napalm_network.load_config>`
    execution functions. The commit ID is displayed when the commit is
    scheduled by those functions.

    State SLS Example:

    .. code-block:: yaml

        '20180726083540640360':
          netconfig.commit_cancelled
    '''
    state_ret = {'name': name,
                 'result': None,
                 'changes': {},
                 'comment': ''}
    # In test mode only report what would happen, without touching the
    # scheduled commit.
    if __opts__['test']:
        state_ret['comment'] = 'It would cancel commit #{}'.format(name)
        return state_ret
    state_ret.update(__salt__['net.cancel_commit'](name))
    return state_ret
def commit_confirmed(name):
    '''
    .. versionadded:: 2019.2.0

    Confirm a commit scheduled to be reverted through the ``revert_in`` and
    ``revert_at`` arguments of the
    :mod:`net.load_template <salt.modules.napalm_network.load_template>` or
    :mod:`net.load_config <salt.modules.napalm_network.load_config>`
    execution functions. The commit ID is displayed when the commit
    confirmed is scheduled by those functions.

    State SLS Example:

    .. code-block:: yaml

        '20180726083540640360':
          netconfig.commit_confirmed
    '''
    state_ret = {'name': name,
                 'result': None,
                 'changes': {},
                 'comment': ''}
    # In test mode only report what would happen, without confirming the
    # pending commit.
    if __opts__['test']:
        state_ret['comment'] = 'It would confirm commit #{}'.format(name)
        return state_ret
    state_ret.update(__salt__['net.confirm_commit'](name))
    return state_ret
|
saltstack/salt
|
salt/states/netconfig.py
|
replace_pattern
|
python
|
def replace_pattern(name,
                    pattern,
                    repl,
                    count=0,
                    flags=8,
                    bufsize=1,
                    append_if_not_found=False,
                    prepend_if_not_found=False,
                    not_found_content=None,
                    search_only=False,
                    show_changes=True,
                    backslash_literal=False,
                    source='running',
                    path=None,
                    test=False,
                    replace=True,
                    debug=False,
                    commit=True):
    '''
    .. versionadded:: 2019.2.0

    Replace occurrences of a pattern in the configuration read from
    ``source``, using a pure Python implementation that wraps Python's
    :py:func:`~re.sub`. When ``show_changes`` is ``True`` a diff of the
    changes is returned; otherwise a boolean indicates whether anything
    changed.

    pattern
        A regular expression, matched using Python's :py:func:`~re.search`.

    repl
        The replacement text.

    count: ``0``
        Maximum number of occurrences to replace; ``0`` replaces all of them.

    flags (list or int): ``8``
        Flags from the :py:mod:`re` module, given either as a list of
        human-friendly flag names (e.g. ``['IGNORECASE', 'MULTILINE']``) or
        as an int obtained by OR-ing (``|``) the desired flags together.
        Defaults to ``8`` (``MULTILINE``).

    bufsize (int or str): ``1``
        How much of the configuration to buffer into memory at once: ``1``
        processes one line at a time, while the special value ``file`` reads
        the entire file into memory before processing.

    append_if_not_found: ``False``
        Append the content to the file when the pattern is not found.

    prepend_if_not_found: ``False``
        Prepend the content to the file when the pattern is not found.

    not_found_content
        Content used for append/prepend when the pattern is not found. If
        ``None`` (default), ``repl`` is used; useful when ``repl`` contains
        group references.

    search_only: ``False``
        If set to true, no changes will be performed; simply return ``True``
        when the pattern matched and ``False`` otherwise.

    show_changes: ``True``
        Return a diff of the changes made instead of a plain boolean.

    backslash_literal: ``False``
        Interpret backslashes in ``repl`` literally instead of as escape
        characters, so append/prepend content survives a second state run
        unchanged.

    source: ``running``
        The configuration source: ``running``, ``candidate`` or ``startup``.

    path
        Save the temporary configuration to this specific path, then read
        from there.

    test: ``False``
        Dry run: apply the config, then discard it and return the changes
        instead of committing them on the device.

    commit: ``True``
        Commit the configuration changes.

    debug: ``False``
        Add a ``loaded_config`` key to the output, containing the raw
        configuration loaded on the device.

    replace: ``True``
        Load and replace the configuration instead of merging it.

    State SLS Example:

    .. code-block:: yaml

        update_policy_name:
          netconfig.replace_pattern:
            - pattern: OLD-POLICY-NAME
            - repl: new-policy-name
            - debug: true
    '''
    ret = salt.utils.napalm.default_ret(name)
    # Equivalent CLI arguments (e.g. ``test=True``) take precedence over the
    # state parameters, so merge them in before calling the execution module.
    test = __salt__['config.merge']('test', test)
    debug = __salt__['config.merge']('debug', debug)
    commit = __salt__['config.merge']('commit', commit)
    replace = __salt__['config.merge']('replace', replace)  # this might be a bit risky
    replaced = __salt__['net.replace_pattern'](pattern,
                                               repl,
                                               count=count,
                                               flags=flags,
                                               bufsize=bufsize,
                                               append_if_not_found=append_if_not_found,
                                               prepend_if_not_found=prepend_if_not_found,
                                               not_found_content=not_found_content,
                                               search_only=search_only,
                                               show_changes=show_changes,
                                               backslash_literal=backslash_literal,
                                               source=source,
                                               path=path,
                                               test=test,
                                               replace=replace,
                                               debug=debug,
                                               commit=commit)
    return salt.utils.napalm.loaded_ret(ret, replaced, test, debug)
|
.. versionadded:: 2019.2.0
Replace occurrences of a pattern in the configuration source. If
``show_changes`` is ``True``, then a diff of what changed will be returned,
otherwise a ``True`` will be returned when changes are made, and ``False``
when no changes are made.
This is a pure Python implementation that wraps Python's :py:func:`~re.sub`.
pattern
A regular expression, to be matched using Python's
:py:func:`~re.search`.
repl
The replacement text.
count: ``0``
Maximum number of pattern occurrences to be replaced. If count is a
positive integer ``n``, only ``n`` occurrences will be replaced,
otherwise all occurrences will be replaced.
flags (list or int): ``8``
A list of flags defined in the ``re`` module documentation from the
Python standard library. Each list item should be a string that will
correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
corresponding to the bitwise OR (``|``) of all the desired flags. Defaults to
8 (which supports 'MULTILINE').
bufsize (int or str): ``1``
How much of the configuration to buffer into memory at once. The
default value ``1`` processes one line at a time. The special value
``file`` may be specified which will read the entire file into memory
before processing.
append_if_not_found: ``False``
If set to ``True``, and pattern is not found, then the content will be
appended to the file.
prepend_if_not_found: ``False``
If set to ``True`` and pattern is not found, then the content will be
prepended to the file.
not_found_content
Content to use for append/prepend if not found. If None (default), uses
``repl``. Useful when ``repl`` uses references to group in pattern.
search_only: ``False``
If set to true, no changes will be performed on the file, and this
function will simply return ``True`` if the pattern was matched, and
``False`` if not.
show_changes: ``True``
If ``True``, return a diff of changes made. Otherwise, return ``True``
if changes were made, and ``False`` if not.
backslash_literal: ``False``
Interpret backslashes as literal backslashes for the repl and not
escape characters. This will help when using append/prepend so that
the backslashes are not interpreted for the repl on the second run of
the state.
source: ``running``
The configuration source. Choose from: ``running``, ``candidate``, or
``startup``. Default: ``running``.
path
Save the temporary configuration to a specific path, then read from
there.
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return
the changes. Default: ``False`` and will commit the changes on the
device.
commit: ``True``
Commit the configuration changes? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key in the output dictionary, as
``loaded_config`` containing the raw configuration loaded on the device.
replace: ``True``
Load and replace the configuration. Default: ``True``.
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
State SLS Example:
.. code-block:: yaml
update_policy_name:
netconfig.replace_pattern:
- pattern: OLD-POLICY-NAME
- repl: new-policy-name
- debug: true
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/netconfig.py#L102-L244
|
[
"def default_ret(name):\n '''\n Return the default dict of the state output.\n '''\n ret = {\n 'name': name,\n 'changes': {},\n 'result': False,\n 'comment': ''\n }\n return ret\n",
"def loaded_ret(ret, loaded, test, debug, compliance_report=False, opts=None):\n '''\n Return the final state output.\n ret\n The initial state output structure.\n loaded\n The loaded dictionary.\n '''\n # Always get the comment\n changes = {}\n ret['comment'] = loaded['comment']\n if 'diff' in loaded:\n changes['diff'] = loaded['diff']\n if 'commit_id' in loaded:\n changes['commit_id'] = loaded['commit_id']\n if 'compliance_report' in loaded:\n if compliance_report:\n changes['compliance_report'] = loaded['compliance_report']\n if debug and 'loaded_config' in loaded:\n changes['loaded_config'] = loaded['loaded_config']\n if changes.get('diff'):\n ret['comment'] = '{comment_base}\\n\\nConfiguration diff:\\n\\n{diff}'.format(comment_base=ret['comment'],\n diff=changes['diff'])\n if changes.get('loaded_config'):\n ret['comment'] = '{comment_base}\\n\\nLoaded config:\\n\\n{loaded_cfg}'.format(\n comment_base=ret['comment'],\n loaded_cfg=changes['loaded_config'])\n if changes.get('compliance_report'):\n ret['comment'] = '{comment_base}\\n\\nCompliance report:\\n\\n{compliance}'.format(\n comment_base=ret['comment'],\n compliance=salt.output.string_format(changes['compliance_report'], 'nested', opts=opts))\n if not loaded.get('result', False):\n # Failure of some sort\n return ret\n if not loaded.get('already_configured', True):\n # We're making changes\n if test:\n ret['result'] = None\n return ret\n # Not test, changes were applied\n ret.update({\n 'result': True,\n 'changes': changes,\n 'comment': \"Configuration changed!\\n{}\".format(loaded['comment'])\n })\n return ret\n # No changes\n ret.update({\n 'result': True,\n 'changes': {}\n })\n return ret\n"
] |
# -*- coding: utf-8 -*-
'''
Network Config
==============
Manage the configuration on a network device given a specific static config or template.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com> & Jerome Fleury <jf@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`NAPALM proxy minion <salt.proxy.napalm>`
- :mod:`Network-related basic features execution module <salt.modules.napalm_network>`
.. versionadded:: 2017.7.0
'''
# Import Salt libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
# import Salt libs
import salt.utils.napalm
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'netconfig'
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
    '''
    Load this state module only if the NAPALM library is installed and we are
    running inside a (proxy) minion.
    '''
    # Delegate the availability check to the shared NAPALM utility helper.
    availability = salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
    return availability
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _update_config(template_name,
                   template_source=None,
                   template_hash=None,
                   template_hash_name=None,
                   template_user='root',
                   template_group='root',
                   template_mode='755',
                   template_attrs='--------------e----',
                   saltenv=None,
                   template_engine='jinja',
                   skip_verify=False,
                   defaults=None,
                   test=False,
                   commit=True,
                   debug=False,
                   replace=False,
                   **template_vars):
    '''
    Render and load the template on the device in order to execute the state.
    Currently this is a thin wrapper around ``net.load_template`` from the
    :mod:`Network-related basic features execution module <salt.modules.napalm_network>`,
    but this may change in time. Any extra keyword arguments are forwarded to
    the template rendering system as template variables.
    '''
    load_template = __salt__['net.load_template']
    return load_template(template_name,
                         template_source=template_source,
                         template_hash=template_hash,
                         template_hash_name=template_hash_name,
                         template_user=template_user,
                         template_group=template_group,
                         template_mode=template_mode,
                         template_attrs=template_attrs,
                         saltenv=saltenv,
                         template_engine=template_engine,
                         skip_verify=skip_verify,
                         defaults=defaults,
                         test=test,
                         commit=commit,
                         debug=debug,
                         replace=replace,
                         **template_vars)
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def saved(name,
          source='running',
          user=None,
          group=None,
          mode=None,
          attrs=None,
          makedirs=False,
          dir_mode=None,
          replace=True,
          backup='',
          show_changes=True,
          create=True,
          tmp_dir='',
          tmp_ext='',
          encoding=None,
          encoding_errors='strict',
          allow_empty=False,
          follow_symlinks=True,
          check_cmd=None,
          win_owner=None,
          win_perms=None,
          win_deny_perms=None,
          win_inheritance=True,
          win_perms_reset=False,
          **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Save the configuration to a file on the local file system.

    name
        Absolute path to file where to save the configuration.
        To push the files to the Master, use
        :mod:`cp.push <salt.modules.cp.push>` Execution function.

    source: ``running``
        The configuration source. Choose from: ``running``, ``candidate``,
        ``startup``. Default: ``running``.

    user
        The user to own the file, this defaults to the user salt is running as
        on the minion

    group
        The group ownership set for the file, this defaults to the group salt
        is running as on the minion. On Windows, this is ignored

    mode
        The permissions to set on this file, e.g. ``644``, ``0775``, or
        ``4664``.
        The default mode for new files and directories corresponds to the
        umask of the salt process. The mode of existing files and directories
        will only be changed if ``mode`` is specified.

        .. note::
            This option is **not** supported on Windows.

    attrs
        The attributes to have on this file, e.g. ``a``, ``i``. The attributes
        can be any or a combination of the following characters:
        ``aAcCdDeijPsStTu``.

        .. note::
            This option is **not** supported on Windows.

    makedirs: ``False``
        If set to ``True``, then the parent directories will be created to
        facilitate the creation of the named file. If ``False``, and the parent
        directory of the destination file doesn't exist, the state will fail.

    dir_mode
        If directories are to be created, passing this option specifies the
        permissions for those directories. If this is not set, directories
        will be assigned permissions by adding the execute bit to the mode of
        the files.
        The default mode for new files and directories corresponds umask of salt
        process. For existing files and directories it's not enforced.

    replace: ``True``
        If set to ``False`` and the file already exists, the file will not be
        modified even if changes would otherwise be made. Permissions and
        ownership will still be enforced, however.

    backup
        Overrides the default backup mode for this specific file. See
        :ref:`backup_mode documentation <file-state-backups>` for more details.

    show_changes: ``True``
        Output a unified diff of the old file and the new file. If ``False``
        return a boolean if any changes were made.

    create: ``True``
        If set to ``False``, then the file will only be managed if the file
        already exists on the system.

    encoding
        If specified, then the specified encoding will be used. Otherwise, the
        file will be encoded using the system locale (usually UTF-8). See
        https://docs.python.org/3/library/codecs.html#standard-encodings for
        the list of available encodings.

    encoding_errors: ``'strict'``
        Error encoding scheme. Default is ``'strict'``.
        See https://docs.python.org/2/library/codecs.html#codec-base-classes
        for the list of available schemes.

    allow_empty: ``False``
        If set to ``False``, then the state will fail if the contents specified
        by ``contents_pillar`` or ``contents_grains`` are empty.

    follow_symlinks: ``True``
        If the desired path is a symlink follow it and make changes to the
        file to which the symlink points.

    check_cmd
        The specified command will be run with an appended argument of a
        *temporary* file containing the new managed contents. If the command
        exits with a zero status the new managed contents will be written to
        the managed destination. If the command exits with a nonzero exit
        code, the state will fail and no changes will be made to the file.

    tmp_dir
        Directory for temp file created by ``check_cmd``. Useful for checkers
        dependent on config file location (e.g. daemons restricted to their
        own config directories by an apparmor profile).

    tmp_ext
        Suffix for temp file created by ``check_cmd``. Useful for checkers
        dependent on config file extension (e.g. the init-checkconf upstart
        config checker).

    win_owner: ``None``
        The owner of the directory. If this is not passed, user will be used. If
        user is not passed, the account under which Salt is running will be
        used.

    win_perms: ``None``
        A dictionary containing permissions to grant and their propagation. For
        example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a
        single basic perm or a list of advanced perms. ``perms`` must be
        specified. ``applies_to`` does not apply to file objects.

    win_deny_perms: ``None``
        A dictionary containing permissions to deny and their propagation. For
        example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a
        single basic perm or a list of advanced perms. ``perms`` must be
        specified. ``applies_to`` does not apply to file objects.

    win_inheritance: ``True``
        True to inherit permissions from the parent directory, False not to
        inherit permission.

    win_perms_reset: ``False``
        If ``True`` the existing DACL will be cleared and replaced with the
        settings defined in this function. If ``False``, new entries will be
        appended to the existing DACL. Default is ``False``.

    State SLS Example:

    .. code-block:: yaml

        /var/backups/{{ opts.id }}/{{ salt.status.time('%s') }}.cfg:
          netconfig.saved:
            - source: running
            - makedirs: true

    The state SLS above would create a backup config grouping the files by the
    Minion ID, in chronological files. For example, if the state is executed at
    on the 3rd of August 2018, at 5:15PM, on the Minion ``core01.lon01``, the
    configuration would saved in the file:
    ``/var/backups/core01.lon01/1533316558.cfg``
    '''
    # Pull the requested configuration source (running/candidate/startup)
    # from the network device via the NAPALM network execution module.
    ret = __salt__['net.config'](source=source)
    if not ret['result']:
        # The device did not return the configuration: fail the state early
        # and surface the error message reported by the execution function.
        return {
            'name': name,
            'changes': {},
            'result': False,
            'comment': ret['comment']
        }
    # Delegate the actual file write, permission and ownership handling to the
    # file.managed state, feeding it the retrieved configuration text.
    return __states__['file.managed'](name,
                                      user=user,
                                      group=group,
                                      mode=mode,
                                      attrs=attrs,
                                      makedirs=makedirs,
                                      dir_mode=dir_mode,
                                      replace=replace,
                                      backup=backup,
                                      show_changes=show_changes,
                                      create=create,
                                      contents=ret['out'][source],
                                      tmp_dir=tmp_dir,
                                      tmp_ext=tmp_ext,
                                      encoding=encoding,
                                      encoding_errors=encoding_errors,
                                      allow_empty=allow_empty,
                                      follow_symlinks=follow_symlinks,
                                      check_cmd=check_cmd,
                                      win_owner=win_owner,
                                      win_perms=win_perms,
                                      win_deny_perms=win_deny_perms,
                                      win_inheritance=win_inheritance,
                                      win_perms_reset=win_perms_reset,
                                      **kwargs)
def managed(name,
            template_name=None,
            template_source=None,
            template_hash=None,
            template_hash_name=None,
            saltenv='base',
            template_engine='jinja',
            skip_verify=False,
            context=None,
            defaults=None,
            test=False,
            commit=True,
            debug=False,
            replace=False,
            commit_in=None,
            commit_at=None,
            revert_in=None,
            revert_at=None,
            **template_vars):
    '''
    Manages the configuration on network devices.

    By default this state will commit the changes on the device. If there are no changes required, it does not commit
    and the field ``already_configured`` from the output dictionary will be set as ``True`` to notify that.

    To avoid committing the configuration, set the argument ``test`` to ``True`` (or via the CLI argument ``test=True``)
    and will discard (dry run).

    To preserve the changes, set ``commit`` to ``False`` (either as CLI argument, either as state parameter).
    However, this is recommended to be used only in exceptional cases when there are applied few consecutive states
    and/or configuration changes. Otherwise the user might forget that the config DB is locked and the candidate config
    buffer is not cleared/merged in the running config.

    To replace the config, set ``replace`` to ``True``. This option is recommended to be used with caution!

    template_name
        Identifies path to the template source. The template can be either stored on the local machine,
        either remotely.
        The recommended location is under the ``file_roots`` as specified in the master config file.
        For example, let's suppose the ``file_roots`` is configured as:

        .. code-block:: yaml

            file_roots:
              base:
                - /etc/salt/states

        Placing the template under ``/etc/salt/states/templates/example.jinja``, it can be used as
        ``salt://templates/example.jinja``.
        Alternatively, for local files, the user can specify the absolute path.
        If remotely, the source can be retrieved via ``http``, ``https`` or ``ftp``.

        Examples:

        - ``salt://my_template.jinja``
        - ``/absolute/path/to/my_template.jinja``
        - ``http://example.com/template.cheetah``
        - ``https://example.com/template.mako``
        - ``ftp://example.com/template.py``

        .. versionchanged:: 2019.2.0
            This argument can now support a list of templates to be rendered.
            The resulting configuration text is loaded at once, as a single
            configuration chunk.

    template_source: None
        Inline config template to be rendered and loaded on the device.

    template_hash: None
        Hash of the template file. Format: ``{hash_type: 'md5', 'hsum': <md5sum>}``

    template_hash_name: None
        When ``template_hash`` refers to a remote file, this specifies the filename to look for in that file.

    saltenv: base
        Specifies the template environment. This will influence the relative imports inside the templates.

    template_engine: jinja
        The following templates engines are supported:

        - :mod:`cheetah<salt.renderers.cheetah>`
        - :mod:`genshi<salt.renderers.genshi>`
        - :mod:`jinja<salt.renderers.jinja>`
        - :mod:`mako<salt.renderers.mako>`
        - :mod:`py<salt.renderers.py>`
        - :mod:`wempy<salt.renderers.wempy>`

    skip_verify: False
        If ``True``, hash verification of remote file sources (``http://``, ``https://``, ``ftp://``) will be skipped,
        and the ``source_hash`` argument will be ignored.

        .. versionchanged:: 2017.7.1

    test: False
        Dry run? If set to ``True``, will apply the config, discard and return the changes. Default: ``False``
        (will commit the changes on the device).

    commit: True
        Commit? Default: ``True``.

    debug: False
        Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` containing the raw
        result after the template was rendered.

        .. note::
            This argument cannot be used directly on the command line. Instead,
            it can be passed through the ``pillar`` variable when executing
            either of the :py:func:`state.sls <salt.modules.state.sls>` or
            :py:func:`state.apply <salt.modules.state.apply>` (see below for an
            example).

    commit_in: ``None``
        Commit the changes in a specific number of minutes / hours. Example of
        accepted formats: ``5`` (commit in 5 minutes), ``2m`` (commit in 2
        minutes), ``1h`` (commit the changes in 1 hour)`, ``5h30m`` (commit
        the changes in 5 hours and 30 minutes).

        .. note::
            This feature works on any platforms, as it does not rely on the
            native features of the network operating system.

        .. note::
            After the command is executed and the ``diff`` is not satisfactory,
            or for any other reasons you have to discard the commit, you are
            able to do so using the
            :py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>`
            execution function, using the commit ID returned by this function.

        .. warning::
            Using this feature, Salt will load the exact configuration you
            expect, however the diff may change in time (i.e., if a user
            applies a manual configuration change, or a different process or
            command changes the configuration in the meanwhile).

        .. versionadded:: 2019.2.0

    commit_at: ``None``
        Commit the changes at a specific time. Example of accepted formats:
        ``1am`` (will commit the changes at the next 1AM), ``13:20`` (will
        commit at 13:20), ``1:20am``, etc.

        .. note::
            This feature works on any platforms, as it does not rely on the
            native features of the network operating system.

        .. note::
            After the command is executed and the ``diff`` is not satisfactory,
            or for any other reasons you have to discard the commit, you are
            able to do so using the
            :py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>`
            execution function, using the commit ID returned by this function.

        .. warning::
            Using this feature, Salt will load the exact configuration you
            expect, however the diff may change in time (i.e., if a user
            applies a manual configuration change, or a different process or
            command changes the configuration in the meanwhile).

        .. versionadded:: 2019.2.0

    revert_in: ``None``
        Commit and revert the changes in a specific number of minutes / hours.
        Example of accepted formats: ``5`` (revert in 5 minutes), ``2m`` (revert
        in 2 minutes), ``1h`` (revert the changes in 1 hour)`, ``5h30m`` (revert
        the changes in 5 hours and 30 minutes).

        .. note::
            To confirm the commit, and prevent reverting the changes, you will
            have to execute the
            :mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>`
            function, using the commit ID returned by this function.

        .. warning::
            This works on any platform, regardless if they have or don't have
            native capabilities to confirming a commit. However, please be
            *very* cautious when using this feature: on Junos (as it is the only
            NAPALM core platform supporting this natively) it executes a commit
            confirmed as you would do from the command line.
            All the other platforms don't have this capability natively,
            therefore the revert is done via Salt. That means, your device needs
            to be reachable at the moment when Salt will attempt to revert your
            changes. Be cautious when pushing configuration changes that would
            prevent you reach the device.
            Similarly, if a user or a different process applies other
            configuration changes in the meanwhile (between the moment you
            commit and till the changes are reverted), these changes would be
            equally reverted, as Salt cannot be aware of them.

        .. versionadded:: 2019.2.0

    revert_at: ``None``
        Commit and revert the changes at a specific time. Example of accepted
        formats: ``1am`` (will commit and revert the changes at the next 1AM),
        ``13:20`` (will commit and revert at 13:20), ``1:20am``, etc.

        .. note::
            To confirm the commit, and prevent reverting the changes, you will
            have to execute the
            :mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>`
            function, using the commit ID returned by this function.

        .. warning::
            This works on any platform, regardless if they have or don't have
            native capabilities to confirming a commit. However, please be
            *very* cautious when using this feature: on Junos (as it is the only
            NAPALM core platform supporting this natively) it executes a commit
            confirmed as you would do from the command line.
            All the other platforms don't have this capability natively,
            therefore the revert is done via Salt. That means, your device needs
            to be reachable at the moment when Salt will attempt to revert your
            changes. Be cautious when pushing configuration changes that would
            prevent you reach the device.
            Similarly, if a user or a different process applies other
            configuration changes in the meanwhile (between the moment you
            commit and till the changes are reverted), these changes would be
            equally reverted, as Salt cannot be aware of them.

        .. versionadded:: 2019.2.0

    replace: False
        Load and replace the configuration. Default: ``False`` (will apply load merge).

    context: None
        Overrides default context variables passed to the template.

        .. versionadded:: 2019.2.0

    defaults: None
        Default variables/context passed to the template.

    template_vars
        Dictionary with the arguments/context to be used when the template is rendered. Do not explicitly specify this
        argument. This represents any other variable that will be sent to the template rendering system. Please
        see an example below! In both ``ntp_peers_example_using_pillar`` and ``ntp_peers_example``, ``peers`` is sent as
        template variable.

        .. note::
            It is more recommended to use the ``context`` argument instead, to
            avoid any conflicts with other arguments.

    SLS Example (e.g.: under salt://router/config.sls) :

    .. code-block:: yaml

        whole_config_example:
            netconfig.managed:
                - template_name: salt://path/to/complete_config.jinja
                - debug: True
                - replace: True
        bgp_config_example:
            netconfig.managed:
                - template_name: /absolute/path/to/bgp_neighbors.mako
                - template_engine: mako
        prefix_lists_example:
            netconfig.managed:
                - template_name: prefix_lists.cheetah
                - debug: True
                - template_engine: cheetah
        ntp_peers_example:
            netconfig.managed:
                - template_name: http://bit.ly/2gKOj20
                - skip_verify: False
                - debug: True
                - peers:
                    - 192.168.0.1
                    - 192.168.0.1
        ntp_peers_example_using_pillar:
            netconfig.managed:
                - template_name: http://bit.ly/2gKOj20
                - peers: {{ pillar.get('ntp.peers', []) }}

    Multi template example:

    .. code-block:: yaml

        hostname_and_ntp:
          netconfig.managed:
            - template_name:
                - https://bit.ly/2OhSgqP
                - https://bit.ly/2M6C4Lx
                - https://bit.ly/2OIWVTs
            - debug: true
            - context:
                hostname: {{ opts.id }}
                servers:
                  - 172.17.17.1
                  - 172.17.17.2
                peers:
                  - 192.168.0.1
                  - 192.168.0.2

    Usage examples:

    .. code-block:: bash

        $ sudo salt 'juniper.device' state.sls router.config test=True
        $ sudo salt -N all-routers state.sls router.config pillar="{'debug': True}"

    ``router.config`` depends on the location of the SLS file (see above). Running this command, will be executed all
    five steps from above. These examples above are not meant to be used in a production environment, their sole purpose
    is to provide usage examples.

    Output example:

    .. code-block:: bash

        $ sudo salt 'juniper.device' state.sls router.config test=True
        juniper.device:
        ----------
                  ID: ntp_peers_example_using_pillar
            Function: netconfig.managed
              Result: None
             Comment: Testing mode: Configuration discarded.
             Started: 12:01:40.744535
            Duration: 8755.788 ms
             Changes:
                      ----------
                      diff:
                          [edit system ntp]
                               peer 192.168.0.1 { ... }
                          +    peer 172.17.17.1;
                          +    peer 172.17.17.3;

        Summary for juniper.device
        ------------
        Succeeded: 1 (unchanged=1, changed=1)
        Failed:    0
        ------------
        Total states run:     1
        Total run time:   8.756 s

    Raw output example (useful when the output is reused in other states/execution modules):

    .. code-block:: bash

        $ sudo salt --out=pprint 'juniper.device' state.sls router.config test=True debug=True

    .. code-block:: python

        {
            'juniper.device': {
                'netconfig_|-ntp_peers_example_using_pillar_|-ntp_peers_example_using_pillar_|-managed': {
                    '__id__': 'ntp_peers_example_using_pillar',
                    '__run_num__': 0,
                    'already_configured': False,
                    'changes': {
                        'diff': '[edit system ntp] peer 192.168.0.1 { ... }+ peer 172.17.17.1;+ peer 172.17.17.3;'
                    },
                    'comment': 'Testing mode: Configuration discarded.',
                    'duration': 7400.759,
                    'loaded_config': 'system { ntp { peer 172.17.17.1; peer 172.17.17.3; } }',
                    'name': 'ntp_peers_example_using_pillar',
                    'result': None,
                    'start_time': '12:09:09.811445'
                }
            }
        }
    '''
    # Start from the default state output skeleton (result=False, no changes).
    ret = salt.utils.napalm.default_ret(name)
    # The user can override the flags via the equivalent CLI args,
    # which have higher precedence than the state parameters.
    test = __salt__['config.merge']('test', test)
    debug = __salt__['config.merge']('debug', debug)
    commit = __salt__['config.merge']('commit', commit)
    replace = __salt__['config.merge']('replace', replace)  # this might be a bit risky
    skip_verify = __salt__['config.merge']('skip_verify', skip_verify)
    commit_in = __salt__['config.merge']('commit_in', commit_in)
    commit_at = __salt__['config.merge']('commit_at', commit_at)
    revert_in = __salt__['config.merge']('revert_in', revert_in)
    revert_at = __salt__['config.merge']('revert_at', revert_at)
    # NOTE(review): ``context``, ``commit_in``/``commit_at`` and
    # ``revert_in``/``revert_at`` are not named parameters of _update_config;
    # they are forwarded through its **template_vars into net.load_template --
    # confirm that function accepts them.
    config_update_ret = _update_config(template_name=template_name,
                                       template_source=template_source,
                                       template_hash=template_hash,
                                       template_hash_name=template_hash_name,
                                       saltenv=saltenv,
                                       template_engine=template_engine,
                                       skip_verify=skip_verify,
                                       context=context,
                                       defaults=defaults,
                                       test=test,
                                       commit=commit,
                                       commit_in=commit_in,
                                       commit_at=commit_at,
                                       revert_in=revert_in,
                                       revert_at=revert_at,
                                       debug=debug,
                                       replace=replace,
                                       **template_vars)
    # Translate the execution-module result into the state output format.
    return salt.utils.napalm.loaded_ret(ret, config_update_ret, test, debug)
def commit_cancelled(name):
    '''
    .. versionadded:: 2019.2.0

    Cancel a commit scheduled to be executed via the ``commit_in`` and
    ``commit_at`` arguments from the
    :py:func:`net.load_template <salt.modules.napalm_network.load_template>` or
    :py:func:`net.load_config <salt.modules.napalm_network.load_config>`
    execution functions. The commit ID is displayed when the commit is scheduled
    via the functions named above.

    State SLS Example:

    .. code-block:: yaml

        '20180726083540640360':
          netconfig.commit_cancelled
    '''
    state_ret = {'name': name,
                 'result': None,
                 'changes': {},
                 'comment': ''}
    # In test mode only report what would happen, without touching the device.
    if __opts__['test']:
        state_ret['comment'] = 'It would cancel commit #{}'.format(name)
        return state_ret
    state_ret.update(__salt__['net.cancel_commit'](name))
    return state_ret
def commit_confirmed(name):
    '''
    .. versionadded:: 2019.2.0

    Confirm a commit scheduled to be reverted via the ``revert_in`` and
    ``revert_at`` arguments from the
    :mod:`net.load_template <salt.modules.napalm_network.load_template>` or
    :mod:`net.load_config <salt.modules.napalm_network.load_config>`
    execution functions. The commit ID is displayed when the commit confirmed
    is scheduled via the functions named above.

    State SLS Example:

    .. code-block:: yaml

        '20180726083540640360':
          netconfig.commit_confirmed
    '''
    state_ret = {'name': name,
                 'result': None,
                 'changes': {},
                 'comment': ''}
    # In test mode only report what would happen, without touching the device.
    if __opts__['test']:
        state_ret['comment'] = 'It would confirm commit #{}'.format(name)
        return state_ret
    state_ret.update(__salt__['net.confirm_commit'](name))
    return state_ret
|
saltstack/salt
|
salt/states/netconfig.py
|
saved
|
python
|
def saved(name,
          source='running',
          user=None,
          group=None,
          mode=None,
          attrs=None,
          makedirs=False,
          dir_mode=None,
          replace=True,
          backup='',
          show_changes=True,
          create=True,
          tmp_dir='',
          tmp_ext='',
          encoding=None,
          encoding_errors='strict',
          allow_empty=False,
          follow_symlinks=True,
          check_cmd=None,
          win_owner=None,
          win_perms=None,
          win_deny_perms=None,
          win_inheritance=True,
          win_perms_reset=False,
          **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Save the device configuration to a file on the local file system.
    To push the resulting files to the Master, use the
    :mod:`cp.push <salt.modules.cp.push>` execution function.

    name
        Absolute path of the file where the configuration is saved.

    source: ``running``
        The configuration source. Choose from: ``running``, ``candidate``,
        ``startup``. Default: ``running``.

    The remaining arguments mirror those of the
    :py:func:`file.managed <salt.states.file.managed>` state, to which they
    are forwarded verbatim:

    user / group / mode / attrs
        Ownership, permission mode and file attributes for the saved file
        (``group``, ``mode`` and ``attrs`` are not supported on Windows).

    makedirs: ``False``
        Create missing parent directories; ``dir_mode`` sets their
        permissions when they are created.

    replace: ``True``
        If ``False`` and the file already exists, the contents are left
        untouched (ownership/permissions are still enforced).

    backup
        Backup mode override for this file; see the
        :ref:`backup_mode documentation <file-state-backups>`.

    show_changes: ``True``
        Return a unified diff of the change instead of a plain boolean.

    create: ``True``
        If ``False``, manage the file only when it already exists.

    tmp_dir / tmp_ext
        Directory and suffix for the temporary file created when
        ``check_cmd`` is used.

    encoding / encoding_errors
        Text encoding for the written file and the codec error-handling
        scheme (default ``'strict'``). See
        https://docs.python.org/3/library/codecs.html#error-handlers.

    allow_empty: ``False``
        If ``False``, fail the state when the contents are empty.

    follow_symlinks: ``True``
        If the destination path is a symlink, modify the file it points to.

    check_cmd
        Validation command run against a temporary copy of the new
        contents; a nonzero exit status aborts the change.

    win_owner / win_perms / win_deny_perms / win_inheritance / win_perms_reset
        Windows-specific ownership, grant/deny permission dictionaries,
        DACL inheritance and DACL reset behavior.

    State SLS Example:

    .. code-block:: yaml

        /var/backups/{{ opts.id }}/{{ salt.status.time('%s') }}.cfg:
          netconfig.saved:
            - source: running
            - makedirs: true

    The state SLS above would create a backup config grouping the files by
    the Minion ID, in chronological files. For example, if the state is
    executed on the 3rd of August 2018, at 5:15PM, on the Minion
    ``core01.lon01``, the configuration would be saved in the file:
    ``/var/backups/core01.lon01/1533316558.cfg``
    '''
    # Retrieve the requested configuration source from the device first.
    config = __salt__['net.config'](source=source)
    if not config['result']:
        # Could not fetch the configuration -- fail with the device's reason.
        return {
            'name': name,
            'changes': {},
            'result': False,
            'comment': config['comment']
        }
    # Persist the retrieved configuration text through file.managed, so all
    # the usual file-management behavior (diffing, backups, checks) applies.
    return __states__['file.managed'](name,
                                      user=user,
                                      group=group,
                                      mode=mode,
                                      attrs=attrs,
                                      makedirs=makedirs,
                                      dir_mode=dir_mode,
                                      replace=replace,
                                      backup=backup,
                                      show_changes=show_changes,
                                      create=create,
                                      contents=config['out'][source],
                                      tmp_dir=tmp_dir,
                                      tmp_ext=tmp_ext,
                                      encoding=encoding,
                                      encoding_errors=encoding_errors,
                                      allow_empty=allow_empty,
                                      follow_symlinks=follow_symlinks,
                                      check_cmd=check_cmd,
                                      win_owner=win_owner,
                                      win_perms=win_perms,
                                      win_deny_perms=win_deny_perms,
                                      win_inheritance=win_inheritance,
                                      win_perms_reset=win_perms_reset,
                                      **kwargs)
|
.. versionadded:: 2019.2.0
Save the configuration to a file on the local file system.
name
Absolute path to file where to save the configuration.
To push the files to the Master, use
:mod:`cp.push <salt.modules.cp.push>` Execution function.
source: ``running``
The configuration source. Choose from: ``running``, ``candidate``,
``startup``. Default: ``running``.
user
The user to own the file, this defaults to the user salt is running as
on the minion
group
The group ownership set for the file, this defaults to the group salt
is running as on the minion. On Windows, this is ignored
mode
The permissions to set on this file, e.g. ``644``, ``0775``, or
``4664``.
The default mode for new files and directories corresponds to the
umask of the salt process. The mode of existing files and directories
will only be changed if ``mode`` is specified.
.. note::
This option is **not** supported on Windows.
attrs
The attributes to have on this file, e.g. ``a``, ``i``. The attributes
can be any or a combination of the following characters:
``aAcCdDeijPsStTu``.
.. note::
This option is **not** supported on Windows.
makedirs: ``False``
If set to ``True``, then the parent directories will be created to
facilitate the creation of the named file. If ``False``, and the parent
directory of the destination file doesn't exist, the state will fail.
dir_mode
If directories are to be created, passing this option specifies the
permissions for those directories. If this is not set, directories
will be assigned permissions by adding the execute bit to the mode of
the files.
The default mode for new files and directories corresponds umask of salt
process. For existing files and directories it's not enforced.
replace: ``True``
If set to ``False`` and the file already exists, the file will not be
modified even if changes would otherwise be made. Permissions and
ownership will still be enforced, however.
backup
Overrides the default backup mode for this specific file. See
:ref:`backup_mode documentation <file-state-backups>` for more details.
show_changes: ``True``
Output a unified diff of the old file and the new file. If ``False``
return a boolean if any changes were made.
create: ``True``
If set to ``False``, then the file will only be managed if the file
already exists on the system.
encoding
If specified, then the specified encoding will be used. Otherwise, the
file will be encoded using the system locale (usually UTF-8). See
https://docs.python.org/3/library/codecs.html#standard-encodings for
the list of available encodings.
encoding_errors: ``'strict'``
Error-handling scheme for encoding. Default is ``'strict'``.
See https://docs.python.org/3/library/codecs.html#error-handlers
for the list of available schemes.
allow_empty: ``False``
If set to ``False``, then the state will fail if the contents specified
by ``contents_pillar`` or ``contents_grains`` are empty.
follow_symlinks: ``True``
If the desired path is a symlink follow it and make changes to the
file to which the symlink points.
check_cmd
The specified command will be run with an appended argument of a
*temporary* file containing the new managed contents. If the command
exits with a zero status the new managed contents will be written to
the managed destination. If the command exits with a nonzero exit
code, the state will fail and no changes will be made to the file.
tmp_dir
Directory for temp file created by ``check_cmd``. Useful for checkers
dependent on config file location (e.g. daemons restricted to their
own config directories by an apparmor profile).
tmp_ext
Suffix for temp file created by ``check_cmd``. Useful for checkers
dependent on config file extension (e.g. the init-checkconf upstart
config checker).
win_owner: ``None``
The owner of the directory. If this is not passed, user will be used. If
user is not passed, the account under which Salt is running will be
used.
win_perms: ``None``
A dictionary containing permissions to grant and their propagation. For
example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a
single basic perm or a list of advanced perms. ``perms`` must be
specified. ``applies_to`` does not apply to file objects.
win_deny_perms: ``None``
A dictionary containing permissions to deny and their propagation. For
example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a
single basic perm or a list of advanced perms. ``perms`` must be
specified. ``applies_to`` does not apply to file objects.
win_inheritance: ``True``
True to inherit permissions from the parent directory, False not to
inherit permission.
win_perms_reset: ``False``
If ``True`` the existing DACL will be cleared and replaced with the
settings defined in this function. If ``False``, new entries will be
appended to the existing DACL. Default is ``False``.
State SLS Example:
.. code-block:: yaml
/var/backups/{{ opts.id }}/{{ salt.status.time('%s') }}.cfg:
netconfig.saved:
- source: running
- makedirs: true
The state SLS above would create a backup config grouping the files by the
Minion ID, in chronological files. For example, if the state is executed at
on the 3rd of August 2018, at 5:15PM, on the Minion ``core1.lon01``, the
configuration would saved in the file:
``/var/backups/core01.lon01/1533316558.cfg``
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/netconfig.py#L247-L451
| null |
# -*- coding: utf-8 -*-
'''
Network Config
==============
Manage the configuration on a network device given a specific static config or template.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com> & Jerome Fleury <jf@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`NAPALM proxy minion <salt.proxy.napalm>`
- :mod:`Network-related basic features execution module <salt.modules.napalm_network>`
.. versionadded:: 2017.7.0
'''
# Import Salt libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
# import Salt libs
import salt.utils.napalm
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'netconfig'
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
    '''
    NAPALM library must be installed for this module to work and run in a (proxy) minion.
    '''
    # Delegate availability detection to the shared NAPALM helper, which
    # decides whether this state module should be loaded for the current
    # (proxy) minion configuration.
    return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _update_config(template_name,
                   template_source=None,
                   template_hash=None,
                   template_hash_name=None,
                   template_user='root',
                   template_group='root',
                   template_mode='755',
                   template_attrs='--------------e----',
                   saltenv=None,
                   template_engine='jinja',
                   skip_verify=False,
                   defaults=None,
                   test=False,
                   commit=True,
                   debug=False,
                   replace=False,
                   **template_vars):
    '''
    Helper executing the state logic.

    Currently this is a thin forwarder to the ``net.load_template``
    function from the
    :mod:`Network-related basic features execution module <salt.modules.napalm_network>`;
    the indirection exists so the delegation target can change later
    without touching the public state functions.
    '''
    load_template = __salt__['net.load_template']
    return load_template(template_name,
                         template_source=template_source,
                         template_hash=template_hash,
                         template_hash_name=template_hash_name,
                         template_user=template_user,
                         template_group=template_group,
                         template_mode=template_mode,
                         template_attrs=template_attrs,
                         saltenv=saltenv,
                         template_engine=template_engine,
                         skip_verify=skip_verify,
                         defaults=defaults,
                         test=test,
                         commit=commit,
                         debug=debug,
                         replace=replace,
                         **template_vars)
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def replace_pattern(name,
                    pattern,
                    repl,
                    count=0,
                    flags=8,
                    bufsize=1,
                    append_if_not_found=False,
                    prepend_if_not_found=False,
                    not_found_content=None,
                    search_only=False,
                    show_changes=True,
                    backslash_literal=False,
                    source='running',
                    path=None,
                    test=False,
                    replace=True,
                    debug=False,
                    commit=True):
    '''
    .. versionadded:: 2019.2.0

    Replace occurrences of a pattern in the configuration source, wrapping
    Python's :py:func:`~re.sub`. If ``show_changes`` is ``True``, a diff of
    what changed is returned; otherwise ``True`` is returned when changes
    are made and ``False`` when none are.

    pattern
        A regular expression, matched using Python's :py:func:`~re.search`.

    repl
        The replacement text.

    count: ``0``
        Maximum number of occurrences to replace; ``0`` replaces them all.

    flags (list or int): ``8``
        ``re`` module flags, either as a list of human-friendly flag names
        (e.g. ``['IGNORECASE', 'MULTILINE']``) or as an int equal to the
        XOR (``|``) of the desired flag values. Defaults to 8 (MULTILINE).

    bufsize (int or str): ``1``
        How much of the configuration to buffer into memory at once. The
        default ``1`` processes one line at a time; the special value
        ``file`` reads the entire configuration into memory first.

    append_if_not_found: ``False``
        Append the content when the pattern is not found.

    prepend_if_not_found: ``False``
        Prepend the content when the pattern is not found.

    not_found_content
        Content to use for append/prepend if not found. If ``None``
        (default), uses ``repl``. Useful when ``repl`` uses references to
        groups in ``pattern``.

    search_only: ``False``
        Make no changes at all; simply return whether the pattern matched.

    show_changes: ``True``
        Return a diff of the changes instead of a boolean.

    backslash_literal: ``False``
        Interpret backslashes in ``repl`` as literal backslashes rather
        than escape characters; helps with append/prepend on reruns.

    source: ``running``
        The configuration source. Choose from: ``running``, ``candidate``,
        or ``startup``. Default: ``running``.

    path
        Save the temporary configuration to this path and read from there.

    test: ``False``
        Dry run: apply the config, discard it and return the changes.

    replace: ``True``
        Load and replace the configuration. Default: ``True``.

    debug: ``False``
        Insert a ``loaded_config`` key in the output containing the raw
        configuration loaded on the device.

    commit: ``True``
        Commit the configuration changes. Default: ``True``.

    State SLS Example:

    .. code-block:: yaml

        update_policy_name:
          netconfig.replace_pattern:
            - pattern: OLD-POLICY-NAME
            - repl: new-policy-name
            - debug: true
    '''
    ret = salt.utils.napalm.default_ret(name)
    # CLI-provided flags (e.g. ``test=True`` on the command line) take
    # precedence over the equivalent state arguments.
    merge = __salt__['config.merge']
    test = merge('test', test)
    debug = merge('debug', debug)
    commit = merge('commit', commit)
    replace = merge('replace', replace)  # this might be a bit risky
    replace_ret = __salt__['net.replace_pattern'](pattern,
                                                  repl,
                                                  count=count,
                                                  flags=flags,
                                                  bufsize=bufsize,
                                                  append_if_not_found=append_if_not_found,
                                                  prepend_if_not_found=prepend_if_not_found,
                                                  not_found_content=not_found_content,
                                                  search_only=search_only,
                                                  show_changes=show_changes,
                                                  backslash_literal=backslash_literal,
                                                  source=source,
                                                  path=path,
                                                  test=test,
                                                  replace=replace,
                                                  debug=debug,
                                                  commit=commit)
    # Shape the execution-module result into a proper state return.
    return salt.utils.napalm.loaded_ret(ret, replace_ret, test, debug)
def managed(name,
            template_name=None,
            template_source=None,
            template_hash=None,
            template_hash_name=None,
            saltenv='base',
            template_engine='jinja',
            skip_verify=False,
            context=None,
            defaults=None,
            test=False,
            commit=True,
            debug=False,
            replace=False,
            commit_in=None,
            commit_at=None,
            revert_in=None,
            revert_at=None,
            **template_vars):
    '''
    Manages the configuration on network devices.

    By default this state commits the changes on the device. If no changes
    are required, nothing is committed and the ``already_configured`` field
    in the output is set to ``True``. Set ``test=True`` (state argument or
    CLI) for a dry run that loads, diffs and discards the configuration.
    Set ``commit=False`` to load without committing -- recommended only for
    exceptional cases, as the candidate config buffer stays locked until it
    is cleared or merged. Set ``replace=True`` to replace the whole
    configuration instead of merging; use with caution!

    template_name
        Path to the template source: a ``salt://`` URL under the
        ``file_roots``, an absolute local path, or a remote ``http://``,
        ``https://`` or ``ftp://`` URL.

        .. versionchanged:: 2019.2.0
            May also be a list of templates; they are rendered and the
            resulting configuration is loaded at once, as a single chunk.

    template_source: None
        Inline config template to be rendered and loaded on the device.

    template_hash: None
        Hash of the template file. Format:
        ``{hash_type: 'md5', 'hsum': <md5sum>}``.

    template_hash_name: None
        When ``template_hash`` refers to a remote file, the filename to
        look for in that file.

    saltenv: base
        Template environment; influences relative imports in templates.

    template_engine: jinja
        One of: :mod:`cheetah<salt.renderers.cheetah>`,
        :mod:`genshi<salt.renderers.genshi>`,
        :mod:`jinja<salt.renderers.jinja>`,
        :mod:`mako<salt.renderers.mako>`,
        :mod:`py<salt.renderers.py>`,
        :mod:`wempy<salt.renderers.wempy>`.

    skip_verify: False
        If ``True``, hash verification of remote file sources is skipped
        and ``source_hash`` is ignored.

        .. versionchanged:: 2017.7.1

    test: False
        Dry run: apply the config, discard it and return the changes.

    commit: True
        Commit? Default: ``True``.

    debug: False
        Insert a ``loaded_config`` key in the output containing the raw
        rendered template. Cannot be set directly on the command line;
        pass it through the ``pillar`` variable instead (see below).

    commit_in: ``None``
        Commit the changes in a given number of minutes/hours, e.g. ``5``
        (5 minutes), ``2m``, ``1h``, ``5h30m``. Works on any platform (the
        scheduling is done by Salt, not the network OS). The pending commit
        can be discarded via
        :py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>`
        using the returned commit ID. Note the diff may change between
        scheduling and execution if something else alters the config.

        .. versionadded:: 2019.2.0

    commit_at: ``None``
        Commit the changes at a specific time, e.g. ``1am``, ``13:20``,
        ``1:20am``. Same platform notes and cancellation mechanism as
        ``commit_in``.

        .. versionadded:: 2019.2.0

    revert_in: ``None``
        Commit, then revert the changes after the given interval (same
        formats as ``commit_in``) unless confirmed via
        :mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>`
        with the returned commit ID. Junos uses its native commit-confirmed;
        on every other platform the revert is performed by Salt itself, so
        the device must stay reachable -- be careful with changes that could
        cut connectivity, and note that intermediate changes made by others
        would be reverted too.

        .. versionadded:: 2019.2.0

    revert_at: ``None``
        Commit, then revert at a specific time (same formats as
        ``commit_at``) unless confirmed; see ``revert_in`` for the
        platform caveats.

        .. versionadded:: 2019.2.0

    replace: False
        Load and replace the configuration. Default: ``False`` (load merge).

    context: None
        Overrides default context variables passed to the template.

        .. versionadded:: 2019.2.0

    defaults: None
        Default variables/context passed to the template.

    template_vars
        Any other variables sent to the template rendering system. Prefer
        the ``context`` argument to avoid conflicts with other arguments.

    SLS Example (e.g.: under salt://router/config.sls):

    .. code-block:: yaml

        whole_config_example:
            netconfig.managed:
                - template_name: salt://path/to/complete_config.jinja
                - debug: True
                - replace: True
        ntp_peers_example:
            netconfig.managed:
                - template_name: http://bit.ly/2gKOj20
                - skip_verify: False
                - debug: True
                - peers:
                    - 192.168.0.1
                    - 192.168.0.2
        ntp_peers_example_using_pillar:
            netconfig.managed:
                - template_name: http://bit.ly/2gKOj20
                - peers: {{ pillar.get('ntp.peers', []) }}

    Usage examples:

    .. code-block:: bash

        $ sudo salt 'juniper.device' state.sls router.config test=True
        $ sudo salt -N all-routers state.sls router.config pillar="{'debug': True}"
    '''
    ret = salt.utils.napalm.default_ret(name)
    # CLI-provided flags (e.g. ``test=True`` on the command line) take
    # precedence over the equivalent state arguments.
    merge = __salt__['config.merge']
    test = merge('test', test)
    debug = merge('debug', debug)
    commit = merge('commit', commit)
    replace = merge('replace', replace)  # this might be a bit risky
    skip_verify = merge('skip_verify', skip_verify)
    commit_in = merge('commit_in', commit_in)
    commit_at = merge('commit_at', commit_at)
    revert_in = merge('revert_in', revert_in)
    revert_at = merge('revert_at', revert_at)
    # Render the template(s) and load the result on the device.
    loaded = _update_config(template_name=template_name,
                            template_source=template_source,
                            template_hash=template_hash,
                            template_hash_name=template_hash_name,
                            saltenv=saltenv,
                            template_engine=template_engine,
                            skip_verify=skip_verify,
                            context=context,
                            defaults=defaults,
                            test=test,
                            commit=commit,
                            commit_in=commit_in,
                            commit_at=commit_at,
                            revert_in=revert_in,
                            revert_at=revert_at,
                            debug=debug,
                            replace=replace,
                            **template_vars)
    # Shape the execution-module result into a proper state return.
    return salt.utils.napalm.loaded_ret(ret, loaded, test, debug)
def commit_cancelled(name):
    '''
    .. versionadded:: 2019.2.0

    Cancel a commit scheduled to be executed via the ``commit_in`` and
    ``commit_at`` arguments from the
    :py:func:`net.load_template <salt.modules.napalm_network.load_template>` or
    :py:func:`net.load_config <salt.modules.napalm_network.load_config>`
    execution functions. The commit ID to use here is the one displayed when
    the commit was scheduled via the functions named above.

    State SLS Example:

    .. code-block:: yaml

        '20180726083540640360':
          netconfig.commit_cancelled
    '''
    # Standard state output skeleton; ``name`` carries the commit ID.
    state_ret = {
        'name': name,
        'result': None,
        'changes': {},
        'comment': ''
    }
    if not __opts__['test']:
        # Delegate the actual cancellation to the execution module and
        # merge its result/comment into the state output.
        state_ret.update(__salt__['net.cancel_commit'](name))
        return state_ret
    state_ret['comment'] = 'It would cancel commit #{}'.format(name)
    return state_ret
def commit_confirmed(name):
    '''
    .. versionadded:: 2019.2.0

    Confirm a commit scheduled to be reverted via the ``revert_in`` and
    ``revert_at`` arguments from the
    :mod:`net.load_template <salt.modules.napalm_network.load_template>` or
    :mod:`net.load_config <salt.modules.napalm_network.load_config>`
    execution functions. The commit ID to use here is the one displayed when
    the commit confirmed was scheduled via the functions named above.

    State SLS Example:

    .. code-block:: yaml

        '20180726083540640360':
          netconfig.commit_confirmed
    '''
    # Standard state output skeleton; ``name`` carries the commit ID.
    state_ret = {
        'name': name,
        'result': None,
        'changes': {},
        'comment': ''
    }
    if not __opts__['test']:
        # Delegate the confirmation to the execution module and merge its
        # result/comment into the state output.
        state_ret.update(__salt__['net.confirm_commit'](name))
        return state_ret
    state_ret['comment'] = 'It would confirm commit #{}'.format(name)
    return state_ret
|
saltstack/salt
|
salt/states/netconfig.py
|
managed
|
python
|
def managed(name,
            template_name=None,
            template_source=None,
            template_hash=None,
            template_hash_name=None,
            saltenv='base',
            template_engine='jinja',
            skip_verify=False,
            context=None,
            defaults=None,
            test=False,
            commit=True,
            debug=False,
            replace=False,
            commit_in=None,
            commit_at=None,
            revert_in=None,
            revert_at=None,
            **template_vars):
    '''
    Manage the configuration on network devices, rendered from a static
    config or a template.

    By default this state commits the changes on the device. If no change is
    required, nothing is committed and the ``already_configured`` field of the
    output dictionary is set to ``True``. Set ``test=True`` (also accepted as
    a CLI argument) for a dry run that loads, diffs, then discards the
    changes. Set ``commit=False`` to load without committing -- recommended
    only for exceptional cases, as the config DB stays locked and the
    candidate buffer is neither cleared nor merged into the running config.
    Set ``replace=True`` to replace the configuration instead of merging --
    use with caution!

    template_name
        Path to the template source, either local or remote. The recommended
        location is under the ``file_roots`` configured on the master; e.g.
        with ``file_roots`` pointing at ``/etc/salt/states``, a template at
        ``/etc/salt/states/templates/example.jinja`` is used as
        ``salt://templates/example.jinja``. Examples:

        - ``salt://my_template.jinja``
        - ``/absolute/path/to/my_template.jinja``
        - ``http://example.com/template.cheetah``
        - ``https://example.com/template.mako``
        - ``ftp://example.com/template.py``

        .. versionchanged:: 2019.2.0
            May also be a list of templates; the rendered results are loaded
            at once, as a single configuration chunk.

    template_source: None
        Inline config template to be rendered and loaded on the device.

    template_hash: None
        Hash of the template file. Format:
        ``{hash_type: 'md5', 'hsum': <md5sum>}``

    template_hash_name: None
        When ``template_hash`` refers to a remote file, this specifies the
        filename to look for in that file.

    saltenv: base
        Specifies the template environment. This will influence the relative
        imports inside the templates.

    template_engine: jinja
        The following templates engines are supported:

        - :mod:`cheetah<salt.renderers.cheetah>`
        - :mod:`genshi<salt.renderers.genshi>`
        - :mod:`jinja<salt.renderers.jinja>`
        - :mod:`mako<salt.renderers.mako>`
        - :mod:`py<salt.renderers.py>`
        - :mod:`wempy<salt.renderers.wempy>`

    skip_verify: False
        If ``True``, hash verification of remote file sources (``http://``,
        ``https://``, ``ftp://``) will be skipped, and the ``source_hash``
        argument will be ignored.

        .. versionchanged:: 2017.7.1

    test: False
        Dry run? If set to ``True``, will apply the config, discard and
        return the changes. Default: ``False`` (will commit the changes on
        the device).

    commit: True
        Commit? Default: ``True``.

    debug: False
        Debug mode. Will insert a new key under the output dictionary, as
        ``loaded_config`` containing the raw result after the template was
        rendered.

        .. note::
            This argument cannot be used directly on the command line.
            Instead, it can be passed through the ``pillar`` variable when
            executing either of the :py:func:`state.sls
            <salt.modules.state.sls>` or :py:func:`state.apply
            <salt.modules.state.apply>` functions (see the examples below).

    commit_in: ``None``
        Commit the changes in a specific number of minutes / hours. Accepted
        formats: ``5`` (commit in 5 minutes), ``2m`` (commit in 2 minutes),
        ``1h`` (commit the changes in 1 hour), ``5h30m`` (commit the changes
        in 5 hours and 30 minutes). This works on any platform, as it does
        not rely on native features of the network operating system. A
        scheduled commit can be discarded afterwards using
        :py:func:`net.cancel_commit
        <salt.modules.napalm_network.cancel_commit>` with the commit ID
        returned by this function.

        .. warning::
            Salt will load the exact configuration you expect, however the
            diff may change in time (i.e., if a user applies a manual
            configuration change, or a different process or command changes
            the configuration in the meanwhile).

        .. versionadded:: 2019.2.0

    commit_at: ``None``
        Commit the changes at a specific time. Accepted formats: ``1am``
        (will commit the changes at the next 1AM), ``13:20`` (will commit at
        13:20), ``1:20am``, etc. Same notes and caveats as ``commit_in``.

        .. versionadded:: 2019.2.0

    revert_in: ``None``
        Commit and revert the changes in a specific number of minutes /
        hours (same formats as ``commit_in``). To confirm the commit and
        prevent reverting the changes, execute
        :mod:`net.confirm_commit
        <salt.modules.napalm_network.confirm_commit>` with the commit ID
        returned by this function.

        .. warning::
            Only Junos (among the NAPALM core platforms) supports the commit
            confirmed natively; for every other platform the revert is
            performed by Salt, which means the device must remain reachable
            when Salt attempts the revert, and any configuration change made
            in the meanwhile (by a user or a different process) will be
            reverted as well, as Salt cannot be aware of it.

        .. versionadded:: 2019.2.0

    revert_at: ``None``
        Commit and revert the changes at a specific time (same formats as
        ``commit_at``) unless confirmed. Same caveats as ``revert_in``.

        .. versionadded:: 2019.2.0

    replace: False
        Load and replace the configuration. Default: ``False`` (will apply
        load merge).

    context: None
        Overrides default context variables passed to the template.

        .. versionadded:: 2019.2.0

    defaults: None
        Default variables/context passed to the template.

    template_vars
        Dictionary with the arguments/context to be used when the template
        is rendered. Do not explicitly specify this argument; it represents
        any other variable that will be sent to the template rendering
        system. Prefer the ``context`` argument instead, to avoid conflicts
        with other arguments.

    SLS Example (e.g.: under salt://router/config.sls):

    .. code-block:: yaml

        whole_config_example:
          netconfig.managed:
            - template_name: salt://path/to/complete_config.jinja
            - debug: True
            - replace: True
        ntp_peers_example:
          netconfig.managed:
            - template_name: http://bit.ly/2gKOj20
            - skip_verify: False
            - debug: True
            - peers:
              - 192.168.0.1
              - 192.168.0.1
        ntp_peers_example_using_pillar:
          netconfig.managed:
            - template_name: http://bit.ly/2gKOj20
            - peers: {{ pillar.get('ntp.peers', []) }}

    Multi template example:

    .. code-block:: yaml

        hostname_and_ntp:
          netconfig.managed:
            - template_name:
                - https://bit.ly/2OhSgqP
                - https://bit.ly/2M6C4Lx
                - https://bit.ly/2OIWVTs
            - debug: true
            - context:
                hostname: {{ opts.id }}
                servers:
                  - 172.17.17.1
                  - 172.17.17.2
                peers:
                  - 192.168.0.1
                  - 192.168.0.2

    Usage examples:

    .. code-block:: bash

        $ sudo salt 'juniper.device' state.sls router.config test=True
        $ sudo salt -N all-routers state.sls router.config pillar="{'debug': True}"
    '''
    ret = salt.utils.napalm.default_ret(name)
    # Flags supplied on the CLI (or via pillar) have higher precedence than
    # the equivalent state arguments, hence each one is merged below.
    merge_opt = __salt__['config.merge']
    test = merge_opt('test', test)
    debug = merge_opt('debug', debug)
    commit = merge_opt('commit', commit)
    replace = merge_opt('replace', replace)  # this might be a bit risky
    skip_verify = merge_opt('skip_verify', skip_verify)
    commit_in = merge_opt('commit_in', commit_in)
    commit_at = merge_opt('commit_at', commit_at)
    revert_in = merge_opt('revert_in', revert_in)
    revert_at = merge_opt('revert_at', revert_at)
    # NOTE: ``context`` is forwarded here although _update_config does not
    # declare it explicitly; it travels through its **template_vars.
    loaded = _update_config(template_name=template_name,
                            template_source=template_source,
                            template_hash=template_hash,
                            template_hash_name=template_hash_name,
                            saltenv=saltenv,
                            template_engine=template_engine,
                            skip_verify=skip_verify,
                            context=context,
                            defaults=defaults,
                            test=test,
                            commit=commit,
                            commit_in=commit_in,
                            commit_at=commit_at,
                            revert_in=revert_in,
                            revert_at=revert_at,
                            debug=debug,
                            replace=replace,
                            **template_vars)
    return salt.utils.napalm.loaded_ret(ret, loaded, test, debug)
|
Manages the configuration on network devices.
By default this state will commit the changes on the device. If there are no changes required, it does not commit
and the field ``already_configured`` from the output dictionary will be set as ``True`` to notify that.
To avoid committing the configuration, set the argument ``test`` to ``True`` (or via the CLI argument ``test=True``)
and will discard (dry run).
To preserve the changes, set ``commit`` to ``False`` (either as CLI argument, either as state parameter).
However, this is recommended to be used only in exceptional cases when there are applied few consecutive states
and/or configuration changes. Otherwise the user might forget that the config DB is locked and the candidate config
buffer is not cleared/merged in the running config.
To replace the config, set ``replace`` to ``True``. This option is recommended to be used with caution!
template_name
Identifies path to the template source. The template can be either stored on the local machine,
either remotely.
The recommended location is under the ``file_roots`` as specified in the master config file.
For example, let's suppose the ``file_roots`` is configured as:
.. code-block:: yaml
file_roots:
base:
- /etc/salt/states
Placing the template under ``/etc/salt/states/templates/example.jinja``, it can be used as
``salt://templates/example.jinja``.
Alternatively, for local files, the user can specify the absolute path.
If remotely, the source can be retrieved via ``http``, ``https`` or ``ftp``.
Examples:
- ``salt://my_template.jinja``
- ``/absolute/path/to/my_template.jinja``
- ``http://example.com/template.cheetah``
- ``https://example.com/template.mako``
- ``ftp://example.com/template.py``
.. versionchanged:: 2019.2.0
This argument can now support a list of templates to be rendered.
The resulting configuration text is loaded at once, as a single
configuration chunk.
template_source: None
Inline config template to be rendered and loaded on the device.
template_hash: None
Hash of the template file. Format: ``{hash_type: 'md5', 'hsum': <md5sum>}``
template_hash_name: None
When ``template_hash`` refers to a remote file, this specifies the filename to look for in that file.
saltenv: base
Specifies the template environment. This will influence the relative imports inside the templates.
template_engine: jinja
The following templates engines are supported:
- :mod:`cheetah<salt.renderers.cheetah>`
- :mod:`genshi<salt.renderers.genshi>`
- :mod:`jinja<salt.renderers.jinja>`
- :mod:`mako<salt.renderers.mako>`
- :mod:`py<salt.renderers.py>`
- :mod:`wempy<salt.renderers.wempy>`
skip_verify: False
If ``True``, hash verification of remote file sources (``http://``, ``https://``, ``ftp://``) will be skipped,
and the ``source_hash`` argument will be ignored.
.. versionchanged:: 2017.7.1
test: False
Dry run? If set to ``True``, will apply the config, discard and return the changes. Default: ``False``
(will commit the changes on the device).
commit: True
Commit? Default: ``True``.
debug: False
Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` containing the raw
result after the template was rendered.
.. note::
This argument cannot be used directly on the command line. Instead,
it can be passed through the ``pillar`` variable when executing
either of the :py:func:`state.sls <salt.modules.state.sls>` or
:py:func:`state.apply <salt.modules.state.apply>` (see below for an
example).
commit_in: ``None``
Commit the changes in a specific number of minutes / hours. Example of
accepted formats: ``5`` (commit in 5 minutes), ``2m`` (commit in 2
minutes), ``1h`` (commit the changes in 1 hour)`, ``5h30m`` (commit
the changes in 5 hours and 30 minutes).
.. note::
This feature works on any platforms, as it does not rely on the
native features of the network operating system.
.. note::
After the command is executed and the ``diff`` is not satisfactory,
or for any other reasons you have to discard the commit, you are
able to do so using the
:py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>`
execution function, using the commit ID returned by this function.
.. warning::
Using this feature, Salt will load the exact configuration you
expect, however the diff may change in time (i.e., if a user
applies a manual configuration change, or a different process or
command changes the configuration in the meanwhile).
.. versionadded:: 2019.2.0
commit_at: ``None``
Commit the changes at a specific time. Example of accepted formats:
``1am`` (will commit the changes at the next 1AM), ``13:20`` (will
commit at 13:20), ``1:20am``, etc.
.. note::
This feature works on any platforms, as it does not rely on the
native features of the network operating system.
.. note::
After the command is executed and the ``diff`` is not satisfactory,
or for any other reasons you have to discard the commit, you are
able to do so using the
:py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>`
execution function, using the commit ID returned by this function.
.. warning::
Using this feature, Salt will load the exact configuration you
expect, however the diff may change in time (i.e., if an user
applies a manual configuration change, or a different process or
command changes the configuration in the meanwhile).
.. versionadded:: 2019.2.0
revert_in: ``None``
Commit and revert the changes in a specific number of minutes / hours.
Example of accepted formats: ``5`` (revert in 5 minutes), ``2m`` (revert
in 2 minutes), ``1h`` (revert the changes in 1 hour)`, ``5h30m`` (revert
the changes in 5 hours and 30 minutes).
.. note::
To confirm the commit, and prevent reverting the changes, you will
have to execute the
:mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>`
function, using the commit ID returned by this function.
.. warning::
This works on any platform, regardless if they have or don't have
native capabilities to confirming a commit. However, please be
*very* cautious when using this feature: on Junos (as it is the only
NAPALM core platform supporting this natively) it executes a commit
confirmed as you would do from the command line.
All the other platforms don't have this capability natively,
therefore the revert is done via Salt. That means, your device needs
to be reachable at the moment when Salt will attempt to revert your
changes. Be cautious when pushing configuration changes that would
prevent you reach the device.
Similarly, if a user or a different process applies other
configuration changes in the meanwhile (between the moment you
commit and till the changes are reverted), these changes would be
equally reverted, as Salt cannot be aware of them.
.. versionadded:: 2019.2.0
revert_at: ``None``
Commit and revert the changes at a specific time. Example of accepted
formats: ``1am`` (will commit and revert the changes at the next 1AM),
``13:20`` (will commit and revert at 13:20), ``1:20am``, etc.
.. note::
To confirm the commit, and prevent reverting the changes, you will
have to execute the
:mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>`
function, using the commit ID returned by this function.
.. warning::
This works on any platform, regardless if they have or don't have
native capabilities to confirming a commit. However, please be
*very* cautious when using this feature: on Junos (as it is the only
NAPALM core platform supporting this natively) it executes a commit
confirmed as you would do from the command line.
All the other platforms don't have this capability natively,
therefore the revert is done via Salt. That means, your device needs
to be reachable at the moment when Salt will attempt to revert your
changes. Be cautious when pushing configuration changes that would
prevent you reach the device.
Similarly, if an user or a different process apply other
configuration changes in the meanwhile (between the moment you
commit and till the changes are reverted), these changes would be
equally reverted, as Salt cannot be aware of them.
.. versionadded:: 2019.2.0
replace: False
Load and replace the configuration. Default: ``False`` (will apply load merge).
context: None
Overrides default context variables passed to the template.
.. versionadded:: 2019.2.0
defaults: None
Default variables/context passed to the template.
template_vars
Dictionary with the arguments/context to be used when the template is rendered. Do not explicitly specify this
argument. This represents any other variable that will be sent to the template rendering system. Please
see an example below! In both ``ntp_peers_example_using_pillar`` and ``ntp_peers_example``, ``peers`` is sent as
template variable.
.. note::
It is more recommended to use the ``context`` argument instead, to
avoid any conflicts with other arguments.
SLS Example (e.g.: under salt://router/config.sls) :
.. code-block:: yaml
whole_config_example:
netconfig.managed:
- template_name: salt://path/to/complete_config.jinja
- debug: True
- replace: True
bgp_config_example:
netconfig.managed:
- template_name: /absolute/path/to/bgp_neighbors.mako
- template_engine: mako
prefix_lists_example:
netconfig.managed:
- template_name: prefix_lists.cheetah
- debug: True
- template_engine: cheetah
ntp_peers_example:
netconfig.managed:
- template_name: http://bit.ly/2gKOj20
- skip_verify: False
- debug: True
- peers:
- 192.168.0.1
- 192.168.0.1
ntp_peers_example_using_pillar:
netconfig.managed:
- template_name: http://bit.ly/2gKOj20
- peers: {{ pillar.get('ntp.peers', []) }}
Multi template example:
.. code-block:: yaml
hostname_and_ntp:
netconfig.managed:
- template_name:
- https://bit.ly/2OhSgqP
- https://bit.ly/2M6C4Lx
- https://bit.ly/2OIWVTs
- debug: true
- context:
hostname: {{ opts.id }}
servers:
- 172.17.17.1
- 172.17.17.2
peers:
- 192.168.0.1
- 192.168.0.2
Usage examples:
.. code-block:: bash
$ sudo salt 'juniper.device' state.sls router.config test=True
$ sudo salt -N all-routers state.sls router.config pillar="{'debug': True}"
``router.config`` depends on the location of the SLS file (see above). Running this command, will be executed all
five steps from above. These examples above are not meant to be used in a production environment, their sole purpose
is to provide usage examples.
Output example:
.. code-block:: bash
$ sudo salt 'juniper.device' state.sls router.config test=True
juniper.device:
----------
ID: ntp_peers_example_using_pillar
Function: netconfig.managed
Result: None
Comment: Testing mode: Configuration discarded.
Started: 12:01:40.744535
Duration: 8755.788 ms
Changes:
----------
diff:
[edit system ntp]
peer 192.168.0.1 { ... }
+ peer 172.17.17.1;
+ peer 172.17.17.3;
Summary for juniper.device
------------
Succeeded: 1 (unchanged=1, changed=1)
Failed: 0
------------
Total states run: 1
Total run time: 8.756 s
Raw output example (useful when the output is reused in other states/execution modules):
.. code-block:: bash
$ sudo salt --out=pprint 'juniper.device' state.sls router.config test=True debug=True
.. code-block:: python
{
'juniper.device': {
'netconfig_|-ntp_peers_example_using_pillar_|-ntp_peers_example_using_pillar_|-managed': {
'__id__': 'ntp_peers_example_using_pillar',
'__run_num__': 0,
'already_configured': False,
'changes': {
'diff': '[edit system ntp] peer 192.168.0.1 { ... }+ peer 172.17.17.1;+ peer 172.17.17.3;'
},
'comment': 'Testing mode: Configuration discarded.',
'duration': 7400.759,
'loaded_config': 'system { ntp { peer 172.17.17.1; peer 172.17.17.3; } }',
'name': 'ntp_peers_example_using_pillar',
'result': None,
'start_time': '12:09:09.811445'
}
}
}
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/netconfig.py#L454-L848
|
[
"def default_ret(name):\n '''\n Return the default dict of the state output.\n '''\n ret = {\n 'name': name,\n 'changes': {},\n 'result': False,\n 'comment': ''\n }\n return ret\n",
"def loaded_ret(ret, loaded, test, debug, compliance_report=False, opts=None):\n '''\n Return the final state output.\n ret\n The initial state output structure.\n loaded\n The loaded dictionary.\n '''\n # Always get the comment\n changes = {}\n ret['comment'] = loaded['comment']\n if 'diff' in loaded:\n changes['diff'] = loaded['diff']\n if 'commit_id' in loaded:\n changes['commit_id'] = loaded['commit_id']\n if 'compliance_report' in loaded:\n if compliance_report:\n changes['compliance_report'] = loaded['compliance_report']\n if debug and 'loaded_config' in loaded:\n changes['loaded_config'] = loaded['loaded_config']\n if changes.get('diff'):\n ret['comment'] = '{comment_base}\\n\\nConfiguration diff:\\n\\n{diff}'.format(comment_base=ret['comment'],\n diff=changes['diff'])\n if changes.get('loaded_config'):\n ret['comment'] = '{comment_base}\\n\\nLoaded config:\\n\\n{loaded_cfg}'.format(\n comment_base=ret['comment'],\n loaded_cfg=changes['loaded_config'])\n if changes.get('compliance_report'):\n ret['comment'] = '{comment_base}\\n\\nCompliance report:\\n\\n{compliance}'.format(\n comment_base=ret['comment'],\n compliance=salt.output.string_format(changes['compliance_report'], 'nested', opts=opts))\n if not loaded.get('result', False):\n # Failure of some sort\n return ret\n if not loaded.get('already_configured', True):\n # We're making changes\n if test:\n ret['result'] = None\n return ret\n # Not test, changes were applied\n ret.update({\n 'result': True,\n 'changes': changes,\n 'comment': \"Configuration changed!\\n{}\".format(loaded['comment'])\n })\n return ret\n # No changes\n ret.update({\n 'result': True,\n 'changes': {}\n })\n return ret\n",
"def _update_config(template_name,\n template_source=None,\n template_hash=None,\n template_hash_name=None,\n template_user='root',\n template_group='root',\n template_mode='755',\n template_attrs='--------------e----',\n saltenv=None,\n template_engine='jinja',\n skip_verify=False,\n defaults=None,\n test=False,\n commit=True,\n debug=False,\n replace=False,\n **template_vars):\n '''\n Call the necessary functions in order to execute the state.\n For the moment this only calls the ``net.load_template`` function from the\n :mod:`Network-related basic features execution module <salt.modules.napalm_network>`, but this may change in time.\n '''\n\n return __salt__['net.load_template'](template_name,\n template_source=template_source,\n template_hash=template_hash,\n template_hash_name=template_hash_name,\n template_user=template_user,\n template_group=template_group,\n template_mode=template_mode,\n template_attrs=template_attrs,\n saltenv=saltenv,\n template_engine=template_engine,\n skip_verify=skip_verify,\n defaults=defaults,\n test=test,\n commit=commit,\n debug=debug,\n replace=replace,\n **template_vars)\n"
] |
# -*- coding: utf-8 -*-
'''
Network Config
==============
Manage the configuration on a network device given a specific static config or template.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com> & Jerome Fleury <jf@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`NAPALM proxy minion <salt.proxy.napalm>`
- :mod:`Network-related basic features execution module <salt.modules.napalm_network>`
.. versionadded:: 2017.7.0
'''
# Import Salt libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
# import Salt libs
import salt.utils.napalm
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'netconfig'
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
    '''
    Only load this state module on (proxy) minions where the NAPALM library
    is installed; delegate the check to the shared NAPALM utility.
    '''
    napalm_compatible = salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
    return napalm_compatible
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _update_config(template_name,
                   template_source=None,
                   template_hash=None,
                   template_hash_name=None,
                   template_user='root',
                   template_group='root',
                   template_mode='755',
                   template_attrs='--------------e----',
                   saltenv=None,
                   template_engine='jinja',
                   skip_verify=False,
                   defaults=None,
                   test=False,
                   commit=True,
                   debug=False,
                   replace=False,
                   **template_vars):
    '''
    Helper that executes the actual work of the state.

    For the moment this only forwards everything to the ``net.load_template``
    function from the :mod:`Network-related basic features execution module
    <salt.modules.napalm_network>`, but this may change in time.
    '''
    load_template = __salt__['net.load_template']
    return load_template(template_name,
                         template_source=template_source,
                         template_hash=template_hash,
                         template_hash_name=template_hash_name,
                         template_user=template_user,
                         template_group=template_group,
                         template_mode=template_mode,
                         template_attrs=template_attrs,
                         saltenv=saltenv,
                         template_engine=template_engine,
                         skip_verify=skip_verify,
                         defaults=defaults,
                         test=test,
                         commit=commit,
                         debug=debug,
                         replace=replace,
                         **template_vars)
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def replace_pattern(name,
                    pattern,
                    repl,
                    count=0,
                    flags=8,
                    bufsize=1,
                    append_if_not_found=False,
                    prepend_if_not_found=False,
                    not_found_content=None,
                    search_only=False,
                    show_changes=True,
                    backslash_literal=False,
                    source='running',
                    path=None,
                    test=False,
                    replace=True,
                    debug=False,
                    commit=True):
    '''
    .. versionadded:: 2019.2.0

    Replace occurrences of a pattern in the configuration source. If
    ``show_changes`` is ``True``, then a diff of what changed will be returned,
    otherwise a ``True`` will be returned when changes are made, and ``False``
    when no changes are made.
    This is a pure Python implementation that wraps Python's :py:func:`~re.sub`.

    pattern
        A regular expression, to be matched using Python's
        :py:func:`~re.search`.

    repl
        The replacement text.

    count: ``0``
        Maximum number of pattern occurrences to be replaced. If count is a
        positive integer ``n``, only ``n`` occurrences will be replaced,
        otherwise all occurrences will be replaced.

    flags (list or int): ``8``
        A list of flags defined in the ``re`` module documentation from the
        Python standard library. Each list item should be a string that will
        correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
        'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
        corresponding to the bitwise OR (``|``) of all the desired flags.
        Defaults to 8 (which corresponds to ``re.MULTILINE``).

    bufsize (int or str): ``1``
        How much of the configuration to buffer into memory at once. The
        default value ``1`` processes one line at a time. The special value
        ``file`` may be specified which will read the entire file into memory
        before processing.

    append_if_not_found: ``False``
        If set to ``True``, and pattern is not found, then the content will be
        appended to the file.

    prepend_if_not_found: ``False``
        If set to ``True`` and pattern is not found, then the content will be
        prepended to the file.

    not_found_content
        Content to use for append/prepend if not found. If None (default), uses
        ``repl``. Useful when ``repl`` uses references to group in pattern.

    search_only: ``False``
        If set to true, no changes will be performed on the file, and this
        function will simply return ``True`` if the pattern was matched, and
        ``False`` if not.

    show_changes: ``True``
        If ``True``, return a diff of changes made. Otherwise, return ``True``
        if changes were made, and ``False`` if not.

    backslash_literal: ``False``
        Interpret backslashes as literal backslashes for the repl and not
        escape characters. This will help when using append/prepend so that
        the backslashes are not interpreted for the repl on the second run of
        the state.

    source: ``running``
        The configuration source. Choose from: ``running``, ``candidate``, or
        ``startup``. Default: ``running``.

    path
        Save the temporary configuration to a specific path, then read from
        there.

    test: ``False``
        Dry run? If set as ``True``, will apply the config, discard and return
        the changes. Default: ``False`` and will commit the changes on the
        device.

    commit: ``True``
        Commit the configuration changes? Default: ``True``.

    debug: ``False``
        Debug mode. Will insert a new key in the output dictionary, as
        ``loaded_config`` containing the raw configuration loaded on the device.

    replace: ``True``
        Load and replace the configuration. Default: ``True``.

    State SLS Example:

    .. code-block:: yaml

        update_policy_name:
          netconfig.replace_pattern:
            - pattern: OLD-POLICY-NAME
            - repl: new-policy-name
            - debug: true
    '''
    ret = salt.utils.napalm.default_ret(name)
    # The user can override these flags via the equivalent CLI args,
    # which have higher precedence than the state arguments.
    test = __salt__['config.merge']('test', test)
    debug = __salt__['config.merge']('debug', debug)
    commit = __salt__['config.merge']('commit', commit)
    replace = __salt__['config.merge']('replace', replace)  # this might be a bit risky
    replace_ret = __salt__['net.replace_pattern'](pattern,
                                                  repl,
                                                  count=count,
                                                  flags=flags,
                                                  bufsize=bufsize,
                                                  append_if_not_found=append_if_not_found,
                                                  prepend_if_not_found=prepend_if_not_found,
                                                  not_found_content=not_found_content,
                                                  search_only=search_only,
                                                  show_changes=show_changes,
                                                  backslash_literal=backslash_literal,
                                                  source=source,
                                                  path=path,
                                                  test=test,
                                                  replace=replace,
                                                  debug=debug,
                                                  commit=commit)
    return salt.utils.napalm.loaded_ret(ret, replace_ret, test, debug)
def saved(name,
          source='running',
          user=None,
          group=None,
          mode=None,
          attrs=None,
          makedirs=False,
          dir_mode=None,
          replace=True,
          backup='',
          show_changes=True,
          create=True,
          tmp_dir='',
          tmp_ext='',
          encoding=None,
          encoding_errors='strict',
          allow_empty=False,
          follow_symlinks=True,
          check_cmd=None,
          win_owner=None,
          win_perms=None,
          win_deny_perms=None,
          win_inheritance=True,
          win_perms_reset=False,
          **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Save the configuration to a file on the local file system.

    name
        Absolute path to file where to save the configuration.
        To push the files to the Master, use
        :mod:`cp.push <salt.modules.cp.push>` Execution function.

    source: ``running``
        The configuration source. Choose from: ``running``, ``candidate``,
        ``startup``. Default: ``running``.

    user
        The user to own the file, this defaults to the user salt is running as
        on the minion

    group
        The group ownership set for the file, this defaults to the group salt
        is running as on the minion. On Windows, this is ignored

    mode
        The permissions to set on this file, e.g. ``644``, ``0775``, or
        ``4664``.
        The default mode for new files and directories corresponds to the
        umask of the salt process. The mode of existing files and directories
        will only be changed if ``mode`` is specified.

        .. note::
            This option is **not** supported on Windows.

    attrs
        The attributes to have on this file, e.g. ``a``, ``i``. The attributes
        can be any or a combination of the following characters:
        ``aAcCdDeijPsStTu``.

        .. note::
            This option is **not** supported on Windows.

    makedirs: ``False``
        If set to ``True``, then the parent directories will be created to
        facilitate the creation of the named file. If ``False``, and the parent
        directory of the destination file doesn't exist, the state will fail.

    dir_mode
        If directories are to be created, passing this option specifies the
        permissions for those directories. If this is not set, directories
        will be assigned permissions by adding the execute bit to the mode of
        the files.
        The default mode for new files and directories corresponds to the
        umask of the salt process. For existing files and directories it's not
        enforced.

    replace: ``True``
        If set to ``False`` and the file already exists, the file will not be
        modified even if changes would otherwise be made. Permissions and
        ownership will still be enforced, however.

    backup
        Overrides the default backup mode for this specific file. See
        :ref:`backup_mode documentation <file-state-backups>` for more details.

    show_changes: ``True``
        Output a unified diff of the old file and the new file. If ``False``
        return a boolean if any changes were made.

    create: ``True``
        If set to ``False``, then the file will only be managed if the file
        already exists on the system.

    encoding
        If specified, then the specified encoding will be used. Otherwise, the
        file will be encoded using the system locale (usually UTF-8). See
        https://docs.python.org/3/library/codecs.html#standard-encodings for
        the list of available encodings.

    encoding_errors: ``'strict'``
        Error encoding scheme. Default is ``'strict'``.
        See https://docs.python.org/3/library/codecs.html#error-handlers
        for the list of available schemes.

    allow_empty: ``False``
        If set to ``False``, then the state will fail if the contents specified
        by ``contents_pillar`` or ``contents_grains`` are empty.

    follow_symlinks: ``True``
        If the desired path is a symlink follow it and make changes to the
        file to which the symlink points.

    check_cmd
        The specified command will be run with an appended argument of a
        *temporary* file containing the new managed contents. If the command
        exits with a zero status the new managed contents will be written to
        the managed destination. If the command exits with a nonzero exit
        code, the state will fail and no changes will be made to the file.

    tmp_dir
        Directory for temp file created by ``check_cmd``. Useful for checkers
        dependent on config file location (e.g. daemons restricted to their
        own config directories by an apparmor profile).

    tmp_ext
        Suffix for temp file created by ``check_cmd``. Useful for checkers
        dependent on config file extension (e.g. the init-checkconf upstart
        config checker).

    win_owner: ``None``
        The owner of the directory. If this is not passed, user will be used. If
        user is not passed, the account under which Salt is running will be
        used.

    win_perms: ``None``
        A dictionary containing permissions to grant and their propagation. For
        example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a
        single basic perm or a list of advanced perms. ``perms`` must be
        specified. ``applies_to`` does not apply to file objects.

    win_deny_perms: ``None``
        A dictionary containing permissions to deny and their propagation. For
        example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a
        single basic perm or a list of advanced perms. ``perms`` must be
        specified. ``applies_to`` does not apply to file objects.

    win_inheritance: ``True``
        True to inherit permissions from the parent directory, False not to
        inherit permission.

    win_perms_reset: ``False``
        If ``True`` the existing DACL will be cleared and replaced with the
        settings defined in this function. If ``False``, new entries will be
        appended to the existing DACL. Default is ``False``.

    State SLS Example:

    .. code-block:: yaml

        /var/backups/{{ opts.id }}/{{ salt.status.time('%s') }}.cfg:
          netconfig.saved:
            - source: running
            - makedirs: true

    The state SLS above would create a backup config grouping the files by the
    Minion ID, in chronological files. For example, if the state is executed
    on the 3rd of August 2018, at 5:15PM, on the Minion ``core01.lon01``, the
    configuration would be saved in the file:
    ``/var/backups/core01.lon01/1533316558.cfg``
    '''
    # Fetch the requested configuration source from the device first; if the
    # retrieval fails we bail out without touching the destination file.
    ret = __salt__['net.config'](source=source)
    if not ret['result']:
        return {
            'name': name,
            'changes': {},
            'result': False,
            'comment': ret['comment']
        }
    # Delegate the actual file management (diffing, backups, permissions) to
    # the file.managed state, feeding it the retrieved configuration text.
    return __states__['file.managed'](name,
                                      user=user,
                                      group=group,
                                      mode=mode,
                                      attrs=attrs,
                                      makedirs=makedirs,
                                      dir_mode=dir_mode,
                                      replace=replace,
                                      backup=backup,
                                      show_changes=show_changes,
                                      create=create,
                                      contents=ret['out'][source],
                                      tmp_dir=tmp_dir,
                                      tmp_ext=tmp_ext,
                                      encoding=encoding,
                                      encoding_errors=encoding_errors,
                                      allow_empty=allow_empty,
                                      follow_symlinks=follow_symlinks,
                                      check_cmd=check_cmd,
                                      win_owner=win_owner,
                                      win_perms=win_perms,
                                      win_deny_perms=win_deny_perms,
                                      win_inheritance=win_inheritance,
                                      win_perms_reset=win_perms_reset,
                                      **kwargs)
def commit_cancelled(name):
    '''
    .. versionadded:: 2019.2.0

    Cancel a commit previously scheduled via the ``commit_in`` and
    ``commit_at`` arguments of the
    :py:func:`net.load_template <salt.modules.napalm_network.load_template>` or
    :py:func:`net.load_config <salt.modules.napalm_network.load_config>`
    execution functions. ``name`` is the commit ID that was displayed when
    the commit was scheduled.

    State SLS Example:

    .. code-block:: yaml

        '20180726083540640360':
          netconfig.commit_cancelled
    '''
    state_ret = {
        'name': name,
        'result': None,
        'changes': {},
        'comment': ''
    }
    # In test mode, only report what would happen -- do not touch the device.
    if __opts__['test']:
        state_ret['comment'] = 'It would cancel commit #{}'.format(name)
        return state_ret
    state_ret.update(__salt__['net.cancel_commit'](name))
    return state_ret
def commit_confirmed(name):
    '''
    .. versionadded:: 2019.2.0

    Confirm a commit scheduled to be reverted via the ``revert_in`` and
    ``revert_at`` arguments of the
    :mod:`net.load_template <salt.modules.napalm_network.load_template>` or
    :mod:`net.load_config <salt.modules.napalm_network.load_config>`
    execution functions. ``name`` is the commit ID that was displayed when
    the confirmed commit was scheduled.

    State SLS Example:

    .. code-block:: yaml

        '20180726083540640360':
          netconfig.commit_confirmed
    '''
    state_ret = {
        'name': name,
        'result': None,
        'changes': {},
        'comment': ''
    }
    # In test mode, only report what would happen -- do not touch the device.
    if __opts__['test']:
        state_ret['comment'] = 'It would confirm commit #{}'.format(name)
        return state_ret
    state_ret.update(__salt__['net.confirm_commit'](name))
    return state_ret
|
saltstack/salt
|
salt/states/netconfig.py
|
commit_cancelled
|
python
|
def commit_cancelled(name):
'''
.. versionadded:: 2019.2.0
Cancel a commit scheduled to be executed via the ``commit_in`` and
``commit_at`` arguments from the
:py:func:`net.load_template <salt.modules.napalm_network.load_template>` or
:py:func:`net.load_config <salt.modules.napalm_network.load_config>`
execution functions. The commit ID is displayed when the commit is scheduled
via the functions named above.
State SLS Example:
.. code-block:: yaml
'20180726083540640360':
netconfig.commit_cancelled
'''
cancelled = {
'name': name,
'result': None,
'changes': {},
'comment': ''
}
if __opts__['test']:
cancelled['comment'] = 'It would cancel commit #{}'.format(name)
return cancelled
ret = __salt__['net.cancel_commit'](name)
cancelled.update(ret)
return cancelled
|
.. versionadded:: 2019.2.0
Cancel a commit scheduled to be executed via the ``commit_in`` and
``commit_at`` arguments from the
:py:func:`net.load_template <salt.modules.napalm_network.load_template>` or
:py:func:`net.load_config <salt.modules.napalm_network.load_config>`
execution functions. The commit ID is displayed when the commit is scheduled
via the functions named above.
State SLS Example:
.. code-block:: yaml
'20180726083540640360':
netconfig.commit_cancelled
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/netconfig.py#L851-L880
| null |
# -*- coding: utf-8 -*-
'''
Network Config
==============
Manage the configuration on a network device given a specific static config or template.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com> & Jerome Fleury <jf@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`NAPALM proxy minion <salt.proxy.napalm>`
- :mod:`Network-related basic features execution module <salt.modules.napalm_network>`
.. versionadded:: 2017.7.0
'''
# Import Salt libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
# import Salt libs
import salt.utils.napalm
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'netconfig'
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
    '''
    Load this state module only when the NAPALM library is installed and we
    are running inside a (proxy) minion.
    '''
    is_loadable = salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
    return is_loadable
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _update_config(
        template_name,
        template_source=None,
        template_hash=None,
        template_hash_name=None,
        template_user='root',
        template_group='root',
        template_mode='755',
        template_attrs='--------------e----',
        saltenv=None,
        template_engine='jinja',
        skip_verify=False,
        defaults=None,
        test=False,
        commit=True,
        debug=False,
        replace=False,
        **template_vars):
    '''
    Do the actual work for the state functions in this module.

    Currently this is a thin forwarder around the ``net.load_template``
    execution function from the
    :mod:`Network-related basic features execution module <salt.modules.napalm_network>`;
    this may change over time.
    '''
    # Forward every argument untouched; ``template_vars`` are passed through
    # as additional keyword arguments for the template rendering.
    return __salt__['net.load_template'](
        template_name,
        template_source=template_source,
        template_hash=template_hash,
        template_hash_name=template_hash_name,
        template_user=template_user,
        template_group=template_group,
        template_mode=template_mode,
        template_attrs=template_attrs,
        saltenv=saltenv,
        template_engine=template_engine,
        skip_verify=skip_verify,
        defaults=defaults,
        test=test,
        commit=commit,
        debug=debug,
        replace=replace,
        **template_vars)
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def replace_pattern(name,
                    pattern,
                    repl,
                    count=0,
                    flags=8,
                    bufsize=1,
                    append_if_not_found=False,
                    prepend_if_not_found=False,
                    not_found_content=None,
                    search_only=False,
                    show_changes=True,
                    backslash_literal=False,
                    source='running',
                    path=None,
                    test=False,
                    replace=True,
                    debug=False,
                    commit=True):
    '''
    .. versionadded:: 2019.2.0

    Replace occurrences of a pattern in the configuration source. If
    ``show_changes`` is ``True``, then a diff of what changed will be returned,
    otherwise a ``True`` will be returned when changes are made, and ``False``
    when no changes are made.
    This is a pure Python implementation that wraps Python's :py:func:`~re.sub`.

    pattern
        A regular expression, to be matched using Python's
        :py:func:`~re.search`.

    repl
        The replacement text.

    count: ``0``
        Maximum number of pattern occurrences to be replaced. If count is a
        positive integer ``n``, only ``n`` occurrences will be replaced,
        otherwise all occurrences will be replaced.

    flags (list or int): ``8``
        A list of flags defined in the ``re`` module documentation from the
        Python standard library. Each list item should be a string that will
        correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
        'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
        corresponding to the bitwise OR (``|``) of all the desired flags.
        Defaults to 8 (which corresponds to ``re.MULTILINE``).

    bufsize (int or str): ``1``
        How much of the configuration to buffer into memory at once. The
        default value ``1`` processes one line at a time. The special value
        ``file`` may be specified which will read the entire file into memory
        before processing.

    append_if_not_found: ``False``
        If set to ``True``, and pattern is not found, then the content will be
        appended to the file.

    prepend_if_not_found: ``False``
        If set to ``True`` and pattern is not found, then the content will be
        prepended to the file.

    not_found_content
        Content to use for append/prepend if not found. If None (default), uses
        ``repl``. Useful when ``repl`` uses references to group in pattern.

    search_only: ``False``
        If set to true, no changes will be performed on the file, and this
        function will simply return ``True`` if the pattern was matched, and
        ``False`` if not.

    show_changes: ``True``
        If ``True``, return a diff of changes made. Otherwise, return ``True``
        if changes were made, and ``False`` if not.

    backslash_literal: ``False``
        Interpret backslashes as literal backslashes for the repl and not
        escape characters. This will help when using append/prepend so that
        the backslashes are not interpreted for the repl on the second run of
        the state.

    source: ``running``
        The configuration source. Choose from: ``running``, ``candidate``, or
        ``startup``. Default: ``running``.

    path
        Save the temporary configuration to a specific path, then read from
        there.

    test: ``False``
        Dry run? If set as ``True``, will apply the config, discard and return
        the changes. Default: ``False`` and will commit the changes on the
        device.

    commit: ``True``
        Commit the configuration changes? Default: ``True``.

    debug: ``False``
        Debug mode. Will insert a new key in the output dictionary, as
        ``loaded_config`` containing the raw configuration loaded on the device.

    replace: ``True``
        Load and replace the configuration. Default: ``True``.

    State SLS Example:

    .. code-block:: yaml

        update_policy_name:
          netconfig.replace_pattern:
            - pattern: OLD-POLICY-NAME
            - repl: new-policy-name
            - debug: true
    '''
    ret = salt.utils.napalm.default_ret(name)
    # The user can override these flags via the equivalent CLI args,
    # which have higher precedence than the state arguments.
    test = __salt__['config.merge']('test', test)
    debug = __salt__['config.merge']('debug', debug)
    commit = __salt__['config.merge']('commit', commit)
    replace = __salt__['config.merge']('replace', replace)  # this might be a bit risky
    replace_ret = __salt__['net.replace_pattern'](pattern,
                                                  repl,
                                                  count=count,
                                                  flags=flags,
                                                  bufsize=bufsize,
                                                  append_if_not_found=append_if_not_found,
                                                  prepend_if_not_found=prepend_if_not_found,
                                                  not_found_content=not_found_content,
                                                  search_only=search_only,
                                                  show_changes=show_changes,
                                                  backslash_literal=backslash_literal,
                                                  source=source,
                                                  path=path,
                                                  test=test,
                                                  replace=replace,
                                                  debug=debug,
                                                  commit=commit)
    return salt.utils.napalm.loaded_ret(ret, replace_ret, test, debug)
def saved(name,
          source='running',
          user=None,
          group=None,
          mode=None,
          attrs=None,
          makedirs=False,
          dir_mode=None,
          replace=True,
          backup='',
          show_changes=True,
          create=True,
          tmp_dir='',
          tmp_ext='',
          encoding=None,
          encoding_errors='strict',
          allow_empty=False,
          follow_symlinks=True,
          check_cmd=None,
          win_owner=None,
          win_perms=None,
          win_deny_perms=None,
          win_inheritance=True,
          win_perms_reset=False,
          **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Save the configuration to a file on the local file system.

    name
        Absolute path to file where to save the configuration.
        To push the files to the Master, use
        :mod:`cp.push <salt.modules.cp.push>` Execution function.

    source: ``running``
        The configuration source. Choose from: ``running``, ``candidate``,
        ``startup``. Default: ``running``.

    user
        The user to own the file, this defaults to the user salt is running as
        on the minion

    group
        The group ownership set for the file, this defaults to the group salt
        is running as on the minion. On Windows, this is ignored

    mode
        The permissions to set on this file, e.g. ``644``, ``0775``, or
        ``4664``.
        The default mode for new files and directories corresponds to the
        umask of the salt process. The mode of existing files and directories
        will only be changed if ``mode`` is specified.

        .. note::
            This option is **not** supported on Windows.

    attrs
        The attributes to have on this file, e.g. ``a``, ``i``. The attributes
        can be any or a combination of the following characters:
        ``aAcCdDeijPsStTu``.

        .. note::
            This option is **not** supported on Windows.

    makedirs: ``False``
        If set to ``True``, then the parent directories will be created to
        facilitate the creation of the named file. If ``False``, and the parent
        directory of the destination file doesn't exist, the state will fail.

    dir_mode
        If directories are to be created, passing this option specifies the
        permissions for those directories. If this is not set, directories
        will be assigned permissions by adding the execute bit to the mode of
        the files.
        The default mode for new files and directories corresponds to the
        umask of the salt process. For existing files and directories it's not
        enforced.

    replace: ``True``
        If set to ``False`` and the file already exists, the file will not be
        modified even if changes would otherwise be made. Permissions and
        ownership will still be enforced, however.

    backup
        Overrides the default backup mode for this specific file. See
        :ref:`backup_mode documentation <file-state-backups>` for more details.

    show_changes: ``True``
        Output a unified diff of the old file and the new file. If ``False``
        return a boolean if any changes were made.

    create: ``True``
        If set to ``False``, then the file will only be managed if the file
        already exists on the system.

    encoding
        If specified, then the specified encoding will be used. Otherwise, the
        file will be encoded using the system locale (usually UTF-8). See
        https://docs.python.org/3/library/codecs.html#standard-encodings for
        the list of available encodings.

    encoding_errors: ``'strict'``
        Error encoding scheme. Default is ``'strict'``.
        See https://docs.python.org/3/library/codecs.html#error-handlers
        for the list of available schemes.

    allow_empty: ``False``
        If set to ``False``, then the state will fail if the contents specified
        by ``contents_pillar`` or ``contents_grains`` are empty.

    follow_symlinks: ``True``
        If the desired path is a symlink follow it and make changes to the
        file to which the symlink points.

    check_cmd
        The specified command will be run with an appended argument of a
        *temporary* file containing the new managed contents. If the command
        exits with a zero status the new managed contents will be written to
        the managed destination. If the command exits with a nonzero exit
        code, the state will fail and no changes will be made to the file.

    tmp_dir
        Directory for temp file created by ``check_cmd``. Useful for checkers
        dependent on config file location (e.g. daemons restricted to their
        own config directories by an apparmor profile).

    tmp_ext
        Suffix for temp file created by ``check_cmd``. Useful for checkers
        dependent on config file extension (e.g. the init-checkconf upstart
        config checker).

    win_owner: ``None``
        The owner of the directory. If this is not passed, user will be used. If
        user is not passed, the account under which Salt is running will be
        used.

    win_perms: ``None``
        A dictionary containing permissions to grant and their propagation. For
        example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a
        single basic perm or a list of advanced perms. ``perms`` must be
        specified. ``applies_to`` does not apply to file objects.

    win_deny_perms: ``None``
        A dictionary containing permissions to deny and their propagation. For
        example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a
        single basic perm or a list of advanced perms. ``perms`` must be
        specified. ``applies_to`` does not apply to file objects.

    win_inheritance: ``True``
        True to inherit permissions from the parent directory, False not to
        inherit permission.

    win_perms_reset: ``False``
        If ``True`` the existing DACL will be cleared and replaced with the
        settings defined in this function. If ``False``, new entries will be
        appended to the existing DACL. Default is ``False``.

    State SLS Example:

    .. code-block:: yaml

        /var/backups/{{ opts.id }}/{{ salt.status.time('%s') }}.cfg:
          netconfig.saved:
            - source: running
            - makedirs: true

    The state SLS above would create a backup config grouping the files by the
    Minion ID, in chronological files. For example, if the state is executed
    on the 3rd of August 2018, at 5:15PM, on the Minion ``core01.lon01``, the
    configuration would be saved in the file:
    ``/var/backups/core01.lon01/1533316558.cfg``
    '''
    # Fetch the requested configuration source from the device first; if the
    # retrieval fails we bail out without touching the destination file.
    ret = __salt__['net.config'](source=source)
    if not ret['result']:
        return {
            'name': name,
            'changes': {},
            'result': False,
            'comment': ret['comment']
        }
    # Delegate the actual file management (diffing, backups, permissions) to
    # the file.managed state, feeding it the retrieved configuration text.
    return __states__['file.managed'](name,
                                      user=user,
                                      group=group,
                                      mode=mode,
                                      attrs=attrs,
                                      makedirs=makedirs,
                                      dir_mode=dir_mode,
                                      replace=replace,
                                      backup=backup,
                                      show_changes=show_changes,
                                      create=create,
                                      contents=ret['out'][source],
                                      tmp_dir=tmp_dir,
                                      tmp_ext=tmp_ext,
                                      encoding=encoding,
                                      encoding_errors=encoding_errors,
                                      allow_empty=allow_empty,
                                      follow_symlinks=follow_symlinks,
                                      check_cmd=check_cmd,
                                      win_owner=win_owner,
                                      win_perms=win_perms,
                                      win_deny_perms=win_deny_perms,
                                      win_inheritance=win_inheritance,
                                      win_perms_reset=win_perms_reset,
                                      **kwargs)
def managed(name,
template_name=None,
template_source=None,
template_hash=None,
template_hash_name=None,
saltenv='base',
template_engine='jinja',
skip_verify=False,
context=None,
defaults=None,
test=False,
commit=True,
debug=False,
replace=False,
commit_in=None,
commit_at=None,
revert_in=None,
revert_at=None,
**template_vars):
'''
Manages the configuration on network devices.
By default this state will commit the changes on the device. If there are no changes required, it does not commit
and the field ``already_configured`` from the output dictionary will be set as ``True`` to notify that.
To avoid committing the configuration, set the argument ``test`` to ``True`` (or via the CLI argument ``test=True``)
and will discard (dry run).
To preserve the changes, set ``commit`` to ``False`` (either as CLI argument, either as state parameter).
However, this is recommended to be used only in exceptional cases when there are applied few consecutive states
and/or configuration changes. Otherwise the user might forget that the config DB is locked and the candidate config
buffer is not cleared/merged in the running config.
To replace the config, set ``replace`` to ``True``. This option is recommended to be used with caution!
template_name
Identifies path to the template source. The template can be either stored on the local machine,
either remotely.
The recommended location is under the ``file_roots`` as specified in the master config file.
For example, let's suppose the ``file_roots`` is configured as:
.. code-block:: yaml
file_roots:
base:
- /etc/salt/states
Placing the template under ``/etc/salt/states/templates/example.jinja``, it can be used as
``salt://templates/example.jinja``.
Alternatively, for local files, the user can specify the absolute path.
If remotely, the source can be retrieved via ``http``, ``https`` or ``ftp``.
Examples:
- ``salt://my_template.jinja``
- ``/absolute/path/to/my_template.jinja``
- ``http://example.com/template.cheetah``
- ``https:/example.com/template.mako``
- ``ftp://example.com/template.py``
.. versionchanged:: 2019.2.0
This argument can now support a list of templates to be rendered.
The resulting configuration text is loaded at once, as a single
configuration chunk.
template_source: None
Inline config template to be rendered and loaded on the device.
template_hash: None
Hash of the template file. Format: ``{hash_type: 'md5', 'hsum': <md5sum>}``
template_hash_name: None
When ``template_hash`` refers to a remote file, this specifies the filename to look for in that file.
saltenv: base
Specifies the template environment. This will influence the relative imports inside the templates.
template_engine: jinja
The following templates engines are supported:
- :mod:`cheetah<salt.renderers.cheetah>`
- :mod:`genshi<salt.renderers.genshi>`
- :mod:`jinja<salt.renderers.jinja>`
- :mod:`mako<salt.renderers.mako>`
- :mod:`py<salt.renderers.py>`
- :mod:`wempy<salt.renderers.wempy>`
skip_verify: False
If ``True``, hash verification of remote file sources (``http://``, ``https://``, ``ftp://``) will be skipped,
and the ``source_hash`` argument will be ignored.
.. versionchanged:: 2017.7.1
test: False
Dry run? If set to ``True``, will apply the config, discard and return the changes. Default: ``False``
(will commit the changes on the device).
commit: True
Commit? Default: ``True``.
debug: False
Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` containing the raw
result after the template was rendered.
.. note::
This argument cannot be used directly on the command line. Instead,
it can be passed through the ``pillar`` variable when executing
either of the :py:func:`state.sls <salt.modules.state.sls>` or
:py:func:`state.apply <salt.modules.state.apply>` (see below for an
example).
commit_in: ``None``
Commit the changes in a specific number of minutes / hours. Example of
accepted formats: ``5`` (commit in 5 minutes), ``2m`` (commit in 2
minutes), ``1h`` (commit the changes in 1 hour)`, ``5h30m`` (commit
the changes in 5 hours and 30 minutes).
.. note::
This feature works on any platforms, as it does not rely on the
native features of the network operating system.
.. note::
After the command is executed and the ``diff`` is not satisfactory,
or for any other reasons you have to discard the commit, you are
able to do so using the
:py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>`
execution function, using the commit ID returned by this function.
.. warning::
Using this feature, Salt will load the exact configuration you
expect, however the diff may change in time (i.e., if a user
applies a manual configuration change, or a different process or
command changes the configuration in the meanwhile).
.. versionadded:: 2019.2.0
commit_at: ``None``
Commit the changes at a specific time. Example of accepted formats:
``1am`` (will commit the changes at the next 1AM), ``13:20`` (will
commit at 13:20), ``1:20am``, etc.
.. note::
This feature works on any platforms, as it does not rely on the
native features of the network operating system.
.. note::
After the command is executed and the ``diff`` is not satisfactory,
or for any other reasons you have to discard the commit, you are
able to do so using the
:py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>`
execution function, using the commit ID returned by this function.
.. warning::
Using this feature, Salt will load the exact configuration you
expect, however the diff may change in time (i.e., if a user
applies a manual configuration change, or a different process or
command changes the configuration in the meanwhile).
.. versionadded:: 2019.2.0
revert_in: ``None``
Commit and revert the changes in a specific number of minutes / hours.
Example of accepted formats: ``5`` (revert in 5 minutes), ``2m`` (revert
in 2 minutes), ``1h`` (revert the changes in 1 hour)`, ``5h30m`` (revert
the changes in 5 hours and 30 minutes).
.. note::
To confirm the commit, and prevent reverting the changes, you will
have to execute the
:mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>`
function, using the commit ID returned by this function.
.. warning::
This works on any platform, regardless if they have or don't have
native capabilities to confirming a commit. However, please be
*very* cautious when using this feature: on Junos (as it is the only
NAPALM core platform supporting this natively) it executes a commit
confirmed as you would do from the command line.
All the other platforms don't have this capability natively,
therefore the revert is done via Salt. That means, your device needs
to be reachable at the moment when Salt will attempt to revert your
changes. Be cautious when pushing configuration changes that would
prevent you reach the device.
Similarly, if a user or a different process applies other
configuration changes in the meanwhile (between the moment you
commit and till the changes are reverted), these changes would be
equally reverted, as Salt cannot be aware of them.
.. versionadded:: 2019.2.0
revert_at: ``None``
Commit and revert the changes at a specific time. Example of accepted
formats: ``1am`` (will commit and revert the changes at the next 1AM),
``13:20`` (will commit and revert at 13:20), ``1:20am``, etc.
.. note::
To confirm the commit, and prevent reverting the changes, you will
have to execute the
:mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>`
function, using the commit ID returned by this function.
.. warning::
This works on any platform, regardless if they have or don't have
native capabilities to confirming a commit. However, please be
*very* cautious when using this feature: on Junos (as it is the only
NAPALM core platform supporting this natively) it executes a commit
confirmed as you would do from the command line.
All the other platforms don't have this capability natively,
therefore the revert is done via Salt. That means, your device needs
to be reachable at the moment when Salt will attempt to revert your
changes. Be cautious when pushing configuration changes that would
prevent you reach the device.
Similarly, if a user or a different process applies other
configuration changes in the meanwhile (between the moment you
commit and till the changes are reverted), these changes would be
equally reverted, as Salt cannot be aware of them.
.. versionadded:: 2019.2.0
replace: False
Load and replace the configuration. Default: ``False`` (will apply load merge).
context: None
Overrides default context variables passed to the template.
.. versionadded:: 2019.2.0
defaults: None
Default variables/context passed to the template.
template_vars
Dictionary with the arguments/context to be used when the template is rendered. Do not explicitly specify this
argument. This represents any other variable that will be sent to the template rendering system. Please
see an example below! In both ``ntp_peers_example_using_pillar`` and ``ntp_peers_example``, ``peers`` is sent as
template variable.
.. note::
It is more recommended to use the ``context`` argument instead, to
avoid any conflicts with other arguments.
SLS Example (e.g.: under salt://router/config.sls) :
.. code-block:: yaml
whole_config_example:
netconfig.managed:
- template_name: salt://path/to/complete_config.jinja
- debug: True
- replace: True
bgp_config_example:
netconfig.managed:
- template_name: /absolute/path/to/bgp_neighbors.mako
- template_engine: mako
prefix_lists_example:
netconfig.managed:
- template_name: prefix_lists.cheetah
- debug: True
- template_engine: cheetah
ntp_peers_example:
netconfig.managed:
- template_name: http://bit.ly/2gKOj20
- skip_verify: False
- debug: True
- peers:
- 192.168.0.1
- 192.168.0.1
ntp_peers_example_using_pillar:
netconfig.managed:
- template_name: http://bit.ly/2gKOj20
- peers: {{ pillar.get('ntp.peers', []) }}
Multi template example:
.. code-block:: yaml
hostname_and_ntp:
netconfig.managed:
- template_name:
- https://bit.ly/2OhSgqP
- https://bit.ly/2M6C4Lx
- https://bit.ly/2OIWVTs
- debug: true
- context:
hostname: {{ opts.id }}
servers:
- 172.17.17.1
- 172.17.17.2
peers:
- 192.168.0.1
- 192.168.0.2
Usage examples:
.. code-block:: bash
$ sudo salt 'juniper.device' state.sls router.config test=True
$ sudo salt -N all-routers state.sls router.config pillar="{'debug': True}"
``router.config`` depends on the location of the SLS file (see above). Running this command, will be executed all
five steps from above. These examples above are not meant to be used in a production environment, their sole purpose
is to provide usage examples.
Output example:
.. code-block:: bash
$ sudo salt 'juniper.device' state.sls router.config test=True
juniper.device:
----------
ID: ntp_peers_example_using_pillar
Function: netconfig.managed
Result: None
Comment: Testing mode: Configuration discarded.
Started: 12:01:40.744535
Duration: 8755.788 ms
Changes:
----------
diff:
[edit system ntp]
peer 192.168.0.1 { ... }
+ peer 172.17.17.1;
+ peer 172.17.17.3;
Summary for juniper.device
------------
Succeeded: 1 (unchanged=1, changed=1)
Failed: 0
------------
Total states run: 1
Total run time: 8.756 s
Raw output example (useful when the output is reused in other states/execution modules):
.. code-block:: bash
$ sudo salt --out=pprint 'juniper.device' state.sls router.config test=True debug=True
.. code-block:: python
{
'juniper.device': {
'netconfig_|-ntp_peers_example_using_pillar_|-ntp_peers_example_using_pillar_|-managed': {
'__id__': 'ntp_peers_example_using_pillar',
'__run_num__': 0,
'already_configured': False,
'changes': {
'diff': '[edit system ntp] peer 192.168.0.1 { ... }+ peer 172.17.17.1;+ peer 172.17.17.3;'
},
'comment': 'Testing mode: Configuration discarded.',
'duration': 7400.759,
'loaded_config': 'system { ntp { peer 172.17.17.1; peer 172.17.17.3; } }',
'name': 'ntp_peers_example_using_pillar',
'result': None,
'start_time': '12:09:09.811445'
}
}
}
'''
ret = salt.utils.napalm.default_ret(name)
# the user can override the flags the equivalent CLI args
# which have higher precedence
test = __salt__['config.merge']('test', test)
debug = __salt__['config.merge']('debug', debug)
commit = __salt__['config.merge']('commit', commit)
replace = __salt__['config.merge']('replace', replace) # this might be a bit risky
skip_verify = __salt__['config.merge']('skip_verify', skip_verify)
commit_in = __salt__['config.merge']('commit_in', commit_in)
commit_at = __salt__['config.merge']('commit_at', commit_at)
revert_in = __salt__['config.merge']('revert_in', revert_in)
revert_at = __salt__['config.merge']('revert_at', revert_at)
config_update_ret = _update_config(template_name=template_name,
template_source=template_source,
template_hash=template_hash,
template_hash_name=template_hash_name,
saltenv=saltenv,
template_engine=template_engine,
skip_verify=skip_verify,
context=context,
defaults=defaults,
test=test,
commit=commit,
commit_in=commit_in,
commit_at=commit_at,
revert_in=revert_in,
revert_at=revert_at,
debug=debug,
replace=replace,
**template_vars)
return salt.utils.napalm.loaded_ret(ret, config_update_ret, test, debug)
def commit_confirmed(name):
    '''
    .. versionadded:: 2019.2.0

    Confirm a previously scheduled commit that would otherwise be reverted,
    i.e. one scheduled through the ``revert_in`` / ``revert_at`` arguments of
    the :mod:`net.load_template <salt.modules.napalm_network.load_template>`
    or :mod:`net.load_config <salt.modules.napalm_network.load_config>`
    execution functions. ``name`` is the commit ID that was displayed when
    the commit confirmed was scheduled.

    State SLS Example:

    .. code-block:: yaml

        '20180726083540640360':
          netconfig.commit_confirmed
    '''
    ret = {
        'name': name,
        'result': None,
        'changes': {},
        'comment': '',
    }
    # In test mode only report what would happen; do not touch the device.
    if __opts__['test']:
        ret['comment'] = 'It would confirm commit #{}'.format(name)
        return ret
    # Delegate the actual confirmation to the execution module and merge
    # its result (result/comment/changes) into the state return.
    ret.update(__salt__['net.confirm_commit'](name))
    return ret
|
saltstack/salt
|
salt/states/netconfig.py
|
commit_confirmed
|
python
|
def commit_confirmed(name):
'''
.. versionadded:: 2019.2.0
Confirm a commit scheduled to be reverted via the ``revert_in`` and
``revert_at`` arguments from the
:mod:`net.load_template <salt.modules.napalm_network.load_template>` or
:mod:`net.load_config <salt.modules.napalm_network.load_config>`
execution functions. The commit ID is displayed when the commit confirmed
is scheduled via the functions named above.
State SLS Example:
.. code-block:: yaml
'20180726083540640360':
netconfig.commit_confirmed
'''
confirmed = {
'name': name,
'result': None,
'changes': {},
'comment': ''
}
if __opts__['test']:
confirmed['comment'] = 'It would confirm commit #{}'.format(name)
return confirmed
ret = __salt__['net.confirm_commit'](name)
confirmed.update(ret)
return confirmed
|
.. versionadded:: 2019.2.0
Confirm a commit scheduled to be reverted via the ``revert_in`` and
``revert_at`` arguments from the
:mod:`net.load_template <salt.modules.napalm_network.load_template>` or
:mod:`net.load_config <salt.modules.napalm_network.load_config>`
execution functions. The commit ID is displayed when the commit confirmed
is scheduled via the functions named above.
State SLS Example:
.. code-block:: yaml
'20180726083540640360':
netconfig.commit_confirmed
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/netconfig.py#L883-L912
| null |
# -*- coding: utf-8 -*-
'''
Network Config
==============
Manage the configuration on a network device given a specific static config or template.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com> & Jerome Fleury <jf@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`NAPALM proxy minion <salt.proxy.napalm>`
- :mod:`Network-related basic features execution module <salt.modules.napalm_network>`
.. versionadded:: 2017.7.0
'''
# Import Salt libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
# import Salt libs
import salt.utils.napalm
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'netconfig'
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
    '''
    Make this state module available only when the NAPALM library is
    installed and we are running inside a (proxy) minion.
    '''
    # Delegate the availability decision to the shared NAPALM helper.
    return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _update_config(template_name,
                   template_source=None,
                   template_hash=None,
                   template_hash_name=None,
                   template_user='root',
                   template_group='root',
                   template_mode='755',
                   template_attrs='--------------e----',
                   saltenv=None,
                   template_engine='jinja',
                   skip_verify=False,
                   defaults=None,
                   test=False,
                   commit=True,
                   debug=False,
                   replace=False,
                   **template_vars):
    '''
    Perform the actual configuration update on behalf of the state
    functions.

    For the moment this is a thin wrapper that forwards everything to
    ``net.load_template`` from the :mod:`napalm_network
    <salt.modules.napalm_network>` execution module; the indirection exists
    so the implementation can change later without touching the states.
    Extra keyword arguments (``template_vars``) are passed straight through
    to the template rendering.
    '''
    load_template = __salt__['net.load_template']
    return load_template(template_name,
                         template_source=template_source,
                         template_hash=template_hash,
                         template_hash_name=template_hash_name,
                         template_user=template_user,
                         template_group=template_group,
                         template_mode=template_mode,
                         template_attrs=template_attrs,
                         saltenv=saltenv,
                         template_engine=template_engine,
                         skip_verify=skip_verify,
                         defaults=defaults,
                         test=test,
                         commit=commit,
                         debug=debug,
                         replace=replace,
                         **template_vars)
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def replace_pattern(name,
                    pattern,
                    repl,
                    count=0,
                    flags=8,
                    bufsize=1,
                    append_if_not_found=False,
                    prepend_if_not_found=False,
                    not_found_content=None,
                    search_only=False,
                    show_changes=True,
                    backslash_literal=False,
                    source='running',
                    path=None,
                    test=False,
                    replace=True,
                    debug=False,
                    commit=True):
    '''
    .. versionadded:: 2019.2.0

    Replace occurrences of a pattern in the configuration source. This is a
    pure Python implementation wrapping :py:func:`~re.sub`. When
    ``show_changes`` is ``True`` a diff of what changed is returned;
    otherwise ``True`` is returned when changes are made and ``False`` when
    no changes are made.

    pattern
        A regular expression, matched using Python's :py:func:`~re.search`.

    repl
        The replacement text.

    count: ``0``
        Maximum number of pattern occurrences to be replaced. A positive
        integer ``n`` replaces only ``n`` occurrences; ``0`` replaces all.

    flags (list or int): ``8``
        Flags from the ``re`` module, either as a list of human-friendly
        names (e.g. ``['IGNORECASE', 'MULTILINE']``) or as an int equal to
        the XOR (``|``) of the desired flag values. Defaults to ``8``
        (``MULTILINE``).

    bufsize (int or str): ``1``
        How much of the configuration to buffer into memory at once. The
        default ``1`` processes one line at a time; the special value
        ``file`` reads the entire file into memory before processing.

    append_if_not_found: ``False``
        If ``True`` and the pattern is not found, append the content.

    prepend_if_not_found: ``False``
        If ``True`` and the pattern is not found, prepend the content.

    not_found_content
        Content to use for append/prepend when the pattern is not found.
        Defaults to ``repl``; useful when ``repl`` uses group references
        into ``pattern``.

    search_only: ``False``
        If ``True``, perform no changes and simply return ``True`` when the
        pattern matches, ``False`` otherwise.

    show_changes: ``True``
        If ``True``, return a diff of the changes made; otherwise return a
        boolean indicating whether changes were made.

    backslash_literal: ``False``
        Interpret backslashes in ``repl`` literally instead of as escape
        characters. This helps with append/prepend so the backslashes are
        not re-interpreted on the second run of the state.

    source: ``running``
        The configuration source: ``running``, ``candidate`` or ``startup``.

    path
        Save the temporary configuration to a specific path, then read from
        there.

    test: ``False``
        Dry run: apply the config, discard it and return the changes.
        Default ``False`` (commit the changes on the device).

    commit: ``True``
        Commit the configuration changes. Default ``True``.

    debug: ``False``
        Debug mode: insert a ``loaded_config`` key in the output containing
        the raw configuration loaded on the device.

    replace: ``True``
        Load and replace the configuration. Default ``True``.

    State SLS Example:

    .. code-block:: yaml

        update_policy_name:
          netconfig.replace_pattern:
            - pattern: OLD-POLICY-NAME
            - repl: new-policy-name
            - debug: true
    '''
    ret = salt.utils.napalm.default_ret(name)
    # Equivalent CLI / minion-config options take precedence over the
    # arguments supplied to the state.
    merge_opt = __salt__['config.merge']
    test = merge_opt('test', test)
    debug = merge_opt('debug', debug)
    commit = merge_opt('commit', commit)
    replace = merge_opt('replace', replace)  # this might be a bit risky
    replace_ret = __salt__['net.replace_pattern'](
        pattern,
        repl,
        count=count,
        flags=flags,
        bufsize=bufsize,
        append_if_not_found=append_if_not_found,
        prepend_if_not_found=prepend_if_not_found,
        not_found_content=not_found_content,
        search_only=search_only,
        show_changes=show_changes,
        backslash_literal=backslash_literal,
        source=source,
        path=path,
        test=test,
        replace=replace,
        debug=debug,
        commit=commit)
    # Normalise the execution-module result into the state return format.
    return salt.utils.napalm.loaded_ret(ret, replace_ret, test, debug)
def saved(name,
          source='running',
          user=None,
          group=None,
          mode=None,
          attrs=None,
          makedirs=False,
          dir_mode=None,
          replace=True,
          backup='',
          show_changes=True,
          create=True,
          tmp_dir='',
          tmp_ext='',
          encoding=None,
          encoding_errors='strict',
          allow_empty=False,
          follow_symlinks=True,
          check_cmd=None,
          win_owner=None,
          win_perms=None,
          win_deny_perms=None,
          win_inheritance=True,
          win_perms_reset=False,
          **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Save the device configuration to a file on the local file system.
    The configuration is retrieved via ``net.config`` and the file itself is
    managed by delegating to the :mod:`file.managed
    <salt.states.file.managed>` state, so all the file-related arguments
    below have the same semantics as there. To push the files to the Master,
    use the :mod:`cp.push <salt.modules.cp.push>` execution function.

    name
        Absolute path of the file where the configuration is saved.

    source: ``running``
        The configuration source: ``running``, ``candidate`` or ``startup``.

    user / group
        Ownership of the file; default to the user/group Salt runs as on
        the minion. ``group`` is ignored on Windows.

    mode / attrs
        File permissions (e.g. ``644``, ``0775``) and attributes (e.g.
        ``a``, ``i``). Neither option is supported on Windows.

    makedirs: ``False``
        Create the parent directories if needed; otherwise the state fails
        when the parent directory does not exist.

    dir_mode
        Permissions for directories created via ``makedirs``.

    replace: ``True``
        If ``False`` and the file exists, its contents are left alone;
        permissions and ownership are still enforced.

    backup
        Overrides the default backup mode for this file (see
        :ref:`backup_mode documentation <file-state-backups>`).

    show_changes: ``True``
        Output a unified diff of old vs. new file; ``False`` returns a
        boolean instead.

    create: ``True``
        If ``False``, the file is managed only if it already exists.

    encoding / encoding_errors: ``None`` / ``'strict'``
        Encoding (and error scheme) used when writing the file; defaults to
        the system locale encoding.

    allow_empty: ``False``
        If ``False``, fail when the contents to write are empty.

    follow_symlinks: ``True``
        If the destination is a symlink, follow it and modify its target.

    check_cmd / tmp_dir / tmp_ext
        Validation command run against a temporary copy of the new contents
        (written under ``tmp_dir`` with suffix ``tmp_ext``); a non-zero exit
        fails the state and leaves the file untouched.

    win_owner / win_perms / win_deny_perms / win_inheritance / win_perms_reset
        Windows-specific ownership, grant/deny ACLs, inheritance and DACL
        reset options, forwarded unchanged to ``file.managed``.

    State SLS Example:

    .. code-block:: yaml

        /var/backups/{{ opts.id }}/{{ salt.status.time('%s') }}.cfg:
          netconfig.saved:
            - source: running
            - makedirs: true

    The state SLS above would create a backup config grouping the files by
    the Minion ID, in chronological files. For example, if the state is
    executed on the 3rd of August 2018 at 5:15PM on the Minion
    ``core1.lon01``, the configuration would be saved in the file:
    ``/var/backups/core01.lon01/1533316558.cfg``
    '''
    # Fetch the requested configuration source from the device first.
    config_ret = __salt__['net.config'](source=source)
    if not config_ret['result']:
        # Could not retrieve the configuration: fail the state with the
        # execution module's comment.
        return {
            'name': name,
            'changes': {},
            'result': False,
            'comment': config_ret['comment'],
        }
    # Hand the retrieved text over to file.managed, which takes care of
    # diffing, backups, permissions and the actual write.
    return __states__['file.managed'](name,
                                      user=user,
                                      group=group,
                                      mode=mode,
                                      attrs=attrs,
                                      makedirs=makedirs,
                                      dir_mode=dir_mode,
                                      replace=replace,
                                      backup=backup,
                                      show_changes=show_changes,
                                      create=create,
                                      contents=config_ret['out'][source],
                                      tmp_dir=tmp_dir,
                                      tmp_ext=tmp_ext,
                                      encoding=encoding,
                                      encoding_errors=encoding_errors,
                                      allow_empty=allow_empty,
                                      follow_symlinks=follow_symlinks,
                                      check_cmd=check_cmd,
                                      win_owner=win_owner,
                                      win_perms=win_perms,
                                      win_deny_perms=win_deny_perms,
                                      win_inheritance=win_inheritance,
                                      win_perms_reset=win_perms_reset,
                                      **kwargs)
def managed(name,
template_name=None,
template_source=None,
template_hash=None,
template_hash_name=None,
saltenv='base',
template_engine='jinja',
skip_verify=False,
context=None,
defaults=None,
test=False,
commit=True,
debug=False,
replace=False,
commit_in=None,
commit_at=None,
revert_in=None,
revert_at=None,
**template_vars):
'''
Manages the configuration on network devices.
By default this state will commit the changes on the device. If there are no changes required, it does not commit
and the field ``already_configured`` from the output dictionary will be set as ``True`` to notify that.
To avoid committing the configuration, set the argument ``test`` to ``True`` (or via the CLI argument ``test=True``)
and will discard (dry run).
To preserve the changes, set ``commit`` to ``False`` (either as CLI argument, either as state parameter).
However, this is recommended to be used only in exceptional cases when there are applied few consecutive states
and/or configuration changes. Otherwise the user might forget that the config DB is locked and the candidate config
buffer is not cleared/merged in the running config.
To replace the config, set ``replace`` to ``True``. This option is recommended to be used with caution!
template_name
Identifies path to the template source. The template can be either stored on the local machine,
either remotely.
The recommended location is under the ``file_roots`` as specified in the master config file.
For example, let's suppose the ``file_roots`` is configured as:
.. code-block:: yaml
file_roots:
base:
- /etc/salt/states
Placing the template under ``/etc/salt/states/templates/example.jinja``, it can be used as
``salt://templates/example.jinja``.
Alternatively, for local files, the user can specify the absolute path.
If remotely, the source can be retrieved via ``http``, ``https`` or ``ftp``.
Examples:
- ``salt://my_template.jinja``
- ``/absolute/path/to/my_template.jinja``
- ``http://example.com/template.cheetah``
- ``https:/example.com/template.mako``
- ``ftp://example.com/template.py``
.. versionchanged:: 2019.2.0
This argument can now support a list of templates to be rendered.
The resulting configuration text is loaded at once, as a single
configuration chunk.
template_source: None
Inline config template to be rendered and loaded on the device.
template_hash: None
Hash of the template file. Format: ``{hash_type: 'md5', 'hsum': <md5sum>}``
template_hash_name: None
When ``template_hash`` refers to a remote file, this specifies the filename to look for in that file.
saltenv: base
Specifies the template environment. This will influence the relative imports inside the templates.
template_engine: jinja
The following templates engines are supported:
- :mod:`cheetah<salt.renderers.cheetah>`
- :mod:`genshi<salt.renderers.genshi>`
- :mod:`jinja<salt.renderers.jinja>`
- :mod:`mako<salt.renderers.mako>`
- :mod:`py<salt.renderers.py>`
- :mod:`wempy<salt.renderers.wempy>`
skip_verify: False
If ``True``, hash verification of remote file sources (``http://``, ``https://``, ``ftp://``) will be skipped,
and the ``source_hash`` argument will be ignored.
.. versionchanged:: 2017.7.1
test: False
Dry run? If set to ``True``, will apply the config, discard and return the changes. Default: ``False``
(will commit the changes on the device).
commit: True
Commit? Default: ``True``.
debug: False
Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` containing the raw
result after the template was rendered.
.. note::
This argument cannot be used directly on the command line. Instead,
it can be passed through the ``pillar`` variable when executing
either of the :py:func:`state.sls <salt.modules.state.sls>` or
:py:func:`state.apply <salt.modules.state.apply>` (see below for an
example).
commit_in: ``None``
Commit the changes in a specific number of minutes / hours. Example of
accepted formats: ``5`` (commit in 5 minutes), ``2m`` (commit in 2
minutes), ``1h`` (commit the changes in 1 hour)`, ``5h30m`` (commit
the changes in 5 hours and 30 minutes).
.. note::
This feature works on any platforms, as it does not rely on the
native features of the network operating system.
.. note::
After the command is executed and the ``diff`` is not satisfactory,
or for any other reasons you have to discard the commit, you are
able to do so using the
:py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>`
execution function, using the commit ID returned by this function.
.. warning::
Using this feature, Salt will load the exact configuration you
expect, however the diff may change in time (i.e., if an user
applies a manual configuration change, or a different process or
command changes the configuration in the meanwhile).
.. versionadded:: 2019.2.0
commit_at: ``None``
Commit the changes at a specific time. Example of accepted formats:
``1am`` (will commit the changes at the next 1AM), ``13:20`` (will
commit at 13:20), ``1:20am``, etc.
.. note::
This feature works on any platforms, as it does not rely on the
native features of the network operating system.
.. note::
After the command is executed and the ``diff`` is not satisfactory,
or for any other reasons you have to discard the commit, you are
able to do so using the
:py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>`
execution function, using the commit ID returned by this function.
.. warning::
Using this feature, Salt will load the exact configuration you
expect, however the diff may change in time (i.e., if an user
applies a manual configuration change, or a different process or
command changes the configuration in the meanwhile).
.. versionadded:: 2019.2.0
revert_in: ``None``
Commit and revert the changes in a specific number of minutes / hours.
Example of accepted formats: ``5`` (revert in 5 minutes), ``2m`` (revert
in 2 minutes), ``1h`` (revert the changes in 1 hour)`, ``5h30m`` (revert
the changes in 5 hours and 30 minutes).
.. note::
To confirm the commit, and prevent reverting the changes, you will
have to execute the
:mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>`
function, using the commit ID returned by this function.
.. warning::
This works on any platform, regardless if they have or don't have
native capabilities to confirming a commit. However, please be
*very* cautious when using this feature: on Junos (as it is the only
NAPALM core platform supporting this natively) it executes a commit
confirmed as you would do from the command line.
All the other platforms don't have this capability natively,
therefore the revert is done via Salt. That means, your device needs
to be reachable at the moment when Salt will attempt to revert your
changes. Be cautious when pushing configuration changes that would
prevent you reach the device.
Similarly, if an user or a different process apply other
configuration changes in the meanwhile (between the moment you
commit and till the changes are reverted), these changes would be
equally reverted, as Salt cannot be aware of them.
.. versionadded:: 2019.2.0
revert_at: ``None``
Commit and revert the changes at a specific time. Example of accepted
formats: ``1am`` (will commit and revert the changes at the next 1AM),
``13:20`` (will commit and revert at 13:20), ``1:20am``, etc.
.. note::
To confirm the commit, and prevent reverting the changes, you will
have to execute the
:mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>`
function, using the commit ID returned by this function.
.. warning::
This works on any platform, regardless if they have or don't have
native capabilities to confirming a commit. However, please be
*very* cautious when using this feature: on Junos (as it is the only
NAPALM core platform supporting this natively) it executes a commit
confirmed as you would do from the command line.
All the other platforms don't have this capability natively,
therefore the revert is done via Salt. That means, your device needs
to be reachable at the moment when Salt will attempt to revert your
changes. Be cautious when pushing configuration changes that would
prevent you reach the device.
Similarly, if an user or a different process apply other
configuration changes in the meanwhile (between the moment you
commit and till the changes are reverted), these changes would be
equally reverted, as Salt cannot be aware of them.
.. versionadded:: 2019.2.0
replace: False
Load and replace the configuration. Default: ``False`` (will apply load merge).
context: None
Overrides default context variables passed to the template.
.. versionadded:: 2019.2.0
defaults: None
Default variables/context passed to the template.
template_vars
Dictionary with the arguments/context to be used when the template is rendered. Do not explicitly specify this
argument. This represents any other variable that will be sent to the template rendering system. Please
see an example below! In both ``ntp_peers_example_using_pillar`` and ``ntp_peers_example``, ``peers`` is sent as
template variable.
.. note::
It is more recommended to use the ``context`` argument instead, to
avoid any conflicts with other arguments.
SLS Example (e.g.: under salt://router/config.sls) :
.. code-block:: yaml
whole_config_example:
netconfig.managed:
- template_name: salt://path/to/complete_config.jinja
- debug: True
- replace: True
bgp_config_example:
netconfig.managed:
- template_name: /absolute/path/to/bgp_neighbors.mako
- template_engine: mako
prefix_lists_example:
netconfig.managed:
- template_name: prefix_lists.cheetah
- debug: True
- template_engine: cheetah
ntp_peers_example:
netconfig.managed:
- template_name: http://bit.ly/2gKOj20
- skip_verify: False
- debug: True
- peers:
- 192.168.0.1
- 192.168.0.1
ntp_peers_example_using_pillar:
netconfig.managed:
- template_name: http://bit.ly/2gKOj20
- peers: {{ pillar.get('ntp.peers', []) }}
Multi template example:
.. code-block:: yaml
hostname_and_ntp:
netconfig.managed:
- template_name:
- https://bit.ly/2OhSgqP
- https://bit.ly/2M6C4Lx
- https://bit.ly/2OIWVTs
- debug: true
- context:
hostname: {{ opts.id }}
servers:
- 172.17.17.1
- 172.17.17.2
peers:
- 192.168.0.1
- 192.168.0.2
Usage examples:
.. code-block:: bash
$ sudo salt 'juniper.device' state.sls router.config test=True
$ sudo salt -N all-routers state.sls router.config pillar="{'debug': True}"
``router.config`` depends on the location of the SLS file (see above). Running this command, will be executed all
five steps from above. These examples above are not meant to be used in a production environment, their sole purpose
is to provide usage examples.
Output example:
.. code-block:: bash
$ sudo salt 'juniper.device' state.sls router.config test=True
juniper.device:
----------
ID: ntp_peers_example_using_pillar
Function: netconfig.managed
Result: None
Comment: Testing mode: Configuration discarded.
Started: 12:01:40.744535
Duration: 8755.788 ms
Changes:
----------
diff:
[edit system ntp]
peer 192.168.0.1 { ... }
+ peer 172.17.17.1;
+ peer 172.17.17.3;
Summary for juniper.device
------------
Succeeded: 1 (unchanged=1, changed=1)
Failed: 0
------------
Total states run: 1
Total run time: 8.756 s
Raw output example (useful when the output is reused in other states/execution modules):
.. code-block:: bash
$ sudo salt --out=pprint 'juniper.device' state.sls router.config test=True debug=True
.. code-block:: python
{
'juniper.device': {
'netconfig_|-ntp_peers_example_using_pillar_|-ntp_peers_example_using_pillar_|-managed': {
'__id__': 'ntp_peers_example_using_pillar',
'__run_num__': 0,
'already_configured': False,
'changes': {
'diff': '[edit system ntp] peer 192.168.0.1 { ... }+ peer 172.17.17.1;+ peer 172.17.17.3;'
},
'comment': 'Testing mode: Configuration discarded.',
'duration': 7400.759,
'loaded_config': 'system { ntp { peer 172.17.17.1; peer 172.17.17.3; } }',
'name': 'ntp_peers_example_using_pillar',
'result': None,
'start_time': '12:09:09.811445'
}
}
}
'''
ret = salt.utils.napalm.default_ret(name)
# the user can override the flags the equivalent CLI args
# which have higher precedence
test = __salt__['config.merge']('test', test)
debug = __salt__['config.merge']('debug', debug)
commit = __salt__['config.merge']('commit', commit)
replace = __salt__['config.merge']('replace', replace) # this might be a bit risky
skip_verify = __salt__['config.merge']('skip_verify', skip_verify)
commit_in = __salt__['config.merge']('commit_in', commit_in)
commit_at = __salt__['config.merge']('commit_at', commit_at)
revert_in = __salt__['config.merge']('revert_in', revert_in)
revert_at = __salt__['config.merge']('revert_at', revert_at)
config_update_ret = _update_config(template_name=template_name,
template_source=template_source,
template_hash=template_hash,
template_hash_name=template_hash_name,
saltenv=saltenv,
template_engine=template_engine,
skip_verify=skip_verify,
context=context,
defaults=defaults,
test=test,
commit=commit,
commit_in=commit_in,
commit_at=commit_at,
revert_in=revert_in,
revert_at=revert_at,
debug=debug,
replace=replace,
**template_vars)
return salt.utils.napalm.loaded_ret(ret, config_update_ret, test, debug)
def commit_cancelled(name):
'''
.. versionadded:: 2019.2.0
Cancel a commit scheduled to be executed via the ``commit_in`` and
``commit_at`` arguments from the
:py:func:`net.load_template <salt.modules.napalm_network.load_template>` or
:py:func:`net.load_config <salt.modules.napalm_network.load_config>`
execution functions. The commit ID is displayed when the commit is scheduled
via the functions named above.
State SLS Example:
.. code-block:: yaml
'20180726083540640360':
netconfig.commit_cancelled
'''
cancelled = {
'name': name,
'result': None,
'changes': {},
'comment': ''
}
if __opts__['test']:
cancelled['comment'] = 'It would cancel commit #{}'.format(name)
return cancelled
ret = __salt__['net.cancel_commit'](name)
cancelled.update(ret)
return cancelled
|
saltstack/salt
|
salt/serializers/python.py
|
serialize
|
python
|
def serialize(obj, **options):
'''
Serialize Python data to a Python string representation (via pprint.format)
:param obj: the data structure to serialize
:param options: options given to pprint.format
'''
#round-trip this through JSON to avoid OrderedDict types
# there's probably a more performant way to do this...
# TODO remove json round-trip when all dataset will use
# serializers
return pprint.pformat(
salt.utils.json.loads(
salt.utils.json.dumps(obj, _json_module=_json),
_json_module=_json
),
**options
)
|
Serialize Python data to a Python string representation (via pprint.format)
:param obj: the data structure to serialize
:param options: options given to pprint.format
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/serializers/python.py#L25-L43
|
[
"def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n",
"def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n"
] |
# -*- coding: utf-8 -*-
'''
salt.serializers.python
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 2016.3.0
Implements a Python serializer (via pprint.format)
'''
from __future__ import absolute_import, unicode_literals
import pprint
try:
import simplejson as _json
except ImportError:
import json as _json # pylint: disable=blacklisted-import
import salt.utils.json
__all__ = ['serialize', 'available']
available = True
|
saltstack/salt
|
salt/proxy/ssh_sample.py
|
init
|
python
|
def init(opts):
'''
Required.
Can be used to initialize the server connection.
'''
try:
DETAILS['server'] = SSHConnection(host=__opts__['proxy']['host'],
username=__opts__['proxy']['username'],
password=__opts__['proxy']['password'])
out, err = DETAILS['server'].sendline('help')
DETAILS['initialized'] = True
except TerminalException as e:
log.error(e)
return False
|
Required.
Can be used to initialize the server connection.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/ssh_sample.py#L38-L52
| null |
# -*- coding: utf-8 -*-
'''
This is a simple proxy-minion designed to connect to and communicate with
a server that exposes functionality via SSH.
This can be used as an option when the device does not provide
an api over HTTP and doesn't have the python stack to run a minion.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import Salt libs
import salt.utils.json
from salt.utils.vt_helper import SSHConnection
from salt.utils.vt import TerminalException
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['ssh_sample']
DETAILS = {}
# Want logging!
log = logging.getLogger(__file__)
# This does nothing, it's here just as an example and to provide a log
# entry when the module is loaded.
def __virtual__():
'''
Only return if all the modules are available
'''
log.info('ssh_sample proxy __virtual__() called...')
return True
def initialized():
'''
Since grains are loaded in many different places and some of those
places occur before the proxy can be initialized, return whether
our init() function has been called
'''
return DETAILS.get('initialized', False)
def grains():
'''
Get the grains from the proxied device
'''
if not DETAILS.get('grains_cache', {}):
cmd = 'info'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
DETAILS['grains_cache'] = parse(out)
return DETAILS['grains_cache']
def grains_refresh():
'''
Refresh the grains from the proxied device
'''
DETAILS['grains_cache'] = None
return grains()
def fns():
return {'details': 'This key is here because a function in '
'grains/ssh_sample.py called fns() here in the proxymodule.'}
def ping():
'''
Required.
Ping the device on the other end of the connection
'''
try:
out, err = DETAILS['server'].sendline('help')
return True
except TerminalException as e:
log.error(e)
return False
def shutdown(opts):
'''
Disconnect
'''
DETAILS['server'].close_connection()
def parse(out):
'''
Extract json from out.
Parameter
out: Type string. The data returned by the
ssh command.
'''
jsonret = []
in_json = False
for ln_ in out.split('\n'):
if '{' in ln_:
in_json = True
if in_json:
jsonret.append(ln_)
if '}' in ln_:
in_json = False
return salt.utils.json.loads('\n'.join(jsonret))
def package_list():
'''
List "packages" by executing a command via ssh
This function is called in response to the salt command
..code-block::bash
salt target_minion pkg.list_pkgs
'''
# Send the command to execute
out, err = DETAILS['server'].sendline('pkg_list\n')
# "scrape" the output and return the right fields as a dict
return parse(out)
def package_install(name, **kwargs):
'''
Install a "package" on the ssh server
'''
cmd = 'pkg_install ' + name
if kwargs.get('version', False):
cmd += ' ' + kwargs['version']
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def package_remove(name):
'''
Remove a "package" on the ssh server
'''
cmd = 'pkg_remove ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_list():
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'ps'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_start(name):
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'start ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_stop(name):
'''
Stop a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'stop ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_restart(name):
'''
Restart a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'restart ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
|
saltstack/salt
|
salt/proxy/ssh_sample.py
|
grains
|
python
|
def grains():
'''
Get the grains from the proxied device
'''
if not DETAILS.get('grains_cache', {}):
cmd = 'info'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
DETAILS['grains_cache'] = parse(out)
return DETAILS['grains_cache']
|
Get the grains from the proxied device
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/ssh_sample.py#L64-L78
|
[
"def parse(out):\n '''\n Extract json from out.\n\n Parameter\n out: Type string. The data returned by the\n ssh command.\n '''\n jsonret = []\n in_json = False\n for ln_ in out.split('\\n'):\n if '{' in ln_:\n in_json = True\n if in_json:\n jsonret.append(ln_)\n if '}' in ln_:\n in_json = False\n return salt.utils.json.loads('\\n'.join(jsonret))\n"
] |
# -*- coding: utf-8 -*-
'''
This is a simple proxy-minion designed to connect to and communicate with
a server that exposes functionality via SSH.
This can be used as an option when the device does not provide
an api over HTTP and doesn't have the python stack to run a minion.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import Salt libs
import salt.utils.json
from salt.utils.vt_helper import SSHConnection
from salt.utils.vt import TerminalException
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['ssh_sample']
DETAILS = {}
# Want logging!
log = logging.getLogger(__file__)
# This does nothing, it's here just as an example and to provide a log
# entry when the module is loaded.
def __virtual__():
'''
Only return if all the modules are available
'''
log.info('ssh_sample proxy __virtual__() called...')
return True
def init(opts):
'''
Required.
Can be used to initialize the server connection.
'''
try:
DETAILS['server'] = SSHConnection(host=__opts__['proxy']['host'],
username=__opts__['proxy']['username'],
password=__opts__['proxy']['password'])
out, err = DETAILS['server'].sendline('help')
DETAILS['initialized'] = True
except TerminalException as e:
log.error(e)
return False
def initialized():
'''
Since grains are loaded in many different places and some of those
places occur before the proxy can be initialized, return whether
our init() function has been called
'''
return DETAILS.get('initialized', False)
def grains_refresh():
'''
Refresh the grains from the proxied device
'''
DETAILS['grains_cache'] = None
return grains()
def fns():
return {'details': 'This key is here because a function in '
'grains/ssh_sample.py called fns() here in the proxymodule.'}
def ping():
'''
Required.
Ping the device on the other end of the connection
'''
try:
out, err = DETAILS['server'].sendline('help')
return True
except TerminalException as e:
log.error(e)
return False
def shutdown(opts):
'''
Disconnect
'''
DETAILS['server'].close_connection()
def parse(out):
'''
Extract json from out.
Parameter
out: Type string. The data returned by the
ssh command.
'''
jsonret = []
in_json = False
for ln_ in out.split('\n'):
if '{' in ln_:
in_json = True
if in_json:
jsonret.append(ln_)
if '}' in ln_:
in_json = False
return salt.utils.json.loads('\n'.join(jsonret))
def package_list():
'''
List "packages" by executing a command via ssh
This function is called in response to the salt command
..code-block::bash
salt target_minion pkg.list_pkgs
'''
# Send the command to execute
out, err = DETAILS['server'].sendline('pkg_list\n')
# "scrape" the output and return the right fields as a dict
return parse(out)
def package_install(name, **kwargs):
'''
Install a "package" on the ssh server
'''
cmd = 'pkg_install ' + name
if kwargs.get('version', False):
cmd += ' ' + kwargs['version']
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def package_remove(name):
'''
Remove a "package" on the ssh server
'''
cmd = 'pkg_remove ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_list():
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'ps'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_start(name):
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'start ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_stop(name):
'''
Stop a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'stop ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_restart(name):
'''
Restart a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'restart ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
|
saltstack/salt
|
salt/proxy/ssh_sample.py
|
ping
|
python
|
def ping():
'''
Required.
Ping the device on the other end of the connection
'''
try:
out, err = DETAILS['server'].sendline('help')
return True
except TerminalException as e:
log.error(e)
return False
|
Required.
Ping the device on the other end of the connection
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/ssh_sample.py#L94-L104
| null |
# -*- coding: utf-8 -*-
'''
This is a simple proxy-minion designed to connect to and communicate with
a server that exposes functionality via SSH.
This can be used as an option when the device does not provide
an api over HTTP and doesn't have the python stack to run a minion.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import Salt libs
import salt.utils.json
from salt.utils.vt_helper import SSHConnection
from salt.utils.vt import TerminalException
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['ssh_sample']
DETAILS = {}
# Want logging!
log = logging.getLogger(__file__)
# This does nothing, it's here just as an example and to provide a log
# entry when the module is loaded.
def __virtual__():
'''
Only return if all the modules are available
'''
log.info('ssh_sample proxy __virtual__() called...')
return True
def init(opts):
'''
Required.
Can be used to initialize the server connection.
'''
try:
DETAILS['server'] = SSHConnection(host=__opts__['proxy']['host'],
username=__opts__['proxy']['username'],
password=__opts__['proxy']['password'])
out, err = DETAILS['server'].sendline('help')
DETAILS['initialized'] = True
except TerminalException as e:
log.error(e)
return False
def initialized():
'''
Since grains are loaded in many different places and some of those
places occur before the proxy can be initialized, return whether
our init() function has been called
'''
return DETAILS.get('initialized', False)
def grains():
'''
Get the grains from the proxied device
'''
if not DETAILS.get('grains_cache', {}):
cmd = 'info'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
DETAILS['grains_cache'] = parse(out)
return DETAILS['grains_cache']
def grains_refresh():
'''
Refresh the grains from the proxied device
'''
DETAILS['grains_cache'] = None
return grains()
def fns():
return {'details': 'This key is here because a function in '
'grains/ssh_sample.py called fns() here in the proxymodule.'}
def shutdown(opts):
'''
Disconnect
'''
DETAILS['server'].close_connection()
def parse(out):
'''
Extract json from out.
Parameter
out: Type string. The data returned by the
ssh command.
'''
jsonret = []
in_json = False
for ln_ in out.split('\n'):
if '{' in ln_:
in_json = True
if in_json:
jsonret.append(ln_)
if '}' in ln_:
in_json = False
return salt.utils.json.loads('\n'.join(jsonret))
def package_list():
'''
List "packages" by executing a command via ssh
This function is called in response to the salt command
..code-block::bash
salt target_minion pkg.list_pkgs
'''
# Send the command to execute
out, err = DETAILS['server'].sendline('pkg_list\n')
# "scrape" the output and return the right fields as a dict
return parse(out)
def package_install(name, **kwargs):
'''
Install a "package" on the ssh server
'''
cmd = 'pkg_install ' + name
if kwargs.get('version', False):
cmd += ' ' + kwargs['version']
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def package_remove(name):
'''
Remove a "package" on the ssh server
'''
cmd = 'pkg_remove ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_list():
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'ps'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_start(name):
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'start ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_stop(name):
'''
Stop a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'stop ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_restart(name):
'''
Restart a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'restart ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
|
saltstack/salt
|
salt/proxy/ssh_sample.py
|
parse
|
python
|
def parse(out):
'''
Extract json from out.
Parameter
out: Type string. The data returned by the
ssh command.
'''
jsonret = []
in_json = False
for ln_ in out.split('\n'):
if '{' in ln_:
in_json = True
if in_json:
jsonret.append(ln_)
if '}' in ln_:
in_json = False
return salt.utils.json.loads('\n'.join(jsonret))
|
Extract json from out.
Parameter
out: Type string. The data returned by the
ssh command.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/ssh_sample.py#L114-L131
|
[
"def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n"
] |
# -*- coding: utf-8 -*-
'''
This is a simple proxy-minion designed to connect to and communicate with
a server that exposes functionality via SSH.
This can be used as an option when the device does not provide
an api over HTTP and doesn't have the python stack to run a minion.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import Salt libs
import salt.utils.json
from salt.utils.vt_helper import SSHConnection
from salt.utils.vt import TerminalException
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['ssh_sample']
DETAILS = {}
# Want logging!
log = logging.getLogger(__file__)
# This does nothing, it's here just as an example and to provide a log
# entry when the module is loaded.
def __virtual__():
'''
Only return if all the modules are available
'''
log.info('ssh_sample proxy __virtual__() called...')
return True
def init(opts):
'''
Required.
Can be used to initialize the server connection.
'''
try:
DETAILS['server'] = SSHConnection(host=__opts__['proxy']['host'],
username=__opts__['proxy']['username'],
password=__opts__['proxy']['password'])
out, err = DETAILS['server'].sendline('help')
DETAILS['initialized'] = True
except TerminalException as e:
log.error(e)
return False
def initialized():
'''
Since grains are loaded in many different places and some of those
places occur before the proxy can be initialized, return whether
our init() function has been called
'''
return DETAILS.get('initialized', False)
def grains():
'''
Get the grains from the proxied device
'''
if not DETAILS.get('grains_cache', {}):
cmd = 'info'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
DETAILS['grains_cache'] = parse(out)
return DETAILS['grains_cache']
def grains_refresh():
'''
Refresh the grains from the proxied device
'''
DETAILS['grains_cache'] = None
return grains()
def fns():
return {'details': 'This key is here because a function in '
'grains/ssh_sample.py called fns() here in the proxymodule.'}
def ping():
'''
Required.
Ping the device on the other end of the connection
'''
try:
out, err = DETAILS['server'].sendline('help')
return True
except TerminalException as e:
log.error(e)
return False
def shutdown(opts):
'''
Disconnect
'''
DETAILS['server'].close_connection()
def package_list():
'''
List "packages" by executing a command via ssh
This function is called in response to the salt command
..code-block::bash
salt target_minion pkg.list_pkgs
'''
# Send the command to execute
out, err = DETAILS['server'].sendline('pkg_list\n')
# "scrape" the output and return the right fields as a dict
return parse(out)
def package_install(name, **kwargs):
'''
Install a "package" on the ssh server
'''
cmd = 'pkg_install ' + name
if kwargs.get('version', False):
cmd += ' ' + kwargs['version']
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def package_remove(name):
'''
Remove a "package" on the ssh server
'''
cmd = 'pkg_remove ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_list():
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'ps'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_start(name):
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'start ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_stop(name):
'''
Stop a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'stop ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_restart(name):
'''
Restart a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'restart ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
|
saltstack/salt
|
salt/proxy/ssh_sample.py
|
package_install
|
python
|
def package_install(name, **kwargs):
'''
Install a "package" on the ssh server
'''
cmd = 'pkg_install ' + name
if kwargs.get('version', False):
cmd += ' ' + kwargs['version']
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
|
Install a "package" on the ssh server
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/ssh_sample.py#L150-L162
|
[
"def parse(out):\n '''\n Extract json from out.\n\n Parameter\n out: Type string. The data returned by the\n ssh command.\n '''\n jsonret = []\n in_json = False\n for ln_ in out.split('\\n'):\n if '{' in ln_:\n in_json = True\n if in_json:\n jsonret.append(ln_)\n if '}' in ln_:\n in_json = False\n return salt.utils.json.loads('\\n'.join(jsonret))\n"
] |
# -*- coding: utf-8 -*-
'''
This is a simple proxy-minion designed to connect to and communicate with
a server that exposes functionality via SSH.
This can be used as an option when the device does not provide
an api over HTTP and doesn't have the python stack to run a minion.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import Salt libs
import salt.utils.json
from salt.utils.vt_helper import SSHConnection
from salt.utils.vt import TerminalException
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['ssh_sample']
DETAILS = {}
# Want logging!
log = logging.getLogger(__file__)
# This does nothing, it's here just as an example and to provide a log
# entry when the module is loaded.
def __virtual__():
'''
Only return if all the modules are available
'''
log.info('ssh_sample proxy __virtual__() called...')
return True
def init(opts):
'''
Required.
Can be used to initialize the server connection.
'''
try:
DETAILS['server'] = SSHConnection(host=__opts__['proxy']['host'],
username=__opts__['proxy']['username'],
password=__opts__['proxy']['password'])
out, err = DETAILS['server'].sendline('help')
DETAILS['initialized'] = True
except TerminalException as e:
log.error(e)
return False
def initialized():
'''
Since grains are loaded in many different places and some of those
places occur before the proxy can be initialized, return whether
our init() function has been called
'''
return DETAILS.get('initialized', False)
def grains():
'''
Get the grains from the proxied device
'''
if not DETAILS.get('grains_cache', {}):
cmd = 'info'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
DETAILS['grains_cache'] = parse(out)
return DETAILS['grains_cache']
def grains_refresh():
'''
Refresh the grains from the proxied device
'''
DETAILS['grains_cache'] = None
return grains()
def fns():
return {'details': 'This key is here because a function in '
'grains/ssh_sample.py called fns() here in the proxymodule.'}
def ping():
'''
Required.
Ping the device on the other end of the connection
'''
try:
out, err = DETAILS['server'].sendline('help')
return True
except TerminalException as e:
log.error(e)
return False
def shutdown(opts):
'''
Disconnect
'''
DETAILS['server'].close_connection()
def parse(out):
'''
Extract json from out.
Parameter
out: Type string. The data returned by the
ssh command.
'''
jsonret = []
in_json = False
for ln_ in out.split('\n'):
if '{' in ln_:
in_json = True
if in_json:
jsonret.append(ln_)
if '}' in ln_:
in_json = False
return salt.utils.json.loads('\n'.join(jsonret))
def package_list():
'''
List "packages" by executing a command via ssh
This function is called in response to the salt command
..code-block::bash
salt target_minion pkg.list_pkgs
'''
# Send the command to execute
out, err = DETAILS['server'].sendline('pkg_list\n')
# "scrape" the output and return the right fields as a dict
return parse(out)
def package_remove(name):
'''
Remove a "package" on the ssh server
'''
cmd = 'pkg_remove ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_list():
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'ps'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_start(name):
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'start ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_stop(name):
'''
Stop a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'stop ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_restart(name):
'''
Restart a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'restart ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
|
saltstack/salt
|
salt/proxy/ssh_sample.py
|
package_remove
|
python
|
def package_remove(name):
'''
Remove a "package" on the ssh server
'''
cmd = 'pkg_remove ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
|
Remove a "package" on the ssh server
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/ssh_sample.py#L165-L175
|
[
"def parse(out):\n '''\n Extract json from out.\n\n Parameter\n out: Type string. The data returned by the\n ssh command.\n '''\n jsonret = []\n in_json = False\n for ln_ in out.split('\\n'):\n if '{' in ln_:\n in_json = True\n if in_json:\n jsonret.append(ln_)\n if '}' in ln_:\n in_json = False\n return salt.utils.json.loads('\\n'.join(jsonret))\n"
] |
# -*- coding: utf-8 -*-
'''
This is a simple proxy-minion designed to connect to and communicate with
a server that exposes functionality via SSH.
This can be used as an option when the device does not provide
an api over HTTP and doesn't have the python stack to run a minion.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import Salt libs
import salt.utils.json
from salt.utils.vt_helper import SSHConnection
from salt.utils.vt import TerminalException
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['ssh_sample']
DETAILS = {}
# Want logging!
log = logging.getLogger(__file__)
# This does nothing, it's here just as an example and to provide a log
# entry when the module is loaded.
def __virtual__():
'''
Only return if all the modules are available
'''
log.info('ssh_sample proxy __virtual__() called...')
return True
def init(opts):
'''
Required.
Can be used to initialize the server connection.
'''
try:
DETAILS['server'] = SSHConnection(host=__opts__['proxy']['host'],
username=__opts__['proxy']['username'],
password=__opts__['proxy']['password'])
out, err = DETAILS['server'].sendline('help')
DETAILS['initialized'] = True
except TerminalException as e:
log.error(e)
return False
def initialized():
'''
Since grains are loaded in many different places and some of those
places occur before the proxy can be initialized, return whether
our init() function has been called
'''
return DETAILS.get('initialized', False)
def grains():
'''
Get the grains from the proxied device
'''
if not DETAILS.get('grains_cache', {}):
cmd = 'info'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
DETAILS['grains_cache'] = parse(out)
return DETAILS['grains_cache']
def grains_refresh():
'''
Refresh the grains from the proxied device
'''
DETAILS['grains_cache'] = None
return grains()
def fns():
return {'details': 'This key is here because a function in '
'grains/ssh_sample.py called fns() here in the proxymodule.'}
def ping():
'''
Required.
Ping the device on the other end of the connection
'''
try:
out, err = DETAILS['server'].sendline('help')
return True
except TerminalException as e:
log.error(e)
return False
def shutdown(opts):
'''
Disconnect
'''
DETAILS['server'].close_connection()
def parse(out):
'''
Extract json from out.
Parameter
out: Type string. The data returned by the
ssh command.
'''
jsonret = []
in_json = False
for ln_ in out.split('\n'):
if '{' in ln_:
in_json = True
if in_json:
jsonret.append(ln_)
if '}' in ln_:
in_json = False
return salt.utils.json.loads('\n'.join(jsonret))
def package_list():
'''
List "packages" by executing a command via ssh
This function is called in response to the salt command
..code-block::bash
salt target_minion pkg.list_pkgs
'''
# Send the command to execute
out, err = DETAILS['server'].sendline('pkg_list\n')
# "scrape" the output and return the right fields as a dict
return parse(out)
def package_install(name, **kwargs):
'''
Install a "package" on the ssh server
'''
cmd = 'pkg_install ' + name
if kwargs.get('version', False):
cmd += ' ' + kwargs['version']
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_list():
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'ps'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_start(name):
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'start ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_stop(name):
'''
Stop a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'stop ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_restart(name):
'''
Restart a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'restart ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
|
saltstack/salt
|
salt/proxy/ssh_sample.py
|
service_start
|
python
|
def service_start(name):
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'start ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
|
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/ssh_sample.py#L193-L205
|
[
"def parse(out):\n '''\n Extract json from out.\n\n Parameter\n out: Type string. The data returned by the\n ssh command.\n '''\n jsonret = []\n in_json = False\n for ln_ in out.split('\\n'):\n if '{' in ln_:\n in_json = True\n if in_json:\n jsonret.append(ln_)\n if '}' in ln_:\n in_json = False\n return salt.utils.json.loads('\\n'.join(jsonret))\n"
] |
# -*- coding: utf-8 -*-
'''
This is a simple proxy-minion designed to connect to and communicate with
a server that exposes functionality via SSH.
This can be used as an option when the device does not provide
an api over HTTP and doesn't have the python stack to run a minion.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import Salt libs
import salt.utils.json
from salt.utils.vt_helper import SSHConnection
from salt.utils.vt import TerminalException
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['ssh_sample']
DETAILS = {}
# Want logging!
log = logging.getLogger(__file__)
# This does nothing, it's here just as an example and to provide a log
# entry when the module is loaded.
def __virtual__():
'''
Only return if all the modules are available
'''
log.info('ssh_sample proxy __virtual__() called...')
return True
def init(opts):
'''
Required.
Can be used to initialize the server connection.
'''
try:
DETAILS['server'] = SSHConnection(host=__opts__['proxy']['host'],
username=__opts__['proxy']['username'],
password=__opts__['proxy']['password'])
out, err = DETAILS['server'].sendline('help')
DETAILS['initialized'] = True
except TerminalException as e:
log.error(e)
return False
def initialized():
'''
Since grains are loaded in many different places and some of those
places occur before the proxy can be initialized, return whether
our init() function has been called
'''
return DETAILS.get('initialized', False)
def grains():
'''
Get the grains from the proxied device
'''
if not DETAILS.get('grains_cache', {}):
cmd = 'info'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
DETAILS['grains_cache'] = parse(out)
return DETAILS['grains_cache']
def grains_refresh():
'''
Refresh the grains from the proxied device
'''
DETAILS['grains_cache'] = None
return grains()
def fns():
return {'details': 'This key is here because a function in '
'grains/ssh_sample.py called fns() here in the proxymodule.'}
def ping():
'''
Required.
Ping the device on the other end of the connection
'''
try:
out, err = DETAILS['server'].sendline('help')
return True
except TerminalException as e:
log.error(e)
return False
def shutdown(opts):
'''
Disconnect
'''
DETAILS['server'].close_connection()
def parse(out):
'''
Extract json from out.
Parameter
out: Type string. The data returned by the
ssh command.
'''
jsonret = []
in_json = False
for ln_ in out.split('\n'):
if '{' in ln_:
in_json = True
if in_json:
jsonret.append(ln_)
if '}' in ln_:
in_json = False
return salt.utils.json.loads('\n'.join(jsonret))
def package_list():
'''
List "packages" by executing a command via ssh
This function is called in response to the salt command
..code-block::bash
salt target_minion pkg.list_pkgs
'''
# Send the command to execute
out, err = DETAILS['server'].sendline('pkg_list\n')
# "scrape" the output and return the right fields as a dict
return parse(out)
def package_install(name, **kwargs):
'''
Install a "package" on the ssh server
'''
cmd = 'pkg_install ' + name
if kwargs.get('version', False):
cmd += ' ' + kwargs['version']
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def package_remove(name):
'''
Remove a "package" on the ssh server
'''
cmd = 'pkg_remove ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_list():
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'ps'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_stop(name):
'''
Stop a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'stop ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_restart(name):
'''
Restart a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'restart ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
|
saltstack/salt
|
salt/proxy/ssh_sample.py
|
service_stop
|
python
|
def service_stop(name):
'''
Stop a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'stop ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
|
Stop a "service" on the ssh server
.. versionadded:: 2015.8.2
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/ssh_sample.py#L208-L220
|
[
"def parse(out):\n '''\n Extract json from out.\n\n Parameter\n out: Type string. The data returned by the\n ssh command.\n '''\n jsonret = []\n in_json = False\n for ln_ in out.split('\\n'):\n if '{' in ln_:\n in_json = True\n if in_json:\n jsonret.append(ln_)\n if '}' in ln_:\n in_json = False\n return salt.utils.json.loads('\\n'.join(jsonret))\n"
] |
# -*- coding: utf-8 -*-
'''
This is a simple proxy-minion designed to connect to and communicate with
a server that exposes functionality via SSH.
This can be used as an option when the device does not provide
an api over HTTP and doesn't have the python stack to run a minion.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import Salt libs
import salt.utils.json
from salt.utils.vt_helper import SSHConnection
from salt.utils.vt import TerminalException
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['ssh_sample']
DETAILS = {}
# Want logging!
log = logging.getLogger(__file__)
# This does nothing, it's here just as an example and to provide a log
# entry when the module is loaded.
def __virtual__():
'''
Only return if all the modules are available
'''
log.info('ssh_sample proxy __virtual__() called...')
return True
def init(opts):
'''
Required.
Can be used to initialize the server connection.
'''
try:
DETAILS['server'] = SSHConnection(host=__opts__['proxy']['host'],
username=__opts__['proxy']['username'],
password=__opts__['proxy']['password'])
out, err = DETAILS['server'].sendline('help')
DETAILS['initialized'] = True
except TerminalException as e:
log.error(e)
return False
def initialized():
'''
Since grains are loaded in many different places and some of those
places occur before the proxy can be initialized, return whether
our init() function has been called
'''
return DETAILS.get('initialized', False)
def grains():
'''
Get the grains from the proxied device
'''
if not DETAILS.get('grains_cache', {}):
cmd = 'info'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
DETAILS['grains_cache'] = parse(out)
return DETAILS['grains_cache']
def grains_refresh():
'''
Refresh the grains from the proxied device
'''
DETAILS['grains_cache'] = None
return grains()
def fns():
return {'details': 'This key is here because a function in '
'grains/ssh_sample.py called fns() here in the proxymodule.'}
def ping():
'''
Required.
Ping the device on the other end of the connection
'''
try:
out, err = DETAILS['server'].sendline('help')
return True
except TerminalException as e:
log.error(e)
return False
def shutdown(opts):
'''
Disconnect
'''
DETAILS['server'].close_connection()
def parse(out):
'''
Extract json from out.
Parameter
out: Type string. The data returned by the
ssh command.
'''
jsonret = []
in_json = False
for ln_ in out.split('\n'):
if '{' in ln_:
in_json = True
if in_json:
jsonret.append(ln_)
if '}' in ln_:
in_json = False
return salt.utils.json.loads('\n'.join(jsonret))
def package_list():
'''
List "packages" by executing a command via ssh
This function is called in response to the salt command
..code-block::bash
salt target_minion pkg.list_pkgs
'''
# Send the command to execute
out, err = DETAILS['server'].sendline('pkg_list\n')
# "scrape" the output and return the right fields as a dict
return parse(out)
def package_install(name, **kwargs):
'''
Install a "package" on the ssh server
'''
cmd = 'pkg_install ' + name
if kwargs.get('version', False):
cmd += ' ' + kwargs['version']
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def package_remove(name):
'''
Remove a "package" on the ssh server
'''
cmd = 'pkg_remove ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_list():
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'ps'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_start(name):
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'start ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_restart(name):
'''
Restart a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'restart ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
|
saltstack/salt
|
salt/proxy/ssh_sample.py
|
service_restart
|
python
|
def service_restart(name):
'''
Restart a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'restart ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
|
Restart a "service" on the ssh server
.. versionadded:: 2015.8.2
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/ssh_sample.py#L223-L235
|
[
"def parse(out):\n '''\n Extract json from out.\n\n Parameter\n out: Type string. The data returned by the\n ssh command.\n '''\n jsonret = []\n in_json = False\n for ln_ in out.split('\\n'):\n if '{' in ln_:\n in_json = True\n if in_json:\n jsonret.append(ln_)\n if '}' in ln_:\n in_json = False\n return salt.utils.json.loads('\\n'.join(jsonret))\n"
] |
# -*- coding: utf-8 -*-
'''
This is a simple proxy-minion designed to connect to and communicate with
a server that exposes functionality via SSH.
This can be used as an option when the device does not provide
an api over HTTP and doesn't have the python stack to run a minion.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import Salt libs
import salt.utils.json
from salt.utils.vt_helper import SSHConnection
from salt.utils.vt import TerminalException
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['ssh_sample']
DETAILS = {}
# Want logging!
log = logging.getLogger(__file__)
# This does nothing, it's here just as an example and to provide a log
# entry when the module is loaded.
def __virtual__():
'''
Only return if all the modules are available
'''
log.info('ssh_sample proxy __virtual__() called...')
return True
def init(opts):
'''
Required.
Can be used to initialize the server connection.
'''
try:
DETAILS['server'] = SSHConnection(host=__opts__['proxy']['host'],
username=__opts__['proxy']['username'],
password=__opts__['proxy']['password'])
out, err = DETAILS['server'].sendline('help')
DETAILS['initialized'] = True
except TerminalException as e:
log.error(e)
return False
def initialized():
'''
Since grains are loaded in many different places and some of those
places occur before the proxy can be initialized, return whether
our init() function has been called
'''
return DETAILS.get('initialized', False)
def grains():
'''
Get the grains from the proxied device
'''
if not DETAILS.get('grains_cache', {}):
cmd = 'info'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
DETAILS['grains_cache'] = parse(out)
return DETAILS['grains_cache']
def grains_refresh():
'''
Refresh the grains from the proxied device
'''
DETAILS['grains_cache'] = None
return grains()
def fns():
return {'details': 'This key is here because a function in '
'grains/ssh_sample.py called fns() here in the proxymodule.'}
def ping():
'''
Required.
Ping the device on the other end of the connection
'''
try:
out, err = DETAILS['server'].sendline('help')
return True
except TerminalException as e:
log.error(e)
return False
def shutdown(opts):
'''
Disconnect
'''
DETAILS['server'].close_connection()
def parse(out):
'''
Extract json from out.
Parameter
out: Type string. The data returned by the
ssh command.
'''
jsonret = []
in_json = False
for ln_ in out.split('\n'):
if '{' in ln_:
in_json = True
if in_json:
jsonret.append(ln_)
if '}' in ln_:
in_json = False
return salt.utils.json.loads('\n'.join(jsonret))
def package_list():
'''
List "packages" by executing a command via ssh
This function is called in response to the salt command
..code-block::bash
salt target_minion pkg.list_pkgs
'''
# Send the command to execute
out, err = DETAILS['server'].sendline('pkg_list\n')
# "scrape" the output and return the right fields as a dict
return parse(out)
def package_install(name, **kwargs):
'''
Install a "package" on the ssh server
'''
cmd = 'pkg_install ' + name
if kwargs.get('version', False):
cmd += ' ' + kwargs['version']
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def package_remove(name):
'''
Remove a "package" on the ssh server
'''
cmd = 'pkg_remove ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_list():
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'ps'
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_start(name):
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'start ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
def service_stop(name):
'''
Stop a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'stop ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out)
|
saltstack/salt
|
salt/modules/jboss7_cli.py
|
run_command
|
python
|
def run_command(jboss_config, command, fail_on_error=True):
'''
Execute a command against jboss instance through the CLI interface.
jboss_config
Configuration dictionary with properties specified above.
command
Command to execute against jboss instance
fail_on_error (default=True)
Is true, raise CommandExecutionError exception if execution fails.
If false, 'success' property of the returned dictionary is set to False
CLI Example:
.. code-block:: bash
salt '*' jboss7_cli.run_command '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_command
'''
cli_command_result = __call_cli(jboss_config, command)
if cli_command_result['retcode'] == 0:
cli_command_result['success'] = True
else:
if fail_on_error:
raise CommandExecutionError('''Command execution failed, return code={retcode}, stdout='{stdout}', stderr='{stderr}' '''.format(**cli_command_result))
else:
cli_command_result['success'] = False
return cli_command_result
|
Execute a command against jboss instance through the CLI interface.
jboss_config
Configuration dictionary with properties specified above.
command
Command to execute against jboss instance
fail_on_error (default=True)
Is true, raise CommandExecutionError exception if execution fails.
If false, 'success' property of the returned dictionary is set to False
CLI Example:
.. code-block:: bash
salt '*' jboss7_cli.run_command '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_command
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/jboss7_cli.py#L55-L83
|
[
"def __call_cli(jboss_config, command, retries=1):\n command_segments = [\n jboss_config['cli_path'],\n '--connect',\n '--controller=\"{0}\"'.format(jboss_config['controller'])\n ]\n if 'cli_user' in six.iterkeys(jboss_config):\n command_segments.append('--user=\"{0}\"'.format(jboss_config['cli_user']))\n if 'cli_password' in six.iterkeys(jboss_config):\n command_segments.append('--password=\"{0}\"'.format(jboss_config['cli_password']))\n command_segments.append('--command=\"{0}\"'.format(__escape_command(command)))\n cli_script = ' '.join(command_segments)\n\n cli_command_result = __salt__['cmd.run_all'](cli_script)\n log.debug('cli_command_result=%s', cli_command_result)\n\n log.debug('========= STDOUT:\\n%s', cli_command_result['stdout'])\n log.debug('========= STDERR:\\n%s', cli_command_result['stderr'])\n log.debug('========= RETCODE: %d', cli_command_result['retcode'])\n\n if cli_command_result['retcode'] == 127:\n raise CommandExecutionError('Could not execute jboss-cli.sh script. Have you specified server_dir variable correctly?\\nCurrent CLI path: {cli_path}. '.format(cli_path=jboss_config['cli_path']))\n\n if cli_command_result['retcode'] == 1 and 'Unable to authenticate against controller' in cli_command_result['stderr']:\n raise CommandExecutionError('Could not authenticate against controller, please check username and password for the management console. Err code: {retcode}, stdout: {stdout}, stderr: {stderr}'.format(**cli_command_result))\n\n # It may happen that eventhough server is up it may not respond to the call\n if cli_command_result['retcode'] == 1 and 'JBAS012144' in cli_command_result['stderr'] and retries > 0: # Cannot connect to cli\n log.debug('Command failed, retrying... (%d tries left)', retries)\n time.sleep(3)\n return __call_cli(jboss_config, command, retries - 1)\n\n return cli_command_result\n"
] |
# -*- coding: utf-8 -*-
'''
Module for low-level interaction with JbossAS7 through CLI.
This module exposes two ways of interaction with the CLI, either through commands or operations.
.. note:: Following JBoss documentation (https://developer.jboss.org/wiki/CommandLineInterface):
"Operations are considered a low level but comprehensive way to manage the AS controller, i.e. if it can't be done with operations it can't be done in any other way.
Commands, on the other hand, are more user-friendly in syntax,
although most of them still translate into operation requests and some of them even into a few
composite operation requests, i.e. commands also simplify some management operations from the user's point of view."
The difference between calling a command or operation is in handling the result.
Commands return a zero return code if operation is successful or return non-zero return code and
print an error to standard output in plain text, in case of an error.
Operations return a json-like structure, that contain more information about the result.
In case of a failure, they also return a specific return code. This module parses the output from the operations and
returns it as a dictionary so that an execution of an operation can then be verified against specific errors.
In order to run each function, jboss_config dictionary with the following properties must be passed:
* cli_path: the path to jboss-cli script, for example: '/opt/jboss/jboss-7.0/bin/jboss-cli.sh'
* controller: the IP address and port of controller, for example: 10.11.12.13:9999
* cli_user: username to connect to jboss administration console if necessary
* cli_password: password to connect to jboss administration console if necessary
Example:
.. code-block:: yaml
jboss_config:
cli_path: '/opt/jboss/jboss-7.0/bin/jboss-cli.sh'
controller: 10.11.12.13:9999
cli_user: 'jbossadm'
cli_password: 'jbossadm'
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import re
import pprint
import time
# Import Salt libs
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
def run_operation(jboss_config, operation, fail_on_error=True, retries=1):
    '''
    Execute an operation against jboss instance through the CLI interface.

    jboss_config
        Configuration dictionary with properties specified above.
    operation
        An operation to execute against jboss instance
    fail_on_error (default=True)
        Is true, raise CommandExecutionError exception if execution fails.
        If false, 'success' property of the returned dictionary is set to False
    retries:
        Number of retries in case of "JBAS012144: Could not connect to remote" error.

    Returns a dictionary parsed from the CLI output, always carrying a
    boolean 'success' key; on a parseable failure it also carries
    'err_code' (the JBASnnnn code, or None when none was reported) and the
    raw 'stdout'.

    CLI Example:

    .. code-block:: bash

        salt '*' jboss7_cli.run_operation '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_operation
    '''
    cli_command_result = __call_cli(jboss_config, operation, retries)
    if cli_command_result['retcode'] == 0:
        if _is_cli_output(cli_command_result['stdout']):
            cli_result = _parse(cli_command_result['stdout'])
            cli_result['success'] = cli_result['outcome'] == 'success'
        else:
            raise CommandExecutionError('Operation has returned unparseable output: {0}'.format(cli_command_result['stdout']))
    else:
        if _is_cli_output(cli_command_result['stdout']):
            cli_result = _parse(cli_command_result['stdout'])
            cli_result['success'] = False
            # The failure description may be absent or may not start with a
            # JBASnnnn code; guard against KeyError/AttributeError instead of
            # crashing while reporting a CLI failure.
            match = re.search(r'^(JBAS\d+):', cli_result.get('failure-description', ''))
            cli_result['err_code'] = match.group(1) if match else None
            cli_result['stdout'] = cli_command_result['stdout']
        else:
            if fail_on_error:
                raise CommandExecutionError('''Command execution failed, return code={retcode}, stdout='{stdout}', stderr='{stderr}' '''.format(**cli_command_result))
            else:
                cli_result = {
                    'success': False,
                    'stdout': cli_command_result['stdout'],
                    'stderr': cli_command_result['stderr'],
                    'retcode': cli_command_result['retcode']
                }
    return cli_result
def __call_cli(jboss_config, command, retries=1):
    '''
    Run a single command through the jboss-cli script and return the raw
    result dict from ``cmd.run_all`` (keys: retcode, stdout, stderr).

    Retries up to ``retries`` times, 3 seconds apart, when the controller is
    reachable but not yet accepting CLI connections (JBAS012144).

    Raises CommandExecutionError when the CLI script itself cannot be
    executed (retcode 127) or when authentication against the controller
    fails.
    '''
    command_segments = [
        jboss_config['cli_path'],
        '--connect',
        '--controller="{0}"'.format(jboss_config['controller'])
    ]
    # Plain dict membership test; 'key in six.iterkeys(d)' walked the keys
    # just to answer a containment question.
    if 'cli_user' in jboss_config:
        command_segments.append('--user="{0}"'.format(jboss_config['cli_user']))
    if 'cli_password' in jboss_config:
        command_segments.append('--password="{0}"'.format(jboss_config['cli_password']))
    command_segments.append('--command="{0}"'.format(__escape_command(command)))
    cli_script = ' '.join(command_segments)
    cli_command_result = __salt__['cmd.run_all'](cli_script)
    log.debug('cli_command_result=%s', cli_command_result)
    log.debug('========= STDOUT:\n%s', cli_command_result['stdout'])
    log.debug('========= STDERR:\n%s', cli_command_result['stderr'])
    log.debug('========= RETCODE: %d', cli_command_result['retcode'])
    if cli_command_result['retcode'] == 127:
        raise CommandExecutionError('Could not execute jboss-cli.sh script. Have you specified server_dir variable correctly?\nCurrent CLI path: {cli_path}. '.format(cli_path=jboss_config['cli_path']))
    if cli_command_result['retcode'] == 1 and 'Unable to authenticate against controller' in cli_command_result['stderr']:
        raise CommandExecutionError('Could not authenticate against controller, please check username and password for the management console. Err code: {retcode}, stdout: {stdout}, stderr: {stderr}'.format(**cli_command_result))
    # It may happen that even though the server is up it may not respond to the call
    if cli_command_result['retcode'] == 1 and 'JBAS012144' in cli_command_result['stderr'] and retries > 0:  # Cannot connect to cli
        log.debug('Command failed, retrying... (%d tries left)', retries)
        time.sleep(3)
        return __call_cli(jboss_config, command, retries - 1)
    return cli_command_result
def __escape_command(command):
    '''
    Escape ``command`` so it can sit inside the double-quoted ``--command=``
    argument on the jboss-cli command line.

    JBoss CLI backslash escaping is layered: one backslash in config.xml is
    printed as two when read back, must be written as four from a CLI script
    file, and as eight when passed directly on the command line. The command
    arriving here is already in the script-file format, so one more doubling
    of every backslash (plus escaping of double quotes) yields the
    command-line format.
    '''
    doubled_backslashes = command.replace('\\', '\\\\')  # \ -> \\
    return doubled_backslashes.replace('"', '\\"')       # " -> \"
def _is_cli_output(text):
    '''Return True when ``text`` looks like a brace-delimited CLI response.'''
    # DOTALL lets '.' span newlines; responses may be surrounded by whitespace.
    return re.search(r"^\s*{.+}\s*$", text, re.DOTALL) is not None
def _parse(cli_output):
    '''Tokenize raw CLI output and fold the tokens into a dictionary.'''
    parsed = __process_tokens(__tokenize(cli_output))
    log.debug('=== RESULT: %s', pprint.pformat(parsed))
    return parsed
def __process_tokens(tokens):
    '''Return only the parsed dictionary, discarding the stop index.'''
    parsed, _ = __process_tokens_internal(tokens)
    return parsed
def __process_tokens_internal(tokens, start_at=0):
    '''
    Fold the flat token list into a dict, starting at index ``start_at``.

    Returns a ``(result_dict, index_of_closing_brace)`` tuple so the caller
    parsing a nested dictionary knows where to resume. Assignment ('=>') and
    'expression' tokens carry no value of their own and are simply skipped.

    NOTE(review): if the tokens run out before a closing '}' is seen, the
    loop falls through and the function implicitly returns None, making the
    caller's tuple unpacking raise TypeError — preserved as-is.
    '''
    if __is_dict_start(tokens[start_at]) and start_at == 0:  # the top object
        return __process_tokens_internal(tokens, start_at=1)
    log.debug("__process_tokens, start_at=%s", start_at)
    token_no = start_at
    result = {}
    current_key = None
    while token_no < len(tokens):
        token = tokens[token_no]
        log.debug("PROCESSING TOKEN %d: %s", token_no, token)
        if __is_quoted_string(token):
            log.debug(" TYPE: QUOTED STRING ")
            if current_key is None:
                # First quoted string at this level is a key ...
                current_key = __get_quoted_string(token)
                log.debug(" KEY: %s", current_key)
            else:
                # ... the following one is its value.
                result[current_key] = __get_quoted_string(token)
                log.debug(" %s -> %s", current_key, result[current_key])
                current_key = None
        elif __is_datatype(token):
            log.debug(" TYPE: DATATYPE: %s ", token)
            result[current_key] = __get_datatype(token)
            log.debug(" %s -> %s", current_key, result[current_key])
            current_key = None
        elif __is_boolean(token):
            log.debug(" TYPE: BOOLEAN ")
            result[current_key] = __get_boolean(token)
            log.debug(" %s -> %s", current_key, result[current_key])
            current_key = None
        elif __is_int(token):
            log.debug(" TYPE: INT ")
            result[current_key] = __get_int(token)
            log.debug(" %s -> %s", current_key, result[current_key])
            current_key = None
        elif __is_long(token):
            log.debug(" TYPE: LONG ")
            result[current_key] = __get_long(token)
            log.debug(" %s -> %s", current_key, result[current_key])
            current_key = None
        elif __is_undefined(token):
            log.debug(" TYPE: UNDEFINED ")
            log.debug(" %s -> undefined (Adding as None to map)", current_key)
            result[current_key] = None
            current_key = None
        elif __is_dict_start(token):
            log.debug(" TYPE: DICT START")
            # Recurse for the nested dict; resume after its closing brace.
            dict_value, token_no = __process_tokens_internal(tokens, start_at=token_no+1)
            log.debug(" DICT = %s ", dict_value)
            result[current_key] = dict_value
            log.debug(" %s -> %s", current_key, result[current_key])
            current_key = None
        elif __is_assignment(token):
            # '=>' merely separates key and value; nothing to record.
            # (Removed dead local 'is_assignment' that was never read.)
            log.debug(" TYPE: ASSIGNMENT")
        elif __is_expression(token):
            # (Removed dead local 'is_expression' that was never read.)
            log.debug(" TYPE: EXPRESSION")
        elif __is_dict_end(token):
            log.debug(" TYPE: DICT END")
            return result, token_no
        else:
            raise CommandExecutionError('Unknown token! Token: {0}'.format(token))
        token_no = token_no + 1
def __tokenize(cli_output):
    '''
    Split raw CLI output into tokens: quoted strings (which may contain
    escaped quotes or backslashes), '=>', braces, the literals
    true/false/undefined, and bare alphanumeric words.
    '''
    # add all possible tokens here; \\ means a single backslash here
    token_pattern = re.compile(r'("(?:[^"\\]|\\"|\\\\)*"|=>|{|}|true|false|undefined|[0-9A-Za-z]+)', re.DOTALL)
    found = token_pattern.findall(cli_output)
    log.debug("tokens=%s", found)
    return found
def __is_dict_start(token):
    '''True for the opening brace of a (nested) structure.'''
    return token == '{'


def __is_dict_end(token):
    '''True for the closing brace of a (nested) structure.'''
    return token == '}'


def __is_boolean(token):
    '''True when the token is one of the CLI boolean literals.'''
    return token in ('true', 'false')


def __get_boolean(token):
    '''Convert a CLI boolean literal into a Python bool.'''
    return token == 'true'


def __is_int(token):
    '''True when the token consists solely of decimal digits.'''
    return token.isdigit()


def __get_int(token):
    '''Parse a decimal integer token.'''
    return int(token)


def __is_long(token):
    '''True for a run of digits followed by a trailing 'L'.'''
    return token[:-1].isdigit() and token.endswith('L')


def __get_long(token):
    '''Parse a long literal, dropping the trailing 'L'.'''
    digits = token[:-1]
    if six.PY2:
        return long(digits)  # pylint: disable=incompatible-py3-code,undefined-variable
    return int(digits)


def __is_datatype(token):
    '''True when the token names a CLI datatype rather than a value.'''
    return token in ("INT", "BOOLEAN", "STRING", "OBJECT", "LONG")


def __get_datatype(token):
    '''Datatype tokens are returned verbatim.'''
    return token


def __is_undefined(token):
    '''True for the CLI 'undefined' literal (parsed as None).'''
    return token == 'undefined'


def __is_quoted_string(token):
    '''True when the token is wrapped in double quotes.'''
    return token[0] == '"' == token[-1]


def __get_quoted_string(token):
    '''Strip the surrounding quotes and unescape doubled backslashes.'''
    inner = token[1:-1]  # drop the quotes
    # the CLI escapes every backslash in its output; collapse them back
    return inner.replace('\\\\', '\\')


def __is_assignment(token):
    '''True for the '=>' key/value separator.'''
    return token == '=>'


def __is_expression(token):
    '''True for the 'expression' marker token.'''
    return token == 'expression'
|
saltstack/salt
|
salt/modules/jboss7_cli.py
|
run_operation
|
python
|
def run_operation(jboss_config, operation, fail_on_error=True, retries=1):
    '''
    Execute an operation against jboss instance through the CLI interface.

    jboss_config
        Configuration dictionary with properties specified above.
    operation
        An operation to execute against jboss instance
    fail_on_error (default=True)
        Is true, raise CommandExecutionError exception if execution fails.
        If false, 'success' property of the returned dictionary is set to False
    retries:
        Number of retries in case of "JBAS012144: Could not connect to remote" error.

    Returns a dictionary parsed from the CLI output, always carrying a
    boolean 'success' key; on a parseable failure it also carries
    'err_code' (the JBASnnnn code, or None when none was reported) and the
    raw 'stdout'.

    CLI Example:

    .. code-block:: bash

        salt '*' jboss7_cli.run_operation '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_operation
    '''
    cli_command_result = __call_cli(jboss_config, operation, retries)
    if cli_command_result['retcode'] == 0:
        if _is_cli_output(cli_command_result['stdout']):
            cli_result = _parse(cli_command_result['stdout'])
            cli_result['success'] = cli_result['outcome'] == 'success'
        else:
            raise CommandExecutionError('Operation has returned unparseable output: {0}'.format(cli_command_result['stdout']))
    else:
        if _is_cli_output(cli_command_result['stdout']):
            cli_result = _parse(cli_command_result['stdout'])
            cli_result['success'] = False
            # The failure description may be absent or may not start with a
            # JBASnnnn code; guard against KeyError/AttributeError instead of
            # crashing while reporting a CLI failure.
            match = re.search(r'^(JBAS\d+):', cli_result.get('failure-description', ''))
            cli_result['err_code'] = match.group(1) if match else None
            cli_result['stdout'] = cli_command_result['stdout']
        else:
            if fail_on_error:
                raise CommandExecutionError('''Command execution failed, return code={retcode}, stdout='{stdout}', stderr='{stderr}' '''.format(**cli_command_result))
            else:
                cli_result = {
                    'success': False,
                    'stdout': cli_command_result['stdout'],
                    'stderr': cli_command_result['stderr'],
                    'retcode': cli_command_result['retcode']
                }
    return cli_result
|
Execute an operation against jboss instance through the CLI interface.
jboss_config
Configuration dictionary with properties specified above.
operation
An operation to execute against jboss instance
fail_on_error (default=True)
Is true, raise CommandExecutionError exception if execution fails.
If false, 'success' property of the returned dictionary is set to False
retries:
Number of retries in case of "JBAS012144: Could not connect to remote" error.
CLI Example:
.. code-block:: bash
salt '*' jboss7_cli.run_operation '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_operation
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/jboss7_cli.py#L86-L132
|
[
"def _parse(cli_output):\n tokens = __tokenize(cli_output)\n result = __process_tokens(tokens)\n\n log.debug('=== RESULT: %s', pprint.pformat(result))\n return result\n",
"def __call_cli(jboss_config, command, retries=1):\n command_segments = [\n jboss_config['cli_path'],\n '--connect',\n '--controller=\"{0}\"'.format(jboss_config['controller'])\n ]\n if 'cli_user' in six.iterkeys(jboss_config):\n command_segments.append('--user=\"{0}\"'.format(jboss_config['cli_user']))\n if 'cli_password' in six.iterkeys(jboss_config):\n command_segments.append('--password=\"{0}\"'.format(jboss_config['cli_password']))\n command_segments.append('--command=\"{0}\"'.format(__escape_command(command)))\n cli_script = ' '.join(command_segments)\n\n cli_command_result = __salt__['cmd.run_all'](cli_script)\n log.debug('cli_command_result=%s', cli_command_result)\n\n log.debug('========= STDOUT:\\n%s', cli_command_result['stdout'])\n log.debug('========= STDERR:\\n%s', cli_command_result['stderr'])\n log.debug('========= RETCODE: %d', cli_command_result['retcode'])\n\n if cli_command_result['retcode'] == 127:\n raise CommandExecutionError('Could not execute jboss-cli.sh script. Have you specified server_dir variable correctly?\\nCurrent CLI path: {cli_path}. '.format(cli_path=jboss_config['cli_path']))\n\n if cli_command_result['retcode'] == 1 and 'Unable to authenticate against controller' in cli_command_result['stderr']:\n raise CommandExecutionError('Could not authenticate against controller, please check username and password for the management console. Err code: {retcode}, stdout: {stdout}, stderr: {stderr}'.format(**cli_command_result))\n\n # It may happen that eventhough server is up it may not respond to the call\n if cli_command_result['retcode'] == 1 and 'JBAS012144' in cli_command_result['stderr'] and retries > 0: # Cannot connect to cli\n log.debug('Command failed, retrying... (%d tries left)', retries)\n time.sleep(3)\n return __call_cli(jboss_config, command, retries - 1)\n\n return cli_command_result\n",
"def _is_cli_output(text):\n cli_re = re.compile(r\"^\\s*{.+}\\s*$\", re.DOTALL)\n if cli_re.search(text):\n return True\n else:\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Module for low-level interaction with JbossAS7 through CLI.
This module exposes two ways of interaction with the CLI, either through commands or operations.
.. note:: Following JBoss documentation (https://developer.jboss.org/wiki/CommandLineInterface):
"Operations are considered a low level but comprehensive way to manage the AS controller, i.e. if it can't be done with operations it can't be done in any other way.
Commands, on the other hand, are more user-friendly in syntax,
although most of them still translate into operation requests and some of them even into a few
composite operation requests, i.e. commands also simplify some management operations from the user's point of view."
The difference between calling a command or operation is in handling the result.
Commands return a zero return code if operation is successful or return non-zero return code and
print an error to standard output in plain text, in case of an error.
Operations return a json-like structure, that contain more information about the result.
In case of a failure, they also return a specific return code. This module parses the output from the operations and
returns it as a dictionary so that an execution of an operation can then be verified against specific errors.
In order to run each function, jboss_config dictionary with the following properties must be passed:
* cli_path: the path to jboss-cli script, for example: '/opt/jboss/jboss-7.0/bin/jboss-cli.sh'
* controller: the IP address and port of controller, for example: 10.11.12.13:9999
* cli_user: username to connect to jboss administration console if necessary
* cli_password: password to connect to jboss administration console if necessary
Example:
.. code-block:: yaml
jboss_config:
cli_path: '/opt/jboss/jboss-7.0/bin/jboss-cli.sh'
controller: 10.11.12.13:9999
cli_user: 'jbossadm'
cli_password: 'jbossadm'
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import re
import pprint
import time
# Import Salt libs
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
def run_command(jboss_config, command, fail_on_error=True):
    '''
    Execute a command against jboss instance through the CLI interface.
    jboss_config
        Configuration dictionary with properties specified above.
    command
        Command to execute against jboss instance
    fail_on_error (default=True)
        Is true, raise CommandExecutionError exception if execution fails.
        If false, 'success' property of the returned dictionary is set to False
    CLI Example:
    .. code-block:: bash
        salt '*' jboss7_cli.run_command '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_command
    '''
    result = __call_cli(jboss_config, command)
    if result['retcode'] != 0:
        # Non-zero exit: either raise or flag the failure, per caller choice.
        if fail_on_error:
            raise CommandExecutionError('''Command execution failed, return code={retcode}, stdout='{stdout}', stderr='{stderr}' '''.format(**result))
        result['success'] = False
    else:
        result['success'] = True
    return result
def __call_cli(jboss_config, command, retries=1):
    '''
    Run a single command through the jboss-cli script and return the raw
    result dict from ``cmd.run_all`` (keys: retcode, stdout, stderr).

    Retries up to ``retries`` times, 3 seconds apart, when the controller is
    reachable but not yet accepting CLI connections (JBAS012144).

    Raises CommandExecutionError when the CLI script itself cannot be
    executed (retcode 127) or when authentication against the controller
    fails.
    '''
    command_segments = [
        jboss_config['cli_path'],
        '--connect',
        '--controller="{0}"'.format(jboss_config['controller'])
    ]
    # Plain dict membership test; 'key in six.iterkeys(d)' walked the keys
    # just to answer a containment question.
    if 'cli_user' in jboss_config:
        command_segments.append('--user="{0}"'.format(jboss_config['cli_user']))
    if 'cli_password' in jboss_config:
        command_segments.append('--password="{0}"'.format(jboss_config['cli_password']))
    command_segments.append('--command="{0}"'.format(__escape_command(command)))
    cli_script = ' '.join(command_segments)
    cli_command_result = __salt__['cmd.run_all'](cli_script)
    log.debug('cli_command_result=%s', cli_command_result)
    log.debug('========= STDOUT:\n%s', cli_command_result['stdout'])
    log.debug('========= STDERR:\n%s', cli_command_result['stderr'])
    log.debug('========= RETCODE: %d', cli_command_result['retcode'])
    if cli_command_result['retcode'] == 127:
        raise CommandExecutionError('Could not execute jboss-cli.sh script. Have you specified server_dir variable correctly?\nCurrent CLI path: {cli_path}. '.format(cli_path=jboss_config['cli_path']))
    if cli_command_result['retcode'] == 1 and 'Unable to authenticate against controller' in cli_command_result['stderr']:
        raise CommandExecutionError('Could not authenticate against controller, please check username and password for the management console. Err code: {retcode}, stdout: {stdout}, stderr: {stderr}'.format(**cli_command_result))
    # It may happen that even though the server is up it may not respond to the call
    if cli_command_result['retcode'] == 1 and 'JBAS012144' in cli_command_result['stderr'] and retries > 0:  # Cannot connect to cli
        log.debug('Command failed, retrying... (%d tries left)', retries)
        time.sleep(3)
        return __call_cli(jboss_config, command, retries - 1)
    return cli_command_result
def __escape_command(command):
    '''
    Escape ``command`` so it can sit inside the double-quoted ``--command=``
    argument on the jboss-cli command line.

    JBoss CLI backslash escaping is layered: one backslash in config.xml is
    printed as two when read back, must be written as four from a CLI script
    file, and as eight when passed directly on the command line. The command
    arriving here is already in the script-file format, so one more doubling
    of every backslash (plus escaping of double quotes) yields the
    command-line format.
    '''
    doubled_backslashes = command.replace('\\', '\\\\')  # \ -> \\
    return doubled_backslashes.replace('"', '\\"')       # " -> \"
def _is_cli_output(text):
    '''Return True when ``text`` looks like a brace-delimited CLI response.'''
    # DOTALL lets '.' span newlines; responses may be surrounded by whitespace.
    return re.search(r"^\s*{.+}\s*$", text, re.DOTALL) is not None
def _parse(cli_output):
    '''Tokenize raw CLI output and fold the tokens into a dictionary.'''
    parsed = __process_tokens(__tokenize(cli_output))
    log.debug('=== RESULT: %s', pprint.pformat(parsed))
    return parsed
def __process_tokens(tokens):
    '''Return only the parsed dictionary, discarding the stop index.'''
    parsed, _ = __process_tokens_internal(tokens)
    return parsed
def __process_tokens_internal(tokens, start_at=0):
    '''
    Fold the flat token list into a dict, starting at index ``start_at``.

    Returns a ``(result_dict, index_of_closing_brace)`` tuple so the caller
    parsing a nested dictionary knows where to resume. Assignment ('=>') and
    'expression' tokens carry no value of their own and are simply skipped.

    NOTE(review): if the tokens run out before a closing '}' is seen, the
    loop falls through and the function implicitly returns None, making the
    caller's tuple unpacking raise TypeError — preserved as-is.
    '''
    if __is_dict_start(tokens[start_at]) and start_at == 0:  # the top object
        return __process_tokens_internal(tokens, start_at=1)
    log.debug("__process_tokens, start_at=%s", start_at)
    token_no = start_at
    result = {}
    current_key = None
    while token_no < len(tokens):
        token = tokens[token_no]
        log.debug("PROCESSING TOKEN %d: %s", token_no, token)
        if __is_quoted_string(token):
            log.debug(" TYPE: QUOTED STRING ")
            if current_key is None:
                # First quoted string at this level is a key ...
                current_key = __get_quoted_string(token)
                log.debug(" KEY: %s", current_key)
            else:
                # ... the following one is its value.
                result[current_key] = __get_quoted_string(token)
                log.debug(" %s -> %s", current_key, result[current_key])
                current_key = None
        elif __is_datatype(token):
            log.debug(" TYPE: DATATYPE: %s ", token)
            result[current_key] = __get_datatype(token)
            log.debug(" %s -> %s", current_key, result[current_key])
            current_key = None
        elif __is_boolean(token):
            log.debug(" TYPE: BOOLEAN ")
            result[current_key] = __get_boolean(token)
            log.debug(" %s -> %s", current_key, result[current_key])
            current_key = None
        elif __is_int(token):
            log.debug(" TYPE: INT ")
            result[current_key] = __get_int(token)
            log.debug(" %s -> %s", current_key, result[current_key])
            current_key = None
        elif __is_long(token):
            log.debug(" TYPE: LONG ")
            result[current_key] = __get_long(token)
            log.debug(" %s -> %s", current_key, result[current_key])
            current_key = None
        elif __is_undefined(token):
            log.debug(" TYPE: UNDEFINED ")
            log.debug(" %s -> undefined (Adding as None to map)", current_key)
            result[current_key] = None
            current_key = None
        elif __is_dict_start(token):
            log.debug(" TYPE: DICT START")
            # Recurse for the nested dict; resume after its closing brace.
            dict_value, token_no = __process_tokens_internal(tokens, start_at=token_no+1)
            log.debug(" DICT = %s ", dict_value)
            result[current_key] = dict_value
            log.debug(" %s -> %s", current_key, result[current_key])
            current_key = None
        elif __is_assignment(token):
            # '=>' merely separates key and value; nothing to record.
            # (Removed dead local 'is_assignment' that was never read.)
            log.debug(" TYPE: ASSIGNMENT")
        elif __is_expression(token):
            # (Removed dead local 'is_expression' that was never read.)
            log.debug(" TYPE: EXPRESSION")
        elif __is_dict_end(token):
            log.debug(" TYPE: DICT END")
            return result, token_no
        else:
            raise CommandExecutionError('Unknown token! Token: {0}'.format(token))
        token_no = token_no + 1
def __tokenize(cli_output):
    '''
    Split raw CLI output into tokens: quoted strings (which may contain
    escaped quotes or backslashes), '=>', braces, the literals
    true/false/undefined, and bare alphanumeric words.
    '''
    # add all possible tokens here; \\ means a single backslash here
    token_pattern = re.compile(r'("(?:[^"\\]|\\"|\\\\)*"|=>|{|}|true|false|undefined|[0-9A-Za-z]+)', re.DOTALL)
    found = token_pattern.findall(cli_output)
    log.debug("tokens=%s", found)
    return found
def __is_dict_start(token):
    '''True for the opening brace of a (nested) structure.'''
    return token == '{'


def __is_dict_end(token):
    '''True for the closing brace of a (nested) structure.'''
    return token == '}'


def __is_boolean(token):
    '''True when the token is one of the CLI boolean literals.'''
    return token in ('true', 'false')


def __get_boolean(token):
    '''Convert a CLI boolean literal into a Python bool.'''
    return token == 'true'


def __is_int(token):
    '''True when the token consists solely of decimal digits.'''
    return token.isdigit()


def __get_int(token):
    '''Parse a decimal integer token.'''
    return int(token)


def __is_long(token):
    '''True for a run of digits followed by a trailing 'L'.'''
    return token[:-1].isdigit() and token.endswith('L')


def __get_long(token):
    '''Parse a long literal, dropping the trailing 'L'.'''
    digits = token[:-1]
    if six.PY2:
        return long(digits)  # pylint: disable=incompatible-py3-code,undefined-variable
    return int(digits)


def __is_datatype(token):
    '''True when the token names a CLI datatype rather than a value.'''
    return token in ("INT", "BOOLEAN", "STRING", "OBJECT", "LONG")


def __get_datatype(token):
    '''Datatype tokens are returned verbatim.'''
    return token


def __is_undefined(token):
    '''True for the CLI 'undefined' literal (parsed as None).'''
    return token == 'undefined'


def __is_quoted_string(token):
    '''True when the token is wrapped in double quotes.'''
    return token[0] == '"' == token[-1]


def __get_quoted_string(token):
    '''Strip the surrounding quotes and unescape doubled backslashes.'''
    inner = token[1:-1]  # drop the quotes
    # the CLI escapes every backslash in its output; collapse them back
    return inner.replace('\\\\', '\\')


def __is_assignment(token):
    '''True for the '=>' key/value separator.'''
    return token == '=>'


def __is_expression(token):
    '''True for the 'expression' marker token.'''
    return token == 'expression'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.